author     Michael Marineau <marineam@gentoo.org>   2008-02-23 23:33:39 +0000
committer  Michael Marineau <marineam@gentoo.org>   2008-02-23 23:33:39 +0000
commit     b9ea36c39d989a831dd70d7d79f88de62d013eb7 (patch)
tree       a73d1dae055ebdd94e83eac1314a1785920f1c12
parent     Releasing 2.6.21-2 (diff)
download   xen-b9ea36c39d989a831dd70d7d79f88de62d013eb7.tar.gz
           xen-b9ea36c39d989a831dd70d7d79f88de62d013eb7.tar.bz2
           xen-b9ea36c39d989a831dd70d7d79f88de62d013eb7.zip
Replace the 2.6.22 patches with Suse's xen patchset which actually works. :-)
svn path=/patches/; revision=75
-rw-r--r--  trunk/2.6.22/00000_README | 18
-rw-r--r--  trunk/2.6.22/01012_linux-2.6.22.13.patch | 42
-rw-r--r--  trunk/2.6.22/01013_linux-2.6.22.14.patch | 1319
-rw-r--r--  trunk/2.6.22/01014_linux-2.6.22.15.patch | 1096
-rw-r--r--  trunk/2.6.22/01015_linux-2.6.22.16.patch | 27
-rw-r--r--  trunk/2.6.22/01016_linux-2.6.22.17.patch | 1360
-rw-r--r--  trunk/2.6.22/01017_linux-2.6.22.18.patch | 14
-rw-r--r--  trunk/2.6.22/20001_x86-early-quirks-unificiation.patch1 | 237
-rw-r--r--  trunk/2.6.22/20001_xen.patch | 93118
-rw-r--r--  trunk/2.6.22/20002_add-console-use-vt.patch1 | 58
-rw-r--r--  trunk/2.6.22/20003_linux-2.6.19-rc1-kexec-move_segment_code-i386.patch1 | 172
-rw-r--r--  trunk/2.6.22/20004_linux-2.6.19-rc1-kexec-move_segment_code-x86_64.patch1 | 164
-rw-r--r--  trunk/2.6.22/20005_blktap-aio-16_03_06.patch1 | 209
-rw-r--r--  trunk/2.6.22/20006_fix-ide-cd-pio-mode.patch1 | 36
-rw-r--r--  trunk/2.6.22/20007_i386-mach-io-check-nmi.patch1 | 53
-rw-r--r--  trunk/2.6.22/20008_net-csum.patch1 | 50
-rw-r--r--  trunk/2.6.22/20009_xenoprof-generic.patch1 | 669
-rw-r--r--  trunk/2.6.22/20010_softlockup-no-idle-hz.patch1 | 75
-rw-r--r--  trunk/2.6.22/20011_xen3-auto-xen-arch.patch1 | 49825
-rw-r--r--  trunk/2.6.22/20012_xen3-auto-xen-drivers.patch1 | 28404
-rw-r--r--  trunk/2.6.22/20013_xen3-auto-include-xen-interface.patch1 | 8771
-rw-r--r--  trunk/2.6.22/20014_xen3-auto-xen-kconfig.patch1 | 887
-rw-r--r--  trunk/2.6.22/20015_xen3-auto-common.patch1 | 2101
-rw-r--r--  trunk/2.6.22/20016_xen3-auto-arch-i386.patch1 | 483
-rw-r--r--  trunk/2.6.22/20017_xen3-auto-arch-x86_64.patch1 | 502
-rw-r--r--  trunk/2.6.22/20018_15130-x86_64-vsyscall-user.patch1 | 51
-rw-r--r--  trunk/2.6.22/20019_15181-dma-tracking.patch1 | 551
-rw-r--r--  trunk/2.6.22/20020_30-bit-field-booleans.patch1 | 38
-rw-r--r--  trunk/2.6.22/20021_42-freeze.patch1 | 67
-rw-r--r--  trunk/2.6.22/20022_67-edd.patch1 | 209
-rw-r--r--  trunk/2.6.22/20023_70-edid.patch1 | 118
-rw-r--r--  trunk/2.6.22/20024_79-balloon-highmem.patch1 | 42
-rw-r--r--  trunk/2.6.22/20025_80-blk-teardown.patch1 | 57
-rw-r--r--  trunk/2.6.22/20026_81-clock-was-set.patch1 | 48
-rw-r--r--  trunk/2.6.22/20027_82-blkdev-wait.patch1 | 92
-rw-r--r--  trunk/2.6.22/20028_93-swiotlb.patch1 | 146
-rw-r--r--  trunk/2.6.22/20029_95-privcmd-wrlock.patch1 | 72
-rw-r--r--  trunk/2.6.22/20030_136-pae-vmalloc-sync-all.patch1 (renamed from trunk/2.6.22/20002_fix-pae-vmalloc-sync.patch) | 41
-rw-r--r--  trunk/2.6.22/20031_137-netfront-copy-release.patch1 | 128
-rw-r--r--  trunk/2.6.22/20032_141-driver-autoload.patch1 | 120
-rw-r--r--  trunk/2.6.22/20033_144-xenbus-dev-wait.patch1 | 104
-rw-r--r--  trunk/2.6.22/20034_145-xenbus-error-path.patch1 | 24
-rw-r--r--  trunk/2.6.22/20035_148-blkfront-no-bounce-bufs.patch1 | 25
-rw-r--r--  trunk/2.6.22/20036_152-netloop-check-cloned-skb.patch1 | 35
-rw-r--r--  trunk/2.6.22/20037_157-netfront-skb-deref.patch1 | 35
-rw-r--r--  trunk/2.6.22/20038_252-l1-entry-update-highpte.patch1 (renamed from trunk/2.6.22/40001_i386-fix-xen_l1_entry_update-for-highptes.patch) | 17
-rw-r--r--  trunk/2.6.22/20039_265-ptep_get_and_clear.patch1 | 74
-rw-r--r--  trunk/2.6.22/20040_xen3-fixup-common.patch1 | 365
-rw-r--r--  trunk/2.6.22/20041_xen3-fixup-arch-i386.patch1 | 76
-rw-r--r--  trunk/2.6.22/20042_xen3-fixup-arch-x86_64.patch1 | 103
-rw-r--r--  trunk/2.6.22/20043_xen3-patch-2.6.18.patch1 | 394
-rw-r--r--  trunk/2.6.22/20044_xen3-patch-2.6.19.patch1 | 12637
-rw-r--r--  trunk/2.6.22/20045_xen3-patch-2.6.20.patch1 | 7592
-rw-r--r--  trunk/2.6.22/20046_xen3-patch-2.6.21.patch1 | 5107
-rw-r--r--  trunk/2.6.22/20047_xen3-patch-2.6.22.patch1 | 7866
-rw-r--r--  trunk/2.6.22/20048_xen3-patch-2.6.22.5-6.patch1 | 30
-rw-r--r--  trunk/2.6.22/20049_xen3-patch-2.6.22.6-7.patch1 (renamed from trunk/2.6.22/20003_fix-ia32entry-xen.patch) | 32
-rw-r--r--  trunk/2.6.22/20050_xen3-patch-2.6.22.11-12.patch1 | 88
-rw-r--r--  trunk/2.6.22/20051_xen3-x86-early-quirks-unificiation.patch1 | 25
-rw-r--r--  trunk/2.6.22/20052_xen3-x86-fam10-l3cache.patch1 | 28
-rw-r--r--  trunk/2.6.22/20053_xen3-aux-at_vector_size.patch1 | 47
-rw-r--r--  trunk/2.6.22/20054_xen-balloon-min.patch1 | 77
-rw-r--r--  trunk/2.6.22/20055_xen-modular-blktap.patch1 | 41
-rw-r--r--  trunk/2.6.22/20056_xen-x86-panic-no-reboot.patch1 | 58
-rw-r--r--  trunk/2.6.22/20057_xen-i386-panic-on-oops.patch1 | 27
-rw-r--r--  trunk/2.6.22/20058_xen-x86-kconfig-no-cpu_freq.patch1 | 35
-rw-r--r--  trunk/2.6.22/20059_xen-configurable-console.patch1 | 181
-rw-r--r--  trunk/2.6.22/20060_xen-x86_64-init-cleanup.patch1 | 294
-rw-r--r--  trunk/2.6.22/20061_xen-balloon-max-target.patch1 | 32
-rw-r--r--  trunk/2.6.22/20062_xen-x86-dcr-fallback.patch1 | 158
-rw-r--r--  trunk/2.6.22/20063_xen-x86-consistent-nmi.patch1 | 345
-rw-r--r--  trunk/2.6.22/20064_xen-x86-no-lapic.patch1 | 1426
-rw-r--r--  trunk/2.6.22/20065_xen-no-video-select.patch1 | 21
-rw-r--r--  trunk/2.6.22/20066_xen-blkback-bimodal-suse.patch1 | 39
-rw-r--r--  trunk/2.6.22/20067_xen-console-default.patch1 | 41
-rw-r--r--  trunk/2.6.22/20068_xen-x86-panic-smp.patch1 | 96
-rw-r--r--  trunk/2.6.22/20069_xen-split-pt-lock.patch1 | 220
-rw-r--r--  trunk/2.6.22/20070_xen-blkif-protocol-fallback-hack.patch1 | 229
-rw-r--r--  trunk/2.6.22/20071_xen-x86-pXX_val.patch1 | 434
-rw-r--r--  trunk/2.6.22/20072_xen-x86_64-physmap-nx.patch1 | 36
-rw-r--r--  trunk/2.6.22/20073_xen-i386-kconfig-msr.patch1 | 18
-rw-r--r--  trunk/2.6.22/20074_xen-x86_64-entry.patch1 | 42
-rw-r--r--  trunk/2.6.22/20075_xen-intel-agp.patch1 | 33
-rw-r--r--  trunk/2.6.22/20076_xen-blkback-cdrom.patch1 | 277
-rw-r--r--  trunk/2.6.22/20077_xen-isa-dma.patch1 | 543
-rw-r--r--  trunk/2.6.22/20078_xen-i386-set-fixmap.patch1 | 126
-rw-r--r--  trunk/2.6.22/20079_xenfb-module-param.patch1 | 108
87 files changed, 137946 insertions, 93165 deletions
diff --git a/trunk/2.6.22/00000_README b/trunk/2.6.22/00000_README
index a1ae84d..82c139d 100644
--- a/trunk/2.6.22/00000_README
+++ b/trunk/2.6.22/00000_README
@@ -11,24 +11,18 @@ Numbering
---------
0xxxx Gentoo, not related to Xen. (in case we pull something from extras)
-2xxxx Ubuntu, we use their Xen patch for >=2.6.22
-4xxxx Misc
+2xxxx Suse, we are using their Xen patch for 2.6.22
5xxxx Gentoo, Xen and other fixes for Redhat and/or Debian patches.
Patches
-------
-20001_xen.patch
- Big fat xen patch, from Ubuntu's 2.6.22-14.46
+0xxxx_linux-2.6.22.???
+ Kernel.org 2.6.22.y releases that are not included in genpatches.
-20002_fix-pae-vmalloc-sync.patch
-
-20003_fix-ia32entry-xen.patch
- Security fix, CVE-2007-4573
-
-40001_i386-fix-xen_l1_entry_update-for-highptes.patch
- Fix for kernels compiled with CONFIG_HIGHPTE.
- Pulled from linux-2.6.18-xen.hg, changeset e79729740288.
+2xxxx_???
+ Xen patches from Suse's kernel. Note that they are named *.patch1
+ to make sure unipatch does the correct thing.
50001_make-install.patch
Handle make install in a semi-sane way that plays nice with
diff --git a/trunk/2.6.22/01012_linux-2.6.22.13.patch b/trunk/2.6.22/01012_linux-2.6.22.13.patch
new file mode 100644
index 0000000..cfd8333
--- /dev/null
+++ b/trunk/2.6.22/01012_linux-2.6.22.13.patch
@@ -0,0 +1,42 @@
+Subject: Linux 2.6.22.13
+From: Greg Kroah-Hartman <gregkh@suse.de>
+
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 5c8ecba..e3adc46 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -1336,8 +1336,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
+ int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
+
+ exit_code = p->exit_code;
+- if (unlikely(!exit_code) ||
+- unlikely(p->state & TASK_TRACED))
++ if (unlikely(!exit_code) || unlikely(p->exit_state))
+ goto bail_ref;
+ return wait_noreap_copyout(p, pid, uid,
+ why, (exit_code << 8) | 0x7f,
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index e33fb3d..2e1d8e7 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -994,6 +994,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
+ if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
+ return 0;
+
++ if (!tp->packets_out)
++ goto out;
++
+ /* SACK fastpath:
+ * if the only SACK change is the increase of the end_seq of
+ * the first block then only apply that SACK block
+@@ -1262,6 +1265,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
+ (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
+ tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);
+
++out:
++
+ #if FASTRETRANS_DEBUG > 0
+ BUG_TRAP((int)tp->sacked_out >= 0);
+ BUG_TRAP((int)tp->lost_out >= 0);
diff --git a/trunk/2.6.22/01013_linux-2.6.22.14.patch b/trunk/2.6.22/01013_linux-2.6.22.14.patch
new file mode 100644
index 0000000..aea3379
--- /dev/null
+++ b/trunk/2.6.22/01013_linux-2.6.22.14.patch
@@ -0,0 +1,1319 @@
+Subject: Linux 2.6.22.14
+From: Greg Kroah-Hartman <gregkh@suse.de>
+
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
+index f64b81f..8e02ed6 100644
+--- a/arch/i386/kernel/tsc.c
++++ b/arch/i386/kernel/tsc.c
+@@ -122,7 +122,7 @@ unsigned long native_calculate_cpu_khz(void)
+ {
+ unsigned long long start, end;
+ unsigned long count;
+- u64 delta64;
++ u64 delta64 = (u64)ULLONG_MAX;
+ int i;
+ unsigned long flags;
+
+@@ -134,6 +134,7 @@ unsigned long native_calculate_cpu_khz(void)
+ rdtscll(start);
+ mach_countup(&count);
+ rdtscll(end);
++ delta64 = min(delta64, (end - start));
+ }
+ /*
+ * Error: ECTCNEVERSET
+@@ -144,8 +145,6 @@ unsigned long native_calculate_cpu_khz(void)
+ if (count <= 1)
+ goto err;
+
+- delta64 = end - start;
+-
+ /* cpu freq too fast: */
+ if (delta64 > (1ULL<<32))
+ goto err;
+diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
+index 58e3271..dcf5dec 100644
+--- a/drivers/i2c/busses/i2c-pasemi.c
++++ b/drivers/i2c/busses/i2c-pasemi.c
+@@ -51,6 +51,7 @@ struct pasemi_smbus {
+ #define MRXFIFO_DATA_M 0x000000ff
+
+ #define SMSTA_XEN 0x08000000
++#define SMSTA_MTN 0x00200000
+
+ #define CTL_MRR 0x00000400
+ #define CTL_MTR 0x00000200
+@@ -98,6 +99,10 @@ static unsigned int pasemi_smb_waitready(struct pasemi_smbus *smbus)
+ status = reg_read(smbus, REG_SMSTA);
+ }
+
++ /* Got NACK? */
++ if (status & SMSTA_MTN)
++ return -ENXIO;
++
+ if (timeout < 0) {
+ dev_warn(&smbus->dev->dev, "Timeout, status 0x%08x\n", status);
+ reg_write(smbus, REG_SMSTA, status);
+diff --git a/drivers/i2c/chips/eeprom.c b/drivers/i2c/chips/eeprom.c
+index bfce13c..5ad36ab 100644
+--- a/drivers/i2c/chips/eeprom.c
++++ b/drivers/i2c/chips/eeprom.c
+@@ -125,13 +125,20 @@ static ssize_t eeprom_read(struct kobject *kobj, char *buf, loff_t off, size_t c
+ for (slice = off >> 5; slice <= (off + count - 1) >> 5; slice++)
+ eeprom_update_client(client, slice);
+
+- /* Hide Vaio security settings to regular users (16 first bytes) */
+- if (data->nature == VAIO && off < 16 && !capable(CAP_SYS_ADMIN)) {
+- size_t in_row1 = 16 - off;
+- in_row1 = min(in_row1, count);
+- memset(buf, 0, in_row1);
+- if (count - in_row1 > 0)
+- memcpy(buf + in_row1, &data->data[16], count - in_row1);
++ /* Hide Vaio private settings to regular users:
++ - BIOS passwords: bytes 0x00 to 0x0f
++ - UUID: bytes 0x10 to 0x1f
++ - Serial number: 0xc0 to 0xdf */
++ if (data->nature == VAIO && !capable(CAP_SYS_ADMIN)) {
++ int i;
++
++ for (i = 0; i < count; i++) {
++ if ((off + i <= 0x1f) ||
++ (off + i >= 0xc0 && off + i <= 0xdf))
++ buf[i] = 0;
++ else
++ buf[i] = data->data[off + i];
++ }
+ } else {
+ memcpy(buf, &data->data[off], count);
+ }
+@@ -195,14 +202,18 @@ static int eeprom_detect(struct i2c_adapter *adapter, int address, int kind)
+ goto exit_kfree;
+
+ /* Detect the Vaio nature of EEPROMs.
+- We use the "PCG-" prefix as the signature. */
++ We use the "PCG-" or "VGN-" prefix as the signature. */
+ if (address == 0x57) {
+- if (i2c_smbus_read_byte_data(new_client, 0x80) == 'P'
+- && i2c_smbus_read_byte(new_client) == 'C'
+- && i2c_smbus_read_byte(new_client) == 'G'
+- && i2c_smbus_read_byte(new_client) == '-') {
++ char name[4];
++
++ name[0] = i2c_smbus_read_byte_data(new_client, 0x80);
++ name[1] = i2c_smbus_read_byte(new_client);
++ name[2] = i2c_smbus_read_byte(new_client);
++ name[3] = i2c_smbus_read_byte(new_client);
++
++ if (!memcmp(name, "PCG-", 4) || !memcmp(name, "VGN-", 4)) {
+ dev_info(&new_client->dev, "Vaio EEPROM detected, "
+- "enabling password protection\n");
++ "enabling privacy protection\n");
+ data->nature = VAIO;
+ }
+ }
+diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
+index d9c4fd1..096a081 100644
+--- a/drivers/ide/pci/serverworks.c
++++ b/drivers/ide/pci/serverworks.c
+@@ -101,6 +101,7 @@ static u8 svwks_udma_filter(ide_drive_t *drive)
+ mode = 2;
+
+ switch(mode) {
++ case 3: mask = 0x3f; break;
+ case 2: mask = 0x1f; break;
+ case 1: mask = 0x07; break;
+ default: mask = 0x00; break;
+diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
+index 7a69a18..4484a64 100644
+--- a/drivers/isdn/hardware/avm/b1.c
++++ b/drivers/isdn/hardware/avm/b1.c
+@@ -321,12 +321,15 @@ void b1_reset_ctr(struct capi_ctr *ctrl)
+ avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
+ avmcard *card = cinfo->card;
+ unsigned int port = card->port;
++ unsigned long flags;
+
+ b1_reset(port);
+ b1_reset(port);
+
+ memset(cinfo->version, 0, sizeof(cinfo->version));
++ spin_lock_irqsave(&card->lock, flags);
+ capilib_release(&cinfo->ncci_head);
++ spin_unlock_irqrestore(&card->lock, flags);
+ capi_ctr_reseted(ctrl);
+ }
+
+@@ -361,9 +364,8 @@ void b1_release_appl(struct capi_ctr *ctrl, u16 appl)
+ unsigned int port = card->port;
+ unsigned long flags;
+
+- capilib_release_appl(&cinfo->ncci_head, appl);
+-
+ spin_lock_irqsave(&card->lock, flags);
++ capilib_release_appl(&cinfo->ncci_head, appl);
+ b1_put_byte(port, SEND_RELEASE);
+ b1_put_word(port, appl);
+ spin_unlock_irqrestore(&card->lock, flags);
+@@ -380,27 +382,27 @@ u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
+ u8 subcmd = CAPIMSG_SUBCOMMAND(skb->data);
+ u16 dlen, retval;
+
++ spin_lock_irqsave(&card->lock, flags);
+ if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
+ retval = capilib_data_b3_req(&cinfo->ncci_head,
+ CAPIMSG_APPID(skb->data),
+ CAPIMSG_NCCI(skb->data),
+ CAPIMSG_MSGID(skb->data));
+- if (retval != CAPI_NOERROR)
++ if (retval != CAPI_NOERROR) {
++ spin_unlock_irqrestore(&card->lock, flags);
+ return retval;
++ }
+
+ dlen = CAPIMSG_DATALEN(skb->data);
+
+- spin_lock_irqsave(&card->lock, flags);
+ b1_put_byte(port, SEND_DATA_B3_REQ);
+ b1_put_slice(port, skb->data, len);
+ b1_put_slice(port, skb->data + len, dlen);
+- spin_unlock_irqrestore(&card->lock, flags);
+ } else {
+- spin_lock_irqsave(&card->lock, flags);
+ b1_put_byte(port, SEND_MESSAGE);
+ b1_put_slice(port, skb->data, len);
+- spin_unlock_irqrestore(&card->lock, flags);
+ }
++ spin_unlock_irqrestore(&card->lock, flags);
+
+ dev_kfree_skb_any(skb);
+ return CAPI_NOERROR;
+@@ -534,17 +536,17 @@ irqreturn_t b1_interrupt(int interrupt, void *devptr)
+
+ ApplId = (unsigned) b1_get_word(card->port);
+ MsgLen = b1_get_slice(card->port, card->msgbuf);
+- spin_unlock_irqrestore(&card->lock, flags);
+ if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
+ printk(KERN_ERR "%s: incoming packet dropped\n",
+ card->name);
++ spin_unlock_irqrestore(&card->lock, flags);
+ } else {
+ memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen);
+ if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF)
+ capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
+ CAPIMSG_NCCI(skb->data),
+ CAPIMSG_MSGID(skb->data));
+-
++ spin_unlock_irqrestore(&card->lock, flags);
+ capi_ctr_handle_message(ctrl, ApplId, skb);
+ }
+ break;
+@@ -554,21 +556,17 @@ irqreturn_t b1_interrupt(int interrupt, void *devptr)
+ ApplId = b1_get_word(card->port);
+ NCCI = b1_get_word(card->port);
+ WindowSize = b1_get_word(card->port);
+- spin_unlock_irqrestore(&card->lock, flags);
+-
+ capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize);
+-
++ spin_unlock_irqrestore(&card->lock, flags);
+ break;
+
+ case RECEIVE_FREE_NCCI:
+
+ ApplId = b1_get_word(card->port);
+ NCCI = b1_get_word(card->port);
+- spin_unlock_irqrestore(&card->lock, flags);
+-
+ if (NCCI != 0xffffffff)
+ capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI);
+-
++ spin_unlock_irqrestore(&card->lock, flags);
+ break;
+
+ case RECEIVE_START:
+diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
+index d58f927..8710cf6 100644
+--- a/drivers/isdn/hardware/avm/c4.c
++++ b/drivers/isdn/hardware/avm/c4.c
+@@ -727,6 +727,7 @@ static void c4_send_init(avmcard *card)
+ {
+ struct sk_buff *skb;
+ void *p;
++ unsigned long flags;
+
+ skb = alloc_skb(15, GFP_ATOMIC);
+ if (!skb) {
+@@ -744,12 +745,15 @@ static void c4_send_init(avmcard *card)
+ skb_put(skb, (u8 *)p - (u8 *)skb->data);
+
+ skb_queue_tail(&card->dma->send_queue, skb);
++ spin_lock_irqsave(&card->lock, flags);
+ c4_dispatch_tx(card);
++ spin_unlock_irqrestore(&card->lock, flags);
+ }
+
+ static int queue_sendconfigword(avmcard *card, u32 val)
+ {
+ struct sk_buff *skb;
++ unsigned long flags;
+ void *p;
+
+ skb = alloc_skb(3+4, GFP_ATOMIC);
+@@ -766,7 +770,9 @@ static int queue_sendconfigword(avmcard *card, u32 val)
+ skb_put(skb, (u8 *)p - (u8 *)skb->data);
+
+ skb_queue_tail(&card->dma->send_queue, skb);
++ spin_lock_irqsave(&card->lock, flags);
+ c4_dispatch_tx(card);
++ spin_unlock_irqrestore(&card->lock, flags);
+ return 0;
+ }
+
+@@ -986,7 +992,9 @@ static void c4_release_appl(struct capi_ctr *ctrl, u16 appl)
+ struct sk_buff *skb;
+ void *p;
+
++ spin_lock_irqsave(&card->lock, flags);
+ capilib_release_appl(&cinfo->ncci_head, appl);
++ spin_unlock_irqrestore(&card->lock, flags);
+
+ if (ctrl->cnr == card->cardnr) {
+ skb = alloc_skb(7, GFP_ATOMIC);
+@@ -1019,7 +1027,8 @@ static u16 c4_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
+ u16 retval = CAPI_NOERROR;
+ unsigned long flags;
+
+- if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
++ spin_lock_irqsave(&card->lock, flags);
++ if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
+ retval = capilib_data_b3_req(&cinfo->ncci_head,
+ CAPIMSG_APPID(skb->data),
+ CAPIMSG_NCCI(skb->data),
+@@ -1027,10 +1036,9 @@ static u16 c4_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
+ }
+ if (retval == CAPI_NOERROR) {
+ skb_queue_tail(&card->dma->send_queue, skb);
+- spin_lock_irqsave(&card->lock, flags);
+ c4_dispatch_tx(card);
+- spin_unlock_irqrestore(&card->lock, flags);
+ }
++ spin_unlock_irqrestore(&card->lock, flags);
+ return retval;
+ }
+
+diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
+index 765fb75..06f6ec3 100644
+--- a/drivers/net/forcedeth.c
++++ b/drivers/net/forcedeth.c
+@@ -987,7 +987,7 @@ static void nv_enable_irq(struct net_device *dev)
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+- enable_irq(dev->irq);
++ enable_irq(np->pci_dev->irq);
+ } else {
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+@@ -1003,7 +1003,7 @@ static void nv_disable_irq(struct net_device *dev)
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+- disable_irq(dev->irq);
++ disable_irq(np->pci_dev->irq);
+ } else {
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+@@ -1600,7 +1600,7 @@ static void nv_do_rx_refill(unsigned long data)
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+- disable_irq(dev->irq);
++ disable_irq(np->pci_dev->irq);
+ } else {
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ }
+@@ -1618,7 +1618,7 @@ static void nv_do_rx_refill(unsigned long data)
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+- enable_irq(dev->irq);
++ enable_irq(np->pci_dev->irq);
+ } else {
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ }
+@@ -3556,10 +3556,12 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
+ if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+ if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+ np->msi_flags |= NV_MSI_ENABLED;
++ dev->irq = np->pci_dev->irq;
+ if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_ENABLED;
++ dev->irq = np->pci_dev->irq;
+ goto out_err;
+ }
+
+@@ -3622,7 +3624,7 @@ static void nv_do_nic_poll(unsigned long data)
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+- disable_irq_lockdep(dev->irq);
++ disable_irq_lockdep(np->pci_dev->irq);
+ mask = np->irqmask;
+ } else {
+ if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
+@@ -3640,6 +3642,8 @@ static void nv_do_nic_poll(unsigned long data)
+ }
+ np->nic_poll_irq = 0;
+
++ /* disable_irq() contains synchronize_irq, thus no irq handler can run now */
++
+ if (np->recover_error) {
+ np->recover_error = 0;
+ printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
+@@ -3676,7 +3680,6 @@ static void nv_do_nic_poll(unsigned long data)
+ }
+ }
+
+- /* FIXME: Do we need synchronize_irq(dev->irq) here? */
+
+ writel(mask, base + NvRegIrqMask);
+ pci_push(base);
+@@ -3689,7 +3692,7 @@ static void nv_do_nic_poll(unsigned long data)
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+- enable_irq_lockdep(dev->irq);
++ enable_irq_lockdep(np->pci_dev->irq);
+ } else {
+ if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
+ nv_nic_irq_rx(0, dev);
+@@ -4943,7 +4946,7 @@ static int nv_close(struct net_device *dev)
+ np->in_shutdown = 1;
+ spin_unlock_irq(&np->lock);
+ netif_poll_disable(dev);
+- synchronize_irq(dev->irq);
++ synchronize_irq(np->pci_dev->irq);
+
+ del_timer_sync(&np->oom_kick);
+ del_timer_sync(&np->nic_poll);
+diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
+index bec83cb..7e40105 100644
+--- a/drivers/scsi/hptiop.c
++++ b/drivers/scsi/hptiop.c
+@@ -377,8 +377,9 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag)
+ scp->result = SAM_STAT_CHECK_CONDITION;
+ memset(&scp->sense_buffer,
+ 0, sizeof(scp->sense_buffer));
+- memcpy(&scp->sense_buffer,
+- &req->sg_list, le32_to_cpu(req->dataxfer_length));
++ memcpy(&scp->sense_buffer, &req->sg_list,
++ min(sizeof(scp->sense_buffer),
++ le32_to_cpu(req->dataxfer_length)));
+ break;
+
+ default:
+diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
+index ef50fa4..87f6467 100644
+--- a/drivers/usb/core/hcd.h
++++ b/drivers/usb/core/hcd.h
+@@ -19,6 +19,8 @@
+
+ #ifdef __KERNEL__
+
++#include <linux/rwsem.h>
++
+ /* This file contains declarations of usbcore internals that are mostly
+ * used or exposed by Host Controller Drivers.
+ */
+@@ -464,5 +466,9 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb) {}
+ : (in_interrupt () ? "in_interrupt" : "can sleep"))
+
+
+-#endif /* __KERNEL__ */
++/* This rwsem is for use only by the hub driver and ehci-hcd.
++ * Nobody else should touch it.
++ */
++extern struct rw_semaphore ehci_cf_port_reset_rwsem;
+
++#endif /* __KERNEL__ */
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index a1c1a11..bc93e06 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -117,6 +117,12 @@ MODULE_PARM_DESC(use_both_schemes,
+ "try the other device initialization scheme if the "
+ "first one fails");
+
++/* Mutual exclusion for EHCI CF initialization. This interferes with
++ * port reset on some companion controllers.
++ */
++DECLARE_RWSEM(ehci_cf_port_reset_rwsem);
++EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
++
+
+ static inline char *portspeed(int portstatus)
+ {
+@@ -1513,6 +1519,11 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ {
+ int i, status;
+
++ /* Block EHCI CF initialization during the port reset.
++ * Some companion controllers don't like it when they mix.
++ */
++ down_read(&ehci_cf_port_reset_rwsem);
++
+ /* Reset the port */
+ for (i = 0; i < PORT_RESET_TRIES; i++) {
+ status = set_port_feature(hub->hdev,
+@@ -1543,7 +1554,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ usb_set_device_state(udev, status
+ ? USB_STATE_NOTATTACHED
+ : USB_STATE_DEFAULT);
+- return status;
++ goto done;
+ }
+
+ dev_dbg (hub->intfdev,
+@@ -1556,6 +1567,8 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ "Cannot enable port %i. Maybe the USB cable is bad?\n",
+ port1);
+
++ done:
++ up_read(&ehci_cf_port_reset_rwsem);
+ return status;
+ }
+
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index 099aff6..5caa8b3 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -566,10 +566,18 @@ static int ehci_run (struct usb_hcd *hcd)
+ * are explicitly handed to companion controller(s), so no TT is
+ * involved with the root hub. (Except where one is integrated,
+ * and there's no companion controller unless maybe for USB OTG.)
++ *
++ * Turning on the CF flag will transfer ownership of all ports
++ * from the companions to the EHCI controller. If any of the
++ * companions are in the middle of a port reset at the time, it
++ * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
++ * guarantees that no resets are in progress.
+ */
++ down_write(&ehci_cf_port_reset_rwsem);
+ hcd->state = HC_STATE_RUNNING;
+ ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+ ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
++ up_write(&ehci_cf_port_reset_rwsem);
+
+ temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ ehci_info (ehci,
+diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
+index 4f8282a..c36eb79 100644
+--- a/drivers/usb/serial/generic.c
++++ b/drivers/usb/serial/generic.c
+@@ -190,14 +190,15 @@ int usb_serial_generic_write(struct usb_serial_port *port, const unsigned char *
+
+ /* only do something if we have a bulk out endpoint */
+ if (serial->num_bulk_out) {
+- spin_lock_bh(&port->lock);
++ unsigned long flags;
++ spin_lock_irqsave(&port->lock, flags);
+ if (port->write_urb_busy) {
+- spin_unlock_bh(&port->lock);
++ spin_unlock_irqrestore(&port->lock, flags);
+ dbg("%s - already writing", __FUNCTION__);
+ return 0;
+ }
+ port->write_urb_busy = 1;
+- spin_unlock_bh(&port->lock);
++ spin_unlock_irqrestore(&port->lock, flags);
+
+ count = (count > port->bulk_out_size) ? port->bulk_out_size : count;
+
+diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
+index 0683b51..6f22419 100644
+--- a/drivers/usb/serial/kobil_sct.c
++++ b/drivers/usb/serial/kobil_sct.c
+@@ -82,6 +82,7 @@ static int kobil_tiocmset(struct usb_serial_port *port, struct file *file,
+ unsigned int set, unsigned int clear);
+ static void kobil_read_int_callback( struct urb *urb );
+ static void kobil_write_callback( struct urb *purb );
++static void kobil_set_termios(struct usb_serial_port *port, struct ktermios *old);
+
+
+ static struct usb_device_id id_table [] = {
+@@ -119,6 +120,7 @@ static struct usb_serial_driver kobil_device = {
+ .attach = kobil_startup,
+ .shutdown = kobil_shutdown,
+ .ioctl = kobil_ioctl,
++ .set_termios = kobil_set_termios,
+ .tiocmget = kobil_tiocmget,
+ .tiocmset = kobil_tiocmset,
+ .open = kobil_open,
+@@ -137,7 +139,6 @@ struct kobil_private {
+ int cur_pos; // index of the next char to send in buf
+ __u16 device_type;
+ int line_state;
+- struct ktermios internal_termios;
+ };
+
+
+@@ -216,7 +217,7 @@ static void kobil_shutdown (struct usb_serial *serial)
+
+ static int kobil_open (struct usb_serial_port *port, struct file *filp)
+ {
+- int i, result = 0;
++ int result = 0;
+ struct kobil_private *priv;
+ unsigned char *transfer_buffer;
+ int transfer_buffer_length = 8;
+@@ -242,16 +243,6 @@ static int kobil_open (struct usb_serial_port *port, struct file *filp)
+ port->tty->termios->c_iflag = IGNBRK | IGNPAR | IXOFF;
+ port->tty->termios->c_oflag &= ~ONLCR; // do NOT translate CR to CR-NL (0x0A -> 0x0A 0x0D)
+
+- // set up internal termios structure
+- priv->internal_termios.c_iflag = port->tty->termios->c_iflag;
+- priv->internal_termios.c_oflag = port->tty->termios->c_oflag;
+- priv->internal_termios.c_cflag = port->tty->termios->c_cflag;
+- priv->internal_termios.c_lflag = port->tty->termios->c_lflag;
+-
+- for (i=0; i<NCCS; i++) {
+- priv->internal_termios.c_cc[i] = port->tty->termios->c_cc[i];
+- }
+-
+ // allocate memory for transfer buffer
+ transfer_buffer = kzalloc(transfer_buffer_length, GFP_KERNEL);
+ if (! transfer_buffer) {
+@@ -358,24 +349,26 @@ static void kobil_close (struct usb_serial_port *port, struct file *filp)
+ }
+
+
+-static void kobil_read_int_callback( struct urb *purb)
++static void kobil_read_int_callback(struct urb *urb)
+ {
+ int result;
+- struct usb_serial_port *port = (struct usb_serial_port *) purb->context;
++ struct usb_serial_port *port = urb->context;
+ struct tty_struct *tty;
+- unsigned char *data = purb->transfer_buffer;
++ unsigned char *data = urb->transfer_buffer;
++ int status = urb->status;
+ // char *dbg_data;
+
+ dbg("%s - port %d", __FUNCTION__, port->number);
+
+- if (purb->status) {
+- dbg("%s - port %d Read int status not zero: %d", __FUNCTION__, port->number, purb->status);
++ if (status) {
++ dbg("%s - port %d Read int status not zero: %d",
++ __FUNCTION__, port->number, status);
+ return;
+ }
+-
+- tty = port->tty;
+- if (purb->actual_length) {
+-
++
++ tty = port->tty;
++ if (urb->actual_length) {
++
+ // BEGIN DEBUG
+ /*
+ dbg_data = kzalloc((3 * purb->actual_length + 10) * sizeof(char), GFP_KERNEL);
+@@ -390,15 +383,15 @@ static void kobil_read_int_callback( struct urb *purb)
+ */
+ // END DEBUG
+
+- tty_buffer_request_room(tty, purb->actual_length);
+- tty_insert_flip_string(tty, data, purb->actual_length);
++ tty_buffer_request_room(tty, urb->actual_length);
++ tty_insert_flip_string(tty, data, urb->actual_length);
+ tty_flip_buffer_push(tty);
+ }
+
+ // someone sets the dev to 0 if the close method has been called
+ port->interrupt_in_urb->dev = port->serial->dev;
+
+- result = usb_submit_urb( port->interrupt_in_urb, GFP_ATOMIC );
++ result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
+ dbg("%s - port %d Send read URB returns: %i", __FUNCTION__, port->number, result);
+ }
+
+@@ -605,102 +598,79 @@ static int kobil_tiocmset(struct usb_serial_port *port, struct file *file,
+ return (result < 0) ? result : 0;
+ }
+
+-
+-static int kobil_ioctl(struct usb_serial_port *port, struct file *file,
+- unsigned int cmd, unsigned long arg)
++static void kobil_set_termios(struct usb_serial_port *port, struct ktermios *old)
+ {
+ struct kobil_private * priv;
+ int result;
+ unsigned short urb_val = 0;
+- unsigned char *transfer_buffer;
+- int transfer_buffer_length = 8;
+- char *settings;
+- void __user *user_arg = (void __user *)arg;
++ int c_cflag = port->tty->termios->c_cflag;
++ speed_t speed;
++ void * settings;
+
+ priv = usb_get_serial_port_data(port);
+- if ((priv->device_type == KOBIL_USBTWIN_PRODUCT_ID) || (priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID)) {
++ if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID || priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID)
+ // This device doesn't support ioctl calls
+- return 0;
+- }
+-
+- switch (cmd) {
+- case TCGETS: // 0x5401
+- if (!access_ok(VERIFY_WRITE, user_arg, sizeof(struct ktermios))) {
+- dbg("%s - port %d Error in access_ok", __FUNCTION__, port->number);
+- return -EFAULT;
+- }
+- if (kernel_termios_to_user_termios((struct ktermios __user *)arg,
+- &priv->internal_termios))
+- return -EFAULT;
+- return 0;
+-
+- case TCSETS: // 0x5402
+- if (!(port->tty->termios)) {
+- dbg("%s - port %d Error: port->tty->termios is NULL", __FUNCTION__, port->number);
+- return -ENOTTY;
+- }
+- if (!access_ok(VERIFY_READ, user_arg, sizeof(struct ktermios))) {
+- dbg("%s - port %d Error in access_ok", __FUNCTION__, port->number);
+- return -EFAULT;
+- }
+- if (user_termios_to_kernel_termios(&priv->internal_termios,
+- (struct ktermios __user *)arg))
+- return -EFAULT;
+-
+- settings = kzalloc(50, GFP_KERNEL);
+- if (! settings) {
+- return -ENOBUFS;
+- }
++ return;
+
+- switch (priv->internal_termios.c_cflag & CBAUD) {
+- case B1200:
++ switch (speed = tty_get_baud_rate(port->tty)) {
++ case 1200:
+ urb_val = SUSBCR_SBR_1200;
+- strcat(settings, "1200 ");
+ break;
+- case B9600:
++ case 9600:
+ default:
+ urb_val = SUSBCR_SBR_9600;
+- strcat(settings, "9600 ");
+ break;
+- }
++ }
++ urb_val |= (c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits : SUSBCR_SPASB_1StopBit;
+
+- urb_val |= (priv->internal_termios.c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits : SUSBCR_SPASB_1StopBit;
+- strcat(settings, (priv->internal_termios.c_cflag & CSTOPB) ? "2 StopBits " : "1 StopBit ");
++ settings = kzalloc(50, GFP_KERNEL);
++ if (! settings)
++ return;
+
+- if (priv->internal_termios.c_cflag & PARENB) {
+- if (priv->internal_termios.c_cflag & PARODD) {
+- urb_val |= SUSBCR_SPASB_OddParity;
+- strcat(settings, "Odd Parity");
+- } else {
+- urb_val |= SUSBCR_SPASB_EvenParity;
+- strcat(settings, "Even Parity");
+- }
++ sprintf(settings, "%d ", speed);
++
++ if (c_cflag & PARENB) {
++ if (c_cflag & PARODD) {
++ urb_val |= SUSBCR_SPASB_OddParity;
++ strcat(settings, "Odd Parity");
+ } else {
+- urb_val |= SUSBCR_SPASB_NoParity;
+- strcat(settings, "No Parity");
++ urb_val |= SUSBCR_SPASB_EvenParity;
++ strcat(settings, "Even Parity");
+ }
+- dbg("%s - port %d setting port to: %s", __FUNCTION__, port->number, settings );
++ } else {
++ urb_val |= SUSBCR_SPASB_NoParity;
++ strcat(settings, "No Parity");
++ }
+
+- result = usb_control_msg( port->serial->dev,
+- usb_rcvctrlpipe(port->serial->dev, 0 ),
+- SUSBCRequest_SetBaudRateParityAndStopBits,
+- USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
+- urb_val,
+- 0,
+- settings,
+- 0,
+- KOBIL_TIMEOUT
+- );
++ result = usb_control_msg( port->serial->dev,
++ usb_rcvctrlpipe(port->serial->dev, 0 ),
++ SUSBCRequest_SetBaudRateParityAndStopBits,
++ USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
++ urb_val,
++ 0,
++ settings,
++ 0,
++ KOBIL_TIMEOUT
++ );
++ kfree(settings);
++}
+
+- dbg("%s - port %d Send set_baudrate URB returns: %i", __FUNCTION__, port->number, result);
+- kfree(settings);
++static int kobil_ioctl(struct usb_serial_port *port, struct file * file, unsigned int cmd, unsigned long arg)
++{
++ struct kobil_private * priv = usb_get_serial_port_data(port);
++ unsigned char *transfer_buffer;
++ int transfer_buffer_length = 8;
++ int result;
++
++ if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID || priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID)
++ // This device doesn't support ioctl calls
+ return 0;
+
++ switch (cmd) {
+ case TCFLSH: // 0x540B
+ transfer_buffer = kmalloc(transfer_buffer_length, GFP_KERNEL);
+- if (! transfer_buffer) {
++ if (! transfer_buffer)
+ return -ENOBUFS;
+- }
+
+ result = usb_control_msg( port->serial->dev,
+ usb_rcvctrlpipe(port->serial->dev, 0 ),
+@@ -714,15 +684,13 @@ static int kobil_ioctl(struct usb_serial_port *port, struct file *file,
+ );
+
+ dbg("%s - port %d Send reset_all_queues (FLUSH) URB returns: %i", __FUNCTION__, port->number, result);
+-
+ kfree(transfer_buffer);
+- return ((result < 0) ? -EFAULT : 0);
+-
++ return (result < 0) ? -EFAULT : 0;
++ default:
++ return -ENOIOCTLCMD;
+ }
+- return -ENOIOCTLCMD;
+ }
+
+-
+ static int __init kobil_init (void)
+ {
+ int retval;
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index a480b09..3175288 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -661,6 +661,27 @@ static void ocfs2_clear_page_regions(struct page *page,
+ }
+
+ /*
++ * Nonsparse file systems fully allocate before we get to the write
++ * code. This prevents ocfs2_write() from tagging the write as an
++ * allocating one, which means ocfs2_map_page_blocks() might try to
++ * read-in the blocks at the tail of our file. Avoid reading them by
++ * testing i_size against each block offset.
++ */
++static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
++ unsigned int block_start)
++{
++ u64 offset = page_offset(page) + block_start;
++
++ if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
++ return 1;
++
++ if (i_size_read(inode) > offset)
++ return 1;
++
++ return 0;
++}
++
++/*
+ * Some of this taken from block_prepare_write(). We already have our
+ * mapping by now though, and the entire write will be allocating or
+ * it won't, so not much need to use BH_New.
+@@ -711,7 +732,8 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
+ if (!buffer_uptodate(bh))
+ set_buffer_uptodate(bh);
+ } else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
+- (block_start < from || block_end > to)) {
++ ocfs2_should_read_blk(inode, page, block_start) &&
++ (block_start < from || block_end > to)) {
+ ll_rw_block(READ, 1, &bh);
+ *wait_bh++=bh;
+ }
+diff --git a/include/linux/netlink.h b/include/linux/netlink.h
+index 2e23353..b2834d8 100644
+--- a/include/linux/netlink.h
++++ b/include/linux/netlink.h
+@@ -173,7 +173,7 @@ extern int netlink_unregister_notifier(struct notifier_block *nb);
+ /* finegrained unicast helpers: */
+ struct sock *netlink_getsockbyfilp(struct file *filp);
+ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
+- long timeo, struct sock *ssk);
++ long *timeo, struct sock *ssk);
+ void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
+ int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol);
+
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index a242c83..1eef14b 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -1014,6 +1014,8 @@ asmlinkage long sys_mq_notify(mqd_t mqdes,
+ return -EINVAL;
+ }
+ if (notification.sigev_notify == SIGEV_THREAD) {
++ long timeo;
++
+ /* create the notify skb */
+ nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
+ ret = -ENOMEM;
+@@ -1042,8 +1044,8 @@ retry:
+ goto out;
+ }
+
+- ret = netlink_attachskb(sock, nc, 0,
+- MAX_SCHEDULE_TIMEOUT, NULL);
++ timeo = MAX_SCHEDULE_TIMEOUT;
++ ret = netlink_attachskb(sock, nc, 0, &timeo, NULL);
+ if (ret == 1)
+ goto retry;
+ if (ret) {
+diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
+index 7e52eb0..589b1e4 100644
+--- a/kernel/futex_compat.c
++++ b/kernel/futex_compat.c
+@@ -29,6 +29,15 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
+ return 0;
+ }
+
++static void __user *futex_uaddr(struct robust_list *entry,
++ compat_long_t futex_offset)
++{
++ compat_uptr_t base = ptr_to_compat(entry);
++ void __user *uaddr = compat_ptr(base + futex_offset);
++
++ return uaddr;
++}
++
+ /*
+ * Walk curr->robust_list (very carefully, it's a userspace list!)
+ * and mark any locks found there dead, and notify any waiters.
+@@ -61,18 +70,23 @@ void compat_exit_robust_list(struct task_struct *curr)
+ if (fetch_robust_entry(&upending, &pending,
+ &head->list_op_pending, &pip))
+ return;
+- if (pending)
+- handle_futex_death((void __user *)pending + futex_offset, curr, pip);
++ if (pending) {
++ void __user *uaddr = futex_uaddr(pending,
++ futex_offset);
++ handle_futex_death(uaddr, curr, pip);
++ }
+
+ while (entry != (struct robust_list __user *) &head->list) {
+ /*
+ * A pending lock might already be on the list, so
+ * dont process it twice:
+ */
+- if (entry != pending)
+- if (handle_futex_death((void __user *)entry + futex_offset,
+- curr, pi))
++ if (entry != pending) {
++ void __user *uaddr = futex_uaddr(entry,
++ futex_offset);
++ if (handle_futex_death(uaddr, curr, pi))
+ return;
++ }
+
+ /*
+ * Fetch the next entry in the list:
+diff --git a/kernel/params.c b/kernel/params.c
+index 8e8ca8f..1f17b58 100644
+--- a/kernel/params.c
++++ b/kernel/params.c
+@@ -591,19 +591,16 @@ static void __init param_sysfs_builtin(void)
+
+ for (i=0; i < __stop___param - __start___param; i++) {
+ char *dot;
+- size_t kplen;
++ size_t max_name_len;
+
+ kp = &__start___param[i];
+- kplen = strlen(kp->name);
++ max_name_len =
++ min_t(size_t, MAX_KBUILD_MODNAME, strlen(kp->name));
+
+- /* We do not handle args without periods. */
+- if (kplen > MAX_KBUILD_MODNAME) {
+- DEBUGP("kernel parameter name is too long: %s\n", kp->name);
+- continue;
+- }
+- dot = memchr(kp->name, '.', kplen);
++ dot = memchr(kp->name, '.', max_name_len);
+ if (!dot) {
+- DEBUGP("couldn't find period in %s\n", kp->name);
++ DEBUGP("couldn't find period in first %d characters "
++ "of %s\n", MAX_KBUILD_MODNAME, kp->name);
+ continue;
+ }
+ name_len = dot - kp->name;
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index eec1481..2d39627 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -674,8 +674,10 @@ retry:
+
+ ret = (*writepage)(page, wbc, data);
+
+- if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE))
++ if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
+ unlock_page(page);
++ ret = 0;
++ }
+ if (ret || (--(wbc->nr_to_write) <= 0))
+ done = 1;
+ if (wbc->nonblocking && bdi_write_congested(bdi)) {
+diff --git a/mm/shmem.c b/mm/shmem.c
+index b6aae2b..2320b60 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -911,6 +911,21 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
+ struct inode *inode;
+
+ BUG_ON(!PageLocked(page));
++ /*
++ * shmem_backing_dev_info's capabilities prevent regular writeback or
++ * sync from ever calling shmem_writepage; but a stacking filesystem
++ * may use the ->writepage of its underlying filesystem, in which case
++ * we want to do nothing when that underlying filesystem is tmpfs
++ * (writing out to swap is useful as a response to memory pressure, but
++ * of no use to stabilize the data) - just redirty the page, unlock it
++ * and claim success in this case. AOP_WRITEPAGE_ACTIVATE, and the
++ * page_mapped check below, must be avoided unless we're in reclaim.
++ */
++ if (!wbc->for_reclaim) {
++ set_page_dirty(page);
++ unlock_page(page);
++ return 0;
++ }
+ BUG_ON(page_mapped(page));
+
+ mapping = page->mapping;
+diff --git a/mm/slub.c b/mm/slub.c
+index e0cf621..648f2c7 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1431,28 +1431,8 @@ new_slab:
+ page = new_slab(s, gfpflags, node);
+ if (page) {
+ cpu = smp_processor_id();
+- if (s->cpu_slab[cpu]) {
+- /*
+- * Someone else populated the cpu_slab while we
+- * enabled interrupts, or we have gotten scheduled
+- * on another cpu. The page may not be on the
+- * requested node even if __GFP_THISNODE was
+- * specified. So we need to recheck.
+- */
+- if (node == -1 ||
+- page_to_nid(s->cpu_slab[cpu]) == node) {
+- /*
+- * Current cpuslab is acceptable and we
+- * want the current one since its cache hot
+- */
+- discard_slab(s, page);
+- page = s->cpu_slab[cpu];
+- slab_lock(page);
+- goto load_freelist;
+- }
+- /* New slab does not fit our expectations */
++ if (s->cpu_slab[cpu])
+ flush_slab(s, s->cpu_slab[cpu], cpu);
+- }
+ slab_lock(page);
+ SetSlabFrozen(page);
+ s->cpu_slab[cpu] = page;
+diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
+index ab86137..630ebb7 100644
+--- a/net/ipv4/ipcomp.c
++++ b/net/ipv4/ipcomp.c
+@@ -17,6 +17,7 @@
+ #include <asm/scatterlist.h>
+ #include <asm/semaphore.h>
+ #include <linux/crypto.h>
++#include <linux/err.h>
+ #include <linux/pfkeyv2.h>
+ #include <linux/percpu.h>
+ #include <linux/smp.h>
+@@ -355,7 +356,7 @@ static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name)
+ for_each_possible_cpu(cpu) {
+ struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
+ CRYPTO_ALG_ASYNC);
+- if (!tfm)
++ if (IS_ERR(tfm))
+ goto error;
+ *per_cpu_ptr(tfms, cpu) = tfm;
+ }
+diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
+index 1ee50b5..3680f64 100644
+--- a/net/ipv6/ipcomp6.c
++++ b/net/ipv6/ipcomp6.c
+@@ -37,6 +37,7 @@
+ #include <asm/scatterlist.h>
+ #include <asm/semaphore.h>
+ #include <linux/crypto.h>
++#include <linux/err.h>
+ #include <linux/pfkeyv2.h>
+ #include <linux/random.h>
+ #include <linux/percpu.h>
+@@ -366,7 +367,7 @@ static struct crypto_comp **ipcomp6_alloc_tfms(const char *alg_name)
+ for_each_possible_cpu(cpu) {
+ struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
+ CRYPTO_ALG_ASYNC);
+- if (!tfm)
++ if (IS_ERR(tfm))
+ goto error;
+ *per_cpu_ptr(tfms, cpu) = tfm;
+ }
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index ccdd5d2..2721ff4 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -839,6 +839,22 @@ static int tcp_packet(struct nf_conn *conntrack,
+ new_state = tcp_conntracks[dir][index][old_state];
+
+ switch (new_state) {
++ case TCP_CONNTRACK_SYN_SENT:
++ if (old_state < TCP_CONNTRACK_TIME_WAIT)
++ break;
++ if ((conntrack->proto.tcp.seen[!dir].flags &
++ IP_CT_TCP_FLAG_CLOSE_INIT)
++ || (conntrack->proto.tcp.last_dir == dir
++ && conntrack->proto.tcp.last_index == TCP_RST_SET)) {
++ /* Attempt to reopen a closed/aborted connection.
++ * Delete this connection and look up again. */
++ write_unlock_bh(&tcp_lock);
++ if (del_timer(&conntrack->timeout))
++ conntrack->timeout.function((unsigned long)
++ conntrack);
++ return -NF_REPEAT;
++ }
++ /* Fall through */
+ case TCP_CONNTRACK_IGNORE:
+ /* Ignored packets:
+ *
+@@ -888,27 +904,6 @@ static int tcp_packet(struct nf_conn *conntrack,
+ nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+ "nf_ct_tcp: invalid state ");
+ return -NF_ACCEPT;
+- case TCP_CONNTRACK_SYN_SENT:
+- if (old_state < TCP_CONNTRACK_TIME_WAIT)
+- break;
+- if ((conntrack->proto.tcp.seen[dir].flags &
+- IP_CT_TCP_FLAG_CLOSE_INIT)
+- || after(ntohl(th->seq),
+- conntrack->proto.tcp.seen[dir].td_end)) {
+- /* Attempt to reopen a closed connection.
+- * Delete this connection and look up again. */
+- write_unlock_bh(&tcp_lock);
+- if (del_timer(&conntrack->timeout))
+- conntrack->timeout.function((unsigned long)
+- conntrack);
+- return -NF_REPEAT;
+- } else {
+- write_unlock_bh(&tcp_lock);
+- if (LOG_INVALID(IPPROTO_TCP))
+- nf_log_packet(pf, 0, skb, NULL, NULL,
+- NULL, "nf_ct_tcp: invalid SYN");
+- return -NF_ACCEPT;
+- }
+ case TCP_CONNTRACK_CLOSE:
+ if (index == TCP_RST_SET
+ && ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)
+@@ -941,6 +936,7 @@ static int tcp_packet(struct nf_conn *conntrack,
+ in_window:
+ /* From now on we have got in-window packets */
+ conntrack->proto.tcp.last_index = index;
++ conntrack->proto.tcp.last_dir = dir;
+
+ DEBUGP("tcp_conntracks: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
+ "syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 1f15821..6ac83c2 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -732,7 +732,7 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
+ * 1: repeat lookup - reference dropped while waiting for socket memory.
+ */
+ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
+- long timeo, struct sock *ssk)
++ long *timeo, struct sock *ssk)
+ {
+ struct netlink_sock *nlk;
+
+@@ -741,7 +741,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
+ if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+ test_bit(0, &nlk->state)) {
+ DECLARE_WAITQUEUE(wait, current);
+- if (!timeo) {
++ if (!*timeo) {
+ if (!ssk || nlk_sk(ssk)->pid == 0)
+ netlink_overrun(sk);
+ sock_put(sk);
+@@ -755,7 +755,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
+ if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+ test_bit(0, &nlk->state)) &&
+ !sock_flag(sk, SOCK_DEAD))
+- timeo = schedule_timeout(timeo);
++ *timeo = schedule_timeout(*timeo);
+
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&nlk->wait, &wait);
+@@ -763,7 +763,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
+
+ if (signal_pending(current)) {
+ kfree_skb(skb);
+- return sock_intr_errno(timeo);
++ return sock_intr_errno(*timeo);
+ }
+ return 1;
+ }
+@@ -827,7 +827,7 @@ retry:
+ kfree_skb(skb);
+ return PTR_ERR(sk);
+ }
+- err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
++ err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk);
+ if (err == 1)
+ goto retry;
+ if (err)
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index f2686ea..1d36265 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -107,7 +107,7 @@ static struct tc_u_common *u32_list;
+
+ static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift)
+ {
+- unsigned h = (key & sel->hmask)>>fshift;
++ unsigned h = ntohl(key & sel->hmask)>>fshift;
+
+ return h;
+ }
+@@ -631,7 +631,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
+ n->handle = handle;
+ {
+ u8 i = 0;
+- u32 mask = s->hmask;
++ u32 mask = ntohl(s->hmask);
+ if (mask) {
+ while (!(mask & 1)) {
+ i++;
+diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
+index f05ad9a..656ccd9 100644
+--- a/net/sched/sch_teql.c
++++ b/net/sched/sch_teql.c
+@@ -263,6 +263,9 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
+ static __inline__ int
+ teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
+ {
++ if (dev->qdisc == &noop_qdisc)
++ return -ENODEV;
++
+ if (dev->hard_header == NULL ||
+ skb->dst == NULL ||
+ skb->dst->neighbour == NULL)
+diff --git a/net/socket.c b/net/socket.c
+index 48bd793..8211578 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1246,11 +1246,14 @@ asmlinkage long sys_socketpair(int family, int type, int protocol,
+ goto out_release_both;
+
+ fd1 = sock_alloc_fd(&newfile1);
+- if (unlikely(fd1 < 0))
++ if (unlikely(fd1 < 0)) {
++ err = fd1;
+ goto out_release_both;
++ }
+
+ fd2 = sock_alloc_fd(&newfile2);
+ if (unlikely(fd2 < 0)) {
++ err = fd2;
+ put_filp(newfile1);
+ put_unused_fd(fd1);
+ goto out_release_both;
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index e3964fc..d5b2f53 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -153,8 +153,9 @@ static hda_nid_t stac925x_dac_nids[1] = {
+ 0x02,
+ };
+
+-static hda_nid_t stac925x_dmic_nids[1] = {
+- 0x15,
++#define STAC925X_NUM_DMICS 1
++static hda_nid_t stac925x_dmic_nids[STAC925X_NUM_DMICS + 1] = {
++ 0x15, 0
+ };
+
+ static hda_nid_t stac922x_adc_nids[2] = {
+@@ -181,8 +182,9 @@ static hda_nid_t stac9205_mux_nids[2] = {
+ 0x19, 0x1a
+ };
+
+-static hda_nid_t stac9205_dmic_nids[2] = {
+- 0x17, 0x18,
++#define STAC9205_NUM_DMICS 2
++static hda_nid_t stac9205_dmic_nids[STAC9205_NUM_DMICS + 1] = {
++ 0x17, 0x18, 0
+ };
+
+ static hda_nid_t stac9200_pin_nids[8] = {
+@@ -1972,7 +1974,7 @@ static int patch_stac925x(struct hda_codec *codec)
+ case 0x83847633: /* STAC9202D */
+ case 0x83847636: /* STAC9251 */
+ case 0x83847637: /* STAC9251D */
+- spec->num_dmics = 1;
++ spec->num_dmics = STAC925X_NUM_DMICS;
+ spec->dmic_nids = stac925x_dmic_nids;
+ break;
+ default:
+@@ -2202,7 +2204,7 @@ static int patch_stac9205(struct hda_codec *codec)
+ spec->mux_nids = stac9205_mux_nids;
+ spec->num_muxes = ARRAY_SIZE(stac9205_mux_nids);
+ spec->dmic_nids = stac9205_dmic_nids;
+- spec->num_dmics = ARRAY_SIZE(stac9205_dmic_nids);
++ spec->num_dmics = STAC9205_NUM_DMICS;
+ spec->dmux_nid = 0x1d;
+
+ spec->init = stac9205_core_init;
+diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
+index 3b3ef65..75dcb9a 100644
+--- a/sound/pci/rme9652/hdsp.c
++++ b/sound/pci/rme9652/hdsp.c
+@@ -3108,6 +3108,9 @@ static int hdsp_dds_offset(struct hdsp *hdsp)
+ unsigned int dds_value = hdsp->dds_value;
+ int system_sample_rate = hdsp->system_sample_rate;
+
++ if (!dds_value)
++ return 0;
++
+ n = DDS_NUMERATOR;
+ /*
+ * dds_value = n / rate
diff --git a/trunk/2.6.22/01014_linux-2.6.22.15.patch b/trunk/2.6.22/01014_linux-2.6.22.15.patch
new file mode 100644
index 0000000..320c021
--- /dev/null
+++ b/trunk/2.6.22/01014_linux-2.6.22.15.patch
@@ -0,0 +1,1096 @@
+Subject: Linux 2.6.22.15
+From: Greg Kroah-Hartman <gregkh@suse.de>
+
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index f137a43..ec286a2 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -98,6 +98,9 @@ static void crypto_remove_spawn(struct crypto_spawn *spawn,
+ return;
+
+ inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
++ if (hlist_unhashed(&inst->list))
++ return;
++
+ if (!tmpl || !crypto_tmpl_get(tmpl))
+ return;
+
+@@ -333,9 +336,6 @@ int crypto_register_instance(struct crypto_template *tmpl,
+ LIST_HEAD(list);
+ int err = -EINVAL;
+
+- if (inst->alg.cra_destroy)
+- goto err;
+-
+ err = crypto_check_alg(&inst->alg);
+ if (err)
+ goto err;
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 3400b3e..e722f83 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1241,7 +1241,7 @@ static void ahci_host_intr(struct ata_port *ap)
+ struct ata_eh_info *ehi = &ap->eh_info;
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 status, qc_active;
+- int rc, known_irq = 0;
++ int rc;
+
+ status = readl(port_mmio + PORT_IRQ_STAT);
+ writel(status, port_mmio + PORT_IRQ_STAT);
+@@ -1257,74 +1257,11 @@ static void ahci_host_intr(struct ata_port *ap)
+ qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+
+ rc = ata_qc_complete_multiple(ap, qc_active, NULL);
+- if (rc > 0)
+- return;
+ if (rc < 0) {
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_SOFTRESET;
+ ata_port_freeze(ap);
+- return;
+- }
+-
+- /* hmmm... a spurious interupt */
+-
+- /* if !NCQ, ignore. No modern ATA device has broken HSM
+- * implementation for non-NCQ commands.
+- */
+- if (!ap->sactive)
+- return;
+-
+- if (status & PORT_IRQ_D2H_REG_FIS) {
+- if (!pp->ncq_saw_d2h)
+- ata_port_printk(ap, KERN_INFO,
+- "D2H reg with I during NCQ, "
+- "this message won't be printed again\n");
+- pp->ncq_saw_d2h = 1;
+- known_irq = 1;
+- }
+-
+- if (status & PORT_IRQ_DMAS_FIS) {
+- if (!pp->ncq_saw_dmas)
+- ata_port_printk(ap, KERN_INFO,
+- "DMAS FIS during NCQ, "
+- "this message won't be printed again\n");
+- pp->ncq_saw_dmas = 1;
+- known_irq = 1;
+- }
+-
+- if (status & PORT_IRQ_SDB_FIS) {
+- const __le32 *f = pp->rx_fis + RX_FIS_SDB;
+-
+- if (le32_to_cpu(f[1])) {
+- /* SDB FIS containing spurious completions
+- * might be dangerous, whine and fail commands
+- * with HSM violation. EH will turn off NCQ
+- * after several such failures.
+- */
+- ata_ehi_push_desc(ehi,
+- "spurious completions during NCQ "
+- "issue=0x%x SAct=0x%x FIS=%08x:%08x",
+- readl(port_mmio + PORT_CMD_ISSUE),
+- readl(port_mmio + PORT_SCR_ACT),
+- le32_to_cpu(f[0]), le32_to_cpu(f[1]));
+- ehi->err_mask |= AC_ERR_HSM;
+- ehi->action |= ATA_EH_SOFTRESET;
+- ata_port_freeze(ap);
+- } else {
+- if (!pp->ncq_saw_sdb)
+- ata_port_printk(ap, KERN_INFO,
+- "spurious SDB FIS %08x:%08x during NCQ, "
+- "this message won't be printed again\n",
+- le32_to_cpu(f[0]), le32_to_cpu(f[1]));
+- pp->ncq_saw_sdb = 1;
+- }
+- known_irq = 1;
+ }
+-
+- if (!known_irq)
+- ata_port_printk(ap, KERN_INFO, "spurious interrupt "
+- "(irq_stat 0x%x active_tag 0x%x sactive 0x%x)\n",
+- status, ap->active_tag, ap->sactive);
+ }
+
+ static void ahci_irq_clear(struct ata_port *ap)
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index e6e403f..22b6368 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -3785,6 +3785,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ /* Devices where NCQ should be avoided */
+ /* NCQ is slow */
+ { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
++ { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
+ /* http://thread.gmane.org/gmane.linux.ide/14907 */
+ { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
+ /* NCQ is broken */
+@@ -3803,15 +3804,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
+ { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
+ { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
+- /* Drives which do spurious command completion */
+- { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
+- { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
+- { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
+- { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
+- { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
+- { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
+- { "ST3160812AS", "3.AD", ATA_HORKAGE_NONCQ, },
+- { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
+
+ /* End Marker */
+ { }
+diff --git a/drivers/atm/he.c b/drivers/atm/he.c
+index d33aba6..3b64a99 100644
+--- a/drivers/atm/he.c
++++ b/drivers/atm/he.c
+@@ -394,6 +394,11 @@ he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
+ he_dev->atm_dev->dev_data = he_dev;
+ atm_dev->dev_data = he_dev;
+ he_dev->number = atm_dev->number;
++#ifdef USE_TASKLET
++ tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
++#endif
++ spin_lock_init(&he_dev->global_lock);
++
+ if (he_start(atm_dev)) {
+ he_stop(he_dev);
+ err = -ENODEV;
+@@ -1173,11 +1178,6 @@ he_start(struct atm_dev *dev)
+ if ((err = he_init_irq(he_dev)) != 0)
+ return err;
+
+-#ifdef USE_TASKLET
+- tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
+-#endif
+- spin_lock_init(&he_dev->global_lock);
+-
+ /* 4.11 enable pci bus controller state machines */
+ host_cntl |= (OUTFF_ENB | CMDFF_ENB |
+ QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
+diff --git a/drivers/block/rd.c b/drivers/block/rd.c
+index a1512da..e30bd9e 100644
+--- a/drivers/block/rd.c
++++ b/drivers/block/rd.c
+@@ -189,6 +189,18 @@ static int ramdisk_set_page_dirty(struct page *page)
+ return 0;
+ }
+
++/*
++ * releasepage is called by pagevec_strip/try_to_release_page if
++ * buffers_heads_over_limit is true. Without a releasepage function
++ * try_to_free_buffers is called instead. That can unset the dirty
++ * bit of our ram disk pages, which will be eventually freed, even
++ * if the page is still in use.
++ */
++static int ramdisk_releasepage(struct page *page, gfp_t dummy)
++{
++ return 0;
++}
++
+ static const struct address_space_operations ramdisk_aops = {
+ .readpage = ramdisk_readpage,
+ .prepare_write = ramdisk_prepare_write,
+@@ -196,6 +208,7 @@ static const struct address_space_operations ramdisk_aops = {
+ .writepage = ramdisk_writepage,
+ .set_page_dirty = ramdisk_set_page_dirty,
+ .writepages = ramdisk_writepages,
++ .releasepage = ramdisk_releasepage,
+ };
+
+ static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec, sector_t sector,
+diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
+index c97330b..eb9a247 100644
+--- a/drivers/isdn/i4l/isdn_common.c
++++ b/drivers/isdn/i4l/isdn_common.c
+@@ -1514,6 +1514,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
+ if (copy_from_user(&iocts, argp,
+ sizeof(isdn_ioctl_struct)))
+ return -EFAULT;
++ iocts.drvid[sizeof(iocts.drvid)-1] = 0;
+ if (strlen(iocts.drvid)) {
+ if ((p = strchr(iocts.drvid, ',')))
+ *p = 0;
+@@ -1598,6 +1599,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
+ if (copy_from_user(&iocts, argp,
+ sizeof(isdn_ioctl_struct)))
+ return -EFAULT;
++ iocts.drvid[sizeof(iocts.drvid)-1] = 0;
+ if (strlen(iocts.drvid)) {
+ drvidx = -1;
+ for (i = 0; i < ISDN_MAX_DRIVERS; i++)
+@@ -1642,7 +1644,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
+ } else {
+ p = (char __user *) iocts.arg;
+ for (i = 0; i < 10; i++) {
+- sprintf(bname, "%s%s",
++ snprintf(bname, sizeof(bname), "%s%s",
+ strlen(dev->drv[drvidx]->msn2eaz[i]) ?
+ dev->drv[drvidx]->msn2eaz[i] : "_",
+ (i < 9) ? "," : "\0");
+@@ -1672,6 +1674,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
+ char *p;
+ if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct)))
+ return -EFAULT;
++ iocts.drvid[sizeof(iocts.drvid)-1] = 0;
+ if (strlen(iocts.drvid)) {
+ if ((p = strchr(iocts.drvid, ',')))
+ *p = 0;
+diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
+index aa83277..75e1423 100644
+--- a/drivers/isdn/i4l/isdn_net.c
++++ b/drivers/isdn/i4l/isdn_net.c
+@@ -2126,7 +2126,7 @@ isdn_net_find_icall(int di, int ch, int idx, setup_parm *setup)
+ u_long flags;
+ isdn_net_dev *p;
+ isdn_net_phone *n;
+- char nr[32];
++ char nr[ISDN_MSNLEN];
+ char *my_eaz;
+
+ /* Search name in netdev-chain */
+@@ -2135,7 +2135,7 @@ isdn_net_find_icall(int di, int ch, int idx, setup_parm *setup)
+ nr[1] = '\0';
+ printk(KERN_INFO "isdn_net: Incoming call without OAD, assuming '0'\n");
+ } else
+- strcpy(nr, setup->phone);
++ strlcpy(nr, setup->phone, ISDN_MSNLEN);
+ si1 = (int) setup->si1;
+ si2 = (int) setup->si2;
+ if (!setup->eazmsn[0]) {
+@@ -2802,7 +2802,7 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
+ chidx = -1;
+ }
+ }
+- strcpy(lp->msn, cfg->eaz);
++ strlcpy(lp->msn, cfg->eaz, sizeof(lp->msn));
+ lp->pre_device = drvidx;
+ lp->pre_channel = chidx;
+ lp->onhtime = cfg->onhtime;
+@@ -2951,7 +2951,7 @@ isdn_net_addphone(isdn_net_ioctl_phone * phone)
+ if (p) {
+ if (!(n = kmalloc(sizeof(isdn_net_phone), GFP_KERNEL)))
+ return -ENOMEM;
+- strcpy(n->num, phone->phone);
++ strlcpy(n->num, phone->phone, sizeof(n->num));
+ n->next = p->local->phone[phone->outgoing & 1];
+ p->local->phone[phone->outgoing & 1] = n;
+ return 0;
+diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
+index 6862c11..1b7a5a8 100644
+--- a/drivers/net/atl1/atl1_main.c
++++ b/drivers/net/atl1/atl1_main.c
+@@ -2097,21 +2097,26 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
+ struct net_device *netdev;
+ struct atl1_adapter *adapter;
+ static int cards_found = 0;
+- bool pci_using_64 = true;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+- err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
++ /*
++ * The atl1 chip can DMA to 64-bit addresses, but it uses a single
++ * shared register for the high 32 bits, so only a single, aligned,
++ * 4 GB physical address range can be used at a time.
++ *
++ * Supporting 64-bit DMA on this hardware is more trouble than it's
++ * worth. It is far easier to limit to 32-bit DMA than update
++ * various kernel subsystems to support the mechanics required by a
++ * fixed-high-32-bit system.
++ */
++ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+- err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+- if (err) {
+- dev_err(&pdev->dev, "no usable DMA configuration\n");
+- goto err_dma;
+- }
+- pci_using_64 = false;
++ dev_err(&pdev->dev, "no usable DMA configuration\n");
++ goto err_dma;
+ }
+ /* Mark all PCI regions associated with PCI device
+ * pdev as being reserved by owner atl1_driver_name
+@@ -2176,7 +2181,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
+
+ netdev->ethtool_ops = &atl1_ethtool_ops;
+ adapter->bd_number = cards_found;
+- adapter->pci_using_64 = pci_using_64;
+
+ /* setup the private structure */
+ err = atl1_sw_init(adapter);
+@@ -2193,9 +2197,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
+ */
+ /* netdev->features |= NETIF_F_TSO; */
+
+- if (pci_using_64)
+- netdev->features |= NETIF_F_HIGHDMA;
+-
+ netdev->features |= NETIF_F_LLTX;
+
+ /*
+diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
+index 06f6ec3..36b3a66 100644
+--- a/drivers/net/forcedeth.c
++++ b/drivers/net/forcedeth.c
+@@ -5283,19 +5283,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
+ if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
+ np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
+ dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
+- for (i = 0; i < 5000; i++) {
+- msleep(1);
+- if (nv_mgmt_acquire_sema(dev)) {
+- /* management unit setup the phy already? */
+- if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
+- NVREG_XMITCTL_SYNC_PHY_INIT) {
+- /* phy is inited by mgmt unit */
+- phyinitialized = 1;
+- dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
+- } else {
+- /* we need to init the phy */
+- }
+- break;
++ if (nv_mgmt_acquire_sema(dev)) {
++ /* management unit setup the phy already? */
++ if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
++ NVREG_XMITCTL_SYNC_PHY_INIT) {
++ /* phy is inited by mgmt unit */
++ phyinitialized = 1;
++ dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
++ } else {
++ /* we need to init the phy */
+ }
+ }
+ }
+@@ -5553,6 +5549,22 @@ static struct pci_device_id pci_tbl[] = {
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
++ { /* MCP79 Ethernet Controller */
++ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
++ },
++ { /* MCP79 Ethernet Controller */
++ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
++ },
++ { /* MCP79 Ethernet Controller */
++ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
++ },
++ { /* MCP79 Ethernet Controller */
++ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
++ },
+ {0,},
+ };
+
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index 5caa8b3..ba78f8e 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -571,12 +571,15 @@ static int ehci_run (struct usb_hcd *hcd)
+ * from the companions to the EHCI controller. If any of the
+ * companions are in the middle of a port reset at the time, it
+ * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
+- * guarantees that no resets are in progress.
++ * guarantees that no resets are in progress. After we set CF,
++ * a short delay lets the hardware catch up; new resets shouldn't
++ * be started before the port switching actions could complete.
+ */
+ down_write(&ehci_cf_port_reset_rwsem);
+ hcd->state = HC_STATE_RUNNING;
+ ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+ ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
++ msleep(5);
+ up_write(&ehci_cf_port_reset_rwsem);
+
+ temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
+diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
+index 51bd80d..3acfd1a 100644
+--- a/drivers/usb/image/microtek.c
++++ b/drivers/usb/image/microtek.c
+@@ -823,7 +823,7 @@ static int mts_usb_probe(struct usb_interface *intf,
+ goto out_kfree2;
+
+ new_desc->host->hostdata[0] = (unsigned long)new_desc;
+- if (scsi_add_host(new_desc->host, NULL)) {
++ if (scsi_add_host(new_desc->host, &dev->dev)) {
+ err_retval = -EIO;
+ goto out_host_put;
+ }
+diff --git a/drivers/video/fb_ddc.c b/drivers/video/fb_ddc.c
+index f836137..a0df632 100644
+--- a/drivers/video/fb_ddc.c
++++ b/drivers/video/fb_ddc.c
+@@ -56,13 +56,12 @@ unsigned char *fb_ddc_read(struct i2c_adapter *adapter)
+ int i, j;
+
+ algo_data->setscl(algo_data->data, 1);
+- algo_data->setscl(algo_data->data, 0);
+
+ for (i = 0; i < 3; i++) {
+ /* For some old monitors we need the
+ * following process to initialize/stop DDC
+ */
+- algo_data->setsda(algo_data->data, 0);
++ algo_data->setsda(algo_data->data, 1);
+ msleep(13);
+
+ algo_data->setscl(algo_data->data, 1);
+@@ -97,14 +96,15 @@ unsigned char *fb_ddc_read(struct i2c_adapter *adapter)
+ algo_data->setsda(algo_data->data, 1);
+ msleep(15);
+ algo_data->setscl(algo_data->data, 0);
++ algo_data->setsda(algo_data->data, 0);
+ if (edid)
+ break;
+ }
+ /* Release the DDC lines when done or the Apple Cinema HD display
+ * will switch off
+ */
+- algo_data->setsda(algo_data->data, 0);
+- algo_data->setscl(algo_data->data, 0);
++ algo_data->setsda(algo_data->data, 1);
++ algo_data->setscl(algo_data->data, 1);
+
+ return edid;
+ }
+diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
+index 6ca2d24..f83d235 100644
+--- a/fs/nfsd/nfsfh.c
++++ b/fs/nfsd/nfsfh.c
+@@ -565,13 +565,23 @@ enum fsid_source fsid_source(struct svc_fh *fhp)
+ case FSID_DEV:
+ case FSID_ENCODE_DEV:
+ case FSID_MAJOR_MINOR:
+- return FSIDSOURCE_DEV;
++ if (fhp->fh_export->ex_dentry->d_inode->i_sb->s_type->fs_flags
++ & FS_REQUIRES_DEV)
++ return FSIDSOURCE_DEV;
++ break;
+ case FSID_NUM:
+- return FSIDSOURCE_FSID;
+- default:
+ if (fhp->fh_export->ex_flags & NFSEXP_FSID)
+ return FSIDSOURCE_FSID;
+- else
+- return FSIDSOURCE_UUID;
++ break;
++ default:
++ break;
+ }
++ /* either a UUID type filehandle, or the filehandle doesn't
++ * match the export.
++ */
++ if (fhp->fh_export->ex_flags & NFSEXP_FSID)
++ return FSIDSOURCE_FSID;
++ if (fhp->fh_export->ex_uuid)
++ return FSIDSOURCE_UUID;
++ return FSIDSOURCE_DEV;
+ }
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index c1ffa1b..887c2ce 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -1239,6 +1239,10 @@
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759
++#define PCI_DEVICE_ID_NVIDIA_NVENET_36 0x0AB0
++#define PCI_DEVICE_ID_NVIDIA_NVENET_37 0x0AB1
++#define PCI_DEVICE_ID_NVIDIA_NVENET_38 0x0AB2
++#define PCI_DEVICE_ID_NVIDIA_NVENET_39 0x0AB3
+
+ #define PCI_VENDOR_ID_IMS 0x10e0
+ #define PCI_DEVICE_ID_IMS_TT128 0x9128
+diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
+index 1c4eb41..9c4ad75 100644
+--- a/include/linux/thread_info.h
++++ b/include/linux/thread_info.h
+@@ -7,12 +7,25 @@
+ #ifndef _LINUX_THREAD_INFO_H
+ #define _LINUX_THREAD_INFO_H
+
++#include <linux/types.h>
++
+ /*
+- * System call restart block.
++ * System call restart block.
+ */
+ struct restart_block {
+ long (*fn)(struct restart_block *);
+- unsigned long arg0, arg1, arg2, arg3;
++ union {
++ struct {
++ unsigned long arg0, arg1, arg2, arg3;
++ };
++ /* For futex_wait */
++ struct {
++ u32 *uaddr;
++ u32 val;
++ u32 flags;
++ u64 time;
++ } futex;
++ };
+ };
+
+ extern long do_no_restart_syscall(struct restart_block *parm);
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index a99b4f6..c05e018 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1258,6 +1258,9 @@ static inline void tcp_insert_write_queue_before(struct sk_buff *new,
+ struct sock *sk)
+ {
+ __skb_insert(new, skb->prev, skb, &sk->sk_write_queue);
++
++ if (sk->sk_send_head == skb)
++ sk->sk_send_head = new;
+ }
+
+ static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
+diff --git a/kernel/exit.c b/kernel/exit.c
+index e3adc46..369dae2 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -1339,7 +1339,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
+ if (unlikely(!exit_code) || unlikely(p->exit_state))
+ goto bail_ref;
+ return wait_noreap_copyout(p, pid, uid,
+- why, (exit_code << 8) | 0x7f,
++ why, exit_code,
+ infop, ru);
+ }
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 9b57f7e..592cf07 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1129,9 +1129,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+
+ /*
+ * In case we must use restart_block to restart a futex_wait,
+- * we encode in the 'arg3' shared capability
++ * we encode in the 'flags' shared capability
+ */
+-#define ARG3_SHARED 1
++#define FLAGS_SHARED 1
+
+ static long futex_wait_restart(struct restart_block *restart);
+ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
+@@ -1272,12 +1272,13 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
+ struct restart_block *restart;
+ restart = &current_thread_info()->restart_block;
+ restart->fn = futex_wait_restart;
+- restart->arg0 = (unsigned long)uaddr;
+- restart->arg1 = (unsigned long)val;
+- restart->arg2 = (unsigned long)abs_time;
+- restart->arg3 = 0;
++ restart->futex.uaddr = (u32 *)uaddr;
++ restart->futex.val = val;
++ restart->futex.time = abs_time->tv64;
++ restart->futex.flags = 0;
++
+ if (fshared)
+- restart->arg3 |= ARG3_SHARED;
++ restart->futex.flags |= FLAGS_SHARED;
+ return -ERESTART_RESTARTBLOCK;
+ }
+
+@@ -1293,15 +1294,15 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
+
+ static long futex_wait_restart(struct restart_block *restart)
+ {
+- u32 __user *uaddr = (u32 __user *)restart->arg0;
+- u32 val = (u32)restart->arg1;
+- ktime_t *abs_time = (ktime_t *)restart->arg2;
++ u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
+ struct rw_semaphore *fshared = NULL;
++ ktime_t t;
+
++ t.tv64 = restart->futex.time;
+ restart->fn = do_no_restart_syscall;
+- if (restart->arg3 & ARG3_SHARED)
++ if (restart->futex.flags & FLAGS_SHARED)
+ fshared = &current->mm->mmap_sem;
+- return (long)futex_wait(uaddr, fshared, val, abs_time);
++ return (long)futex_wait(uaddr, fshared, restart->futex.val, &t);
+ }
+
+
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index 23c03f4..355e867 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -825,6 +825,14 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
+ #ifdef CONFIG_TIME_LOW_RES
+ tim = ktime_add(tim, base->resolution);
+ #endif
++ /*
++ * Careful here: User space might have asked for a
++ * very long sleep, so the add above might result in a
++ * negative number, which enqueues the timer in front
++ * of the queue.
++ */
++ if (tim.tv64 < 0)
++ tim.tv64 = KTIME_MAX;
+ }
+ timer->expires = tim;
+
+diff --git a/kernel/sys.c b/kernel/sys.c
+index afd9b93..28e8364 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -31,7 +31,6 @@
+ #include <linux/cn_proc.h>
+ #include <linux/getcpu.h>
+ #include <linux/task_io_accounting_ops.h>
+-#include <linux/cpu.h>
+
+ #include <linux/compat.h>
+ #include <linux/syscalls.h>
+@@ -866,7 +865,6 @@ EXPORT_SYMBOL_GPL(kernel_halt);
+ void kernel_power_off(void)
+ {
+ kernel_shutdown_prepare(SYSTEM_POWER_OFF);
+- disable_nonboot_cpus();
+ printk(KERN_EMERG "Power down.\n");
+ machine_power_off();
+ }
+diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
+index 60f4680..1f3a52e 100644
+--- a/lib/libcrc32c.c
++++ b/lib/libcrc32c.c
+@@ -33,7 +33,6 @@
+ #include <linux/crc32c.h>
+ #include <linux/compiler.h>
+ #include <linux/module.h>
+-#include <asm/byteorder.h>
+
+ MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
+ MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
+@@ -161,15 +160,13 @@ static const u32 crc32c_table[256] = {
+ */
+
+ u32 __attribute_pure__
+-crc32c_le(u32 seed, unsigned char const *data, size_t length)
++crc32c_le(u32 crc, unsigned char const *data, size_t length)
+ {
+- u32 crc = __cpu_to_le32(seed);
+-
+ while (length--)
+ crc =
+ crc32c_table[(crc ^ *data++) & 0xFFL] ^ (crc >> 8);
+
+- return __le32_to_cpu(crc);
++ return crc;
+ }
+
+ #endif /* CRC_LE_BITS == 8 */
+diff --git a/lib/textsearch.c b/lib/textsearch.c
+index 88c98a2..be8bda3 100644
+--- a/lib/textsearch.c
++++ b/lib/textsearch.c
+@@ -7,7 +7,7 @@
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Thomas Graf <tgraf@suug.ch>
+- * Pablo Neira Ayuso <pablo@eurodev.net>
++ * Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * ==========================================================================
+ *
+@@ -250,7 +250,8 @@ unsigned int textsearch_find_continuous(struct ts_config *conf,
+ * the various search algorithms.
+ *
+ * Returns a new textsearch configuration according to the specified
+- * parameters or a ERR_PTR().
++ * parameters or a ERR_PTR(). If a zero length pattern is passed, this
++ * function returns EINVAL.
+ */
+ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
+ unsigned int len, gfp_t gfp_mask, int flags)
+@@ -259,6 +260,9 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
+ struct ts_config *conf;
+ struct ts_ops *ops;
+
++ if (len == 0)
++ return ERR_PTR(-EINVAL);
++
+ ops = lookup_ts_algo(algo);
+ #ifdef CONFIG_KMOD
+ /*
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 2320b60..d1c65fb 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1066,7 +1066,7 @@ shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
+ pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+ pvma.vm_pgoff = idx;
+ pvma.vm_end = PAGE_SIZE;
+- page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
++ page = alloc_page_vma(gfp, &pvma, 0);
+ mpol_free(pvma.vm_policy);
+ return page;
+ }
+@@ -1086,7 +1086,7 @@ shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
+ static inline struct page *
+ shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
+ {
+- return alloc_page(gfp | __GFP_ZERO);
++ return alloc_page(gfp);
+ }
+ #endif
+
+@@ -1295,6 +1295,7 @@ repeat:
+
+ info->alloced++;
+ spin_unlock(&info->lock);
++ clear_highpage(filepage);
+ flush_dcache_page(filepage);
+ SetPageUptodate(filepage);
+ }
+diff --git a/net/bridge/br.c b/net/bridge/br.c
+index 848b8fa..94ae4d2 100644
+--- a/net/bridge/br.c
++++ b/net/bridge/br.c
+@@ -39,7 +39,7 @@ static int __init br_init(void)
+
+ err = br_fdb_init();
+ if (err)
+- goto err_out1;
++ goto err_out;
+
+ err = br_netfilter_init();
+ if (err)
+@@ -65,6 +65,8 @@ err_out3:
+ err_out2:
+ br_netfilter_fini();
+ err_out1:
++ br_fdb_fini();
++err_out:
+ llc_sap_put(br_stp_sap);
+ return err;
+ }
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index 420bbb9..fb2c7cc 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -127,6 +127,7 @@ static inline int is_link_local(const unsigned char *dest)
+ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
+ {
+ const unsigned char *dest = eth_hdr(skb)->h_dest;
++ int (*rhook)(struct sk_buff **pskb);
+
+ if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
+ goto drop;
+@@ -148,9 +149,9 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
+
+ switch (p->state) {
+ case BR_STATE_FORWARDING:
+-
+- if (br_should_route_hook) {
+- if (br_should_route_hook(&skb))
++ rhook = rcu_dereference(br_should_route_hook);
++ if (rhook != NULL) {
++ if (rhook(&skb))
+ return skb;
+ dest = eth_hdr(skb)->h_dest;
+ }
+diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
+index d37ce04..bc17cf5 100644
+--- a/net/bridge/netfilter/ebtable_broute.c
++++ b/net/bridge/netfilter/ebtable_broute.c
+@@ -70,13 +70,13 @@ static int __init ebtable_broute_init(void)
+ if (ret < 0)
+ return ret;
+ /* see br_input.c */
+- br_should_route_hook = ebt_broute;
++ rcu_assign_pointer(br_should_route_hook, ebt_broute);
+ return ret;
+ }
+
+ static void __exit ebtable_broute_fini(void)
+ {
+- br_should_route_hook = NULL;
++ rcu_assign_pointer(br_should_route_hook, NULL);
+ synchronize_net();
+ ebt_unregister_table(&broute_table);
+ }
+diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
+index d46e453..b51ee15 100644
+--- a/net/decnet/dn_dev.c
++++ b/net/decnet/dn_dev.c
+@@ -651,16 +651,18 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+ struct dn_dev *dn_db;
+ struct ifaddrmsg *ifm;
+ struct dn_ifaddr *ifa, **ifap;
+- int err = -EADDRNOTAVAIL;
++ int err;
+
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
+ if (err < 0)
+ goto errout;
+
++ err = -ENODEV;
+ ifm = nlmsg_data(nlh);
+ if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL)
+ goto errout;
+
++ err = -EADDRNOTAVAIL;
+ for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) {
+ if (tb[IFA_LOCAL] &&
+ nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index e00767e..84097ee 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -110,12 +110,8 @@
+ #include <net/tcp.h>
+ #include <net/sock.h>
+ #include <net/arp.h>
+-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+ #include <net/ax25.h>
+-#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
+ #include <net/netrom.h>
+-#endif
+-#endif
+ #if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
+ #include <net/atmclip.h>
+ struct neigh_table *clip_tbl_hook;
+@@ -729,20 +725,10 @@ static int arp_process(struct sk_buff *skb)
+ htons(dev_type) != arp->ar_hrd)
+ goto out;
+ break;
+-#ifdef CONFIG_NET_ETHERNET
+ case ARPHRD_ETHER:
+-#endif
+-#ifdef CONFIG_TR
+ case ARPHRD_IEEE802_TR:
+-#endif
+-#ifdef CONFIG_FDDI
+ case ARPHRD_FDDI:
+-#endif
+-#ifdef CONFIG_NET_FC
+ case ARPHRD_IEEE802:
+-#endif
+-#if defined(CONFIG_NET_ETHERNET) || defined(CONFIG_TR) || \
+- defined(CONFIG_FDDI) || defined(CONFIG_NET_FC)
+ /*
+ * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802
+ * devices, according to RFC 2625) devices will accept ARP
+@@ -757,21 +743,16 @@ static int arp_process(struct sk_buff *skb)
+ arp->ar_pro != htons(ETH_P_IP))
+ goto out;
+ break;
+-#endif
+-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+ case ARPHRD_AX25:
+ if (arp->ar_pro != htons(AX25_P_IP) ||
+ arp->ar_hrd != htons(ARPHRD_AX25))
+ goto out;
+ break;
+-#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
+ case ARPHRD_NETROM:
+ if (arp->ar_pro != htons(AX25_P_IP) ||
+ arp->ar_hrd != htons(ARPHRD_NETROM))
+ goto out;
+ break;
+-#endif
+-#endif
+ }
+
+ /* Understand only these message types */
+diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
+index ea02f00..3b01a5f 100644
+--- a/net/ipv4/netfilter/nf_nat_core.c
++++ b/net/ipv4/netfilter/nf_nat_core.c
+@@ -633,7 +633,7 @@ static int clean_nat(struct nf_conn *i, void *data)
+
+ if (!nat)
+ return 0;
+- memset(nat, 0, sizeof(nat));
++ memset(nat, 0, sizeof(*nat));
+ i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
+ return 0;
+ }
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 29ca63e..4aa2551 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -3150,18 +3150,14 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
+ offset /= sizeof(u32);
+
+ if (length > 0) {
+- u32 *src = ((u32 *) IP_RT_ACCT_CPU(0)) + offset;
+ u32 *dst = (u32 *) buffer;
+
+- /* Copy first cpu. */
+ *start = buffer;
+- memcpy(dst, src, length);
++ memset(dst, 0, length);
+
+- /* Add the other cpus in, one int at a time */
+ for_each_possible_cpu(i) {
+ unsigned int j;
+-
+- src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
++ u32 *src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
+
+ for (j = 0; j < length/4; j++)
+ dst[j] += src[j];
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 53ef0f4..6ea1306 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -121,7 +121,7 @@ static int sysctl_tcp_congestion_control(ctl_table *table, int __user *name,
+
+ tcp_get_default_congestion_control(val);
+ ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen);
+- if (ret == 0 && newval && newlen)
++ if (ret == 1 && newval && newlen)
+ ret = tcp_set_default_congestion_control(val);
+ return ret;
+ }
+diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
+index b2b2256..31dd8c5 100644
+--- a/net/ipv4/tcp_illinois.c
++++ b/net/ipv4/tcp_illinois.c
+@@ -300,7 +300,7 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
+ struct illinois *ca = inet_csk_ca(sk);
+
+ /* Multiplicative decrease */
+- return max((tp->snd_cwnd * ca->beta) >> BETA_SHIFT, 2U);
++ return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
+ }
+
+
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 53232dd..eee57e6 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1279,7 +1279,6 @@ static int tcp_mtu_probe(struct sock *sk)
+
+ skb = tcp_send_head(sk);
+ tcp_insert_write_queue_before(nskb, skb, sk);
+- tcp_advance_send_head(sk, skb);
+
+ TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
+ TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index e26b473..6d614c0 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2285,6 +2285,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
+ break;
+ }
+
++ if (!idev && dev->mtu >= IPV6_MIN_MTU)
++ idev = ipv6_add_dev(dev);
++
+ if (idev)
+ idev->if_flags |= IF_READY;
+ } else {
+@@ -2349,12 +2352,18 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
+ break;
+
+ case NETDEV_CHANGEMTU:
+- if ( idev && dev->mtu >= IPV6_MIN_MTU) {
++ if (idev && dev->mtu >= IPV6_MIN_MTU) {
+ rt6_mtu_change(dev, dev->mtu);
+ idev->cnf.mtu6 = dev->mtu;
+ break;
+ }
+
++ if (!idev && dev->mtu >= IPV6_MIN_MTU) {
++ idev = ipv6_add_dev(dev);
++ if (idev)
++ break;
++ }
++
+ /* MTU falled under IPV6_MIN_MTU. Stop IPv6 on this interface. */
+
+ case NETDEV_DOWN:
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 0f8304b..ca0db0f 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1543,7 +1543,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr,
+
+ out_hdr = (struct sadb_msg *) out_skb->data;
+ out_hdr->sadb_msg_version = hdr->sadb_msg_version;
+- out_hdr->sadb_msg_type = SADB_DUMP;
++ out_hdr->sadb_msg_type = SADB_GET;
+ out_hdr->sadb_msg_satype = pfkey_proto2satype(proto);
+ out_hdr->sadb_msg_errno = 0;
+ out_hdr->sadb_msg_reserved = 0;
+diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
+index 15fe8f6..fe7b3d8 100644
+--- a/net/netfilter/xt_TCPMSS.c
++++ b/net/netfilter/xt_TCPMSS.c
+@@ -178,10 +178,8 @@ xt_tcpmss_target6(struct sk_buff **pskb,
+
+ nexthdr = ipv6h->nexthdr;
+ tcphoff = ipv6_skip_exthdr(*pskb, sizeof(*ipv6h), &nexthdr);
+- if (tcphoff < 0) {
+- WARN_ON(1);
++ if (tcphoff < 0)
+ return NF_DROP;
+- }
+ ret = tcpmss_mangle_packet(pskb, targinfo, tcphoff,
+ sizeof(*ipv6h) + sizeof(struct tcphdr));
+ if (ret < 0)
+diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig
+index e662f1d..0d3103c 100644
+--- a/net/rxrpc/Kconfig
++++ b/net/rxrpc/Kconfig
+@@ -5,6 +5,7 @@
+ config AF_RXRPC
+ tristate "RxRPC session sockets"
+ depends on INET && EXPERIMENTAL
++ select CRYPTO
+ select KEYS
+ help
+ Say Y or M here to include support for RxRPC session sockets (just
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index d70fa30..ae80150 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1608,8 +1608,15 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
+ mutex_lock(&u->readlock);
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+- if (!skb)
++ if (!skb) {
++ unix_state_lock(sk);
++ /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
++ if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
++ (sk->sk_shutdown & RCV_SHUTDOWN))
++ err = 0;
++ unix_state_unlock(sk);
+ goto out_unlock;
++ }
+
+ wake_up_interruptible(&u->peer_wait);
+
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index dfacb9c..7775488 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -371,7 +371,7 @@ int __xfrm_state_delete(struct xfrm_state *x)
+ * The xfrm_state_alloc call gives a reference, and that
+ * is what we are dropping here.
+ */
+- __xfrm_state_put(x);
++ xfrm_state_put(x);
+ err = 0;
+ }
+
diff --git a/trunk/2.6.22/01015_linux-2.6.22.16.patch b/trunk/2.6.22/01015_linux-2.6.22.16.patch
new file mode 100644
index 0000000..34ae110
--- /dev/null
+++ b/trunk/2.6.22/01015_linux-2.6.22.16.patch
@@ -0,0 +1,27 @@
+Subject: Linux 2.6.22.16
+From: Greg Kroah-Hartman <gregkh@suse.de>
+
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+diff --git a/fs/namei.c b/fs/namei.c
+index 5e2d98d..8e209ce 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1543,7 +1543,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
+ if (S_ISLNK(inode->i_mode))
+ return -ELOOP;
+
+- if (S_ISDIR(inode->i_mode) && (flag & FMODE_WRITE))
++ if (S_ISDIR(inode->i_mode) && (acc_mode & MAY_WRITE))
+ return -EISDIR;
+
+ error = vfs_permission(nd, acc_mode);
+@@ -1562,7 +1562,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
+ return -EACCES;
+
+ flag &= ~O_TRUNC;
+- } else if (IS_RDONLY(inode) && (flag & FMODE_WRITE))
++ } else if (IS_RDONLY(inode) && (acc_mode & MAY_WRITE))
+ return -EROFS;
+ /*
+ * An append-only file must be opened in append mode for writing.
diff --git a/trunk/2.6.22/01016_linux-2.6.22.17.patch b/trunk/2.6.22/01016_linux-2.6.22.17.patch
new file mode 100644
index 0000000..4f735d5
--- /dev/null
+++ b/trunk/2.6.22/01016_linux-2.6.22.17.patch
@@ -0,0 +1,1360 @@
+Subject: Linux 2.6.22.17
+From: Greg Kroah-Hartman <gregkh@suse.de>
+
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
+index 4f2f453..c84b7cc 100644
+--- a/arch/powerpc/mm/hash_utils_64.c
++++ b/arch/powerpc/mm/hash_utils_64.c
+@@ -795,7 +795,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
+
+ #ifdef CONFIG_PPC_MM_SLICES
+ /* We only prefault standard pages for now */
+- if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize));
++ if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
+ return;
+ #endif
+
+diff --git a/arch/sparc64/kernel/chmc.c b/arch/sparc64/kernel/chmc.c
+index 777d345..6d4f02e 100644
+--- a/arch/sparc64/kernel/chmc.c
++++ b/arch/sparc64/kernel/chmc.c
+@@ -1,7 +1,6 @@
+-/* $Id: chmc.c,v 1.4 2002/01/08 16:00:14 davem Exp $
+- * memctrlr.c: Driver for UltraSPARC-III memory controller.
++/* memctrlr.c: Driver for UltraSPARC-III memory controller.
+ *
+- * Copyright (C) 2001 David S. Miller (davem@redhat.com)
++ * Copyright (C) 2001, 2007 David S. Miller (davem@davemloft.net)
+ */
+
+ #include <linux/module.h>
+@@ -16,6 +15,7 @@
+ #include <linux/init.h>
+ #include <asm/spitfire.h>
+ #include <asm/chmctrl.h>
++#include <asm/cpudata.h>
+ #include <asm/oplib.h>
+ #include <asm/prom.h>
+ #include <asm/io.h>
+@@ -242,8 +242,11 @@ int chmc_getunumber(int syndrome_code,
+ */
+ static u64 read_mcreg(struct mctrl_info *mp, unsigned long offset)
+ {
+- unsigned long ret;
+- int this_cpu = get_cpu();
++ unsigned long ret, this_cpu;
++
++ preempt_disable();
++
++ this_cpu = real_hard_smp_processor_id();
+
+ if (mp->portid == this_cpu) {
+ __asm__ __volatile__("ldxa [%1] %2, %0"
+@@ -255,7 +258,8 @@ static u64 read_mcreg(struct mctrl_info *mp, unsigned long offset)
+ : "r" (mp->regs + offset),
+ "i" (ASI_PHYS_BYPASS_EC_E));
+ }
+- put_cpu();
++
++ preempt_enable();
+
+ return ret;
+ }
+diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
+index 8059531..193791c 100644
+--- a/arch/sparc64/kernel/entry.S
++++ b/arch/sparc64/kernel/entry.S
+@@ -2593,3 +2593,15 @@ sun4v_mmustat_info:
+ retl
+ nop
+ .size sun4v_mmustat_info, .-sun4v_mmustat_info
++
++ .globl sun4v_mmu_demap_all
++ .type sun4v_mmu_demap_all,#function
++sun4v_mmu_demap_all:
++ clr %o0
++ clr %o1
++ mov HV_MMU_ALL, %o2
++ mov HV_FAST_MMU_DEMAP_ALL, %o5
++ ta HV_FAST_TRAP
++ retl
++ nop
++ .size sun4v_mmu_demap_all, .-sun4v_mmu_demap_all
+diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
+index 4dcd7d0..3ddd99c 100644
+--- a/arch/sparc64/kernel/smp.c
++++ b/arch/sparc64/kernel/smp.c
+@@ -403,7 +403,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c
+ */
+ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+ {
+- u64 pstate, ver;
++ u64 pstate, ver, busy_mask;
+ int nack_busy_id, is_jbus, need_more;
+
+ if (cpus_empty(mask))
+@@ -435,14 +435,20 @@ retry:
+ "i" (ASI_INTR_W));
+
+ nack_busy_id = 0;
++ busy_mask = 0;
+ {
+ int i;
+
+ for_each_cpu_mask(i, mask) {
+ u64 target = (i << 14) | 0x70;
+
+- if (!is_jbus)
++ if (is_jbus) {
++ busy_mask |= (0x1UL << (i * 2));
++ } else {
+ target |= (nack_busy_id << 24);
++ busy_mask |= (0x1UL <<
++ (nack_busy_id * 2));
++ }
+ __asm__ __volatile__(
+ "stxa %%g0, [%0] %1\n\t"
+ "membar #Sync\n\t"
+@@ -458,15 +464,16 @@ retry:
+
+ /* Now, poll for completion. */
+ {
+- u64 dispatch_stat;
++ u64 dispatch_stat, nack_mask;
+ long stuck;
+
+ stuck = 100000 * nack_busy_id;
++ nack_mask = busy_mask << 1;
+ do {
+ __asm__ __volatile__("ldxa [%%g0] %1, %0"
+ : "=r" (dispatch_stat)
+ : "i" (ASI_INTR_DISPATCH_STAT));
+- if (dispatch_stat == 0UL) {
++ if (!(dispatch_stat & (busy_mask | nack_mask))) {
+ __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+ : : "r" (pstate));
+ if (unlikely(need_more)) {
+@@ -483,12 +490,12 @@ retry:
+ }
+ if (!--stuck)
+ break;
+- } while (dispatch_stat & 0x5555555555555555UL);
++ } while (dispatch_stat & busy_mask);
+
+ __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+ : : "r" (pstate));
+
+- if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
++ if (dispatch_stat & busy_mask) {
+ /* Busy bits will not clear, continue instead
+ * of freezing up on this cpu.
+ */
+diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
+index 3010227..ed2484d 100644
+--- a/arch/sparc64/mm/init.c
++++ b/arch/sparc64/mm/init.c
+@@ -1135,14 +1135,9 @@ static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
+ }
+ }
+
+-static void __init kernel_physical_mapping_init(void)
++static void __init init_kpte_bitmap(void)
+ {
+ unsigned long i;
+-#ifdef CONFIG_DEBUG_PAGEALLOC
+- unsigned long mem_alloced = 0UL;
+-#endif
+-
+- read_obp_memory("reg", &pall[0], &pall_ents);
+
+ for (i = 0; i < pall_ents; i++) {
+ unsigned long phys_start, phys_end;
+@@ -1151,14 +1146,24 @@ static void __init kernel_physical_mapping_init(void)
+ phys_end = phys_start + pall[i].reg_size;
+
+ mark_kpte_bitmap(phys_start, phys_end);
++ }
++}
+
++static void __init kernel_physical_mapping_init(void)
++{
+ #ifdef CONFIG_DEBUG_PAGEALLOC
++ unsigned long i, mem_alloced = 0UL;
++
++ for (i = 0; i < pall_ents; i++) {
++ unsigned long phys_start, phys_end;
++
++ phys_start = pall[i].phys_addr;
++ phys_end = phys_start + pall[i].reg_size;
++
+ mem_alloced += kernel_map_range(phys_start, phys_end,
+ PAGE_KERNEL);
+-#endif
+ }
+
+-#ifdef CONFIG_DEBUG_PAGEALLOC
+ printk("Allocated %ld bytes for kernel page tables.\n",
+ mem_alloced);
+
+@@ -1400,6 +1405,10 @@ void __init paging_init(void)
+
+ inherit_prom_mappings();
+
++ read_obp_memory("reg", &pall[0], &pall_ents);
++
++ init_kpte_bitmap();
++
+ /* Ok, we can use our TLB miss and window trap handlers safely. */
+ setup_tba();
+
+@@ -1854,7 +1863,9 @@ void __flush_tlb_all(void)
+ "wrpr %0, %1, %%pstate"
+ : "=r" (pstate)
+ : "i" (PSTATE_IE));
+- if (tlb_type == spitfire) {
++ if (tlb_type == hypervisor) {
++ sun4v_mmu_demap_all();
++ } else if (tlb_type == spitfire) {
+ for (i = 0; i < 64; i++) {
+ /* Spitfire Errata #32 workaround */
+ /* NOTE: Always runs on spitfire, so no
+diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c
+index a474ca2..954ac8c 100644
+--- a/drivers/acpi/dispatcher/dsobject.c
++++ b/drivers/acpi/dispatcher/dsobject.c
+@@ -137,6 +137,71 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
+ return_ACPI_STATUS(status);
+ }
+ }
++
++ /* Special object resolution for elements of a package */
++
++ if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
++ (op->common.parent->common.aml_opcode ==
++ AML_VAR_PACKAGE_OP)) {
++ /*
++ * Attempt to resolve the node to a value before we insert it into
++ * the package. If this is a reference to a common data type,
++ * resolve it immediately. According to the ACPI spec, package
++ * elements can only be "data objects" or method references.
++ * Attempt to resolve to an Integer, Buffer, String or Package.
++ * If cannot, return the named reference (for things like Devices,
++ * Methods, etc.) Buffer Fields and Fields will resolve to simple
++ * objects (int/buf/str/pkg).
++ *
++ * NOTE: References to things like Devices, Methods, Mutexes, etc.
++ * will remain as named references. This behavior is not described
++ * in the ACPI spec, but it appears to be an oversight.
++ */
++ obj_desc = (union acpi_operand_object *)op->common.node;
++
++ status =
++ acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
++ (struct
++ acpi_namespace_node,
++ &obj_desc),
++ walk_state);
++ if (ACPI_FAILURE(status)) {
++ return_ACPI_STATUS(status);
++ }
++
++ switch (op->common.node->type) {
++ /*
++ * For these types, we need the actual node, not the subobject.
++ * However, the subobject got an extra reference count above.
++ */
++ case ACPI_TYPE_MUTEX:
++ case ACPI_TYPE_METHOD:
++ case ACPI_TYPE_POWER:
++ case ACPI_TYPE_PROCESSOR:
++ case ACPI_TYPE_EVENT:
++ case ACPI_TYPE_REGION:
++ case ACPI_TYPE_DEVICE:
++ case ACPI_TYPE_THERMAL:
++
++ obj_desc =
++ (union acpi_operand_object *)op->common.
++ node;
++ break;
++
++ default:
++ break;
++ }
++
++ /*
++ * If above resolved to an operand object, we are done. Otherwise,
++ * we have a NS node, we must create the package entry as a named
++ * reference.
++ */
++ if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) !=
++ ACPI_DESC_TYPE_NAMED) {
++ goto exit;
++ }
++ }
+ }
+
+ /* Create and init a new internal ACPI object */
+@@ -156,6 +221,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
+ return_ACPI_STATUS(status);
+ }
+
++ exit:
+ *obj_desc_ptr = obj_desc;
+ return_ACPI_STATUS(AE_OK);
+ }
+@@ -356,12 +422,25 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
+ arg = arg->common.next;
+ for (i = 0; arg && (i < element_count); i++) {
+ if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
+-
+- /* This package element is already built, just get it */
+-
+- obj_desc->package.elements[i] =
+- ACPI_CAST_PTR(union acpi_operand_object,
+- arg->common.node);
++ if (arg->common.node->type == ACPI_TYPE_METHOD) {
++ /*
++ * A method reference "looks" to the parser to be a method
++ * invocation, so we special case it here
++ */
++ arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
++ status =
++ acpi_ds_build_internal_object(walk_state,
++ arg,
++ &obj_desc->
++ package.
++ elements[i]);
++ } else {
++ /* This package element is already built, just get it */
++
++ obj_desc->package.elements[i] =
++ ACPI_CAST_PTR(union acpi_operand_object,
++ arg->common.node);
++ }
+ } else {
+ status = acpi_ds_build_internal_object(walk_state, arg,
+ &obj_desc->
+diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
+index 14ced85..0c205b0 100644
+--- a/drivers/atm/nicstar.c
++++ b/drivers/atm/nicstar.c
+@@ -625,14 +625,6 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
+ if (mac[i] == NULL)
+ nicstar_init_eprom(card->membase);
+
+- if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0)
+- {
+- printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
+- error = 9;
+- ns_init_card_error(card, error);
+- return error;
+- }
+-
+ /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
+ writel(0x00000000, card->membase + VPM);
+
+@@ -858,8 +850,6 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
+ card->iovpool.count++;
+ }
+
+- card->intcnt = 0;
+-
+ /* Configure NICStAR */
+ if (card->rct_size == 4096)
+ ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
+@@ -868,6 +858,15 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
+
+ card->efbie = 1;
+
++ card->intcnt = 0;
++ if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0)
++ {
++ printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
++ error = 9;
++ ns_init_card_error(card, error);
++ return error;
++ }
++
+ /* Register device */
+ card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL);
+ if (card->atmdev == NULL)
+diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
+index b5c5b9f..e2d7be9 100644
+--- a/drivers/char/drm/drm_vm.c
++++ b/drivers/char/drm/drm_vm.c
+@@ -520,6 +520,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
+ vma->vm_ops = &drm_vm_dma_ops;
+
+ vma->vm_flags |= VM_RESERVED; /* Don't swap */
++ vma->vm_flags |= VM_DONTEXPAND;
+
+ vma->vm_file = filp; /* Needed for drm_vm_open() */
+ drm_vm_open_locked(vma);
+@@ -669,6 +670,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
+ return -EINVAL; /* This should never happen. */
+ }
+ vma->vm_flags |= VM_RESERVED; /* Don't swap */
++ vma->vm_flags |= VM_DONTEXPAND;
+
+ vma->vm_file = filp; /* Needed for drm_vm_open() */
+ drm_vm_open_locked(vma);
+diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
+index 7ac3061..5685b7a 100644
+--- a/drivers/char/mspec.c
++++ b/drivers/char/mspec.c
+@@ -265,7 +265,8 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
+ vdata->refcnt = ATOMIC_INIT(1);
+ vma->vm_private_data = vdata;
+
+- vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED | VM_PFNMAP);
++ vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED | VM_PFNMAP |
++ VM_DONTEXPAND);
+ if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_ops = &mspec_vm_ops;
+diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
+index 296f510..12ceed5 100644
+--- a/drivers/connector/cn_queue.c
++++ b/drivers/connector/cn_queue.c
+@@ -99,8 +99,8 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id
+ spin_unlock_bh(&dev->queue_lock);
+
+ if (found) {
+- atomic_dec(&dev->refcnt);
+ cn_queue_free_callback(cbq);
++ atomic_dec(&dev->refcnt);
+ return -EINVAL;
+ }
+
+diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
+index 59b9943..ad55baa 100644
+--- a/drivers/net/cassini.c
++++ b/drivers/net/cassini.c
+@@ -336,30 +336,6 @@ static inline void cas_mask_intr(struct cas *cp)
+ cas_disable_irq(cp, i);
+ }
+
+-static inline void cas_buffer_init(cas_page_t *cp)
+-{
+- struct page *page = cp->buffer;
+- atomic_set((atomic_t *)&page->lru.next, 1);
+-}
+-
+-static inline int cas_buffer_count(cas_page_t *cp)
+-{
+- struct page *page = cp->buffer;
+- return atomic_read((atomic_t *)&page->lru.next);
+-}
+-
+-static inline void cas_buffer_inc(cas_page_t *cp)
+-{
+- struct page *page = cp->buffer;
+- atomic_inc((atomic_t *)&page->lru.next);
+-}
+-
+-static inline void cas_buffer_dec(cas_page_t *cp)
+-{
+- struct page *page = cp->buffer;
+- atomic_dec((atomic_t *)&page->lru.next);
+-}
+-
+ static void cas_enable_irq(struct cas *cp, const int ring)
+ {
+ if (ring == 0) { /* all but TX_DONE */
+@@ -497,7 +473,6 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
+ {
+ pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
+ PCI_DMA_FROMDEVICE);
+- cas_buffer_dec(page);
+ __free_pages(page->buffer, cp->page_order);
+ kfree(page);
+ return 0;
+@@ -527,7 +502,6 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
+ page->buffer = alloc_pages(flags, cp->page_order);
+ if (!page->buffer)
+ goto page_err;
+- cas_buffer_init(page);
+ page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
+ cp->page_size, PCI_DMA_FROMDEVICE);
+ return page;
+@@ -606,7 +580,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
+ list_for_each_safe(elem, tmp, &list) {
+ cas_page_t *page = list_entry(elem, cas_page_t, list);
+
+- if (cas_buffer_count(page) > 1)
++ if (page_count(page->buffer) > 1)
+ continue;
+
+ list_del(elem);
+@@ -1374,7 +1348,7 @@ static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
+ cas_page_t *page = cp->rx_pages[1][index];
+ cas_page_t *new;
+
+- if (cas_buffer_count(page) == 1)
++ if (page_count(page->buffer) == 1)
+ return page;
+
+ new = cas_page_dequeue(cp);
+@@ -1394,7 +1368,7 @@ static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
+ cas_page_t **page1 = cp->rx_pages[1];
+
+ /* swap if buffer is in use */
+- if (cas_buffer_count(page0[index]) > 1) {
++ if (page_count(page0[index]->buffer) > 1) {
+ cas_page_t *new = cas_page_spare(cp, index);
+ if (new) {
+ page1[index] = page0[index];
+@@ -1979,6 +1953,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
+ struct cas_page *page;
+ struct sk_buff *skb;
+ void *addr, *crcaddr;
++ __sum16 csum;
+ char *p;
+
+ hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
+@@ -2062,10 +2037,10 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
+
+ skb_shinfo(skb)->nr_frags++;
+ skb->data_len += hlen - swivel;
++ skb->truesize += hlen - swivel;
+ skb->len += hlen - swivel;
+
+ get_page(page->buffer);
+- cas_buffer_inc(page);
+ frag->page = page->buffer;
+ frag->page_offset = off;
+ frag->size = hlen - swivel;
+@@ -2090,7 +2065,6 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
+ frag++;
+
+ get_page(page->buffer);
+- cas_buffer_inc(page);
+ frag->page = page->buffer;
+ frag->page_offset = 0;
+ frag->size = hlen;
+@@ -2158,14 +2132,15 @@ end_copy_pkt:
+ skb_put(skb, alloclen);
+ }
+
+- i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]);
++ csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
+ if (cp->crc_size) {
+ /* checksum includes FCS. strip it out. */
+- i = csum_fold(csum_partial(crcaddr, cp->crc_size, i));
++ csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
++ csum_unfold(csum)));
+ if (addr)
+ cas_page_unmap(addr);
+ }
+- skb->csum = ntohs(i ^ 0xffff);
++ skb->csum = csum_unfold(~csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->protocol = eth_type_trans(skb, cp->dev);
+ return len;
+@@ -2253,7 +2228,7 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
+ released = 0;
+ while (entry != last) {
+ /* make a new buffer if it's still in use */
+- if (cas_buffer_count(page[entry]) > 1) {
++ if (page_count(page[entry]->buffer) > 1) {
+ cas_page_t *new = cas_page_dequeue(cp);
+ if (!new) {
+ /* let the timer know that we need to
+diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h
+index a970804..a201431 100644
+--- a/drivers/net/cassini.h
++++ b/drivers/net/cassini.h
+@@ -4122,8 +4122,8 @@ cas_saturn_patch_t cas_saturn_patch[] = {
+ inserted into
+ outgoing frame. */
+ struct cas_tx_desc {
+- u64 control;
+- u64 buffer;
++ __le64 control;
++ __le64 buffer;
+ };
+
+ /* descriptor ring for free buffers contains page-sized buffers. the index
+@@ -4131,8 +4131,8 @@ struct cas_tx_desc {
+ * the completion ring.
+ */
+ struct cas_rx_desc {
+- u64 index;
+- u64 buffer;
++ __le64 index;
++ __le64 buffer;
+ };
+
+ /* received packets are put on the completion ring. */
+@@ -4210,10 +4210,10 @@ struct cas_rx_desc {
+ #define RX_INDEX_RELEASE 0x0000000000002000ULL
+
+ struct cas_rx_comp {
+- u64 word1;
+- u64 word2;
+- u64 word3;
+- u64 word4;
++ __le64 word1;
++ __le64 word2;
++ __le64 word3;
++ __le64 word4;
+ };
+
+ enum link_state {
+@@ -4252,7 +4252,7 @@ struct cas_init_block {
+ struct cas_rx_comp rxcs[N_RX_COMP_RINGS][INIT_BLOCK_RX_COMP];
+ struct cas_rx_desc rxds[N_RX_DESC_RINGS][INIT_BLOCK_RX_DESC];
+ struct cas_tx_desc txds[N_TX_RINGS][INIT_BLOCK_TX];
+- u64 tx_compwb;
++ __le64 tx_compwb;
+ };
+
+ /* tiny buffers to deal with target abort issue. we allocate a bit
+diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
+index 231ce43..a82a1fa 100644
+--- a/drivers/net/chelsio/cxgb2.c
++++ b/drivers/net/chelsio/cxgb2.c
+@@ -370,6 +370,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
+ "TxInternalMACXmitError",
+ "TxFramesWithExcessiveDeferral",
+ "TxFCSErrors",
++ "TxJumboFramesOk",
++ "TxJumboOctetsOk",
+
+ "RxOctetsOK",
+ "RxOctetsBad",
+@@ -388,15 +390,16 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
+ "RxInRangeLengthErrors",
+ "RxOutOfRangeLengthField",
+ "RxFrameTooLongErrors",
++ "RxJumboFramesOk",
++ "RxJumboOctetsOk",
+
+ /* Port stats */
+- "RxPackets",
+ "RxCsumGood",
+- "TxPackets",
+ "TxCsumOffload",
+ "TxTso",
+ "RxVlan",
+ "TxVlan",
++ "TxNeedHeadroom",
+
+ /* Interrupt stats */
+ "rx drops",
+@@ -454,23 +457,56 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+ const struct cmac_statistics *s;
+ const struct sge_intr_counts *t;
+ struct sge_port_stats ss;
+- unsigned int len;
+
+ s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
+-
+- len = sizeof(u64)*(&s->TxFCSErrors + 1 - &s->TxOctetsOK);
+- memcpy(data, &s->TxOctetsOK, len);
+- data += len;
+-
+- len = sizeof(u64)*(&s->RxFrameTooLongErrors + 1 - &s->RxOctetsOK);
+- memcpy(data, &s->RxOctetsOK, len);
+- data += len;
+-
++ t = t1_sge_get_intr_counts(adapter->sge);
+ t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
+- memcpy(data, &ss, sizeof(ss));
+- data += sizeof(ss);
+
+- t = t1_sge_get_intr_counts(adapter->sge);
++ *data++ = s->TxOctetsOK;
++ *data++ = s->TxOctetsBad;
++ *data++ = s->TxUnicastFramesOK;
++ *data++ = s->TxMulticastFramesOK;
++ *data++ = s->TxBroadcastFramesOK;
++ *data++ = s->TxPauseFrames;
++ *data++ = s->TxFramesWithDeferredXmissions;
++ *data++ = s->TxLateCollisions;
++ *data++ = s->TxTotalCollisions;
++ *data++ = s->TxFramesAbortedDueToXSCollisions;
++ *data++ = s->TxUnderrun;
++ *data++ = s->TxLengthErrors;
++ *data++ = s->TxInternalMACXmitError;
++ *data++ = s->TxFramesWithExcessiveDeferral;
++ *data++ = s->TxFCSErrors;
++ *data++ = s->TxJumboFramesOK;
++ *data++ = s->TxJumboOctetsOK;
++
++ *data++ = s->RxOctetsOK;
++ *data++ = s->RxOctetsBad;
++ *data++ = s->RxUnicastFramesOK;
++ *data++ = s->RxMulticastFramesOK;
++ *data++ = s->RxBroadcastFramesOK;
++ *data++ = s->RxPauseFrames;
++ *data++ = s->RxFCSErrors;
++ *data++ = s->RxAlignErrors;
++ *data++ = s->RxSymbolErrors;
++ *data++ = s->RxDataErrors;
++ *data++ = s->RxSequenceErrors;
++ *data++ = s->RxRuntErrors;
++ *data++ = s->RxJabberErrors;
++ *data++ = s->RxInternalMACRcvError;
++ *data++ = s->RxInRangeLengthErrors;
++ *data++ = s->RxOutOfRangeLengthField;
++ *data++ = s->RxFrameTooLongErrors;
++ *data++ = s->RxJumboFramesOK;
++ *data++ = s->RxJumboOctetsOK;
++
++ *data++ = ss.rx_cso_good;
++ *data++ = ss.tx_cso;
++ *data++ = ss.tx_tso;
++ *data++ = ss.vlan_xtract;
++ *data++ = ss.vlan_insert;
++ *data++ = ss.tx_need_hdrroom;
++
+ *data++ = t->rx_drops;
+ *data++ = t->pure_rsps;
+ *data++ = t->unhandled_irqs;
+diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
+index 678778a..2117c4f 100644
+--- a/drivers/net/chelsio/pm3393.c
++++ b/drivers/net/chelsio/pm3393.c
+@@ -45,7 +45,7 @@
+
+ #include <linux/crc32.h>
+
+-#define OFFSET(REG_ADDR) (REG_ADDR << 2)
++#define OFFSET(REG_ADDR) ((REG_ADDR) << 2)
+
+ /* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
+ #define MAX_FRAME_SIZE 9600
+@@ -428,69 +428,26 @@ static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
+ return 0;
+ }
+
+-static void pm3393_rmon_update(struct adapter *adapter, u32 offs, u64 *val,
+- int over)
+-{
+- u32 val0, val1, val2;
+-
+- t1_tpi_read(adapter, offs, &val0);
+- t1_tpi_read(adapter, offs + 4, &val1);
+- t1_tpi_read(adapter, offs + 8, &val2);
+-
+- *val &= ~0ull << 40;
+- *val |= val0 & 0xffff;
+- *val |= (val1 & 0xffff) << 16;
+- *val |= (u64)(val2 & 0xff) << 32;
+-
+- if (over)
+- *val += 1ull << 40;
++#define RMON_UPDATE(mac, name, stat_name) \
++{ \
++ t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \
++ t1_tpi_read((mac)->adapter, OFFSET((name)+1), &val1); \
++ t1_tpi_read((mac)->adapter, OFFSET((name)+2), &val2); \
++ (mac)->stats.stat_name = (u64)(val0 & 0xffff) | \
++ ((u64)(val1 & 0xffff) << 16) | \
++ ((u64)(val2 & 0xff) << 32) | \
++ ((mac)->stats.stat_name & \
++ 0xffffff0000000000ULL); \
++ if (ro & \
++ (1ULL << ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \
++ (mac)->stats.stat_name += 1ULL << 40; \
+ }
+
+ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
+ int flag)
+ {
+- static struct {
+- unsigned int reg;
+- unsigned int offset;
+- } hw_stats [] = {
+-
+-#define HW_STAT(name, stat_name) \
+- { name, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
+-
+- /* Rx stats */
+- HW_STAT(RxOctetsReceivedOK, RxOctetsOK),
+- HW_STAT(RxUnicastFramesReceivedOK, RxUnicastFramesOK),
+- HW_STAT(RxMulticastFramesReceivedOK, RxMulticastFramesOK),
+- HW_STAT(RxBroadcastFramesReceivedOK, RxBroadcastFramesOK),
+- HW_STAT(RxPAUSEMACCtrlFramesReceived, RxPauseFrames),
+- HW_STAT(RxFrameCheckSequenceErrors, RxFCSErrors),
+- HW_STAT(RxFramesLostDueToInternalMACErrors,
+- RxInternalMACRcvError),
+- HW_STAT(RxSymbolErrors, RxSymbolErrors),
+- HW_STAT(RxInRangeLengthErrors, RxInRangeLengthErrors),
+- HW_STAT(RxFramesTooLongErrors , RxFrameTooLongErrors),
+- HW_STAT(RxJabbers, RxJabberErrors),
+- HW_STAT(RxFragments, RxRuntErrors),
+- HW_STAT(RxUndersizedFrames, RxRuntErrors),
+- HW_STAT(RxJumboFramesReceivedOK, RxJumboFramesOK),
+- HW_STAT(RxJumboOctetsReceivedOK, RxJumboOctetsOK),
+-
+- /* Tx stats */
+- HW_STAT(TxOctetsTransmittedOK, TxOctetsOK),
+- HW_STAT(TxFramesLostDueToInternalMACTransmissionError,
+- TxInternalMACXmitError),
+- HW_STAT(TxTransmitSystemError, TxFCSErrors),
+- HW_STAT(TxUnicastFramesTransmittedOK, TxUnicastFramesOK),
+- HW_STAT(TxMulticastFramesTransmittedOK, TxMulticastFramesOK),
+- HW_STAT(TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK),
+- HW_STAT(TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames),
+- HW_STAT(TxJumboFramesReceivedOK, TxJumboFramesOK),
+- HW_STAT(TxJumboOctetsReceivedOK, TxJumboOctetsOK)
+- }, *p = hw_stats;
+- u64 ro;
+- u32 val0, val1, val2, val3;
+- u64 *stats = (u64 *) &mac->stats;
+- unsigned int i;
++ u64 ro;
++ u32 val0, val1, val2, val3;
+
+ /* Snap the counters */
+ pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
+@@ -504,14 +461,35 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
+ ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
+ (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
+
+- for (i = 0; i < ARRAY_SIZE(hw_stats); i++) {
+- unsigned reg = p->reg - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW;
+-
+- pm3393_rmon_update((mac)->adapter, OFFSET(p->reg),
+- stats + p->offset, ro & (reg >> 2));
+- }
+-
+-
++ /* Rx stats */
++ RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
++ RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
++ RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
++ RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
++ RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
++ RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
++ RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
++ RxInternalMACRcvError);
++ RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
++ RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
++	RMON_UPDATE(mac, RxFramesTooLongErrors, RxFrameTooLongErrors);
++ RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
++ RMON_UPDATE(mac, RxFragments, RxRuntErrors);
++ RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
++ RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
++ RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);
++
++ /* Tx stats */
++ RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
++ RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
++ TxInternalMACXmitError);
++ RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
++ RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
++ RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
++ RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
++ RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
++ RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
++ RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);
+
+ return &mac->stats;
+ }
+diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
+index e4f874a..d77f1eb 100644
+--- a/drivers/net/chelsio/sge.c
++++ b/drivers/net/chelsio/sge.c
+@@ -986,11 +986,10 @@ void t1_sge_get_port_stats(const struct sge *sge, int port,
+ for_each_possible_cpu(cpu) {
+ struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
+
+- ss->rx_packets += st->rx_packets;
+ ss->rx_cso_good += st->rx_cso_good;
+- ss->tx_packets += st->tx_packets;
+ ss->tx_cso += st->tx_cso;
+ ss->tx_tso += st->tx_tso;
++ ss->tx_need_hdrroom += st->tx_need_hdrroom;
+ ss->vlan_xtract += st->vlan_xtract;
+ ss->vlan_insert += st->vlan_insert;
+ }
+@@ -1379,11 +1378,10 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
+ }
+ __skb_pull(skb, sizeof(*p));
+
+- skb->dev->last_rx = jiffies;
+ st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
+- st->rx_packets++;
+
+ skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
++ skb->dev->last_rx = jiffies;
+ if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
+ skb->protocol == htons(ETH_P_IP) &&
+ (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
+@@ -1851,7 +1849,8 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct adapter *adapter = dev->priv;
+ struct sge *sge = adapter->sge;
+- struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], smp_processor_id());
++ struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port],
++ smp_processor_id());
+ struct cpl_tx_pkt *cpl;
+ struct sk_buff *orig_skb = skb;
+ int ret;
+@@ -1859,6 +1858,18 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ if (skb->protocol == htons(ETH_P_CPL5))
+ goto send;
+
++ /*
++ * We are using a non-standard hard_header_len.
++ * Allocate more header room in the rare cases it is not big enough.
++ */
++ if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
++ skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
++ ++st->tx_need_hdrroom;
++ dev_kfree_skb_any(orig_skb);
++ if (!skb)
++ return NETDEV_TX_OK;
++ }
++
+ if (skb_shinfo(skb)->gso_size) {
+ int eth_type;
+ struct cpl_tx_pkt_lso *hdr;
+@@ -1892,24 +1903,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ return NETDEV_TX_OK;
+ }
+
+- /*
+- * We are using a non-standard hard_header_len and some kernel
+- * components, such as pktgen, do not handle it right.
+- * Complain when this happens but try to fix things up.
+- */
+- if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
+- pr_debug("%s: headroom %d header_len %d\n", dev->name,
+- skb_headroom(skb), dev->hard_header_len);
+-
+- if (net_ratelimit())
+- printk(KERN_ERR "%s: inadequate headroom in "
+- "Tx packet\n", dev->name);
+- skb = skb_realloc_headroom(skb, sizeof(*cpl));
+- dev_kfree_skb_any(orig_skb);
+- if (!skb)
+- return NETDEV_TX_OK;
+- }
+-
+ if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
+ skb->ip_summed == CHECKSUM_PARTIAL &&
+ ip_hdr(skb)->protocol == IPPROTO_UDP) {
+@@ -1955,7 +1948,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ cpl->vlan_valid = 0;
+
+ send:
+- st->tx_packets++;
+ dev->trans_start = jiffies;
+ ret = t1_sge_tx(skb, adapter, 0, dev);
+
+diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
+index d132a0e..80165f9 100644
+--- a/drivers/net/chelsio/sge.h
++++ b/drivers/net/chelsio/sge.h
+@@ -57,13 +57,12 @@ struct sge_intr_counts {
+ };
+
+ struct sge_port_stats {
+- u64 rx_packets; /* # of Ethernet packets received */
+ u64 rx_cso_good; /* # of successful RX csum offloads */
+- u64 tx_packets; /* # of TX packets */
+ u64 tx_cso; /* # of TX checksum offloads */
+ u64 tx_tso; /* # of TSO requests */
+ u64 vlan_xtract; /* # of VLAN tag extractions */
+ u64 vlan_insert; /* # of VLAN tag insertions */
++ u64 tx_need_hdrroom; /* # of TX skbs in need of more header room */
+ };
+
+ struct sk_buff;
+diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
+index 60d2944..4ebb6ea 100644
+--- a/drivers/net/usb/kaweth.c
++++ b/drivers/net/usb/kaweth.c
+@@ -70,7 +70,7 @@
+ #define KAWETH_TX_TIMEOUT (5 * HZ)
+ #define KAWETH_SCRATCH_SIZE 32
+ #define KAWETH_FIRMWARE_BUF_SIZE 4096
+-#define KAWETH_CONTROL_TIMEOUT (30 * HZ)
++#define KAWETH_CONTROL_TIMEOUT (30000)
+
+ #define KAWETH_STATUS_BROKEN 0x0000001
+ #define KAWETH_STATUS_CLOSING 0x0000002
+diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
+index 6240b97..3bbc5c4 100644
+--- a/drivers/net/usb/mcs7830.c
++++ b/drivers/net/usb/mcs7830.c
+@@ -94,7 +94,7 @@ static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
+
+ ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ,
+ MCS7830_RD_BMREQ, 0x0000, index, data,
+- size, msecs_to_jiffies(MCS7830_CTRL_TIMEOUT));
++ size, MCS7830_CTRL_TIMEOUT);
+ return ret;
+ }
+
+@@ -105,7 +105,7 @@ static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, void *data)
+
+ ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ,
+ MCS7830_WR_BMREQ, 0x0000, index, data,
+- size, msecs_to_jiffies(MCS7830_CTRL_TIMEOUT));
++ size, MCS7830_CTRL_TIMEOUT);
+ return ret;
+ }
+
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 749e7d8..9f90c10 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -465,6 +465,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich6_lpc_acpi );
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich6_lpc_acpi );
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich6_lpc_acpi );
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich6_lpc_acpi );
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich6_lpc_acpi );
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich6_lpc_acpi );
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich6_lpc_acpi );
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich6_lpc_acpi );
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich6_lpc_acpi );
+
+ /*
+ * VIA ACPI: One IO region pointed to by longword at
+diff --git a/fs/exec.c b/fs/exec.c
+index 3da429d..224e973 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1561,6 +1561,12 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
+ but keep the previous behaviour for now. */
+ if (!ispipe && !S_ISREG(inode->i_mode))
+ goto close_fail;
++ /*
++	 * Don't allow local users to get cute and trick others into coredumping
++	 * into their pre-created files:
++ */
++ if (inode->i_uid != current->fsuid)
++ goto close_fail;
+ if (!file->f_op)
+ goto close_fail;
+ if (!file->f_op->write)
+diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
+index 70a6911..f87de97 100644
+--- a/fs/ncpfs/mmap.c
++++ b/fs/ncpfs/mmap.c
+@@ -47,9 +47,6 @@ static struct page* ncp_file_mmap_nopage(struct vm_area_struct *area,
+ pos = address - area->vm_start + (area->vm_pgoff << PAGE_SHIFT);
+
+ count = PAGE_SIZE;
+- if (address + PAGE_SIZE > area->vm_end) {
+- count = area->vm_end - address;
+- }
+ /* what we can read in one go */
+ bufsize = NCP_SERVER(inode)->buffer_size;
+
+diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
+index db2130a..a63a1f6 100644
+--- a/include/asm-sparc64/hypervisor.h
++++ b/include/asm-sparc64/hypervisor.h
+@@ -709,6 +709,10 @@ extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
+ */
+ #define HV_FAST_MMU_DEMAP_ALL 0x24
+
++#ifndef __ASSEMBLY__
++extern void sun4v_mmu_demap_all(void);
++#endif
++
+ /* mmu_map_perm_addr()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_MAP_PERM_ADDR
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 887c2ce..c6c9d48 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2285,6 +2285,8 @@
+ #define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914
+ #define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919
+ #define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
++#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916
++#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918
+ #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
+ #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
+ #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
+diff --git a/kernel/relay.c b/kernel/relay.c
+index 95db8c7..24db7e8 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -91,6 +91,7 @@ int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
+ return -EINVAL;
+
+ vma->vm_ops = &relay_file_mmap_ops;
++ vma->vm_flags |= VM_DONTEXPAND;
+ vma->vm_private_data = buf;
+ buf->chan->cb->buf_mapped(buf, filp);
+
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 906ed40..33fb671 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2157,7 +2157,7 @@ int install_special_mapping(struct mm_struct *mm,
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+
+- vma->vm_flags = vm_flags | mm->def_flags;
++ vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
+ vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+
+ vma->vm_ops = &special_mapping_vmops;
+diff --git a/net/atm/mpc.c b/net/atm/mpc.c
+index 7c85aa5..181c1c8 100644
+--- a/net/atm/mpc.c
++++ b/net/atm/mpc.c
+@@ -542,6 +542,13 @@ static int mpc_send_packet(struct sk_buff *skb, struct net_device *dev)
+ if (eth->h_proto != htons(ETH_P_IP))
+ goto non_ip; /* Multi-Protocol Over ATM :-) */
+
++ /* Weed out funny packets (e.g., AF_PACKET or raw). */
++ if (skb->len < ETH_HLEN + sizeof(struct iphdr))
++ goto non_ip;
++ skb_set_network_header(skb, ETH_HLEN);
++ if (skb->len < ETH_HLEN + ip_hdr(skb)->ihl * 4 || ip_hdr(skb)->ihl < 5)
++ goto non_ip;
++
+ while (i < mpc->number_of_mps_macs) {
+ if (!compare_ether_addr(eth->h_dest, (mpc->mps_macs + i*ETH_ALEN)))
+ if ( send_via_shortcut(skb, mpc) == 0 ) /* try shortcut */
+diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
+index 0ddaff0..8a9f0ac 100644
+--- a/net/ax25/ax25_in.c
++++ b/net/ax25/ax25_in.c
+@@ -124,7 +124,7 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
+ }
+
+ skb_pull(skb, 1); /* Remove PID */
+- skb_reset_mac_header(skb);
++ skb->mac_header = skb->network_header;
+ skb_reset_network_header(skb);
+ skb->dev = ax25->ax25_dev->dev;
+ skb->pkt_type = PACKET_HOST;
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 0dcc245..9607d78 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1030,7 +1030,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
+ memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
+ if (named++ == 0)
+ continue;
+- dot = strchr(ifa->ifa_label, ':');
++ dot = strchr(old, ':');
+ if (dot == NULL) {
+ sprintf(old, ":%d", named);
+ dot = old;
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 6328293..724b612 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -613,7 +613,7 @@ static int ipgre_rcv(struct sk_buff *skb)
+ offset += 4;
+ }
+
+- skb_reset_mac_header(skb);
++ skb->mac_header = skb->network_header;
+ __pskb_pull(skb, offset);
+ skb_reset_network_header(skb);
+ skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 4aa2551..8f443ed 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2885,11 +2885,10 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
+ int idx, s_idx;
+
+ s_h = cb->args[0];
++ if (s_h < 0)
++ s_h = 0;
+ s_idx = idx = cb->args[1];
+- for (h = 0; h <= rt_hash_mask; h++) {
+- if (h < s_h) continue;
+- if (h > s_h)
+- s_idx = 0;
++ for (h = s_h; h <= rt_hash_mask; h++) {
+ rcu_read_lock_bh();
+ for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
+ rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
+@@ -2906,6 +2905,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
+ dst_release(xchg(&skb->dst, NULL));
+ }
+ rcu_read_unlock_bh();
++ s_idx = 0;
+ }
+
+ done:
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index dcd7e32..73708b5 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -1115,8 +1115,6 @@ static int irda_create(struct socket *sock, int protocol)
+ self->max_sdu_size_rx = TTP_SAR_UNBOUND;
+ break;
+ default:
+- IRDA_ERROR("%s: protocol not supported!\n",
+- __FUNCTION__);
+ return -ESOCKTNOSUPPORT;
+ }
+ break;
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index ca0db0f..0be3be2 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -2777,12 +2777,22 @@ static struct sadb_msg *pfkey_get_base_msg(struct sk_buff *skb, int *errp)
+
+ static inline int aalg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
+ {
+- return t->aalgos & (1 << d->desc.sadb_alg_id);
++ unsigned int id = d->desc.sadb_alg_id;
++
++ if (id >= sizeof(t->aalgos) * 8)
++ return 0;
++
++ return (t->aalgos >> id) & 1;
+ }
+
+ static inline int ealg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
+ {
+- return t->ealgos & (1 << d->desc.sadb_alg_id);
++ unsigned int id = d->desc.sadb_alg_id;
++
++ if (id >= sizeof(t->ealgos) * 8)
++ return 0;
++
++ return (t->ealgos >> id) & 1;
+ }
+
+ static int count_ah_combs(struct xfrm_tmpl *t)
+diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
+index c7b5d93..69e77d5 100644
+--- a/net/netrom/nr_dev.c
++++ b/net/netrom/nr_dev.c
+@@ -56,7 +56,7 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev)
+
+ /* Spoof incoming device */
+ skb->dev = dev;
+- skb_reset_mac_header(skb);
++ skb->mac_header = skb->network_header;
+ skb_reset_network_header(skb);
+ skb->pkt_type = PACKET_HOST;
+
+diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c
+index 8738ec7..3447803 100644
+--- a/net/x25/x25_forward.c
++++ b/net/x25/x25_forward.c
+@@ -118,13 +118,14 @@ int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) {
+ goto out;
+
+ if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){
+- goto out;
++ goto output;
+
+ }
+ x25_transmit_link(skbn, nb);
+
+- x25_neigh_put(nb);
+ rc = 1;
++output:
++ x25_neigh_put(nb);
+ out:
+ return rc;
+ }
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index b48f06f..1c86a23 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -1479,8 +1479,9 @@ restart:
+
+ if (sk && sk->sk_policy[1]) {
+ policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
++ err = PTR_ERR(policy);
+ if (IS_ERR(policy))
+- return PTR_ERR(policy);
++ goto dropdst;
+ }
+
+ if (!policy) {
+@@ -1491,8 +1492,9 @@ restart:
+
+ policy = flow_cache_lookup(fl, dst_orig->ops->family,
+ dir, xfrm_policy_lookup);
++ err = PTR_ERR(policy);
+ if (IS_ERR(policy))
+- return PTR_ERR(policy);
++ goto dropdst;
+ }
+
+ if (!policy)
+@@ -1661,8 +1663,9 @@ restart:
+ return 0;
+
+ error:
+- dst_release(dst_orig);
+ xfrm_pols_put(pols, npols);
++dropdst:
++ dst_release(dst_orig);
+ *dst_p = NULL;
+ return err;
+ }
+diff --git a/sound/oss/via82cxxx_audio.c b/sound/oss/via82cxxx_audio.c
+index 5d3c037..f95aa09 100644
+--- a/sound/oss/via82cxxx_audio.c
++++ b/sound/oss/via82cxxx_audio.c
+@@ -2104,6 +2104,7 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma,
+ {
+ struct via_info *card = vma->vm_private_data;
+ struct via_channel *chan = &card->ch_out;
++ unsigned long max_bufs;
+ struct page *dmapage;
+ unsigned long pgoff;
+ int rd, wr;
+@@ -2127,14 +2128,11 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma,
+ rd = card->ch_in.is_mapped;
+ wr = card->ch_out.is_mapped;
+
+-#ifndef VIA_NDEBUG
+- {
+- unsigned long max_bufs = chan->frag_number;
+- if (rd && wr) max_bufs *= 2;
+- /* via_dsp_mmap() should ensure this */
+- assert (pgoff < max_bufs);
+- }
+-#endif
++ max_bufs = chan->frag_number;
++ if (rd && wr)
++ max_bufs *= 2;
++ if (pgoff >= max_bufs)
++ return NOPAGE_SIGBUS;
+
+ /* if full-duplex (read+write) and we have two sets of bufs,
+ * then the playback buffers come first, sez soundcard.c */
+diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
+index b76b3dd..e617d7e 100644
+--- a/sound/usb/usx2y/usX2Yhwdep.c
++++ b/sound/usb/usx2y/usX2Yhwdep.c
+@@ -88,7 +88,7 @@ static int snd_us428ctls_mmap(struct snd_hwdep * hw, struct file *filp, struct v
+ us428->us428ctls_sharedmem->CtlSnapShotLast = -2;
+ }
+ area->vm_ops = &us428ctls_vm_ops;
+- area->vm_flags |= VM_RESERVED;
++ area->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+ area->vm_private_data = hw->private_data;
+ return 0;
+ }
+diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
+index a5e7bcd..6e70520 100644
+--- a/sound/usb/usx2y/usx2yhwdeppcm.c
++++ b/sound/usb/usx2y/usx2yhwdeppcm.c
+@@ -728,7 +728,7 @@ static int snd_usX2Y_hwdep_pcm_mmap(struct snd_hwdep * hw, struct file *filp, st
+ return -ENODEV;
+ }
+ area->vm_ops = &snd_usX2Y_hwdep_pcm_vm_ops;
+- area->vm_flags |= VM_RESERVED;
++ area->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+ area->vm_private_data = hw->private_data;
+ return 0;
+ }
diff --git a/trunk/2.6.22/01017_linux-2.6.22.18.patch b/trunk/2.6.22/01017_linux-2.6.22.18.patch
new file mode 100644
index 0000000..4f87816
--- /dev/null
+++ b/trunk/2.6.22/01017_linux-2.6.22.18.patch
@@ -0,0 +1,14 @@
+diff --git a/fs/splice.c b/fs/splice.c
+index e263d3b..dbbe267 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -1182,6 +1182,9 @@ static int get_iovec_page_array(const struct iovec __user *iov,
+ if (unlikely(!base))
+ break;
+
++ if (!access_ok(VERIFY_READ, base, len))
++ break;
++
+ /*
+ * Get this base offset and number of pages, then map
+ * in the user pages.
diff --git a/trunk/2.6.22/20001_x86-early-quirks-unificiation.patch1 b/trunk/2.6.22/20001_x86-early-quirks-unificiation.patch1
new file mode 100644
index 0000000..12dd843
--- /dev/null
+++ b/trunk/2.6.22/20001_x86-early-quirks-unificiation.patch1
@@ -0,0 +1,237 @@
+Subject: x86: Unify i386 and x86-64 early quirks
+
+They were already very similar; just use the same file now.
+
+Cc: lenb@kernel.org
+
+Signed-off-by: Andi Kleen <ak@suse.de>
+
+---
+ arch/i386/kernel/Makefile | 2
+ arch/i386/kernel/acpi/Makefile | 3 -
+ arch/i386/kernel/acpi/earlyquirk.c | 84 -------------------------------------
+ arch/i386/kernel/setup.c | 4 -
+ arch/x86_64/kernel/early-quirks.c | 11 ++++
+ include/asm-i386/acpi.h | 6 --
+ include/asm-i386/dma.h | 2
+ include/asm-x86_64/io_apic.h | 2
+ include/asm-x86_64/proto.h | 2
+ 9 files changed, 18 insertions(+), 98 deletions(-)
+
+--- a/arch/i386/kernel/Makefile 2007-08-27 14:01:19.000000000 -0400
++++ b/arch/i386/kernel/Makefile 2007-08-27 14:02:11.000000000 -0400
+@@ -17,6 +17,7 @@ obj-$(CONFIG_MCA) += mca.o
+ obj-$(CONFIG_X86_MSR) += msr.o
+ obj-$(CONFIG_X86_CPUID) += cpuid.o
+ obj-$(CONFIG_MICROCODE) += microcode.o
++obj-$(CONFIG_PCI) += early-quirks.o
+ obj-$(CONFIG_APM) += apm.o
+ obj-$(CONFIG_X86_SMP) += smp.o smpboot.o tsc_sync.o
+ obj-$(CONFIG_SMP) += smpcommon.o
+@@ -84,4 +85,5 @@ $(obj)/vsyscall-syms.o: $(src)/vsyscall.
+
+ k8-y += ../../x86_64/kernel/k8.o
+ stacktrace-y += ../../x86_64/kernel/stacktrace.o
++early-quirks-y += ../../x86_64/kernel/early-quirks.o
+
+--- a/arch/i386/kernel/acpi/Makefile 2007-07-08 19:32:17.000000000 -0400
++++ b/arch/i386/kernel/acpi/Makefile 2007-08-27 14:02:11.000000000 -0400
+@@ -1,7 +1,4 @@
+ obj-$(CONFIG_ACPI) += boot.o
+-ifneq ($(CONFIG_PCI),)
+-obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o
+-endif
+ obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
+
+ ifneq ($(CONFIG_ACPI_PROCESSOR),)
+--- a/arch/i386/kernel/acpi/earlyquirk.c 2007-07-08 19:32:17.000000000 -0400
++++ /dev/null 1970-01-01 00:00:00.000000000 +0000
+@@ -1,84 +0,0 @@
+-/*
+- * Do early PCI probing for bug detection when the main PCI subsystem is
+- * not up yet.
+- */
+-#include <linux/init.h>
+-#include <linux/kernel.h>
+-#include <linux/pci.h>
+-#include <linux/acpi.h>
+-
+-#include <asm/pci-direct.h>
+-#include <asm/acpi.h>
+-#include <asm/apic.h>
+-
+-#ifdef CONFIG_ACPI
+-
+-static int __init nvidia_hpet_check(struct acpi_table_header *header)
+-{
+- return 0;
+-}
+-#endif
+-
+-static int __init check_bridge(int vendor, int device)
+-{
+-#ifdef CONFIG_ACPI
+- static int warned;
+- /* According to Nvidia all timer overrides are bogus unless HPET
+- is enabled. */
+- if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
+- if (!warned && acpi_table_parse(ACPI_SIG_HPET,
+- nvidia_hpet_check)) {
+- warned = 1;
+- acpi_skip_timer_override = 1;
+- printk(KERN_INFO "Nvidia board "
+- "detected. Ignoring ACPI "
+- "timer override.\n");
+- printk(KERN_INFO "If you got timer trouble "
+- "try acpi_use_timer_override\n");
+-
+- }
+- }
+-#endif
+- if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
+- timer_over_8254 = 0;
+- printk(KERN_INFO "ATI board detected. Disabling timer routing "
+- "over 8254.\n");
+- }
+- return 0;
+-}
+-
+-void __init check_acpi_pci(void)
+-{
+- int num, slot, func;
+-
+- /* Assume the machine supports type 1. If not it will
+- always read ffffffff and should not have any side effect.
+- Actually a few buggy systems can machine check. Allow the user
+- to disable it by command line option at least -AK */
+- if (!early_pci_allowed())
+- return;
+-
+- /* Poor man's PCI discovery */
+- for (num = 0; num < 32; num++) {
+- for (slot = 0; slot < 32; slot++) {
+- for (func = 0; func < 8; func++) {
+- u32 class;
+- u32 vendor;
+- class = read_pci_config(num, slot, func,
+- PCI_CLASS_REVISION);
+- if (class == 0xffffffff)
+- break;
+-
+- if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
+- continue;
+-
+- vendor = read_pci_config(num, slot, func,
+- PCI_VENDOR_ID);
+-
+- if (check_bridge(vendor & 0xffff, vendor >> 16))
+- return;
+- }
+-
+- }
+- }
+-}
+--- a/arch/i386/kernel/setup.c 2007-07-08 19:32:17.000000000 -0400
++++ b/arch/i386/kernel/setup.c 2007-08-27 14:01:19.000000000 -0400
+@@ -627,9 +627,7 @@ void __init setup_arch(char **cmdline_p)
+ #endif
+
+ #ifdef CONFIG_PCI
+-#ifdef CONFIG_X86_IO_APIC
+- check_acpi_pci(); /* Checks more than just ACPI actually */
+-#endif
++ early_quirks();
+ #endif
+
+ #ifdef CONFIG_ACPI
+--- a/arch/x86_64/kernel/early-quirks.c 2007-07-08 19:32:17.000000000 -0400
++++ b/arch/x86_64/kernel/early-quirks.c 2007-08-27 14:04:27.000000000 -0400
+@@ -13,9 +13,14 @@
+ #include <linux/acpi.h>
+ #include <linux/pci_ids.h>
+ #include <asm/pci-direct.h>
+-#include <asm/proto.h>
++#include <asm/io_apic.h>
++#include <asm/apic.h>
+ #include <asm/dma.h>
+
++#ifdef CONFIG_X86_64
++#include <asm/proto.h>
++#endif
++
+ static void __init via_bugs(void)
+ {
+ #ifdef CONFIG_IOMMU
+@@ -39,6 +44,7 @@ static int __init nvidia_hpet_check(stru
+ static void __init nvidia_bugs(void)
+ {
+ #ifdef CONFIG_ACPI
++#ifdef CONFIG_X86_IO_APIC
+ /*
+ * All timer overrides on Nvidia are
+ * wrong unless HPET is enabled.
+@@ -58,17 +64,20 @@ static void __init nvidia_bugs(void)
+ "try acpi_use_timer_override\n");
+ }
+ #endif
++#endif
+ /* RED-PEN skip them on mptables too? */
+
+ }
+
+ static void __init ati_bugs(void)
+ {
++#ifdef CONFIG_X86_IO_APIC
+ if (timer_over_8254 == 1) {
+ timer_over_8254 = 0;
+ printk(KERN_INFO
+ "ATI board detected. Disabling timer routing over 8254.\n");
+ }
++#endif
+ }
+
+ struct chipset {
+--- a/include/asm-i386/acpi.h 2007-07-08 19:32:17.000000000 -0400
++++ b/include/asm-i386/acpi.h 2007-08-27 14:02:03.000000000 -0400
+@@ -81,11 +81,7 @@ int __acpi_release_global_lock(unsigned
+ :"=r"(n_hi), "=r"(n_lo) \
+ :"0"(n_hi), "1"(n_lo))
+
+-#ifdef CONFIG_X86_IO_APIC
+-extern void check_acpi_pci(void);
+-#else
+-static inline void check_acpi_pci(void) { }
+-#endif
++extern void early_quirks(void);
+
+ #ifdef CONFIG_ACPI
+ extern int acpi_lapic;
+--- a/include/asm-i386/dma.h 2007-07-08 19:32:17.000000000 -0400
++++ b/include/asm-i386/dma.h 2007-08-27 14:01:19.000000000 -0400
+@@ -294,4 +294,6 @@ extern int isa_dma_bridge_buggy;
+ #define isa_dma_bridge_buggy (0)
+ #endif
+
++#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT)
++
+ #endif /* _ASM_DMA_H */
+--- a/include/asm-x86_64/io_apic.h 2007-07-08 19:32:17.000000000 -0400
++++ b/include/asm-x86_64/io_apic.h 2007-08-27 14:01:51.000000000 -0400
+@@ -127,4 +127,6 @@ void enable_NMI_through_LVT0 (void * dum
+
+ extern spinlock_t i8259A_lock;
+
++extern int timer_over_8254;
++
+ #endif
+--- a/include/asm-x86_64/proto.h 2007-07-08 19:32:17.000000000 -0400
++++ b/include/asm-x86_64/proto.h 2007-08-27 14:01:19.000000000 -0400
+@@ -106,8 +106,6 @@ extern int fix_aperture;
+ extern int reboot_force;
+ extern int notsc_setup(char *);
+
+-extern int timer_over_8254;
+-
+ extern int gsi_irq_sharing(int gsi);
+
+ extern void smp_local_timer_interrupt(void);
diff --git a/trunk/2.6.22/20001_xen.patch b/trunk/2.6.22/20001_xen.patch
deleted file mode 100644
index 4792f89..0000000
--- a/trunk/2.6.22/20001_xen.patch
+++ /dev/null
@@ -1,93118 +0,0 @@
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/boot-xen/Makefile ubuntu-gutsy-xen/arch/i386/boot-xen/Makefile
---- ubuntu-gutsy/arch/i386/boot-xen/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/boot-xen/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,21 @@
-+
-+OBJCOPYFLAGS := -g --strip-unneeded
-+
-+vmlinuz: vmlinux-stripped FORCE
-+ $(call if_changed,gzip)
-+
-+vmlinux-stripped: vmlinux FORCE
-+ $(call if_changed,objcopy)
-+
-+INSTALL_ROOT := $(patsubst %/boot,%,$(INSTALL_PATH))
-+
-+XINSTALL_NAME ?= $(KERNELRELEASE)
-+install:
-+ mkdir -p $(INSTALL_ROOT)/boot
-+ ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_ROOT)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
-+ rm -f $(INSTALL_ROOT)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+ install -m0644 vmlinuz $(INSTALL_ROOT)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+ install -m0644 vmlinux $(INSTALL_ROOT)/boot/vmlinux-syms-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+ install -m0664 .config $(INSTALL_ROOT)/boot/config-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+ install -m0664 System.map $(INSTALL_ROOT)/boot/System.map-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+ ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_ROOT)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/Kconfig ubuntu-gutsy-xen/arch/i386/Kconfig
---- ubuntu-gutsy/arch/i386/Kconfig 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/Kconfig 2007-08-18 12:38:02.000000000 -0400
-@@ -21,15 +21,17 @@
- config CLOCKSOURCE_WATCHDOG
- bool
- default y
-+ depends on !X86_XEN
-
- config GENERIC_CLOCKEVENTS
- bool
- default y
-+ depends on !X86_XEN
-
- config GENERIC_CLOCKEVENTS_BROADCAST
- bool
- default y
-- depends on X86_LOCAL_APIC
-+ depends on X86_LOCAL_APIC && !X86_XEN
-
- config LOCKDEP_SUPPORT
- bool
-@@ -131,6 +133,15 @@
- help
- Choose this option if your computer is a standard PC or compatible.
-
-+config X86_XEN
-+ bool "Xen-compatible"
-+ select X86_UP_APIC if !SMP && XEN_PRIVILEGED_GUEST
-+ select X86_UP_IOAPIC if !SMP && XEN_PRIVILEGED_GUEST
-+ select SWIOTLB
-+ help
-+ Choose this option if you plan to run this kernel on top of the
-+ Xen Hypervisor.
-+
- config X86_ELAN
- bool "AMD Elan"
- help
-@@ -213,7 +224,7 @@
- config PARAVIRT
- bool "Paravirtualization support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
-- depends on !(X86_VISWS || X86_VOYAGER)
-+ depends on !(X86_VISWS || X86_VOYAGER || X86_XEN)
- help
- Paravirtualization is a way of running multiple instances of
- Linux on the same machine, under a hypervisor. This option
-@@ -261,6 +272,7 @@
-
- config HPET_TIMER
- bool "HPET Timer Support"
-+ depends on !X86_XEN
- help
- This enables the use of the HPET for the kernel's internal timer.
- HPET is the next generation timer replacing legacy 8254s.
-@@ -311,7 +323,7 @@
-
- config X86_UP_APIC
- bool "Local APIC support on uniprocessors"
-- depends on !SMP && !(X86_VISWS || X86_VOYAGER || X86_GENERICARCH)
-+ depends on !SMP && !(X86_VISWS || X86_VOYAGER || X86_GENERICARCH || XEN_UNPRIVILEGED_GUEST)
- help
- A local APIC (Advanced Programmable Interrupt Controller) is an
- integrated interrupt controller in the CPU. If you have a single-CPU
-@@ -336,12 +348,12 @@
-
- config X86_LOCAL_APIC
- bool
-- depends on X86_UP_APIC || ((X86_VISWS || SMP) && !X86_VOYAGER) || X86_GENERICARCH
-+ depends on X86_UP_APIC || ((X86_VISWS || SMP) && !(X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)) || X86_GENERICARCH
- default y
-
- config X86_IO_APIC
- bool
-- depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER)) || X86_GENERICARCH
-+ depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)) || X86_GENERICARCH
- default y
-
- config X86_VISWS_APIC
-@@ -351,7 +363,7 @@
-
- config X86_MCE
- bool "Machine Check Exception"
-- depends on !X86_VOYAGER
-+ depends on !(X86_VOYAGER || X86_XEN)
- ---help---
- Machine Check Exception support allows the processor to notify the
- kernel if it detects a problem (e.g. overheating, component failure).
-@@ -450,6 +462,7 @@
-
- config MICROCODE
- tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
-+ depends on !XEN_UNPRIVILEGED_GUEST
- select FW_LOADER
- ---help---
- If you say Y here and also to "/dev file system support" in the
-@@ -488,6 +501,10 @@
- with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
- /dev/cpu/31/cpuid.
-
-+config SWIOTLB
-+ bool
-+ default n
-+
- source "drivers/firmware/Kconfig"
-
- choice
-@@ -674,6 +691,7 @@
-
- config MATH_EMULATION
- bool "Math emulation"
-+ depends on !X86_XEN
- ---help---
- Linux can emulate a math coprocessor (used for floating point
- operations) if you don't have one. 486DX and Pentium processors have
-@@ -699,6 +717,8 @@
-
- config MTRR
- bool "MTRR (Memory Type Range Register) support"
-+ depends on !XEN_UNPRIVILEGED_GUEST
-+ default y if X86_XEN
- ---help---
- On Intel P6 family processors (Pentium Pro, Pentium II and later)
- the Memory Type Range Registers (MTRRs) may be used to control
-@@ -733,7 +753,7 @@
-
- config EFI
- bool "Boot from EFI support"
-- depends on ACPI
-+ depends on ACPI && !X86_XEN
- default n
- ---help---
- This enables the kernel to boot on EFI platforms using
-@@ -751,7 +771,7 @@
-
- config IRQBALANCE
- bool "Enable kernel irq balancing"
-- depends on SMP && X86_IO_APIC
-+ depends on SMP && X86_IO_APIC && !X86_XEN
- default y
- help
- The default yes will allow the kernel to do irq load balancing.
-@@ -785,6 +805,7 @@
-
- config KEXEC
- bool "kexec system call"
-+ depends on !XEN_UNPRIVILEGED_GUEST
- help
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
-@@ -856,7 +877,7 @@
-
- config RELOCATABLE
- bool "Build a relocatable kernel(EXPERIMENTAL)"
-- depends on EXPERIMENTAL
-+ depends on EXPERIMENTAL && !X86_XEN
- help
- This builds a kernel image that retains relocation information
- so it can be loaded someplace besides the default 1MB.
-@@ -917,15 +938,17 @@
- depends on HIGHMEM
-
- menu "Power management options (ACPI, APM)"
-- depends on !X86_VOYAGER
-+ depends on !(X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)
-
-+if !X86_XEN
- source kernel/power/Kconfig
-+endif
-
- source "drivers/acpi/Kconfig"
-
- menuconfig APM
- tristate "APM (Advanced Power Management) BIOS support"
-- depends on PM && !X86_VISWS
-+ depends on PM && !(X86_VISWS || X86_XEN)
- ---help---
- APM is a BIOS specification for saving power using several different
- techniques. This is mostly useful for battery powered laptops with
-@@ -1051,7 +1074,9 @@
-
- endif # APM
-
-+if !X86_XEN
- source "arch/i386/kernel/cpu/cpufreq/Kconfig"
-+endif
-
- endmenu
-
-@@ -1061,7 +1086,7 @@
- bool "PCI support" if !X86_VISWS
- depends on !X86_VOYAGER
- default y if X86_VISWS
-- select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
-+ select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC && !X86_XEN)
- help
- Find out whether you have a PCI motherboard. PCI is the name of a
- bus system, i.e. the way the CPU talks to the other stuff inside
-@@ -1094,6 +1119,7 @@
-
- config PCI_GOBIOS
- bool "BIOS"
-+ depends on !X86_XEN
-
- config PCI_GOMMCONFIG
- bool "MMConfig"
-@@ -1101,6 +1127,13 @@
- config PCI_GODIRECT
- bool "Direct"
-
-+config PCI_GOXEN_FE
-+ bool "Xen PCI Frontend"
-+ depends on X86_XEN
-+ help
-+ The PCI device frontend driver allows the kernel to import arbitrary
-+ PCI devices from a PCI backend to support PCI driver domains.
-+
- config PCI_GOANY
- bool "Any"
-
-@@ -1108,7 +1141,7 @@
-
- config PCI_BIOS
- bool
-- depends on !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
-+ depends on !(X86_VISWS || X86_XEN) && PCI && (PCI_GOBIOS || PCI_GOANY)
- default y
-
- config PCI_DIRECT
-@@ -1121,6 +1154,18 @@
- depends on PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
- default y
-
-+config XEN_PCIDEV_FRONTEND
-+ bool
-+ depends on PCI && X86_XEN && (PCI_GOXEN_FE || PCI_GOANY)
-+ default y
-+
-+config XEN_PCIDEV_FE_DEBUG
-+ bool "Xen PCI Frontend Debugging"
-+ depends on XEN_PCIDEV_FRONTEND
-+ default n
-+ help
-+ Enables some debug statements within the PCI Frontend.
-+
- source "drivers/pci/pcie/Kconfig"
-
- source "drivers/pci/Kconfig"
-@@ -1131,7 +1176,7 @@
-
- config ISA
- bool "ISA support"
-- depends on !(X86_VOYAGER || X86_VISWS)
-+ depends on !(X86_VOYAGER || X86_VISWS || X86_XEN)
- help
- Find out whether you have ISA slots on your motherboard. ISA is the
- name of a bus system, i.e. the way the CPU talks to the other stuff
-@@ -1158,7 +1203,7 @@
- source "drivers/eisa/Kconfig"
-
- config MCA
-- bool "MCA support" if !(X86_VISWS || X86_VOYAGER)
-+ bool "MCA support" if !(X86_VISWS || X86_VOYAGER || X86_XEN)
- default y if X86_VOYAGER
- help
- MicroChannel Architecture is found in some IBM PS/2 machines and
-@@ -1234,6 +1279,8 @@
-
- source "crypto/Kconfig"
-
-+source "drivers/xen/Kconfig"
-+
- source "lib/Kconfig"
-
- #
-@@ -1259,7 +1306,7 @@
-
- config X86_HT
- bool
-- depends on SMP && !(X86_VISWS || X86_VOYAGER)
-+ depends on SMP && !(X86_VISWS || X86_VOYAGER || X86_XEN)
- default y
-
- config X86_BIOS_REBOOT
-@@ -1272,6 +1319,16 @@
- depends on X86_SMP || (X86_VOYAGER && SMP)
- default y
-
-+config X86_NO_TSS
-+ bool
-+ depends on X86_XEN
-+ default y
-+
-+config X86_NO_IDT
-+ bool
-+ depends on X86_XEN
-+ default y
-+
- config KTIME_SCALAR
- bool
- default y
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/Kconfig.cpu ubuntu-gutsy-xen/arch/i386/Kconfig.cpu
---- ubuntu-gutsy/arch/i386/Kconfig.cpu 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/Kconfig.cpu 2007-08-18 12:38:02.000000000 -0400
-@@ -274,7 +274,7 @@
-
- config X86_F00F_BUG
- bool
-- depends on M586MMX || M586TSC || M586 || M486 || M386
-+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !X86_NO_IDT
- default y
-
- config X86_WP_WORKS_OK
-@@ -299,7 +299,7 @@
-
- config X86_CMPXCHG64
- bool
-- depends on X86_PAE
-+ depends on X86_PAE || X86_XEN
- default y
-
- config X86_ALIGNMENT_16
-@@ -334,7 +334,7 @@
-
- config X86_TSC
- bool
-- depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ
-+ depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ && !X86_XEN
- default y
-
- # this should be set for all -march=.. options where the compiler
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/Kconfig.debug ubuntu-gutsy-xen/arch/i386/Kconfig.debug
---- ubuntu-gutsy/arch/i386/Kconfig.debug 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/Kconfig.debug 2007-08-18 12:38:02.000000000 -0400
-@@ -85,6 +85,7 @@
- config DOUBLEFAULT
- default y
- bool "Enable doublefault exception handler" if EMBEDDED
-+ depends on !X86_NO_TSS
- help
- This option allows trapping of rare doublefault exceptions that
- would otherwise cause a system to silently reboot. Disabling this
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/acpi/boot-xen.c ubuntu-gutsy-xen/arch/i386/kernel/acpi/boot-xen.c
---- ubuntu-gutsy/arch/i386/kernel/acpi/boot-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/acpi/boot-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,1307 @@
-+/*
-+ * boot.c - Architecture-Specific Low-Level ACPI Boot Support
-+ *
-+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
-+ * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
-+ *
-+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ *
-+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/acpi.h>
-+#include <linux/acpi_pmtmr.h>
-+#include <linux/efi.h>
-+#include <linux/cpumask.h>
-+#include <linux/module.h>
-+#include <linux/dmi.h>
-+#include <linux/irq.h>
-+#include <linux/bootmem.h>
-+#include <linux/ioport.h>
-+
-+#include <asm/pgtable.h>
-+#include <asm/io_apic.h>
-+#include <asm/apic.h>
-+#include <asm/io.h>
-+#include <asm/mpspec.h>
-+
-+static int __initdata acpi_force = 0;
-+
-+#ifdef CONFIG_ACPI
-+int acpi_disabled = 0;
-+#else
-+int acpi_disabled = 1;
-+#endif
-+EXPORT_SYMBOL(acpi_disabled);
-+
-+#ifdef CONFIG_X86_64
-+
-+#include <asm/proto.h>
-+
-+static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
-+
-+
-+#else /* X86 */
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+#include <mach_apic.h>
-+#include <mach_mpparse.h>
-+#endif /* CONFIG_X86_LOCAL_APIC */
-+
-+#endif /* X86 */
-+
-+#define BAD_MADT_ENTRY(entry, end) ( \
-+ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
-+ ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
-+
-+#define PREFIX "ACPI: "
-+
-+int acpi_noirq; /* skip ACPI IRQ initialization */
-+int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
-+int acpi_ht __initdata = 1; /* enable HT */
-+
-+int acpi_lapic;
-+int acpi_ioapic;
-+int acpi_strict;
-+EXPORT_SYMBOL(acpi_strict);
-+
-+u8 acpi_sci_flags __initdata;
-+int acpi_sci_override_gsi __initdata;
-+int acpi_skip_timer_override __initdata;
-+int acpi_use_timer_override __initdata;
-+
-+#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
-+static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
-+#endif
-+
-+#ifndef __HAVE_ARCH_CMPXCHG
-+#warning ACPI uses CMPXCHG, i486 and later hardware
-+#endif
-+
-+/* --------------------------------------------------------------------------
-+ Boot-time Configuration
-+ -------------------------------------------------------------------------- */
-+
-+/*
-+ * The default interrupt routing model is PIC (8259). This gets
-+ * overriden if IOAPICs are enumerated (below).
-+ */
-+enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
-+
-+#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN)
-+
-+/* rely on all ACPI tables being in the direct mapping */
-+char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
-+{
-+ if (!phys_addr || !size)
-+ return NULL;
-+
-+ if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE)
-+ return __va(phys_addr);
-+
-+ return NULL;
-+}
-+
-+#else
-+
-+/*
-+ * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
-+ * to map the target physical address. The problem is that set_fixmap()
-+ * provides a single page, and it is possible that the page is not
-+ * sufficient.
-+ * By using this area, we can map up to MAX_IO_APICS pages temporarily,
-+ * i.e. until the next __va_range() call.
-+ *
-+ * Important Safety Note: The fixed I/O APIC page numbers are *subtracted*
-+ * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and
-+ * count idx down while incrementing the phys address.
-+ */
-+char *__acpi_map_table(unsigned long phys, unsigned long size)
-+{
-+ unsigned long base, offset, mapped_size;
-+ int idx;
-+
-+#ifndef CONFIG_XEN
-+ if (phys + size < 8 * 1024 * 1024)
-+ return __va(phys);
-+#endif
-+
-+ offset = phys & (PAGE_SIZE - 1);
-+ mapped_size = PAGE_SIZE - offset;
-+ set_fixmap(FIX_ACPI_END, phys);
-+ base = fix_to_virt(FIX_ACPI_END);
-+
-+ /*
-+ * Most cases can be covered by the below.
-+ */
-+ idx = FIX_ACPI_END;
-+ while (mapped_size < size) {
-+ if (--idx < FIX_ACPI_BEGIN)
-+ return NULL; /* cannot handle this */
-+ phys += PAGE_SIZE;
-+ set_fixmap(idx, phys);
-+ mapped_size += PAGE_SIZE;
-+ }
-+
-+ return ((unsigned char *)base + offset);
-+}
-+#endif
-+
-+#ifdef CONFIG_PCI_MMCONFIG
-+/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
-+struct acpi_mcfg_allocation *pci_mmcfg_config;
-+int pci_mmcfg_config_num;
-+
-+int __init acpi_parse_mcfg(struct acpi_table_header *header)
-+{
-+ struct acpi_table_mcfg *mcfg;
-+ unsigned long i;
-+ int config_size;
-+
-+ if (!header)
-+ return -EINVAL;
-+
-+ mcfg = (struct acpi_table_mcfg *)header;
-+
-+ /* how many config structures do we have */
-+ pci_mmcfg_config_num = 0;
-+ i = header->length - sizeof(struct acpi_table_mcfg);
-+ while (i >= sizeof(struct acpi_mcfg_allocation)) {
-+ ++pci_mmcfg_config_num;
-+ i -= sizeof(struct acpi_mcfg_allocation);
-+ };
-+ if (pci_mmcfg_config_num == 0) {
-+ printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
-+ return -ENODEV;
-+ }
-+
-+ config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
-+ pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
-+ if (!pci_mmcfg_config) {
-+ printk(KERN_WARNING PREFIX
-+ "No memory for MCFG config tables\n");
-+ return -ENOMEM;
-+ }
-+
-+ memcpy(pci_mmcfg_config, &mcfg[1], config_size);
-+ for (i = 0; i < pci_mmcfg_config_num; ++i) {
-+ if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
-+ printk(KERN_ERR PREFIX
-+ "MMCONFIG not in low 4GB of memory\n");
-+ kfree(pci_mmcfg_config);
-+ pci_mmcfg_config_num = 0;
-+ return -ENODEV;
-+ }
-+ }
-+
-+ return 0;
-+}
-+#endif /* CONFIG_PCI_MMCONFIG */
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+static int __init acpi_parse_madt(struct acpi_table_header *table)
-+{
-+ struct acpi_table_madt *madt = NULL;
-+
-+ if (!cpu_has_apic)
-+ return -EINVAL;
-+
-+ madt = (struct acpi_table_madt *)table;
-+ if (!madt) {
-+ printk(KERN_WARNING PREFIX "Unable to map MADT\n");
-+ return -ENODEV;
-+ }
-+
-+#ifndef CONFIG_XEN
-+ if (madt->address) {
-+ acpi_lapic_addr = (u64) madt->address;
-+
-+ printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
-+ madt->address);
-+ }
-+#endif
-+
-+ acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
-+
-+ return 0;
-+}
-+
-+static int __init
-+acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
-+{
-+ struct acpi_madt_local_apic *processor = NULL;
-+
-+ processor = (struct acpi_madt_local_apic *)header;
-+
-+ if (BAD_MADT_ENTRY(processor, end))
-+ return -EINVAL;
-+
-+ acpi_table_print_madt_entry(header);
-+
-+ /*
-+ * We need to register disabled CPU as well to permit
-+ * counting disabled CPUs. This allows us to size
-+ * cpus_possible_map more accurately, to permit
-+ * to not preallocating memory for all NR_CPUS
-+ * when we use CPU hotplug.
-+ */
-+ mp_register_lapic(processor->id, /* APIC ID */
-+ processor->lapic_flags & ACPI_MADT_ENABLED); /* Enabled? */
-+
-+ return 0;
-+}
-+
-+static int __init
-+acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
-+ const unsigned long end)
-+{
-+#ifndef CONFIG_XEN
-+ struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
-+
-+ lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;
-+
-+ if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
-+ return -EINVAL;
-+
-+ acpi_lapic_addr = lapic_addr_ovr->address;
-+#endif
-+
-+ return 0;
-+}
-+
-+static int __init
-+acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
-+{
-+ struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
-+
-+ lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;
-+
-+ if (BAD_MADT_ENTRY(lapic_nmi, end))
-+ return -EINVAL;
-+
-+ acpi_table_print_madt_entry(header);
-+
-+ if (lapic_nmi->lint != 1)
-+ printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
-+
-+ return 0;
-+}
-+
-+#endif /*CONFIG_X86_LOCAL_APIC */
-+
-+#ifdef CONFIG_X86_IO_APIC
-+
-+static int __init
-+acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
-+{
-+ struct acpi_madt_io_apic *ioapic = NULL;
-+
-+ ioapic = (struct acpi_madt_io_apic *)header;
-+
-+ if (BAD_MADT_ENTRY(ioapic, end))
-+ return -EINVAL;
-+
-+ acpi_table_print_madt_entry(header);
-+
-+ mp_register_ioapic(ioapic->id,
-+ ioapic->address, ioapic->global_irq_base);
-+
-+ return 0;
-+}
-+
-+/*
-+ * Parse Interrupt Source Override for the ACPI SCI
-+ */
-+static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
-+{
-+ if (trigger == 0) /* compatible SCI trigger is level */
-+ trigger = 3;
-+
-+ if (polarity == 0) /* compatible SCI polarity is low */
-+ polarity = 3;
-+
-+ /* Command-line over-ride via acpi_sci= */
-+ if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
-+ trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
-+
-+ if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
-+ polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
-+
-+ /*
-+ * mp_config_acpi_legacy_irqs() already setup IRQs < 16
-+ * If GSI is < 16, this will update its flags,
-+ * else it will create a new mp_irqs[] entry.
-+ */
-+ mp_override_legacy_irq(gsi, polarity, trigger, gsi);
-+
-+ /*
-+ * stash over-ride to indicate we've been here
-+ * and for later update of acpi_gbl_FADT
-+ */
-+ acpi_sci_override_gsi = gsi;
-+ return;
-+}
-+
-+static int __init
-+acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
-+ const unsigned long end)
-+{
-+ struct acpi_madt_interrupt_override *intsrc = NULL;
-+
-+ intsrc = (struct acpi_madt_interrupt_override *)header;
-+
-+ if (BAD_MADT_ENTRY(intsrc, end))
-+ return -EINVAL;
-+
-+ acpi_table_print_madt_entry(header);
-+
-+ if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
-+ acpi_sci_ioapic_setup(intsrc->global_irq,
-+ intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
-+ (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
-+ return 0;
-+ }
-+
-+ if (acpi_skip_timer_override &&
-+ intsrc->source_irq == 0 && intsrc->global_irq == 2) {
-+ printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
-+ return 0;
-+ }
-+
-+ mp_override_legacy_irq(intsrc->source_irq,
-+ intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
-+ (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
-+ intsrc->global_irq);
-+
-+ return 0;
-+}
-+
-+static int __init
-+acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
-+{
-+ struct acpi_madt_nmi_source *nmi_src = NULL;
-+
-+ nmi_src = (struct acpi_madt_nmi_source *)header;
-+
-+ if (BAD_MADT_ENTRY(nmi_src, end))
-+ return -EINVAL;
-+
-+ acpi_table_print_madt_entry(header);
-+
-+ /* TBD: Support nimsrc entries? */
-+
-+ return 0;
-+}
-+
-+#endif /* CONFIG_X86_IO_APIC */
-+
-+/*
-+ * acpi_pic_sci_set_trigger()
-+ *
-+ * use ELCR to set PIC-mode trigger type for SCI
-+ *
-+ * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
-+ * it may require Edge Trigger -- use "acpi_sci=edge"
-+ *
-+ * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
-+ * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
-+ * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
-+ * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
-+ */
-+
-+void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
-+{
-+ unsigned int mask = 1 << irq;
-+ unsigned int old, new;
-+
-+ /* Real old ELCR mask */
-+ old = inb(0x4d0) | (inb(0x4d1) << 8);
-+
-+ /*
-+ * If we use ACPI to set PCI irq's, then we should clear ELCR
-+ * since we will set it correctly as we enable the PCI irq
-+ * routing.
-+ */
-+ new = acpi_noirq ? old : 0;
-+
-+ /*
-+ * Update SCI information in the ELCR, it isn't in the PCI
-+ * routing tables..
-+ */
-+ switch (trigger) {
-+ case 1: /* Edge - clear */
-+ new &= ~mask;
-+ break;
-+ case 3: /* Level - set */
-+ new |= mask;
-+ break;
-+ }
-+
-+ if (old == new)
-+ return;
-+
-+ printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
-+ outb(new, 0x4d0);
-+ outb(new >> 8, 0x4d1);
-+}
-+
-+int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
-+{
-+ *irq = gsi;
-+ return 0;
-+}
-+
-+/*
-+ * success: return IRQ number (>=0)
-+ * failure: return < 0
-+ */
-+int acpi_register_gsi(u32 gsi, int triggering, int polarity)
-+{
-+ unsigned int irq;
-+ unsigned int plat_gsi = gsi;
-+
-+#ifdef CONFIG_PCI
-+ /*
-+ * Make sure all (legacy) PCI IRQs are set as level-triggered.
-+ */
-+ if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
-+ extern void eisa_set_level_irq(unsigned int irq);
-+
-+ if (triggering == ACPI_LEVEL_SENSITIVE)
-+ eisa_set_level_irq(gsi);
-+ }
-+#endif
-+
-+#ifdef CONFIG_X86_IO_APIC
-+ if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
-+ plat_gsi = mp_register_gsi(gsi, triggering, polarity);
-+ }
-+#endif
-+ acpi_gsi_to_irq(plat_gsi, &irq);
-+ return irq;
-+}
-+
-+EXPORT_SYMBOL(acpi_register_gsi);
-+
-+/*
-+ * ACPI based hotplug support for CPU
-+ */
-+#ifdef CONFIG_ACPI_HOTPLUG_CPU
-+int acpi_map_lsapic(acpi_handle handle, int *pcpu)
-+{
-+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-+ union acpi_object *obj;
-+ struct acpi_madt_local_apic *lapic;
-+ cpumask_t tmp_map, new_map;
-+ u8 physid;
-+ int cpu;
-+
-+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
-+ return -EINVAL;
-+
-+ if (!buffer.length || !buffer.pointer)
-+ return -EINVAL;
-+
-+ obj = buffer.pointer;
-+ if (obj->type != ACPI_TYPE_BUFFER ||
-+ obj->buffer.length < sizeof(*lapic)) {
-+ kfree(buffer.pointer);
-+ return -EINVAL;
-+ }
-+
-+ lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
-+
-+ if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
-+ !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
-+ kfree(buffer.pointer);
-+ return -EINVAL;
-+ }
-+
-+ physid = lapic->id;
-+
-+ kfree(buffer.pointer);
-+ buffer.length = ACPI_ALLOCATE_BUFFER;
-+ buffer.pointer = NULL;
-+
-+ tmp_map = cpu_present_map;
-+ mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
-+
-+ /*
-+ * If mp_register_lapic successfully generates a new logical cpu
-+ * number, then the following will get us exactly what was mapped
-+ */
-+ cpus_andnot(new_map, cpu_present_map, tmp_map);
-+ if (cpus_empty(new_map)) {
-+ printk ("Unable to map lapic to logical cpu number\n");
-+ return -EINVAL;
-+ }
-+
-+ cpu = first_cpu(new_map);
-+
-+ *pcpu = cpu;
-+ return 0;
-+}
-+
-+EXPORT_SYMBOL(acpi_map_lsapic);
-+
-+int acpi_unmap_lsapic(int cpu)
-+{
-+ x86_cpu_to_apicid[cpu] = -1;
-+ cpu_clear(cpu, cpu_present_map);
-+ num_processors--;
-+
-+ return (0);
-+}
-+
-+EXPORT_SYMBOL(acpi_unmap_lsapic);
-+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-+
-+int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
-+{
-+ /* TBD */
-+ return -EINVAL;
-+}
-+
-+EXPORT_SYMBOL(acpi_register_ioapic);
-+
-+int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
-+{
-+ /* TBD */
-+ return -EINVAL;
-+}
-+
-+EXPORT_SYMBOL(acpi_unregister_ioapic);
-+
-+static unsigned long __init
-+acpi_scan_rsdp(unsigned long start, unsigned long length)
-+{
-+ unsigned long offset = 0;
-+ unsigned long sig_len = sizeof("RSD PTR ") - 1;
-+ unsigned long vstart = (unsigned long)isa_bus_to_virt(start);
-+
-+ /*
-+ * Scan all 16-byte boundaries of the physical memory region for the
-+ * RSDP signature.
-+ */
-+ for (offset = 0; offset < length; offset += 16) {
-+ if (strncmp((char *)(vstart + offset), "RSD PTR ", sig_len))
-+ continue;
-+ return (start + offset);
-+ }
-+
-+ return 0;
-+}
-+
-+static int __init acpi_parse_sbf(struct acpi_table_header *table)
-+{
-+ struct acpi_table_boot *sb;
-+
-+ sb = (struct acpi_table_boot *)table;
-+ if (!sb) {
-+ printk(KERN_WARNING PREFIX "Unable to map SBF\n");
-+ return -ENODEV;
-+ }
-+
-+ sbf_port = sb->cmos_index; /* Save CMOS port */
-+
-+ return 0;
-+}
-+
-+#ifdef CONFIG_HPET_TIMER
-+#include <asm/hpet.h>
-+
-+static int __init acpi_parse_hpet(struct acpi_table_header *table)
-+{
-+ struct acpi_table_hpet *hpet_tbl;
-+
-+ hpet_tbl = (struct acpi_table_hpet *)table;
-+ if (!hpet_tbl) {
-+ printk(KERN_WARNING PREFIX "Unable to map HPET\n");
-+ return -ENODEV;
-+ }
-+
-+ if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
-+ printk(KERN_WARNING PREFIX "HPET timers must be located in "
-+ "memory.\n");
-+ return -1;
-+ }
-+
-+ hpet_address = hpet_tbl->address.address;
-+ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
-+ hpet_tbl->id, hpet_address);
-+
-+ return 0;
-+}
-+#else
-+#define acpi_parse_hpet NULL
-+#endif
-+
-+static int __init acpi_parse_fadt(struct acpi_table_header *table)
-+{
-+
-+#ifdef CONFIG_X86_PM_TIMER
-+ /* detect the location of the ACPI PM Timer */
-+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
-+ /* FADT rev. 2 */
-+ if (acpi_gbl_FADT.xpm_timer_block.space_id !=
-+ ACPI_ADR_SPACE_SYSTEM_IO)
-+ return 0;
-+
-+ pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
-+ /*
-+ * "X" fields are optional extensions to the original V1.0
-+ * fields, so we must selectively expand V1.0 fields if the
-+ * corresponding X field is zero.
-+ */
-+ if (!pmtmr_ioport)
-+ pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
-+ } else {
-+ /* FADT rev. 1 */
-+ pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
-+ }
-+ if (pmtmr_ioport)
-+ printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
-+ pmtmr_ioport);
-+#endif
-+ return 0;
-+}
-+
-+unsigned long __init acpi_find_rsdp(void)
-+{
-+ unsigned long rsdp_phys = 0;
-+
-+ if (efi_enabled) {
-+ if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
-+ return efi.acpi20;
-+ else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
-+ return efi.acpi;
-+ }
-+ /*
-+ * Scan memory looking for the RSDP signature. First search EBDA (low
-+ * memory) paragraphs and then search upper memory (E0000-FFFFF).
-+ */
-+ rsdp_phys = acpi_scan_rsdp(0, 0x400);
-+ if (!rsdp_phys)
-+ rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);
-+
-+ return rsdp_phys;
-+}
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+/*
-+ * Parse LAPIC entries in MADT
-+ * returns 0 on success, < 0 on error
-+ */
-+static int __init acpi_parse_madt_lapic_entries(void)
-+{
-+ int count;
-+
-+ if (!cpu_has_apic)
-+ return -ENODEV;
-+
-+ /*
-+ * Note that the LAPIC address is obtained from the MADT (32-bit value)
-+ * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
-+ */
-+
-+ count =
-+ acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
-+ acpi_parse_lapic_addr_ovr, 0);
-+ if (count < 0) {
-+ printk(KERN_ERR PREFIX
-+ "Error parsing LAPIC address override entry\n");
-+ return count;
-+ }
-+
-+#ifndef CONFIG_XEN
-+ mp_register_lapic_address(acpi_lapic_addr);
-+#endif
-+
-+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic,
-+ MAX_APICS);
-+ if (!count) {
-+ printk(KERN_ERR PREFIX "No LAPIC entries present\n");
-+ /* TBD: Cleanup to allow fallback to MPS */
-+ return -ENODEV;
-+ } else if (count < 0) {
-+ printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
-+ /* TBD: Cleanup to allow fallback to MPS */
-+ return count;
-+ }
-+
-+ count =
-+ acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0);
-+ if (count < 0) {
-+ printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
-+ /* TBD: Cleanup to allow fallback to MPS */
-+ return count;
-+ }
-+ return 0;
-+}
-+#endif /* CONFIG_X86_LOCAL_APIC */
-+
-+#ifdef CONFIG_X86_IO_APIC
-+/*
-+ * Parse IOAPIC related entries in MADT
-+ * returns 0 on success, < 0 on error
-+ */
-+static int __init acpi_parse_madt_ioapic_entries(void)
-+{
-+ int count;
-+
-+ /*
-+ * ACPI interpreter is required to complete interrupt setup,
-+ * so if it is off, don't enumerate the io-apics with ACPI.
-+ * If MPS is present, it will handle them,
-+ * otherwise the system will stay in PIC mode
-+ */
-+ if (acpi_disabled || acpi_noirq) {
-+ return -ENODEV;
-+ }
-+
-+ if (!cpu_has_apic)
-+ return -ENODEV;
-+
-+ /*
-+ * if "noapic" boot option, don't look for IO-APICs
-+ */
-+ if (skip_ioapic_setup) {
-+ printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
-+ "due to 'noapic' option.\n");
-+ return -ENODEV;
-+ }
-+
-+ count =
-+ acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
-+ MAX_IO_APICS);
-+ if (!count) {
-+ printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
-+ return -ENODEV;
-+ } else if (count < 0) {
-+ printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
-+ return count;
-+ }
-+
-+ count =
-+ acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
-+ NR_IRQ_VECTORS);
-+ if (count < 0) {
-+ printk(KERN_ERR PREFIX
-+ "Error parsing interrupt source overrides entry\n");
-+ /* TBD: Cleanup to allow fallback to MPS */
-+ return count;
-+ }
-+
-+ /*
-+ * If BIOS did not supply an INT_SRC_OVR for the SCI
-+ * pretend we got one so we can set the SCI flags.
-+ */
-+ if (!acpi_sci_override_gsi)
-+ acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
-+
-+ /* Fill in identity legacy mappings where no override */
-+ mp_config_acpi_legacy_irqs();
-+
-+ count =
-+ acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
-+ NR_IRQ_VECTORS);
-+ if (count < 0) {
-+ printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
-+ /* TBD: Cleanup to allow fallback to MPS */
-+ return count;
-+ }
-+
-+ return 0;
-+}
-+#else
-+static inline int acpi_parse_madt_ioapic_entries(void)
-+{
-+ return -1;
-+}
-+#endif /* !CONFIG_X86_IO_APIC */
-+
-+static void __init acpi_process_madt(void)
-+{
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ int error;
-+
-+ if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
-+
-+ /*
-+ * Parse MADT LAPIC entries
-+ */
-+ error = acpi_parse_madt_lapic_entries();
-+ if (!error) {
-+ acpi_lapic = 1;
-+
-+#ifdef CONFIG_X86_GENERICARCH
-+ generic_bigsmp_probe();
-+#endif
-+ /*
-+ * Parse MADT IO-APIC entries
-+ */
-+ error = acpi_parse_madt_ioapic_entries();
-+ if (!error) {
-+ acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
-+ acpi_irq_balance_set(NULL);
-+ acpi_ioapic = 1;
-+
-+ smp_found_config = 1;
-+ setup_apic_routing();
-+ }
-+ }
-+ if (error == -EINVAL) {
-+ /*
-+ * Dell Precision Workstation 410, 610 come here.
-+ */
-+ printk(KERN_ERR PREFIX
-+ "Invalid BIOS MADT, disabling ACPI\n");
-+ disable_acpi();
-+ }
-+ }
-+#endif
-+ return;
-+}
-+
-+#ifdef __i386__
-+
-+static int __init disable_acpi_irq(struct dmi_system_id *d)
-+{
-+ if (!acpi_force) {
-+ printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
-+ d->ident);
-+ acpi_noirq_set();
-+ }
-+ return 0;
-+}
-+
-+static int __init disable_acpi_pci(struct dmi_system_id *d)
-+{
-+ if (!acpi_force) {
-+ printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
-+ d->ident);
-+ acpi_disable_pci();
-+ }
-+ return 0;
-+}
-+
-+static int __init dmi_disable_acpi(struct dmi_system_id *d)
-+{
-+ if (!acpi_force) {
-+ printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
-+ disable_acpi();
-+ } else {
-+ printk(KERN_NOTICE
-+ "Warning: DMI blacklist says broken, but acpi forced\n");
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Limit ACPI to CPU enumeration for HT
-+ */
-+static int __init force_acpi_ht(struct dmi_system_id *d)
-+{
-+ if (!acpi_force) {
-+ printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
-+ d->ident);
-+ disable_acpi();
-+ acpi_ht = 1;
-+ } else {
-+ printk(KERN_NOTICE
-+ "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * If your system is blacklisted here, but you find that acpi=force
-+ * works for you, please contact acpi-devel@sourceforge.net
-+ */
-+static struct dmi_system_id __initdata acpi_dmi_table[] = {
-+ /*
-+ * Boxes that need ACPI disabled
-+ */
-+ {
-+ .callback = dmi_disable_acpi,
-+ .ident = "IBM Thinkpad",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+ DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
-+ },
-+ },
-+
-+ /*
-+ * Boxes that need acpi=ht
-+ */
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "FSC Primergy T850",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "DELL GX240",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
-+ DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "HP VISUALIZE NT Workstation",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "Compaq Workstation W8000",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "ASUS P4B266",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-+ DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "ASUS P2B-DS",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-+ DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "ASUS CUR-DLS",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-+ DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "ABIT i440BX-W83977",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
-+ DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "IBM Bladecenter",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+ DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "IBM eServer xSeries 360",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+ DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "IBM eserver xSeries 330",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+ DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "IBM eserver xSeries 440",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
-+ },
-+ },
-+
-+ /*
-+ * Boxes that need ACPI PCI IRQ routing disabled
-+ */
-+ {
-+ .callback = disable_acpi_irq,
-+ .ident = "ASUS A7V",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
-+ DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
-+ /* newer BIOS, Revision 1011, does work */
-+ DMI_MATCH(DMI_BIOS_VERSION,
-+ "ASUS A7V ACPI BIOS Revision 1007"),
-+ },
-+ },
-+ {
-+ /*
-+ * Latest BIOS for IBM 600E (1.16) has bad pcinum
-+ * for LPC bridge, which is needed for the PCI
-+ * interrupt links to work. DSDT fix is in bug 5966.
-+ * 2645, 2646 model numbers are shared with 600/600E/600X
-+ */
-+ .callback = disable_acpi_irq,
-+ .ident = "IBM Thinkpad 600 Series 2645",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+ DMI_MATCH(DMI_BOARD_NAME, "2645"),
-+ },
-+ },
-+ {
-+ .callback = disable_acpi_irq,
-+ .ident = "IBM Thinkpad 600 Series 2646",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+ DMI_MATCH(DMI_BOARD_NAME, "2646"),
-+ },
-+ },
-+ /*
-+ * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
-+ */
-+ { /* _BBN 0 bug */
-+ .callback = disable_acpi_pci,
-+ .ident = "ASUS PR-DLS",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-+ DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
-+ DMI_MATCH(DMI_BIOS_VERSION,
-+ "ASUS PR-DLS ACPI BIOS Revision 1010"),
-+ DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
-+ },
-+ },
-+ {
-+ .callback = disable_acpi_pci,
-+ .ident = "Acer TravelMate 36x Laptop",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
-+ },
-+ },
-+ {}
-+};
-+
-+#endif /* __i386__ */
-+
-+/*
-+ * acpi_boot_table_init() and acpi_boot_init()
-+ * called from setup_arch(), always.
-+ * 1. checksums all tables
-+ * 2. enumerates lapics
-+ * 3. enumerates io-apics
-+ *
-+ * acpi_table_init() is separate to allow reading SRAT without
-+ * other side effects.
-+ *
-+ * side effects of acpi_boot_init:
-+ * acpi_lapic = 1 if LAPIC found
-+ * acpi_ioapic = 1 if IOAPIC found
-+ * if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
-+ * if acpi_blacklisted() acpi_disabled = 1;
-+ * acpi_irq_model=...
-+ * ...
-+ *
-+ * return value: (currently ignored)
-+ * 0: success
-+ * !0: failure
-+ */
-+
-+int __init acpi_boot_table_init(void)
-+{
-+ int error;
-+
-+#ifdef __i386__
-+ dmi_check_system(acpi_dmi_table);
-+#endif
-+
-+ /*
-+ * If acpi_disabled, bail out
-+ * One exception: acpi=ht continues far enough to enumerate LAPICs
-+ */
-+ if (acpi_disabled && !acpi_ht)
-+ return 1;
-+
-+ /*
-+ * Initialize the ACPI boot-time table parser.
-+ */
-+ error = acpi_table_init();
-+ if (error) {
-+ disable_acpi();
-+ return error;
-+ }
-+
-+ acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
-+
-+ /*
-+ * blacklist may disable ACPI entirely
-+ */
-+ error = acpi_blacklisted();
-+ if (error) {
-+ if (acpi_force) {
-+ printk(KERN_WARNING PREFIX "acpi=force override\n");
-+ } else {
-+ printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
-+ disable_acpi();
-+ return error;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+int __init acpi_boot_init(void)
-+{
-+ /*
-+ * If acpi_disabled, bail out
-+ * One exception: acpi=ht continues far enough to enumerate LAPICs
-+ */
-+ if (acpi_disabled && !acpi_ht)
-+ return 1;
-+
-+ acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
-+
-+ /*
-+ * set sci_int and PM timer address
-+ */
-+ acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
-+
-+ /*
-+ * Process the Multiple APIC Description Table (MADT), if present
-+ */
-+ acpi_process_madt();
-+
-+ acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
-+
-+ return 0;
-+}
-+
-+static int __init parse_acpi(char *arg)
-+{
-+ if (!arg)
-+ return -EINVAL;
-+
-+ /* "acpi=off" disables both ACPI table parsing and interpreter */
-+ if (strcmp(arg, "off") == 0) {
-+ disable_acpi();
-+ }
-+ /* acpi=force to over-ride black-list */
-+ else if (strcmp(arg, "force") == 0) {
-+ acpi_force = 1;
-+ acpi_ht = 1;
-+ acpi_disabled = 0;
-+ }
-+ /* acpi=strict disables out-of-spec workarounds */
-+ else if (strcmp(arg, "strict") == 0) {
-+ acpi_strict = 1;
-+ }
-+ /* Limit ACPI just to boot-time to enable HT */
-+ else if (strcmp(arg, "ht") == 0) {
-+ if (!acpi_force)
-+ disable_acpi();
-+ acpi_ht = 1;
-+ }
-+ /* "acpi=noirq" disables ACPI interrupt routing */
-+ else if (strcmp(arg, "noirq") == 0) {
-+ acpi_noirq_set();
-+ } else {
-+ /* Core will printk when we return error. */
-+ return -EINVAL;
-+ }
-+ return 0;
-+}
-+early_param("acpi", parse_acpi);
-+
-+/* FIXME: Using pci= for an ACPI parameter is a travesty. */
-+static int __init parse_pci(char *arg)
-+{
-+ if (arg && strcmp(arg, "noacpi") == 0)
-+ acpi_disable_pci();
-+ return 0;
-+}
-+early_param("pci", parse_pci);
-+
-+#ifdef CONFIG_X86_IO_APIC
-+static int __init parse_acpi_skip_timer_override(char *arg)
-+{
-+ acpi_skip_timer_override = 1;
-+ return 0;
-+}
-+early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
-+
-+static int __init parse_acpi_use_timer_override(char *arg)
-+{
-+ acpi_use_timer_override = 1;
-+ return 0;
-+}
-+early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
-+#endif /* CONFIG_X86_IO_APIC */
-+
-+static int __init setup_acpi_sci(char *s)
-+{
-+ if (!s)
-+ return -EINVAL;
-+ if (!strcmp(s, "edge"))
-+ acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE |
-+ (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
-+ else if (!strcmp(s, "level"))
-+ acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
-+ (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
-+ else if (!strcmp(s, "high"))
-+ acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
-+ (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
-+ else if (!strcmp(s, "low"))
-+ acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
-+ (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
-+ else
-+ return -EINVAL;
-+ return 0;
-+}
-+early_param("acpi_sci", setup_acpi_sci);
-+
-+int __acpi_acquire_global_lock(unsigned int *lock)
-+{
-+ unsigned int old, new, val;
-+ do {
-+ old = *lock;
-+ new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
-+ val = cmpxchg(lock, old, new);
-+ } while (unlikely (val != old));
-+ return (new < 3) ? -1 : 0;
-+}
-+
-+int __acpi_release_global_lock(unsigned int *lock)
-+{
-+ unsigned int old, new, val;
-+ do {
-+ old = *lock;
-+ new = old & ~0x3;
-+ val = cmpxchg(lock, old, new);
-+ } while (unlikely (val != old));
-+ return old & 0x1;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/acpi/earlyquirk.c ubuntu-gutsy-xen/arch/i386/kernel/acpi/earlyquirk.c
---- ubuntu-gutsy/arch/i386/kernel/acpi/earlyquirk.c 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/acpi/earlyquirk.c 2007-08-18 12:38:02.000000000 -0400
-@@ -11,7 +11,7 @@
- #include <asm/acpi.h>
- #include <asm/apic.h>
-
--#ifdef CONFIG_ACPI
-+#if defined(CONFIG_ACPI) && !defined(CONFIG_XEN)
-
- static int __init nvidia_hpet_check(struct acpi_table_header *header)
- {
-@@ -21,6 +21,7 @@
-
- static int __init check_bridge(int vendor, int device)
- {
-+#ifndef CONFIG_XEN
- #ifdef CONFIG_ACPI
- static int warned;
- /* According to Nvidia all timer overrides are bogus unless HPET
-@@ -44,6 +45,7 @@
- printk(KERN_INFO "ATI board detected. Disabling timer routing "
- "over 8254.\n");
- }
-+#endif
- return 0;
- }
-
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/acpi/Makefile ubuntu-gutsy-xen/arch/i386/kernel/acpi/Makefile
---- ubuntu-gutsy/arch/i386/kernel/acpi/Makefile 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/acpi/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -8,3 +8,9 @@
- obj-y += cstate.o processor.o
- endif
-
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+n-obj-xen := cstate.o
-+obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
-+obj-y := $(call cherrypickxen, $(obj-y), $(src))
-+endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/apic-xen.c ubuntu-gutsy-xen/arch/i386/kernel/apic-xen.c
---- ubuntu-gutsy/arch/i386/kernel/apic-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/apic-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,54 @@
-+/*
-+ * Local APIC handling stubs
-+ */
-+
-+#include <linux/init.h>
-+
-+#include <asm/hw_irq.h>
-+
-+/*
-+ * Debug level, exported for io_apic.c
-+ */
-+int apic_verbosity;
-+
-+static int __init apic_set_verbosity(char *str)
-+{
-+ if (strcmp("debug", str) == 0)
-+ apic_verbosity = APIC_DEBUG;
-+ else if (strcmp("verbose", str) == 0)
-+ apic_verbosity = APIC_VERBOSE;
-+ return 1;
-+}
-+
-+__setup("apic=", apic_set_verbosity);
-+
-+#ifdef CONFIG_X86_64
-+/*
-+ * 'what should we do if we get a hw irq event on an illegal vector'.
-+ * each architecture has to answer this themselves.
-+ */
-+void ack_bad_irq(unsigned int irq)
-+{
-+ printk("unexpected IRQ trap at irq %02x\n", irq);
-+}
-+#endif
-+
-+int setup_profiling_timer(unsigned int multiplier)
-+{
-+ return -EINVAL;
-+}
-+
-+/*
-+ * This initializes the IO-APIC and APIC hardware if this is
-+ * a UP kernel.
-+ */
-+int __init APIC_init_uniprocessor (void)
-+{
-+#ifdef CONFIG_X86_IO_APIC
-+ if (smp_found_config)
-+ if (!skip_ioapic_setup && nr_ioapics)
-+ setup_IO_APIC();
-+#endif
-+
-+ return 0;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/asm-offsets.c ubuntu-gutsy-xen/arch/i386/kernel/asm-offsets.c
---- ubuntu-gutsy/arch/i386/kernel/asm-offsets.c 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/asm-offsets.c 2007-08-18 12:38:02.000000000 -0400
-@@ -16,6 +16,9 @@
- #include <asm/processor.h>
- #include <asm/thread_info.h>
- #include <asm/elf.h>
-+#ifdef CONFIG_XEN
-+#include <xen/interface/xen.h>
-+#endif
-
- #define DEFINE(sym, val) \
- asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-@@ -55,6 +58,7 @@
- OFFSET(TI_exec_domain, thread_info, exec_domain);
- OFFSET(TI_flags, thread_info, flags);
- OFFSET(TI_status, thread_info, status);
-+ OFFSET(TI_cpu, thread_info, cpu);
- OFFSET(TI_preempt_count, thread_info, preempt_count);
- OFFSET(TI_addr_limit, thread_info, addr_limit);
- OFFSET(TI_restart_block, thread_info, restart_block);
-@@ -92,9 +96,14 @@
- OFFSET(pbe_orig_address, pbe, orig_address);
- OFFSET(pbe_next, pbe, next);
-
-+#ifndef CONFIG_X86_NO_TSS
- /* Offset from the sysenter stack to tss.esp0 */
-- DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, x86_tss.esp0) -
-+ DEFINE(SYSENTER_stack_esp0, offsetof(struct tss_struct, x86_tss.esp0) -
- sizeof(struct tss_struct));
-+#else
-+ /* sysenter stack points directly to esp0 */
-+ DEFINE(SYSENTER_stack_esp0, 0);
-+#endif
-
- DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
- DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
-@@ -106,6 +115,11 @@
-
- OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
-
-+#ifdef CONFIG_XEN
-+ BLANK();
-+ OFFSET(XEN_START_mfn_list, start_info, mfn_list);
-+#endif
-+
- #ifdef CONFIG_PARAVIRT
- BLANK();
- OFFSET(PARAVIRT_enabled, paravirt_ops, paravirt_enabled);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/cpu/amd.c ubuntu-gutsy-xen/arch/i386/kernel/cpu/amd.c
---- ubuntu-gutsy/arch/i386/kernel/cpu/amd.c 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/cpu/amd.c 2007-08-18 12:42:35.000000000 -0400
-@@ -57,6 +57,7 @@
- #endif
-
- int force_mwait __cpuinitdata;
-+static int local_apic_timer_disabled;
-
- static void __cpuinit init_amd(struct cpuinfo_x86 *c)
- {
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/cpu/common-xen.c ubuntu-gutsy-xen/arch/i386/kernel/cpu/common-xen.c
---- ubuntu-gutsy/arch/i386/kernel/cpu/common-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/cpu/common-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,751 @@
-+#include <linux/init.h>
-+#include <linux/string.h>
-+#include <linux/delay.h>
-+#include <linux/smp.h>
-+#include <linux/module.h>
-+#include <linux/percpu.h>
-+#include <linux/bootmem.h>
-+#include <asm/semaphore.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/msr.h>
-+#include <asm/io.h>
-+#include <asm/mmu_context.h>
-+#include <asm/mtrr.h>
-+#include <asm/mce.h>
-+#ifdef CONFIG_X86_LOCAL_APIC
-+#include <asm/mpspec.h>
-+#include <asm/apic.h>
-+#include <mach_apic.h>
-+#else
-+#ifdef CONFIG_XEN
-+#define phys_pkg_id(a,b) a
-+#endif
-+#endif
-+#include <asm/hypervisor.h>
-+
-+#include "cpu.h"
-+
-+DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
-+ [GDT_ENTRY_KERNEL_CS] = { 0x0000ffff, 0x00cf9a00 },
-+ [GDT_ENTRY_KERNEL_DS] = { 0x0000ffff, 0x00cf9200 },
-+ [GDT_ENTRY_DEFAULT_USER_CS] = { 0x0000ffff, 0x00cffa00 },
-+ [GDT_ENTRY_DEFAULT_USER_DS] = { 0x0000ffff, 0x00cff200 },
-+#ifndef CONFIG_XEN
-+ /*
-+ * Segments used for calling PnP BIOS have byte granularity.
-+ * The code segments and data segments have fixed 64k limits,
-+ * the transfer segment sizes are set at run time.
-+ */
-+ [GDT_ENTRY_PNPBIOS_CS32] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
-+ [GDT_ENTRY_PNPBIOS_CS16] = { 0x0000ffff, 0x00009a00 },/* 16-bit code */
-+ [GDT_ENTRY_PNPBIOS_DS] = { 0x0000ffff, 0x00009200 }, /* 16-bit data */
-+ [GDT_ENTRY_PNPBIOS_TS1] = { 0x00000000, 0x00009200 },/* 16-bit data */
-+ [GDT_ENTRY_PNPBIOS_TS2] = { 0x00000000, 0x00009200 },/* 16-bit data */
-+ /*
-+ * The APM segments have byte granularity and their bases
-+ * are set at run time. All have 64k limits.
-+ */
-+ [GDT_ENTRY_APMBIOS_BASE] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
-+ /* 16-bit code */
-+ [GDT_ENTRY_APMBIOS_BASE+1] = { 0x0000ffff, 0x00009a00 },
-+ [GDT_ENTRY_APMBIOS_BASE+2] = { 0x0000ffff, 0x00409200 }, /* data */
-+
-+ [GDT_ENTRY_ESPFIX_SS] = { 0x00000000, 0x00c09200 },
-+#endif
-+ [GDT_ENTRY_PERCPU] = { 0x00000000, 0x00000000 },
-+} };
-+EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
-+
-+static int cachesize_override __cpuinitdata = -1;
-+static int disable_x86_fxsr __cpuinitdata;
-+static int disable_x86_serial_nr __cpuinitdata = 1;
-+static int disable_x86_sep __cpuinitdata;
-+
-+struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
-+
-+extern int disable_pse;
-+
-+static void __cpuinit default_init(struct cpuinfo_x86 * c)
-+{
-+ /* Not much we can do here... */
-+ /* Check if at least it has cpuid */
-+ if (c->cpuid_level == -1) {
-+ /* No cpuid. It must be an ancient CPU */
-+ if (c->x86 == 4)
-+ strcpy(c->x86_model_id, "486");
-+ else if (c->x86 == 3)
-+ strcpy(c->x86_model_id, "386");
-+ }
-+}
-+
-+static struct cpu_dev __cpuinitdata default_cpu = {
-+ .c_init = default_init,
-+ .c_vendor = "Unknown",
-+};
-+static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu;
-+
-+static int __init cachesize_setup(char *str)
-+{
-+ get_option (&str, &cachesize_override);
-+ return 1;
-+}
-+__setup("cachesize=", cachesize_setup);
-+
-+int __cpuinit get_model_name(struct cpuinfo_x86 *c)
-+{
-+ unsigned int *v;
-+ char *p, *q;
-+
-+ if (cpuid_eax(0x80000000) < 0x80000004)
-+ return 0;
-+
-+ v = (unsigned int *) c->x86_model_id;
-+ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
-+ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
-+ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
-+ c->x86_model_id[48] = 0;
-+
-+ /* Intel chips right-justify this string for some dumb reason;
-+ undo that brain damage */
-+ p = q = &c->x86_model_id[0];
-+ while ( *p == ' ' )
-+ p++;
-+ if ( p != q ) {
-+ while ( *p )
-+ *q++ = *p++;
-+ while ( q <= &c->x86_model_id[48] )
-+ *q++ = '\0'; /* Zero-pad the rest */
-+ }
-+
-+ return 1;
-+}
-+
-+
-+void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
-+{
-+ unsigned int n, dummy, ecx, edx, l2size;
-+
-+ n = cpuid_eax(0x80000000);
-+
-+ if (n >= 0x80000005) {
-+ cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
-+ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-+ c->x86_cache_size=(ecx>>24)+(edx>>24);
-+ }
-+
-+ if (n < 0x80000006) /* Some chips just have a large L1. */
-+ return;
-+
-+ ecx = cpuid_ecx(0x80000006);
-+ l2size = ecx >> 16;
-+
-+ /* do processor-specific cache resizing */
-+ if (this_cpu->c_size_cache)
-+ l2size = this_cpu->c_size_cache(c,l2size);
-+
-+ /* Allow user to override all this if necessary. */
-+ if (cachesize_override != -1)
-+ l2size = cachesize_override;
-+
-+ if ( l2size == 0 )
-+ return; /* Again, no L2 cache is possible */
-+
-+ c->x86_cache_size = l2size;
-+
-+ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-+ l2size, ecx & 0xFF);
-+}
-+
-+/* Naming convention should be: <Name> [(<Codename>)] */
-+/* This table only is used unless init_<vendor>() below doesn't set it; */
-+/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
-+
-+/* Look up CPU names by table lookup. */
-+static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
-+{
-+ struct cpu_model_info *info;
-+
-+ if ( c->x86_model >= 16 )
-+ return NULL; /* Range check */
-+
-+ if (!this_cpu)
-+ return NULL;
-+
-+ info = this_cpu->c_models;
-+
-+ while (info && info->family) {
-+ if (info->family == c->x86)
-+ return info->model_names[c->x86_model];
-+ info++;
-+ }
-+ return NULL; /* Not found */
-+}
-+
-+
-+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
-+{
-+ char *v = c->x86_vendor_id;
-+ int i;
-+ static int printed;
-+
-+ for (i = 0; i < X86_VENDOR_NUM; i++) {
-+ if (cpu_devs[i]) {
-+ if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
-+ (cpu_devs[i]->c_ident[1] &&
-+ !strcmp(v,cpu_devs[i]->c_ident[1]))) {
-+ c->x86_vendor = i;
-+ if (!early)
-+ this_cpu = cpu_devs[i];
-+ return;
-+ }
-+ }
-+ }
-+ if (!printed) {
-+ printed++;
-+ printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
-+ printk(KERN_ERR "CPU: Your system may be unstable.\n");
-+ }
-+ c->x86_vendor = X86_VENDOR_UNKNOWN;
-+ this_cpu = &default_cpu;
-+}
-+
-+
-+static int __init x86_fxsr_setup(char * s)
-+{
-+ /* Tell all the other CPU's to not use it... */
-+ disable_x86_fxsr = 1;
-+
-+ /*
-+ * ... and clear the bits early in the boot_cpu_data
-+ * so that the bootup process doesn't try to do this
-+ * either.
-+ */
-+ clear_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability);
-+ clear_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability);
-+ return 1;
-+}
-+__setup("nofxsr", x86_fxsr_setup);
-+
-+
-+static int __init x86_sep_setup(char * s)
-+{
-+ disable_x86_sep = 1;
-+ return 1;
-+}
-+__setup("nosep", x86_sep_setup);
-+
-+
-+/* Standard macro to see if a specific flag is changeable */
-+static inline int flag_is_changeable_p(u32 flag)
-+{
-+ u32 f1, f2;
-+
-+ asm("pushfl\n\t"
-+ "pushfl\n\t"
-+ "popl %0\n\t"
-+ "movl %0,%1\n\t"
-+ "xorl %2,%0\n\t"
-+ "pushl %0\n\t"
-+ "popfl\n\t"
-+ "pushfl\n\t"
-+ "popl %0\n\t"
-+ "popfl\n\t"
-+ : "=&r" (f1), "=&r" (f2)
-+ : "ir" (flag));
-+
-+ return ((f1^f2) & flag) != 0;
-+}
-+
-+
-+/* Probe for the CPUID instruction */
-+static int __cpuinit have_cpuid_p(void)
-+{
-+ return flag_is_changeable_p(X86_EFLAGS_ID);
-+}
-+
-+void __init cpu_detect(struct cpuinfo_x86 *c)
-+{
-+ /* Get vendor name */
-+ cpuid(0x00000000, &c->cpuid_level,
-+ (int *)&c->x86_vendor_id[0],
-+ (int *)&c->x86_vendor_id[8],
-+ (int *)&c->x86_vendor_id[4]);
-+
-+ c->x86 = 4;
-+ if (c->cpuid_level >= 0x00000001) {
-+ u32 junk, tfms, cap0, misc;
-+ cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
-+ c->x86 = (tfms >> 8) & 15;
-+ c->x86_model = (tfms >> 4) & 15;
-+ if (c->x86 == 0xf)
-+ c->x86 += (tfms >> 20) & 0xff;
-+ if (c->x86 >= 0x6)
-+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
-+ c->x86_mask = tfms & 15;
-+ if (cap0 & (1<<19))
-+ c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
-+ }
-+}
-+
-+/* Do minimum CPU detection early.
-+ Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
-+ The others are not touched to avoid unwanted side effects.
-+
-+ WARNING: this function is only called on the BP. Don't add code here
-+ that is supposed to run on all CPUs. */
-+static void __init early_cpu_detect(void)
-+{
-+ struct cpuinfo_x86 *c = &boot_cpu_data;
-+
-+ c->x86_cache_alignment = 32;
-+
-+ if (!have_cpuid_p())
-+ return;
-+
-+ cpu_detect(c);
-+
-+ get_cpu_vendor(c, 1);
-+}
-+
-+static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
-+{
-+ u32 tfms, xlvl;
-+ int ebx;
-+
-+ if (have_cpuid_p()) {
-+ /* Get vendor name */
-+ cpuid(0x00000000, &c->cpuid_level,
-+ (int *)&c->x86_vendor_id[0],
-+ (int *)&c->x86_vendor_id[8],
-+ (int *)&c->x86_vendor_id[4]);
-+
-+ get_cpu_vendor(c, 0);
-+ /* Initialize the standard set of capabilities */
-+ /* Note that the vendor-specific code below might override */
-+
-+ /* Intel-defined flags: level 0x00000001 */
-+ if ( c->cpuid_level >= 0x00000001 ) {
-+ u32 capability, excap;
-+ cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
-+ c->x86_capability[0] = capability;
-+ c->x86_capability[4] = excap;
-+ c->x86 = (tfms >> 8) & 15;
-+ c->x86_model = (tfms >> 4) & 15;
-+ if (c->x86 == 0xf)
-+ c->x86 += (tfms >> 20) & 0xff;
-+ if (c->x86 >= 0x6)
-+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
-+ c->x86_mask = tfms & 15;
-+#ifdef CONFIG_X86_HT
-+ c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
-+#else
-+ c->apicid = (ebx >> 24) & 0xFF;
-+#endif
-+ if (c->x86_capability[0] & (1<<19))
-+ c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
-+ } else {
-+ /* Have CPUID level 0 only - unheard of */
-+ c->x86 = 4;
-+ }
-+
-+ /* AMD-defined flags: level 0x80000001 */
-+ xlvl = cpuid_eax(0x80000000);
-+ if ( (xlvl & 0xffff0000) == 0x80000000 ) {
-+ if ( xlvl >= 0x80000001 ) {
-+ c->x86_capability[1] = cpuid_edx(0x80000001);
-+ c->x86_capability[6] = cpuid_ecx(0x80000001);
-+ }
-+ if ( xlvl >= 0x80000004 )
-+ get_model_name(c); /* Default name */
-+ }
-+ }
-+
-+ early_intel_workaround(c);
-+
-+#ifdef CONFIG_X86_HT
-+ c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
-+#endif
-+}
-+
-+static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
-+{
-+ if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
-+ /* Disable processor serial number */
-+ unsigned long lo,hi;
-+ rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
-+ lo |= 0x200000;
-+ wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
-+ printk(KERN_NOTICE "CPU serial number disabled.\n");
-+ clear_bit(X86_FEATURE_PN, c->x86_capability);
-+
-+ /* Disabling the serial number may affect the cpuid level */
-+ c->cpuid_level = cpuid_eax(0);
-+ }
-+}
-+
-+static int __init x86_serial_nr_setup(char *s)
-+{
-+ disable_x86_serial_nr = 0;
-+ return 1;
-+}
-+__setup("serialnumber", x86_serial_nr_setup);
-+
-+
-+
-+/*
-+ * This does the hard work of actually picking apart the CPU stuff...
-+ */
-+static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
-+{
-+ int i;
-+
-+ c->loops_per_jiffy = loops_per_jiffy;
-+ c->x86_cache_size = -1;
-+ c->x86_vendor = X86_VENDOR_UNKNOWN;
-+ c->cpuid_level = -1; /* CPUID not detected */
-+ c->x86_model = c->x86_mask = 0; /* So far unknown... */
-+ c->x86_vendor_id[0] = '\0'; /* Unset */
-+ c->x86_model_id[0] = '\0'; /* Unset */
-+ c->x86_max_cores = 1;
-+ c->x86_clflush_size = 32;
-+ memset(&c->x86_capability, 0, sizeof c->x86_capability);
-+
-+ if (!have_cpuid_p()) {
-+ /* First of all, decide if this is a 486 or higher */
-+ /* It's a 486 if we can modify the AC flag */
-+ if ( flag_is_changeable_p(X86_EFLAGS_AC) )
-+ c->x86 = 4;
-+ else
-+ c->x86 = 3;
-+ }
-+
-+ generic_identify(c);
-+
-+ printk(KERN_DEBUG "CPU: After generic identify, caps:");
-+ for (i = 0; i < NCAPINTS; i++)
-+ printk(" %08lx", c->x86_capability[i]);
-+ printk("\n");
-+
-+ if (this_cpu->c_identify) {
-+ this_cpu->c_identify(c);
-+
-+ printk(KERN_DEBUG "CPU: After vendor identify, caps:");
-+ for (i = 0; i < NCAPINTS; i++)
-+ printk(" %08lx", c->x86_capability[i]);
-+ printk("\n");
-+ }
-+
-+ /*
-+ * Vendor-specific initialization. In this section we
-+ * canonicalize the feature flags, meaning if there are
-+ * features a certain CPU supports which CPUID doesn't
-+ * tell us, CPUID claiming incorrect flags, or other bugs,
-+ * we handle them here.
-+ *
-+ * At the end of this section, c->x86_capability better
-+ * indicate the features this CPU genuinely supports!
-+ */
-+ if (this_cpu->c_init)
-+ this_cpu->c_init(c);
-+
-+ /* Disable the PN if appropriate */
-+ squash_the_stupid_serial_number(c);
-+
-+ /*
-+ * The vendor-specific functions might have changed features. Now
-+ * we do "generic changes."
-+ */
-+
-+ /* TSC disabled? */
-+ if ( tsc_disable )
-+ clear_bit(X86_FEATURE_TSC, c->x86_capability);
-+
-+ /* FXSR disabled? */
-+ if (disable_x86_fxsr) {
-+ clear_bit(X86_FEATURE_FXSR, c->x86_capability);
-+ clear_bit(X86_FEATURE_XMM, c->x86_capability);
-+ }
-+
-+ /* SEP disabled? */
-+ if (disable_x86_sep)
-+ clear_bit(X86_FEATURE_SEP, c->x86_capability);
-+
-+ if (disable_pse)
-+ clear_bit(X86_FEATURE_PSE, c->x86_capability);
-+
-+ /* If the model name is still unset, do table lookup. */
-+ if ( !c->x86_model_id[0] ) {
-+ char *p;
-+ p = table_lookup_model(c);
-+ if ( p )
-+ strcpy(c->x86_model_id, p);
-+ else
-+ /* Last resort... */
-+ sprintf(c->x86_model_id, "%02x/%02x",
-+ c->x86, c->x86_model);
-+ }
-+
-+ /* Now the feature flags better reflect actual CPU features! */
-+
-+ printk(KERN_DEBUG "CPU: After all inits, caps:");
-+ for (i = 0; i < NCAPINTS; i++)
-+ printk(" %08lx", c->x86_capability[i]);
-+ printk("\n");
-+
-+ /*
-+ * On SMP, boot_cpu_data holds the common feature set between
-+ * all CPUs; so make sure that we indicate which features are
-+ * common between the CPUs. The first time this routine gets
-+ * executed, c == &boot_cpu_data.
-+ */
-+ if ( c != &boot_cpu_data ) {
-+ /* AND the already accumulated flags with these */
-+ for ( i = 0 ; i < NCAPINTS ; i++ )
-+ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
-+ }
-+
-+ /* Init Machine Check Exception if available. */
-+ mcheck_init(c);
-+}
-+
-+void __init identify_boot_cpu(void)
-+{
-+ identify_cpu(&boot_cpu_data);
-+ sysenter_setup();
-+ enable_sep_cpu();
-+ mtrr_bp_init();
-+}
-+
-+void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
-+{
-+ BUG_ON(c == &boot_cpu_data);
-+ identify_cpu(c);
-+ enable_sep_cpu();
-+ mtrr_ap_init();
-+}
-+
-+#ifdef CONFIG_X86_HT
-+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
-+{
-+ u32 eax, ebx, ecx, edx;
-+ int index_msb, core_bits;
-+
-+ cpuid(1, &eax, &ebx, &ecx, &edx);
-+
-+ if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
-+ return;
-+
-+ smp_num_siblings = (ebx & 0xff0000) >> 16;
-+
-+ if (smp_num_siblings == 1) {
-+ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-+ } else if (smp_num_siblings > 1 ) {
-+
-+ if (smp_num_siblings > NR_CPUS) {
-+ printk(KERN_WARNING "CPU: Unsupported number of the "
-+ "siblings %d", smp_num_siblings);
-+ smp_num_siblings = 1;
-+ return;
-+ }
-+
-+ index_msb = get_count_order(smp_num_siblings);
-+ c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
-+
-+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
-+ c->phys_proc_id);
-+
-+ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-+
-+ index_msb = get_count_order(smp_num_siblings) ;
-+
-+ core_bits = get_count_order(c->x86_max_cores);
-+
-+ c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
-+ ((1 << core_bits) - 1);
-+
-+ if (c->x86_max_cores > 1)
-+ printk(KERN_INFO "CPU: Processor Core ID: %d\n",
-+ c->cpu_core_id);
-+ }
-+}
-+#endif
-+
-+void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
-+{
-+ char *vendor = NULL;
-+
-+ if (c->x86_vendor < X86_VENDOR_NUM)
-+ vendor = this_cpu->c_vendor;
-+ else if (c->cpuid_level >= 0)
-+ vendor = c->x86_vendor_id;
-+
-+ if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
-+ printk("%s ", vendor);
-+
-+ if (!c->x86_model_id[0])
-+ printk("%d86", c->x86);
-+ else
-+ printk("%s", c->x86_model_id);
-+
-+ if (c->x86_mask || c->cpuid_level >= 0)
-+ printk(" stepping %02x\n", c->x86_mask);
-+ else
-+ printk("\n");
-+}
-+
-+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
-+
-+/* This is hacky. :)
-+ * We're emulating future behavior.
-+ * In the future, the cpu-specific init functions will be called implicitly
-+ * via the magic of initcalls.
-+ * They will insert themselves into the cpu_devs structure.
-+ * Then, when cpu_init() is called, we can just iterate over that array.
-+ */
-+
-+extern int intel_cpu_init(void);
-+extern int cyrix_init_cpu(void);
-+extern int nsc_init_cpu(void);
-+extern int amd_init_cpu(void);
-+extern int centaur_init_cpu(void);
-+extern int transmeta_init_cpu(void);
-+extern int rise_init_cpu(void);
-+extern int nexgen_init_cpu(void);
-+extern int umc_init_cpu(void);
-+
-+void __init early_cpu_init(void)
-+{
-+ intel_cpu_init();
-+ cyrix_init_cpu();
-+ nsc_init_cpu();
-+ amd_init_cpu();
-+ centaur_init_cpu();
-+ transmeta_init_cpu();
-+ rise_init_cpu();
-+ nexgen_init_cpu();
-+ umc_init_cpu();
-+ early_cpu_detect();
-+
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+ /* pse is not compatible with on-the-fly unmapping,
-+ * disable it even if the cpus claim to support it.
-+ */
-+ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
-+ disable_pse = 1;
-+#endif
-+}
-+
-+/* Make sure %fs is initialized properly in idle threads */
-+struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
-+{
-+ memset(regs, 0, sizeof(struct pt_regs));
-+ regs->xfs = __KERNEL_PERCPU;
-+ return regs;
-+}
-+
-+/* Current gdt points %fs at the "master" per-cpu area: after this,
-+ * it's on the real one. */
-+void switch_to_new_gdt(void)
-+{
-+ struct Xgt_desc_struct gdt_descr;
-+ unsigned long va, frames[16];
-+ int f;
-+
-+ gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
-+ gdt_descr.size = GDT_SIZE - 1;
-+
-+ for (va = gdt_descr.address, f = 0;
-+ va < gdt_descr.address + gdt_descr.size;
-+ va += PAGE_SIZE, f++) {
-+ frames[f] = virt_to_mfn(va);
-+ make_lowmem_page_readonly(
-+ (void *)va, XENFEAT_writable_descriptor_tables);
-+ }
-+ if (HYPERVISOR_set_gdt(frames, gdt_descr.size / 8))
-+ BUG();
-+ asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-+}
-+
-+/*
-+ * cpu_init() initializes state that is per-CPU. Some data is already
-+ * initialized (naturally) in the bootstrap process, such as the GDT
-+ * and IDT. We reload them nevertheless, this function acts as a
-+ * 'CPU state barrier', nothing should get across.
-+ */
-+void __cpuinit cpu_init(void)
-+{
-+ int cpu = smp_processor_id();
-+ struct task_struct *curr = current;
-+#ifndef CONFIG_X86_NO_TSS
-+ struct tss_struct * t = &per_cpu(init_tss, cpu);
-+#endif
-+ struct thread_struct *thread = &curr->thread;
-+
-+ if (cpu_test_and_set(cpu, cpu_initialized)) {
-+ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-+ for (;;) local_irq_enable();
-+ }
-+
-+ printk(KERN_INFO "Initializing CPU#%d\n", cpu);
-+
-+ if (cpu_has_vme || cpu_has_de)
-+ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-+ if (tsc_disable && cpu_has_tsc) {
-+ printk(KERN_NOTICE "Disabling TSC...\n");
-+ /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
-+ clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
-+ set_in_cr4(X86_CR4_TSD);
-+ }
-+
-+ switch_to_new_gdt();
-+
-+ /*
-+ * Set up and load the per-CPU TSS and LDT
-+ */
-+ atomic_inc(&init_mm.mm_count);
-+ curr->active_mm = &init_mm;
-+ if (curr->mm)
-+ BUG();
-+ enter_lazy_tlb(&init_mm, curr);
-+
-+ load_esp0(t, thread);
-+
-+ load_LDT(&init_mm.context);
-+
-+#ifdef CONFIG_DOUBLEFAULT
-+ /* Set up doublefault TSS pointer in the GDT */
-+ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
-+#endif
-+
-+ /* Clear %gs. */
-+ asm volatile ("mov %0, %%gs" : : "r" (0));
-+
-+ /* Clear all 6 debug registers: */
-+ set_debugreg(0, 0);
-+ set_debugreg(0, 1);
-+ set_debugreg(0, 2);
-+ set_debugreg(0, 3);
-+ set_debugreg(0, 6);
-+ set_debugreg(0, 7);
-+
-+ /*
-+ * Force FPU initialization:
-+ */
-+ current_thread_info()->status = 0;
-+ clear_used_math();
-+ mxcsr_feature_mask_init();
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+void __cpuinit cpu_uninit(void)
-+{
-+ int cpu = raw_smp_processor_id();
-+ cpu_clear(cpu, cpu_initialized);
-+
-+ /* lazy TLB state */
-+ per_cpu(cpu_tlbstate, cpu).state = 0;
-+ per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
-+}
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/cpu/Makefile ubuntu-gutsy-xen/arch/i386/kernel/cpu/Makefile
---- ubuntu-gutsy/arch/i386/kernel/cpu/Makefile 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/cpu/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -19,3 +19,10 @@
- obj-$(CONFIG_CPU_FREQ) += cpufreq/
-
- obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
-+
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+n-obj-xen := perfctr-watchdog.o
-+obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
-+obj-y := $(call cherrypickxen, $(obj-y), $(src))
-+endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/cpu/mtrr/main-xen.c ubuntu-gutsy-xen/arch/i386/kernel/cpu/mtrr/main-xen.c
---- ubuntu-gutsy/arch/i386/kernel/cpu/mtrr/main-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/cpu/mtrr/main-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,196 @@
-+#include <linux/init.h>
-+#include <linux/proc_fs.h>
-+#include <linux/ctype.h>
-+#include <linux/module.h>
-+#include <linux/seq_file.h>
-+#include <asm/uaccess.h>
-+#include <linux/mutex.h>
-+
-+#include <asm/mtrr.h>
-+#include "mtrr.h"
-+
-+static DEFINE_MUTEX(mtrr_mutex);
-+
-+void generic_get_mtrr(unsigned int reg, unsigned long *base,
-+ unsigned long *size, mtrr_type * type)
-+{
-+ struct xen_platform_op op;
-+
-+ op.cmd = XENPF_read_memtype;
-+ op.u.read_memtype.reg = reg;
-+ (void)HYPERVISOR_platform_op(&op);
-+
-+ *size = op.u.read_memtype.nr_mfns;
-+ *base = op.u.read_memtype.mfn;
-+ *type = op.u.read_memtype.type;
-+}
-+
-+struct mtrr_ops generic_mtrr_ops = {
-+ .use_intel_if = 1,
-+ .get = generic_get_mtrr,
-+};
-+
-+struct mtrr_ops *mtrr_if = &generic_mtrr_ops;
-+unsigned int num_var_ranges;
-+unsigned int *usage_table;
-+
-+static void __init set_num_var_ranges(void)
-+{
-+ struct xen_platform_op op;
-+
-+ for (num_var_ranges = 0; ; num_var_ranges++) {
-+ op.cmd = XENPF_read_memtype;
-+ op.u.read_memtype.reg = num_var_ranges;
-+ if (HYPERVISOR_platform_op(&op) != 0)
-+ break;
-+ }
-+}
-+
-+static void __init init_table(void)
-+{
-+ int i, max;
-+
-+ max = num_var_ranges;
-+ if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
-+ == NULL) {
-+ printk(KERN_ERR "mtrr: could not allocate\n");
-+ return;
-+ }
-+ for (i = 0; i < max; i++)
-+ usage_table[i] = 0;
-+}
-+
-+int mtrr_add_page(unsigned long base, unsigned long size,
-+ unsigned int type, char increment)
-+{
-+ int error;
-+ struct xen_platform_op op;
-+
-+ mutex_lock(&mtrr_mutex);
-+
-+ op.cmd = XENPF_add_memtype;
-+ op.u.add_memtype.mfn = base;
-+ op.u.add_memtype.nr_mfns = size;
-+ op.u.add_memtype.type = type;
-+ error = HYPERVISOR_platform_op(&op);
-+ if (error) {
-+ mutex_unlock(&mtrr_mutex);
-+ BUG_ON(error > 0);
-+ return error;
-+ }
-+
-+ if (increment)
-+ ++usage_table[op.u.add_memtype.reg];
-+
-+ mutex_unlock(&mtrr_mutex);
-+
-+ return op.u.add_memtype.reg;
-+}
-+
-+static int mtrr_check(unsigned long base, unsigned long size)
-+{
-+ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
-+ printk(KERN_WARNING
-+ "mtrr: size and base must be multiples of 4 kiB\n");
-+ printk(KERN_DEBUG
-+ "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
-+ dump_stack();
-+ return -1;
-+ }
-+ return 0;
-+}
-+
-+int
-+mtrr_add(unsigned long base, unsigned long size, unsigned int type,
-+ char increment)
-+{
-+ if (mtrr_check(base, size))
-+ return -EINVAL;
-+ return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
-+ increment);
-+}
-+
-+int mtrr_del_page(int reg, unsigned long base, unsigned long size)
-+{
-+ unsigned i;
-+ mtrr_type ltype;
-+ unsigned long lbase, lsize;
-+ int error = -EINVAL;
-+ struct xen_platform_op op;
-+
-+ mutex_lock(&mtrr_mutex);
-+
-+ if (reg < 0) {
-+ /* Search for existing MTRR */
-+ for (i = 0; i < num_var_ranges; ++i) {
-+ mtrr_if->get(i, &lbase, &lsize, &ltype);
-+ if (lbase == base && lsize == size) {
-+ reg = i;
-+ break;
-+ }
-+ }
-+ if (reg < 0) {
-+ printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
-+ size);
-+ goto out;
-+ }
-+ }
-+ if (usage_table[reg] < 1) {
-+ printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
-+ goto out;
-+ }
-+ if (--usage_table[reg] < 1) {
-+ op.cmd = XENPF_del_memtype;
-+ op.u.del_memtype.handle = 0;
-+ op.u.del_memtype.reg = reg;
-+ error = HYPERVISOR_platform_op(&op);
-+ if (error) {
-+ BUG_ON(error > 0);
-+ goto out;
-+ }
-+ }
-+ error = reg;
-+ out:
-+ mutex_unlock(&mtrr_mutex);
-+ return error;
-+}
-+
-+int
-+mtrr_del(int reg, unsigned long base, unsigned long size)
-+{
-+ if (mtrr_check(base, size))
-+ return -EINVAL;
-+ return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
-+}
-+
-+EXPORT_SYMBOL(mtrr_add);
-+EXPORT_SYMBOL(mtrr_del);
-+
-+__init void mtrr_bp_init(void)
-+{
-+}
-+
-+void mtrr_ap_init(void)
-+{
-+}
-+
-+static int __init mtrr_init(void)
-+{
-+ struct cpuinfo_x86 *c = &boot_cpu_data;
-+
-+ if (!is_initial_xendomain())
-+ return -ENODEV;
-+
-+ if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
-+ (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
-+ (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
-+ (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
-+ return -ENODEV;
-+
-+ set_num_var_ranges();
-+ init_table();
-+
-+ return 0;
-+}
-+
-+subsys_initcall(mtrr_init);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/cpu/mtrr/Makefile ubuntu-gutsy-xen/arch/i386/kernel/cpu/mtrr/Makefile
---- ubuntu-gutsy/arch/i386/kernel/cpu/mtrr/Makefile 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/cpu/mtrr/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -1,3 +1,10 @@
- obj-y := main.o if.o generic.o state.o
- obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o
-
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+n-obj-xen := generic.o state.o amd.o cyrix.o centaur.o
-+
-+obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/crash.c ubuntu-gutsy-xen/arch/i386/kernel/crash.c
---- ubuntu-gutsy/arch/i386/kernel/crash.c 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/crash.c 2007-08-18 12:38:02.000000000 -0400
-@@ -27,7 +27,7 @@
-
- #include <mach_ipi.h>
-
--
-+#ifndef CONFIG_XEN
- /* This keeps a track of which one is crashing cpu. */
- static int crashing_cpu;
-
-@@ -112,6 +112,7 @@
- /* There are no cpus to shootdown */
- }
- #endif
-+#endif /* CONFIG_XEN */
-
- void machine_crash_shutdown(struct pt_regs *regs)
- {
-@@ -126,6 +127,7 @@
- /* The kernel is broken so disable interrupts */
- local_irq_disable();
-
-+#ifndef CONFIG_XEN
- /* Make a note of crashing cpu. Will be used in NMI callback.*/
- crashing_cpu = safe_smp_processor_id();
- nmi_shootdown_cpus();
-@@ -134,4 +136,7 @@
- disable_IO_APIC();
- #endif
- crash_save_cpu(regs, safe_smp_processor_id());
-+#else
-+ crash_save_cpu(regs, smp_processor_id());
-+#endif /* CONFIG_XEN */
- }
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/e820-xen.c ubuntu-gutsy-xen/arch/i386/kernel/e820-xen.c
---- ubuntu-gutsy/arch/i386/kernel/e820-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/e820-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,997 @@
-+#include <linux/kernel.h>
-+#include <linux/types.h>
-+#include <linux/init.h>
-+#include <linux/bootmem.h>
-+#include <linux/ioport.h>
-+#include <linux/string.h>
-+#include <linux/kexec.h>
-+#include <linux/module.h>
-+#include <linux/mm.h>
-+#include <linux/efi.h>
-+#include <linux/pfn.h>
-+#include <linux/uaccess.h>
-+
-+#include <asm/pgtable.h>
-+#include <asm/page.h>
-+#include <asm/e820.h>
-+#include <asm/setup.h>
-+#include <xen/interface/memory.h>
-+
-+#ifdef CONFIG_EFI
-+int efi_enabled = 0;
-+EXPORT_SYMBOL(efi_enabled);
-+#endif
-+
-+struct e820map e820;
-+struct change_member {
-+ struct e820entry *pbios; /* pointer to original bios entry */
-+ unsigned long long addr; /* address for this change point */
-+};
-+static struct change_member change_point_list[2*E820MAX] __initdata;
-+static struct change_member *change_point[2*E820MAX] __initdata;
-+static struct e820entry *overlap_list[E820MAX] __initdata;
-+static struct e820entry new_bios[E820MAX] __initdata;
-+/* For PCI or other memory-mapped resources */
-+unsigned long pci_mem_start = 0x10000000;
-+#ifdef CONFIG_PCI
-+EXPORT_SYMBOL(pci_mem_start);
-+#endif
-+extern int user_defined_memmap;
-+struct resource data_resource = {
-+ .name = "Kernel data",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
-+};
-+
-+struct resource code_resource = {
-+ .name = "Kernel code",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
-+};
-+
-+static struct resource system_rom_resource = {
-+ .name = "System ROM",
-+ .start = 0xf0000,
-+ .end = 0xfffff,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+};
-+
-+static struct resource extension_rom_resource = {
-+ .name = "Extension ROM",
-+ .start = 0xe0000,
-+ .end = 0xeffff,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+};
-+
-+static struct resource adapter_rom_resources[] = { {
-+ .name = "Adapter ROM",
-+ .start = 0xc8000,
-+ .end = 0,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+ .name = "Adapter ROM",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+ .name = "Adapter ROM",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+ .name = "Adapter ROM",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+ .name = "Adapter ROM",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+ .name = "Adapter ROM",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+} };
-+
-+static struct resource video_rom_resource = {
-+ .name = "Video ROM",
-+ .start = 0xc0000,
-+ .end = 0xc7fff,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+};
-+
-+static struct resource video_ram_resource = {
-+ .name = "Video RAM area",
-+ .start = 0xa0000,
-+ .end = 0xbffff,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
-+};
-+
-+static struct resource standard_io_resources[] = { {
-+ .name = "dma1",
-+ .start = 0x0000,
-+ .end = 0x001f,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+ .name = "pic1",
-+ .start = 0x0020,
-+ .end = 0x0021,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+ .name = "timer0",
-+ .start = 0x0040,
-+ .end = 0x0043,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+ .name = "timer1",
-+ .start = 0x0050,
-+ .end = 0x0053,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+ .name = "keyboard",
-+ .start = 0x0060,
-+ .end = 0x006f,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+ .name = "dma page reg",
-+ .start = 0x0080,
-+ .end = 0x008f,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+ .name = "pic2",
-+ .start = 0x00a0,
-+ .end = 0x00a1,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+ .name = "dma2",
-+ .start = 0x00c0,
-+ .end = 0x00df,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+ .name = "fpu",
-+ .start = 0x00f0,
-+ .end = 0x00ff,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+} };
-+
-+#define ROMSIGNATURE 0xaa55
-+
-+static int __init romsignature(const unsigned char *rom)
-+{
-+ const unsigned short * const ptr = (const unsigned short *)rom;
-+ unsigned short sig;
-+
-+ return probe_kernel_address(ptr, sig) == 0 && sig == ROMSIGNATURE;
-+}
-+
-+static int __init romchecksum(const unsigned char *rom, unsigned long length)
-+{
-+ unsigned char sum, c;
-+
-+ for (sum = 0; length && probe_kernel_address(rom++, c) == 0; length--)
-+ sum += c;
-+ return !length && !sum;
-+}
-+
-+static void __init probe_roms(void)
-+{
-+ const unsigned char *rom;
-+ unsigned long start, length, upper;
-+ unsigned char c;
-+ int i;
-+
-+#ifdef CONFIG_XEN
-+ /* Nothing to do if not running in dom0. */
-+ if (!is_initial_xendomain())
-+ return;
-+#endif
-+
-+ /* video rom */
-+ upper = adapter_rom_resources[0].start;
-+ for (start = video_rom_resource.start; start < upper; start += 2048) {
-+ rom = isa_bus_to_virt(start);
-+ if (!romsignature(rom))
-+ continue;
-+
-+ video_rom_resource.start = start;
-+
-+ if (probe_kernel_address(rom + 2, c) != 0)
-+ continue;
-+
-+ /* 0 < length <= 0x7f * 512, historically */
-+ length = c * 512;
-+
-+ /* if checksum okay, trust length byte */
-+ if (length && romchecksum(rom, length))
-+ video_rom_resource.end = start + length - 1;
-+
-+ request_resource(&iomem_resource, &video_rom_resource);
-+ break;
-+ }
-+
-+ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
-+ if (start < upper)
-+ start = upper;
-+
-+ /* system rom */
-+ request_resource(&iomem_resource, &system_rom_resource);
-+ upper = system_rom_resource.start;
-+
-+ /* check for extension rom (ignore length byte!) */
-+ rom = isa_bus_to_virt((unsigned long)extension_rom_resource.start);
-+ if (romsignature(rom)) {
-+ length = extension_rom_resource.end - extension_rom_resource.start + 1;
-+ if (romchecksum(rom, length)) {
-+ request_resource(&iomem_resource, &extension_rom_resource);
-+ upper = extension_rom_resource.start;
-+ }
-+ }
-+
-+ /* check for adapter roms on 2k boundaries */
-+ for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; start += 2048) {
-+ rom = isa_bus_to_virt(start);
-+ if (!romsignature(rom))
-+ continue;
-+
-+ if (probe_kernel_address(rom + 2, c) != 0)
-+ continue;
-+
-+ /* 0 < length <= 0x7f * 512, historically */
-+ length = c * 512;
-+
-+ /* but accept any length that fits if checksum okay */
-+ if (!length || start + length > upper || !romchecksum(rom, length))
-+ continue;
-+
-+ adapter_rom_resources[i].start = start;
-+ adapter_rom_resources[i].end = start + length - 1;
-+ request_resource(&iomem_resource, &adapter_rom_resources[i]);
-+
-+ start = adapter_rom_resources[i++].end & ~2047UL;
-+ }
-+}
-+
-+#ifdef CONFIG_XEN
-+static struct e820map machine_e820;
-+#define e820 machine_e820
-+#endif
-+
-+/*
-+ * Request address space for all standard RAM and ROM resources
-+ * and also for regions reported as reserved by the e820.
-+ */
-+static void __init
-+legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
-+{
-+ int i;
-+
-+ probe_roms();
-+ for (i = 0; i < e820.nr_map; i++) {
-+ struct resource *res;
-+#ifndef CONFIG_RESOURCES_64BIT
-+ if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
-+ continue;
-+#endif
-+ res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
-+ switch (e820.map[i].type) {
-+ case E820_RAM: res->name = "System RAM"; break;
-+ case E820_ACPI: res->name = "ACPI Tables"; break;
-+ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
-+ default: res->name = "reserved";
-+ }
-+ res->start = e820.map[i].addr;
-+ res->end = res->start + e820.map[i].size - 1;
-+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-+ if (request_resource(&iomem_resource, res)) {
-+ kfree(res);
-+ continue;
-+ }
-+ if (e820.map[i].type == E820_RAM) {
-+ /*
-+ * We don't know which RAM region contains kernel data,
-+ * so we try it repeatedly and let the resource manager
-+ * test it.
-+ */
-+#ifndef CONFIG_XEN
-+ request_resource(res, code_resource);
-+ request_resource(res, data_resource);
-+#endif
-+#ifdef CONFIG_KEXEC
-+ request_resource(res, &crashk_res);
-+#ifdef CONFIG_XEN
-+ xen_machine_kexec_register_resources(res);
-+#endif
-+#endif
-+ }
-+ }
-+}
-+
-+#undef e820
-+
-+/*
-+ * Request address space for all standard resources
-+ *
-+ * This is called just before pcibios_init(), which is also a
-+ * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
-+ */
-+static int __init request_standard_resources(void)
-+{
-+ int i;
-+
-+ /* Nothing to do if not running in dom0. */
-+ if (!is_initial_xendomain())
-+ return 0;
-+
-+ printk("Setting up standard PCI resources\n");
-+ if (efi_enabled)
-+ efi_initialize_iomem_resources(&code_resource, &data_resource);
-+ else
-+ legacy_init_iomem_resources(&code_resource, &data_resource);
-+
-+ /* EFI systems may still have VGA */
-+ request_resource(&iomem_resource, &video_ram_resource);
-+
-+ /* request I/O space for devices used on all i[345]86 PCs */
-+ for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
-+ request_resource(&ioport_resource, &standard_io_resources[i]);
-+ return 0;
-+}
-+
-+subsys_initcall(request_standard_resources);
-+
-+void __init add_memory_region(unsigned long long start,
-+ unsigned long long size, int type)
-+{
-+ int x;
-+
-+ if (!efi_enabled) {
-+ x = e820.nr_map;
-+
-+ if (x == E820MAX) {
-+ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
-+ return;
-+ }
-+
-+ e820.map[x].addr = start;
-+ e820.map[x].size = size;
-+ e820.map[x].type = type;
-+ e820.nr_map++;
-+ }
-+} /* add_memory_region */
-+
-+/*
-+ * Sanitize the BIOS e820 map.
-+ *
-+ * Some e820 responses include overlapping entries. The following
-+ * replaces the original e820 map with a new one, removing overlaps.
-+ *
-+ */
-+int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
-+{
-+ struct change_member *change_tmp;
-+ unsigned long current_type, last_type;
-+ unsigned long long last_addr;
-+ int chgidx, still_changing;
-+ int overlap_entries;
-+ int new_bios_entry;
-+ int old_nr, new_nr, chg_nr;
-+ int i;
-+
-+ /*
-+ Visually we're performing the following (1,2,3,4 = memory types)...
-+
-+ Sample memory map (w/overlaps):
-+ ____22__________________
-+ ______________________4_
-+ ____1111________________
-+ _44_____________________
-+ 11111111________________
-+ ____________________33__
-+ ___________44___________
-+ __________33333_________
-+ ______________22________
-+ ___________________2222_
-+ _________111111111______
-+ _____________________11_
-+ _________________4______
-+
-+ Sanitized equivalent (no overlap):
-+ 1_______________________
-+ _44_____________________
-+ ___1____________________
-+ ____22__________________
-+ ______11________________
-+ _________1______________
-+ __________3_____________
-+ ___________44___________
-+ _____________33_________
-+ _______________2________
-+ ________________1_______
-+ _________________4______
-+ ___________________2____
-+ ____________________33__
-+ ______________________4_
-+ */
-+ /* if there's only one memory region, don't bother */
-+ if (*pnr_map < 2) {
-+ return -1;
-+ }
-+
-+ old_nr = *pnr_map;
-+
-+ /* bail out if we find any unreasonable addresses in bios map */
-+ for (i=0; i<old_nr; i++)
-+ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr) {
-+ return -1;
-+ }
-+
-+ /* create pointers for initial change-point information (for sorting) */
-+ for (i=0; i < 2*old_nr; i++)
-+ change_point[i] = &change_point_list[i];
-+
-+ /* record all known change-points (starting and ending addresses),
-+ omitting those that are for empty memory regions */
-+ chgidx = 0;
-+ for (i=0; i < old_nr; i++) {
-+ if (biosmap[i].size != 0) {
-+ change_point[chgidx]->addr = biosmap[i].addr;
-+ change_point[chgidx++]->pbios = &biosmap[i];
-+ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
-+ change_point[chgidx++]->pbios = &biosmap[i];
-+ }
-+ }
-+ chg_nr = chgidx; /* true number of change-points */
-+
-+ /* sort change-point list by memory addresses (low -> high) */
-+ still_changing = 1;
-+ while (still_changing) {
-+ still_changing = 0;
-+ for (i=1; i < chg_nr; i++) {
-+ /* if <current_addr> > <last_addr>, swap */
-+ /* or, if current=<start_addr> & last=<end_addr>, swap */
-+ if ((change_point[i]->addr < change_point[i-1]->addr) ||
-+ ((change_point[i]->addr == change_point[i-1]->addr) &&
-+ (change_point[i]->addr == change_point[i]->pbios->addr) &&
-+ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
-+ )
-+ {
-+ change_tmp = change_point[i];
-+ change_point[i] = change_point[i-1];
-+ change_point[i-1] = change_tmp;
-+ still_changing=1;
-+ }
-+ }
-+ }
-+
-+ /* create a new bios memory map, removing overlaps */
-+ overlap_entries=0; /* number of entries in the overlap table */
-+ new_bios_entry=0; /* index for creating new bios map entries */
-+ last_type = 0; /* start with undefined memory type */
-+ last_addr = 0; /* start with 0 as last starting address */
-+ /* loop through change-points, determining effect on the new bios map */
-+ for (chgidx=0; chgidx < chg_nr; chgidx++)
-+ {
-+ /* keep track of all overlapping bios entries */
-+ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
-+ {
-+ /* add map entry to overlap list (> 1 entry implies an overlap) */
-+ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
-+ }
-+ else
-+ {
-+ /* remove entry from list (order independent, so swap with last) */
-+ for (i=0; i<overlap_entries; i++)
-+ {
-+ if (overlap_list[i] == change_point[chgidx]->pbios)
-+ overlap_list[i] = overlap_list[overlap_entries-1];
-+ }
-+ overlap_entries--;
-+ }
-+ /* if there are overlapping entries, decide which "type" to use */
-+ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
-+ current_type = 0;
-+ for (i=0; i<overlap_entries; i++)
-+ if (overlap_list[i]->type > current_type)
-+ current_type = overlap_list[i]->type;
-+ /* continue building up new bios map based on this information */
-+ if (current_type != last_type) {
-+ if (last_type != 0) {
-+ new_bios[new_bios_entry].size =
-+ change_point[chgidx]->addr - last_addr;
-+ /* move forward only if the new size was non-zero */
-+ if (new_bios[new_bios_entry].size != 0)
-+ if (++new_bios_entry >= E820MAX)
-+ break; /* no more space left for new bios entries */
-+ }
-+ if (current_type != 0) {
-+ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
-+ new_bios[new_bios_entry].type = current_type;
-+ last_addr=change_point[chgidx]->addr;
-+ }
-+ last_type = current_type;
-+ }
-+ }
-+ new_nr = new_bios_entry; /* retain count for new bios entries */
-+
-+ /* copy new bios mapping into original location */
-+ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
-+ *pnr_map = new_nr;
-+
-+ return 0;
-+}
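
A concrete trace of the change-point walk above (illustrative numbers only, not part of the patch): if the BIOS reports 0-8MB as usable (type 1) and an overlapping 4MB-6MB as reserved (type 2), the recorded change-points are 0 and 8MB for the first entry and 4MB and 6MB for the second. After sorting (0, 4MB, 6MB, 8MB), the walk sees the overlap set grow to {usable} at 0 and {usable, reserved} at 4MB, then shrink back to {usable} at 6MB and to empty at 8MB. Because the larger type wins, the rebuilt map is 0-4MB usable, 4MB-6MB reserved, 6MB-8MB usable, with no overlaps.
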
-+
-+/*
-+ * Copy the BIOS e820 map into a safe place.
-+ *
-+ * Sanity-check it while we're at it..
-+ *
-+ * If we're lucky and live on a modern system, the setup code
-+ * will have given us a memory map that we can use to properly
-+ * set up memory. If we aren't, we'll fake a memory map.
-+ *
-+ * We check to see that the memory map contains at least 2 elements
-+ * before we'll use it, because the detection code in setup.S may
-+ * not be perfect and most every PC known to man has two memory
-+ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
-+ * thinkpad 560x, for example, does not cooperate with the memory
-+ * detection code.)
-+ */
-+int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
-+{
-+#ifndef CONFIG_XEN
-+ /* Only one memory region (or negative)? Ignore it */
-+ if (nr_map < 2)
-+ return -1;
-+#else
-+ BUG_ON(nr_map < 1);
-+#endif
-+
-+ do {
-+ unsigned long long start = biosmap->addr;
-+ unsigned long long size = biosmap->size;
-+ unsigned long long end = start + size;
-+ unsigned long type = biosmap->type;
-+
-+ /* Overflow in 64 bits? Ignore the memory map. */
-+ if (start > end)
-+ return -1;
-+
-+#ifndef CONFIG_XEN
-+ /*
-+ * Some BIOSes claim RAM in the 640k - 1M region.
-+ * Not right. Fix it up.
-+ */
-+ if (type == E820_RAM) {
-+ if (start < 0x100000ULL && end > 0xA0000ULL) {
-+ if (start < 0xA0000ULL)
-+ add_memory_region(start, 0xA0000ULL-start, type);
-+ if (end <= 0x100000ULL)
-+ continue;
-+ start = 0x100000ULL;
-+ size = end - start;
-+ }
-+ }
-+#endif
-+ add_memory_region(start, size, type);
-+ } while (biosmap++,--nr_map);
-+ return 0;
-+}
-+
-+/*
-+ * Callback for efi_memory_walk.
-+ */
-+static int __init
-+efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
-+{
-+ unsigned long *max_pfn = arg, pfn;
-+
-+ if (start < end) {
-+ pfn = PFN_UP(end -1);
-+ if (pfn > *max_pfn)
-+ *max_pfn = pfn;
-+ }
-+ return 0;
-+}
-+
-+static int __init
-+efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
-+{
-+ memory_present(0, PFN_UP(start), PFN_DOWN(end));
-+ return 0;
-+}
-+
-+/*
-+ * Find the highest page frame number we have available
-+ */
-+void __init find_max_pfn(void)
-+{
-+ int i;
-+
-+ max_pfn = 0;
-+ if (efi_enabled) {
-+ efi_memmap_walk(efi_find_max_pfn, &max_pfn);
-+ efi_memmap_walk(efi_memory_present_wrapper, NULL);
-+ return;
-+ }
-+
-+ for (i = 0; i < e820.nr_map; i++) {
-+ unsigned long start, end;
-+ /* RAM? */
-+ if (e820.map[i].type != E820_RAM)
-+ continue;
-+ start = PFN_UP(e820.map[i].addr);
-+ end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
-+ if (start >= end)
-+ continue;
-+ if (end > max_pfn)
-+ max_pfn = end;
-+ memory_present(0, start, end);
-+ }
-+}
-+
-+/*
-+ * Free all available memory for boot time allocation. Used
-+ * as a callback function by efi_memory_walk()
-+ */
-+
-+static int __init
-+free_available_memory(unsigned long start, unsigned long end, void *arg)
-+{
-+ /* check max_low_pfn */
-+ if (start >= (max_low_pfn << PAGE_SHIFT))
-+ return 0;
-+ if (end >= (max_low_pfn << PAGE_SHIFT))
-+ end = max_low_pfn << PAGE_SHIFT;
-+ if (start < end)
-+ free_bootmem(start, end - start);
-+
-+ return 0;
-+}
-+/*
-+ * Register fully available low RAM pages with the bootmem allocator.
-+ */
-+void __init register_bootmem_low_pages(unsigned long max_low_pfn)
-+{
-+ int i;
-+
-+ if (efi_enabled) {
-+ efi_memmap_walk(free_available_memory, NULL);
-+ return;
-+ }
-+ for (i = 0; i < e820.nr_map; i++) {
-+ unsigned long curr_pfn, last_pfn, size;
-+ /*
-+ * Reserve usable low memory
-+ */
-+ if (e820.map[i].type != E820_RAM)
-+ continue;
-+ /*
-+ * We are rounding up the start address of usable memory:
-+ */
-+ curr_pfn = PFN_UP(e820.map[i].addr);
-+ if (curr_pfn >= max_low_pfn)
-+ continue;
-+ /*
-+ * ... and at the end of the usable range downwards:
-+ */
-+ last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
-+
-+#ifdef CONFIG_XEN
-+ /*
-+ * Truncate to the number of actual pages currently
-+ * present.
-+ */
-+ if (last_pfn > xen_start_info->nr_pages)
-+ last_pfn = xen_start_info->nr_pages;
-+#endif
-+
-+ if (last_pfn > max_low_pfn)
-+ last_pfn = max_low_pfn;
-+
-+ /*
-+ * .. finally, did all the rounding and playing
-+ * around just make the area go away?
-+ */
-+ if (last_pfn <= curr_pfn)
-+ continue;
-+
-+ size = last_pfn - curr_pfn;
-+ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
-+ }
-+}
-+
-+void __init e820_register_memory(void)
-+{
-+ unsigned long gapstart, gapsize, round;
-+ unsigned long long last;
-+ int i;
-+
-+#ifdef CONFIG_XEN
-+ if (is_initial_xendomain()) {
-+ struct xen_memory_map memmap;
-+
-+ memmap.nr_entries = E820MAX;
-+ set_xen_guest_handle(memmap.buffer, machine_e820.map);
-+
-+ if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
-+ BUG();
-+ machine_e820.nr_map = memmap.nr_entries;
-+ }
-+ else
-+ machine_e820 = e820;
-+#define e820 machine_e820
-+#endif
-+
-+ /*
-+ * Search for the biggest gap in the low 32 bits of the e820
-+ * memory space.
-+ */
-+ last = 0x100000000ull;
-+ gapstart = 0x10000000;
-+ gapsize = 0x400000;
-+ i = e820.nr_map;
-+ while (--i >= 0) {
-+ unsigned long long start = e820.map[i].addr;
-+ unsigned long long end = start + e820.map[i].size;
-+
-+ /*
-+ * Since "last" is at most 4GB, we know we'll
-+ * fit in 32 bits if this condition is true
-+ */
-+ if (last > end) {
-+ unsigned long gap = last - end;
-+
-+ if (gap > gapsize) {
-+ gapsize = gap;
-+ gapstart = end;
-+ }
-+ }
-+ if (start < last)
-+ last = start;
-+ }
-+
-+ /*
-+ * See how much we want to round up: start off with
-+ * rounding to the next 1MB area.
-+ */
-+ round = 0x100000;
-+ while ((gapsize >> 4) > round)
-+ round += round;
-+ /* Fun with two's complement */
-+ pci_mem_start = (gapstart + round) & -round;
-+
-+ printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
-+ pci_mem_start, gapstart, gapsize);
-+}
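
As a quick check of the rounding logic above, a minimal standalone sketch (hypothetical gap values, not part of the patch) reproduces the computation of pci_mem_start:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical gap: 512MB free below 4GB, starting at 0xe0000000 */
		unsigned long gapstart = 0xe0000000UL;
		unsigned long gapsize  = 0x20000000UL;
		unsigned long round = 0x100000;		/* start with 1MB alignment */

		while ((gapsize >> 4) > round)		/* double until >= gapsize/16 */
			round += round;
		/* round is now 0x2000000 (32MB); rounding the gap start up to that
		 * boundary gives 0xe2000000, where PCI resources would be placed */
		printf("pci_mem_start = %#lx\n", (gapstart + round) & -round);
		return 0;
	}
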
-+
-+#undef e820
-+
-+void __init print_memory_map(char *who)
-+{
-+ int i;
-+
-+ for (i = 0; i < e820.nr_map; i++) {
-+ printk(" %s: %016Lx - %016Lx ", who,
-+ e820.map[i].addr,
-+ e820.map[i].addr + e820.map[i].size);
-+ switch (e820.map[i].type) {
-+ case E820_RAM: printk("(usable)\n");
-+ break;
-+ case E820_RESERVED:
-+ printk("(reserved)\n");
-+ break;
-+ case E820_ACPI:
-+ printk("(ACPI data)\n");
-+ break;
-+ case E820_NVS:
-+ printk("(ACPI NVS)\n");
-+ break;
-+ default: printk("type %lu\n", e820.map[i].type);
-+ break;
-+ }
-+ }
-+}
-+
-+static __init __always_inline void efi_limit_regions(unsigned long long size)
-+{
-+ unsigned long long current_addr = 0;
-+ efi_memory_desc_t *md, *next_md;
-+ void *p, *p1;
-+ int i, j;
-+
-+ j = 0;
-+ p1 = memmap.map;
-+ for (p = p1, i = 0; p < memmap.map_end; p += memmap.desc_size, i++) {
-+ md = p;
-+ next_md = p1;
-+ current_addr = md->phys_addr +
-+ PFN_PHYS(md->num_pages);
-+ if (is_available_memory(md)) {
-+ if (md->phys_addr >= size) continue;
-+ memcpy(next_md, md, memmap.desc_size);
-+ if (current_addr >= size) {
-+ next_md->num_pages -=
-+ PFN_UP(current_addr-size);
-+ }
-+ p1 += memmap.desc_size;
-+ next_md = p1;
-+ j++;
-+ } else if ((md->attribute & EFI_MEMORY_RUNTIME) ==
-+ EFI_MEMORY_RUNTIME) {
-+ /* In order to make runtime services
-+ * available we have to include runtime
-+ * memory regions in memory map */
-+ memcpy(next_md, md, memmap.desc_size);
-+ p1 += memmap.desc_size;
-+ next_md = p1;
-+ j++;
-+ }
-+ }
-+ memmap.nr_map = j;
-+ memmap.map_end = memmap.map +
-+ (memmap.nr_map * memmap.desc_size);
-+}
-+
-+void __init limit_regions(unsigned long long size)
-+{
-+ unsigned long long current_addr = 0;
-+ int i;
-+
-+ print_memory_map("limit_regions start");
-+ if (efi_enabled) {
-+ efi_limit_regions(size);
-+ return;
-+ }
-+ for (i = 0; i < e820.nr_map; i++) {
-+ current_addr = e820.map[i].addr + e820.map[i].size;
-+ if (current_addr < size)
-+ continue;
-+
-+ if (e820.map[i].type != E820_RAM)
-+ continue;
-+
-+ if (e820.map[i].addr >= size) {
-+ /*
-+ * This region starts past the end of the
-+ * requested size, skip it completely.
-+ */
-+ e820.nr_map = i;
-+ } else {
-+ e820.nr_map = i + 1;
-+ e820.map[i].size -= current_addr - size;
-+ }
-+ print_memory_map("limit_regions endfor");
-+ return;
-+ }
-+#ifdef CONFIG_XEN
-+ if (current_addr < size) {
-+ /*
-+ * The e820 map finished before our requested size so
-+ * extend the final entry to the requested address.
-+ */
-+ --i;
-+ if (e820.map[i].type == E820_RAM)
-+ e820.map[i].size -= current_addr - size;
-+ else
-+ add_memory_region(current_addr, size - current_addr, E820_RAM);
-+ }
-+#endif
-+ print_memory_map("limit_regions endfunc");
-+}
-+
-+/*
-+ * This function checks if any part of the range <start,end> is mapped
-+ * with type.
-+ */
-+int
-+e820_any_mapped(u64 start, u64 end, unsigned type)
-+{
-+ int i;
-+#ifndef CONFIG_XEN
-+ for (i = 0; i < e820.nr_map; i++) {
-+ const struct e820entry *ei = &e820.map[i];
-+#else
-+ if (!is_initial_xendomain())
-+ return 0;
-+ for (i = 0; i < machine_e820.nr_map; ++i) {
-+ const struct e820entry *ei = &machine_e820.map[i];
-+#endif
-+ if (type && ei->type != type)
-+ continue;
-+ if (ei->addr >= end || ei->addr + ei->size <= start)
-+ continue;
-+ return 1;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(e820_any_mapped);
-+
-+ /*
-+ * This function checks if the entire range <start,end> is mapped with type.
-+ *
-+ * Note: this function only works correctly if the e820 table is sorted and
-+ * non-overlapping, which is the case
-+ */
-+int __init
-+e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
-+{
-+ u64 start = s;
-+ u64 end = e;
-+ int i;
-+
-+#ifndef CONFIG_XEN
-+ for (i = 0; i < e820.nr_map; i++) {
-+ struct e820entry *ei = &e820.map[i];
-+#else
-+ if (!is_initial_xendomain())
-+ return 0;
-+ for (i = 0; i < machine_e820.nr_map; ++i) {
-+ const struct e820entry *ei = &machine_e820.map[i];
-+#endif
-+
-+ if (type && ei->type != type)
-+ continue;
-+ /* is the region (part) in overlap with the current region ?*/
-+ if (ei->addr >= end || ei->addr + ei->size <= start)
-+ continue;
-+ /* if the region is at the beginning of <start,end> we move
-+ * start to the end of the region since it's ok until there
-+ */
-+ if (ei->addr <= start)
-+ start = ei->addr + ei->size;
-+ /* if start is now at or beyond end, we're done, full
-+ * coverage */
-+ if (start >= end)
-+ return 1; /* we're done */
-+ }
-+ return 0;
-+}
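
The two helpers differ only in the quantifier: e820_any_mapped() returns 1 if any byte of [start, end) carries the given type, while e820_all_mapped() requires the whole range to be covered. A hedged usage sketch (the caller and its variables are hypothetical, not from this patch): init-time code probing a firmware table could insist that the range is fully reserved before using it:

	if (!e820_all_mapped(tbl_start, tbl_start + tbl_len, E820_RESERVED))
		return 0;	/* range not entirely covered by reserved entries */
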
-+
-+static int __init parse_memmap(char *arg)
-+{
-+ if (!arg)
-+ return -EINVAL;
-+
-+ if (strcmp(arg, "exactmap") == 0) {
-+#ifdef CONFIG_CRASH_DUMP
-+ /* If we are doing a crash dump, we
-+ * still need to know the real mem
-+ * size before original memory map is
-+ * reset.
-+ */
-+ find_max_pfn();
-+ saved_max_pfn = max_pfn;
-+#endif
-+ e820.nr_map = 0;
-+ user_defined_memmap = 1;
-+ } else {
-+ /* If the user specifies memory size, we
-+ * limit the BIOS-provided memory map to
-+ * that size. exactmap can be used to specify
-+ * the exact map. mem=number can be used to
-+ * trim the existing memory map.
-+ */
-+ unsigned long long start_at, mem_size;
-+
-+ mem_size = memparse(arg, &arg);
-+ if (*arg == '@') {
-+ start_at = memparse(arg+1, &arg);
-+ add_memory_region(start_at, mem_size, E820_RAM);
-+ } else if (*arg == '#') {
-+ start_at = memparse(arg+1, &arg);
-+ add_memory_region(start_at, mem_size, E820_ACPI);
-+ } else if (*arg == '$') {
-+ start_at = memparse(arg+1, &arg);
-+ add_memory_region(start_at, mem_size, E820_RESERVED);
-+ } else {
-+ limit_regions(mem_size);
-+ user_defined_memmap = 1;
-+ }
-+ }
-+ return 0;
-+}
-+early_param("memmap", parse_memmap);
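
In practice the parser above gives the usual i386 boot options their meaning. A few hedged examples of how they map onto the code (semantics as implemented above; the $ sign may need quoting in the bootloader configuration):

	memmap=exactmap        discard the BIOS map and rebuild it from later memmap= options
	memmap=512M            trim the memory map to the first 512MB (limit_regions)
	memmap=64M@16M         add 64MB at 16MB as usable RAM (E820_RAM)
	memmap=32M#768M        add 32MB at 768MB as ACPI data (E820_ACPI)
	memmap=64M$1G          add 64MB at 1GB as reserved (E820_RESERVED)
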
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/early_printk-xen.c ubuntu-gutsy-xen/arch/i386/kernel/early_printk-xen.c
---- ubuntu-gutsy/arch/i386/kernel/early_printk-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/early_printk-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,2 @@
-+
-+#include "../../x86_64/kernel/early_printk-xen.c"
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/entry.S ubuntu-gutsy-xen/arch/i386/kernel/entry.S
---- ubuntu-gutsy/arch/i386/kernel/entry.S 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/entry.S 2007-08-18 12:38:02.000000000 -0400
-@@ -287,7 +287,7 @@
- CFI_SIGNAL_FRAME
- CFI_DEF_CFA esp, 0
- CFI_REGISTER esp, ebp
-- movl TSS_sysenter_esp0(%esp),%esp
-+ movl SYSENTER_stack_esp0(%esp),%esp
- sysenter_past_esp:
- /*
- * No need to follow this irqs on/off section: the syscall
-@@ -741,7 +741,7 @@
- * that sets up the real kernel stack. Check here, since we can't
- * allow the wrong stack to be used.
- *
-- * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
-+ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
- * already pushed 3 words if it hits on the sysenter instruction:
- * eflags, cs and eip.
- *
-@@ -753,7 +753,7 @@
- cmpw $__KERNEL_CS,4(%esp); \
- jne ok; \
- label: \
-- movl TSS_sysenter_esp0+offset(%esp),%esp; \
-+ movl SYSENTER_stack_esp0+offset(%esp),%esp; \
- CFI_DEF_CFA esp, 0; \
- CFI_UNDEFINED eip; \
- pushfl; \
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/entry-xen.S ubuntu-gutsy-xen/arch/i386/kernel/entry-xen.S
---- ubuntu-gutsy/arch/i386/kernel/entry-xen.S 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/entry-xen.S 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,1247 @@
-+/*
-+ * linux/arch/i386/entry.S
-+ *
-+ * Copyright (C) 1991, 1992 Linus Torvalds
-+ */
-+
-+/*
-+ * entry.S contains the system-call and fault low-level handling routines.
-+ * This also contains the timer-interrupt handler, as well as all interrupts
-+ * and faults that can result in a task-switch.
-+ *
-+ * NOTE: This code handles signal-recognition, which happens every time
-+ * after a timer-interrupt and after each system call.
-+ *
-+ * I changed all the .align's to 4 (16 byte alignment), as that's faster
-+ * on a 486.
-+ *
-+ * Stack layout in 'syscall_exit':
-+ * ptrace needs to have all regs on the stack.
-+ * if the order here is changed, it needs to be
-+ * updated in fork.c:copy_process, signal.c:do_signal,
-+ * ptrace.c and ptrace.h
-+ *
-+ * 0(%esp) - %ebx
-+ * 4(%esp) - %ecx
-+ * 8(%esp) - %edx
-+ * C(%esp) - %esi
-+ * 10(%esp) - %edi
-+ * 14(%esp) - %ebp
-+ * 18(%esp) - %eax
-+ * 1C(%esp) - %ds
-+ * 20(%esp) - %es
-+ * 24(%esp) - %fs
-+ * 28(%esp) - orig_eax
-+ * 2C(%esp) - %eip
-+ * 30(%esp) - %cs
-+ * 34(%esp) - %eflags
-+ * 38(%esp) - %oldesp
-+ * 3C(%esp) - %oldss
-+ *
-+ * "current" is in register %ebx during any slow entries.
-+ */
-+
-+#include <linux/linkage.h>
-+#include <asm/thread_info.h>
-+#include <asm/irqflags.h>
-+#include <asm/errno.h>
-+#include <asm/segment.h>
-+#include <asm/smp.h>
-+#include <asm/page.h>
-+#include <asm/desc.h>
-+#include <asm/percpu.h>
-+#include <asm/dwarf2.h>
-+#include "irq_vectors.h"
-+#include <xen/interface/xen.h>
-+
-+/*
-+ * We use macros for low-level operations which need to be overridden
-+ * for paravirtualization. The following will never clobber any registers:
-+ * INTERRUPT_RETURN (aka. "iret")
-+ * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
-+ * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
-+ *
-+ * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
-+ * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
-+ * Allowing a register to be clobbered can shrink the paravirt replacement
-+ * enough to patch inline, increasing performance.
-+ */
-+
-+#define nr_syscalls ((syscall_table_size)/4)
-+
-+CF_MASK = 0x00000001
-+TF_MASK = 0x00000100
-+IF_MASK = 0x00000200
-+DF_MASK = 0x00000400
-+NT_MASK = 0x00004000
-+VM_MASK = 0x00020000
-+/* Pseudo-eflags. */
-+NMI_MASK = 0x80000000
-+
-+#ifdef CONFIG_PREEMPT
-+#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
-+#else
-+#define preempt_stop(clobbers)
-+#define resume_kernel restore_nocheck
-+#endif
-+
-+.macro TRACE_IRQS_IRET
-+#ifdef CONFIG_TRACE_IRQFLAGS
-+ testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off?
-+ jz 1f
-+ TRACE_IRQS_ON
-+1:
-+#endif
-+.endm
-+
-+#ifdef CONFIG_VM86
-+#define resume_userspace_sig check_userspace
-+#else
-+#define resume_userspace_sig resume_userspace
-+#endif
-+
-+#define SAVE_ALL \
-+ cld; \
-+ pushl %fs; \
-+ CFI_ADJUST_CFA_OFFSET 4;\
-+ /*CFI_REL_OFFSET fs, 0;*/\
-+ pushl %es; \
-+ CFI_ADJUST_CFA_OFFSET 4;\
-+ /*CFI_REL_OFFSET es, 0;*/\
-+ pushl %ds; \
-+ CFI_ADJUST_CFA_OFFSET 4;\
-+ /*CFI_REL_OFFSET ds, 0;*/\
-+ pushl %eax; \
-+ CFI_ADJUST_CFA_OFFSET 4;\
-+ CFI_REL_OFFSET eax, 0;\
-+ pushl %ebp; \
-+ CFI_ADJUST_CFA_OFFSET 4;\
-+ CFI_REL_OFFSET ebp, 0;\
-+ pushl %edi; \
-+ CFI_ADJUST_CFA_OFFSET 4;\
-+ CFI_REL_OFFSET edi, 0;\
-+ pushl %esi; \
-+ CFI_ADJUST_CFA_OFFSET 4;\
-+ CFI_REL_OFFSET esi, 0;\
-+ pushl %edx; \
-+ CFI_ADJUST_CFA_OFFSET 4;\
-+ CFI_REL_OFFSET edx, 0;\
-+ pushl %ecx; \
-+ CFI_ADJUST_CFA_OFFSET 4;\
-+ CFI_REL_OFFSET ecx, 0;\
-+ pushl %ebx; \
-+ CFI_ADJUST_CFA_OFFSET 4;\
-+ CFI_REL_OFFSET ebx, 0;\
-+ movl $(__USER_DS), %edx; \
-+ movl %edx, %ds; \
-+ movl %edx, %es; \
-+ movl $(__KERNEL_PERCPU), %edx; \
-+ movl %edx, %fs
-+
-+#define RESTORE_INT_REGS \
-+ popl %ebx; \
-+ CFI_ADJUST_CFA_OFFSET -4;\
-+ CFI_RESTORE ebx;\
-+ popl %ecx; \
-+ CFI_ADJUST_CFA_OFFSET -4;\
-+ CFI_RESTORE ecx;\
-+ popl %edx; \
-+ CFI_ADJUST_CFA_OFFSET -4;\
-+ CFI_RESTORE edx;\
-+ popl %esi; \
-+ CFI_ADJUST_CFA_OFFSET -4;\
-+ CFI_RESTORE esi;\
-+ popl %edi; \
-+ CFI_ADJUST_CFA_OFFSET -4;\
-+ CFI_RESTORE edi;\
-+ popl %ebp; \
-+ CFI_ADJUST_CFA_OFFSET -4;\
-+ CFI_RESTORE ebp;\
-+ popl %eax; \
-+ CFI_ADJUST_CFA_OFFSET -4;\
-+ CFI_RESTORE eax
-+
-+#define RESTORE_REGS \
-+ RESTORE_INT_REGS; \
-+1: popl %ds; \
-+ CFI_ADJUST_CFA_OFFSET -4;\
-+ /*CFI_RESTORE ds;*/\
-+2: popl %es; \
-+ CFI_ADJUST_CFA_OFFSET -4;\
-+ /*CFI_RESTORE es;*/\
-+3: popl %fs; \
-+ CFI_ADJUST_CFA_OFFSET -4;\
-+ /*CFI_RESTORE fs;*/\
-+.pushsection .fixup,"ax"; \
-+4: movl $0,(%esp); \
-+ jmp 1b; \
-+5: movl $0,(%esp); \
-+ jmp 2b; \
-+6: movl $0,(%esp); \
-+ jmp 3b; \
-+.section __ex_table,"a";\
-+ .align 4; \
-+ .long 1b,4b; \
-+ .long 2b,5b; \
-+ .long 3b,6b; \
-+.popsection
-+
-+#define RING0_INT_FRAME \
-+ CFI_STARTPROC simple;\
-+ CFI_SIGNAL_FRAME;\
-+ CFI_DEF_CFA esp, 3*4;\
-+ /*CFI_OFFSET cs, -2*4;*/\
-+ CFI_OFFSET eip, -3*4
-+
-+#define RING0_EC_FRAME \
-+ CFI_STARTPROC simple;\
-+ CFI_SIGNAL_FRAME;\
-+ CFI_DEF_CFA esp, 4*4;\
-+ /*CFI_OFFSET cs, -2*4;*/\
-+ CFI_OFFSET eip, -3*4
-+
-+#define RING0_PTREGS_FRAME \
-+ CFI_STARTPROC simple;\
-+ CFI_SIGNAL_FRAME;\
-+ CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
-+ /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
-+ CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
-+ /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
-+ /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
-+ CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
-+ CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
-+ CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
-+ CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
-+ CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
-+ CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
-+ CFI_OFFSET ebx, PT_EBX-PT_OLDESP
-+
-+ENTRY(ret_from_fork)
-+ CFI_STARTPROC
-+ pushl %eax
-+ CFI_ADJUST_CFA_OFFSET 4
-+ call schedule_tail
-+ GET_THREAD_INFO(%ebp)
-+ popl %eax
-+ CFI_ADJUST_CFA_OFFSET -4
-+ pushl $0x0202 # Reset kernel eflags
-+ CFI_ADJUST_CFA_OFFSET 4
-+ popfl
-+ CFI_ADJUST_CFA_OFFSET -4
-+ jmp syscall_exit
-+ CFI_ENDPROC
-+END(ret_from_fork)
-+
-+/*
-+ * Return to user mode is not as complex as all this looks,
-+ * but we want the default path for a system call return to
-+ * go as quickly as possible which is why some of this is
-+ * less clear than it otherwise should be.
-+ */
-+
-+ # userspace resumption stub bypassing syscall exit tracing
-+ ALIGN
-+ RING0_PTREGS_FRAME
-+ret_from_exception:
-+ preempt_stop(CLBR_ANY)
-+ret_from_intr:
-+ GET_THREAD_INFO(%ebp)
-+check_userspace:
-+ movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
-+ movb PT_CS(%esp), %al
-+ andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
-+ cmpl $USER_RPL, %eax
-+ jb resume_kernel # not returning to v8086 or userspace
-+
-+ENTRY(resume_userspace)
-+ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
-+ # setting need_resched or sigpending
-+ # between sampling and the iret
-+ movl TI_flags(%ebp), %ecx
-+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
-+ # int/exception return?
-+ jne work_pending
-+ jmp restore_all
-+END(ret_from_exception)
-+
-+#ifdef CONFIG_PREEMPT
-+ENTRY(resume_kernel)
-+ DISABLE_INTERRUPTS(CLBR_ANY)
-+ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
-+ jnz restore_nocheck
-+need_resched:
-+ movl TI_flags(%ebp), %ecx # need_resched set ?
-+ testb $_TIF_NEED_RESCHED, %cl
-+ jz restore_all
-+ testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off (exception path) ?
-+ jz restore_all
-+ call preempt_schedule_irq
-+ jmp need_resched
-+END(resume_kernel)
-+#endif
-+ CFI_ENDPROC
-+
-+/* SYSENTER_RETURN points to after the "sysenter" instruction in
-+ the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */
-+
-+ # sysenter call handler stub
-+ENTRY(sysenter_entry)
-+ CFI_STARTPROC simple
-+ CFI_SIGNAL_FRAME
-+ CFI_DEF_CFA esp, 0
-+ CFI_REGISTER esp, ebp
-+ movl SYSENTER_stack_esp0(%esp),%esp
-+sysenter_past_esp:
-+ /*
-+ * No need to follow this irqs on/off section: the syscall
-+ * disabled irqs and here we enable it straight after entry:
-+ */
-+ ENABLE_INTERRUPTS(CLBR_NONE)
-+ pushl $(__USER_DS)
-+ CFI_ADJUST_CFA_OFFSET 4
-+ /*CFI_REL_OFFSET ss, 0*/
-+ pushl %ebp
-+ CFI_ADJUST_CFA_OFFSET 4
-+ CFI_REL_OFFSET esp, 0
-+ pushfl
-+ CFI_ADJUST_CFA_OFFSET 4
-+ pushl $(__USER_CS)
-+ CFI_ADJUST_CFA_OFFSET 4
-+ /*CFI_REL_OFFSET cs, 0*/
-+ /*
-+ * Push current_thread_info()->sysenter_return to the stack.
-+ * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
-+ * pushed above; +8 corresponds to copy_thread's esp0 setting.
-+ */
-+ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
-+ CFI_ADJUST_CFA_OFFSET 4
-+ CFI_REL_OFFSET eip, 0
-+
-+/*
-+ * Load the potential sixth argument from user stack.
-+ * Careful about security.
-+ */
-+ cmpl $__PAGE_OFFSET-3,%ebp
-+ jae syscall_fault
-+1: movl (%ebp),%ebp
-+.section __ex_table,"a"
-+ .align 4
-+ .long 1b,syscall_fault
-+.previous
-+
-+ pushl %eax
-+ CFI_ADJUST_CFA_OFFSET 4
-+ SAVE_ALL
-+ GET_THREAD_INFO(%ebp)
-+
-+ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-+ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
-+ jnz syscall_trace_entry
-+ cmpl $(nr_syscalls), %eax
-+ jae syscall_badsys
-+ call *sys_call_table(,%eax,4)
-+ movl %eax,PT_EAX(%esp)
-+ DISABLE_INTERRUPTS(CLBR_ANY)
-+ TRACE_IRQS_OFF
-+ movl TI_flags(%ebp), %ecx
-+ testw $_TIF_ALLWORK_MASK, %cx
-+ jne syscall_exit_work
-+/* if something modifies registers it must also disable sysexit */
-+ movl PT_EIP(%esp), %edx
-+ movl PT_OLDESP(%esp), %ecx
-+ xorl %ebp,%ebp
-+ TRACE_IRQS_ON
-+1: mov PT_FS(%esp), %fs
-+ ENABLE_INTERRUPTS_SYSEXIT
-+ CFI_ENDPROC
-+.pushsection .fixup,"ax"
-+2: movl $0,PT_FS(%esp)
-+ jmp 1b
-+.section __ex_table,"a"
-+ .align 4
-+ .long 1b,2b
-+.popsection
-+ENDPROC(sysenter_entry)
-+
-+ # system call handler stub
-+ENTRY(system_call)
-+ RING0_INT_FRAME # can't unwind into user space anyway
-+ pushl %eax # save orig_eax
-+ CFI_ADJUST_CFA_OFFSET 4
-+ SAVE_ALL
-+ GET_THREAD_INFO(%ebp)
-+ # system call tracing in operation / emulation
-+ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-+ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
-+ jnz syscall_trace_entry
-+ cmpl $(nr_syscalls), %eax
-+ jae syscall_badsys
-+syscall_call:
-+ call *sys_call_table(,%eax,4)
-+ movl %eax,PT_EAX(%esp) # store the return value
-+syscall_exit:
-+ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
-+ # setting need_resched or sigpending
-+ # between sampling and the iret
-+ TRACE_IRQS_OFF
-+ testl $TF_MASK,PT_EFLAGS(%esp) # If tracing set singlestep flag on exit
-+ jz no_singlestep
-+ orl $_TIF_SINGLESTEP,TI_flags(%ebp)
-+no_singlestep:
-+ movl TI_flags(%ebp), %ecx
-+ testw $_TIF_ALLWORK_MASK, %cx # current->work
-+ jne syscall_exit_work
-+
-+restore_all:
-+#ifndef CONFIG_XEN
-+ movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
-+ # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
-+ # are returning to the kernel.
-+ # See comments in process.c:copy_thread() for details.
-+ movb PT_OLDSS(%esp), %ah
-+ movb PT_CS(%esp), %al
-+ andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
-+ cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
-+ CFI_REMEMBER_STATE
-+ je ldt_ss # returning to user-space with LDT SS
-+restore_nocheck:
-+#else
-+restore_nocheck:
-+ movl PT_EFLAGS(%esp), %eax
-+ testl $(VM_MASK|NMI_MASK), %eax
-+ CFI_REMEMBER_STATE
-+ jnz hypervisor_iret
-+ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
-+ GET_VCPU_INFO
-+ andb evtchn_upcall_mask(%esi),%al
-+ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
-+ CFI_REMEMBER_STATE
-+ jnz restore_all_enable_events # != 0 => enable event delivery
-+#endif
-+ TRACE_IRQS_IRET
-+restore_nocheck_notrace:
-+ RESTORE_REGS
-+ addl $4, %esp # skip orig_eax/error_code
-+ CFI_ADJUST_CFA_OFFSET -4
-+1: INTERRUPT_RETURN
-+.section .fixup,"ax"
-+iret_exc:
-+#ifndef CONFIG_XEN
-+ ENABLE_INTERRUPTS(CLBR_NONE)
-+#endif
-+ pushl $0 # no error code
-+ pushl $do_iret_error
-+ jmp error_code
-+.previous
-+.section __ex_table,"a"
-+ .align 4
-+ .long 1b,iret_exc
-+.previous
-+
-+ CFI_RESTORE_STATE
-+#ifndef CONFIG_XEN
-+ldt_ss:
-+ larl PT_OLDSS(%esp), %eax
-+ jnz restore_nocheck
-+ testl $0x00400000, %eax # returning to 32bit stack?
-+ jnz restore_nocheck # all right, normal return
-+
-+#ifdef CONFIG_PARAVIRT
-+ /*
-+ * The kernel can't run on a non-flat stack if paravirt mode
-+ * is active. Rather than try to fixup the high bits of
-+ * ESP, bypass this code entirely. This may break DOSemu
-+ * and/or Wine support in a paravirt VM, although the option
-+ * is still available to implement the setting of the high
-+ * 16-bits in the INTERRUPT_RETURN paravirt-op.
-+ */
-+ cmpl $0, paravirt_ops+PARAVIRT_enabled
-+ jne restore_nocheck
-+#endif
-+
-+ /* If returning to userspace with 16bit stack,
-+ * try to fix the higher word of ESP, as the CPU
-+ * won't restore it.
-+ * This is an "official" bug of all the x86-compatible
-+ * CPUs, which we can try to work around to make
-+ * dosemu and wine happy. */
-+ movl PT_OLDESP(%esp), %eax
-+ movl %esp, %edx
-+ call patch_espfix_desc
-+ pushl $__ESPFIX_SS
-+ CFI_ADJUST_CFA_OFFSET 4
-+ pushl %eax
-+ CFI_ADJUST_CFA_OFFSET 4
-+ DISABLE_INTERRUPTS(CLBR_EAX)
-+ TRACE_IRQS_OFF
-+ lss (%esp), %esp
-+ CFI_ADJUST_CFA_OFFSET -8
-+ jmp restore_nocheck
-+#else
-+ ALIGN
-+restore_all_enable_events:
-+ TRACE_IRQS_ON
-+ __ENABLE_INTERRUPTS
-+scrit: /**** START OF CRITICAL REGION ****/
-+ __TEST_PENDING
-+ jnz 14f # process more events if necessary...
-+ RESTORE_REGS
-+ addl $4, %esp
-+ CFI_ADJUST_CFA_OFFSET -4
-+1: INTERRUPT_RETURN
-+.section __ex_table,"a"
-+ .align 4
-+ .long 1b,iret_exc
-+.previous
-+14: __DISABLE_INTERRUPTS
-+ TRACE_IRQS_OFF
-+ jmp 11f
-+ecrit: /**** END OF CRITICAL REGION ****/
-+
-+ CFI_RESTORE_STATE
-+hypervisor_iret:
-+ andl $~NMI_MASK, PT_EFLAGS(%esp)
-+ RESTORE_REGS
-+ addl $4, %esp
-+ CFI_ADJUST_CFA_OFFSET -4
-+ jmp hypercall_page + (__HYPERVISOR_iret * 32)
-+#endif
-+ CFI_ENDPROC
-+ENDPROC(system_call)
-+
-+ # perform work that needs to be done immediately before resumption
-+ ALIGN
-+ RING0_PTREGS_FRAME # can't unwind into user space anyway
-+work_pending:
-+ testb $_TIF_NEED_RESCHED, %cl
-+ jz work_notifysig
-+work_resched:
-+ call schedule
-+ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
-+ # setting need_resched or sigpending
-+ # between sampling and the iret
-+ TRACE_IRQS_OFF
-+ movl TI_flags(%ebp), %ecx
-+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
-+ # than syscall tracing?
-+ jz restore_all
-+ testb $_TIF_NEED_RESCHED, %cl
-+ jnz work_resched
-+
-+work_notifysig: # deal with pending signals and
-+ # notify-resume requests
-+#ifdef CONFIG_VM86
-+ testl $VM_MASK, PT_EFLAGS(%esp)
-+ movl %esp, %eax
-+ jne work_notifysig_v86 # returning to kernel-space or
-+ # vm86-space
-+ xorl %edx, %edx
-+ call do_notify_resume
-+ jmp resume_userspace_sig
-+
-+ ALIGN
-+work_notifysig_v86:
-+ pushl %ecx # save ti_flags for do_notify_resume
-+ CFI_ADJUST_CFA_OFFSET 4
-+ call save_v86_state # %eax contains pt_regs pointer
-+ popl %ecx
-+ CFI_ADJUST_CFA_OFFSET -4
-+ movl %eax, %esp
-+#else
-+ movl %esp, %eax
-+#endif
-+ xorl %edx, %edx
-+ call do_notify_resume
-+ jmp resume_userspace_sig
-+END(work_pending)
-+
-+ # perform syscall exit tracing
-+ ALIGN
-+syscall_trace_entry:
-+ movl $-ENOSYS,PT_EAX(%esp)
-+ movl %esp, %eax
-+ xorl %edx,%edx
-+ call do_syscall_trace
-+ cmpl $0, %eax
-+ jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU,
-+ # so must skip actual syscall
-+ movl PT_ORIG_EAX(%esp), %eax
-+ cmpl $(nr_syscalls), %eax
-+ jnae syscall_call
-+ jmp syscall_exit
-+END(syscall_trace_entry)
-+
-+ # perform syscall exit tracing
-+ ALIGN
-+syscall_exit_work:
-+ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
-+ jz work_pending
-+ TRACE_IRQS_ON
-+ ENABLE_INTERRUPTS(CLBR_ANY) # could let do_syscall_trace() call
-+ # schedule() instead
-+ movl %esp, %eax
-+ movl $1, %edx
-+ call do_syscall_trace
-+ jmp resume_userspace
-+END(syscall_exit_work)
-+ CFI_ENDPROC
-+
-+ RING0_INT_FRAME # can't unwind into user space anyway
-+syscall_fault:
-+ pushl %eax # save orig_eax
-+ CFI_ADJUST_CFA_OFFSET 4
-+ SAVE_ALL
-+ GET_THREAD_INFO(%ebp)
-+ movl $-EFAULT,PT_EAX(%esp)
-+ jmp resume_userspace
-+END(syscall_fault)
-+
-+syscall_badsys:
-+ movl $-ENOSYS,PT_EAX(%esp)
-+ jmp resume_userspace
-+END(syscall_badsys)
-+ CFI_ENDPROC
-+
-+#ifndef CONFIG_XEN
-+#define FIXUP_ESPFIX_STACK \
-+ /* since we are on the wrong stack, we can't make it C code :( */ \
-+ PER_CPU(gdt_page, %ebx); \
-+ GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
-+ addl %esp, %eax; \
-+ pushl $__KERNEL_DS; \
-+ CFI_ADJUST_CFA_OFFSET 4; \
-+ pushl %eax; \
-+ CFI_ADJUST_CFA_OFFSET 4; \
-+ lss (%esp), %esp; \
-+ CFI_ADJUST_CFA_OFFSET -8;
-+#define UNWIND_ESPFIX_STACK \
-+ movl %ss, %eax; \
-+ /* see if on espfix stack */ \
-+ cmpw $__ESPFIX_SS, %ax; \
-+ jne 27f; \
-+ movl $__KERNEL_DS, %eax; \
-+ movl %eax, %ds; \
-+ movl %eax, %es; \
-+ /* switch to normal stack */ \
-+ FIXUP_ESPFIX_STACK; \
-+27:;
-+
-+/*
-+ * Build the entry stubs and pointer table with
-+ * some assembler magic.
-+ */
-+.data
-+ENTRY(interrupt)
-+.text
-+
-+ENTRY(irq_entries_start)
-+ RING0_INT_FRAME
-+vector=0
-+.rept NR_IRQS
-+ ALIGN
-+ .if vector
-+ CFI_ADJUST_CFA_OFFSET -4
-+ .endif
-+1: pushl $~(vector)
-+ CFI_ADJUST_CFA_OFFSET 4
-+ jmp common_interrupt
-+ .previous
-+ .long 1b
-+ .text
-+vector=vector+1
-+.endr
-+END(irq_entries_start)
-+
-+.previous
-+END(interrupt)
-+.previous
-+
-+/*
-+ * the CPU automatically disables interrupts when executing an IRQ vector,
-+ * so IRQ-flags tracing has to follow that:
-+ */
-+ ALIGN
-+common_interrupt:
-+ SAVE_ALL
-+ TRACE_IRQS_OFF
-+ movl %esp,%eax
-+ call do_IRQ
-+ jmp ret_from_intr
-+ENDPROC(common_interrupt)
-+ CFI_ENDPROC
-+
-+#define BUILD_INTERRUPT(name, nr) \
-+ENTRY(name) \
-+ RING0_INT_FRAME; \
-+ pushl $~(nr); \
-+ CFI_ADJUST_CFA_OFFSET 4; \
-+ SAVE_ALL; \
-+ TRACE_IRQS_OFF \
-+ movl %esp,%eax; \
-+ call smp_##name; \
-+ jmp ret_from_intr; \
-+ CFI_ENDPROC; \
-+ENDPROC(name)
-+
-+/* The include is where all of the SMP etc. interrupts come from */
-+#include "entry_arch.h"
-+
-+#else
-+#define UNWIND_ESPFIX_STACK
-+#endif
-+
-+KPROBE_ENTRY(page_fault)
-+ RING0_EC_FRAME
-+ pushl $do_page_fault
-+ CFI_ADJUST_CFA_OFFSET 4
-+ ALIGN
-+error_code:
-+ /* the function address is in %fs's slot on the stack */
-+ pushl %es
-+ CFI_ADJUST_CFA_OFFSET 4
-+ /*CFI_REL_OFFSET es, 0*/
-+ pushl %ds
-+ CFI_ADJUST_CFA_OFFSET 4
-+ /*CFI_REL_OFFSET ds, 0*/
-+ pushl %eax
-+ CFI_ADJUST_CFA_OFFSET 4
-+ CFI_REL_OFFSET eax, 0
-+ pushl %ebp
-+ CFI_ADJUST_CFA_OFFSET 4
-+ CFI_REL_OFFSET ebp, 0
-+ pushl %edi
-+ CFI_ADJUST_CFA_OFFSET 4
-+ CFI_REL_OFFSET edi, 0
-+ pushl %esi
-+ CFI_ADJUST_CFA_OFFSET 4
-+ CFI_REL_OFFSET esi, 0
-+ pushl %edx
-+ CFI_ADJUST_CFA_OFFSET 4
-+ CFI_REL_OFFSET edx, 0
-+ pushl %ecx
-+ CFI_ADJUST_CFA_OFFSET 4
-+ CFI_REL_OFFSET ecx, 0
-+ pushl %ebx
-+ CFI_ADJUST_CFA_OFFSET 4
-+ CFI_REL_OFFSET ebx, 0
-+ cld
-+ pushl %fs
-+ CFI_ADJUST_CFA_OFFSET 4
-+ /*CFI_REL_OFFSET fs, 0*/
-+ movl $(__KERNEL_PERCPU), %ecx
-+ movl %ecx, %fs
-+ UNWIND_ESPFIX_STACK
-+ popl %ecx
-+ CFI_ADJUST_CFA_OFFSET -4
-+ /*CFI_REGISTER es, ecx*/
-+ movl PT_FS(%esp), %edi # get the function address
-+ movl PT_ORIG_EAX(%esp), %edx # get the error code
-+ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
-+ mov %ecx, PT_FS(%esp)
-+ /*CFI_REL_OFFSET fs, ES*/
-+ movl $(__USER_DS), %ecx
-+ movl %ecx, %ds
-+ movl %ecx, %es
-+ movl %esp,%eax # pt_regs pointer
-+ call *%edi
-+ jmp ret_from_exception
-+ CFI_ENDPROC
-+KPROBE_END(page_fault)
-+
-+#ifdef CONFIG_XEN
-+# A note on the "critical region" in our callback handler.
-+# We want to avoid stacking callback handlers due to events occurring
-+# during handling of the last event. To do this, we keep events disabled
-+# until we've done all processing. HOWEVER, we must enable events before
-+# popping the stack frame (can't be done atomically) and so it would still
-+# be possible to get enough handler activations to overflow the stack.
-+# Although unlikely, bugs of that kind are hard to track down, so we'd
-+# like to avoid the possibility.
-+# So, on entry to the handler we detect whether we interrupted an
-+# existing activation in its critical region -- if so, we pop the current
-+# activation and restart the handler using the previous one.
-+#
-+# The sysexit critical region is slightly different. sysexit
-+# atomically removes the entire stack frame. If we interrupt in the
-+# critical region we know that the entire frame is present and correct
-+# so we can simply throw away the new one.
-+ENTRY(hypervisor_callback)
-+ RING0_INT_FRAME
-+ pushl %eax
-+ CFI_ADJUST_CFA_OFFSET 4
-+ SAVE_ALL
-+ movl PT_EIP(%esp),%eax
-+ cmpl $scrit,%eax
-+ jb 11f
-+ cmpl $ecrit,%eax
-+ jb critical_region_fixup
-+ cmpl $sysexit_scrit,%eax
-+ jb 11f
-+ cmpl $sysexit_ecrit,%eax
-+ ja 11f
-+ addl $PT_OLDESP,%esp # Remove eflags...ebx from stack frame.
-+11: push %esp
-+ CFI_ADJUST_CFA_OFFSET 4
-+ call evtchn_do_upcall
-+ add $4,%esp
-+ CFI_ADJUST_CFA_OFFSET -4
-+ jmp ret_from_intr
-+ CFI_ENDPROC
-+
-+# [How we do the fixup]. We want to merge the current stack frame with the
-+# just-interrupted frame. How we do this depends on where in the critical
-+# region the interrupted handler was executing, and so how many saved
-+# registers are in each frame. We do this quickly using the lookup table
-+# 'critical_fixup_table'. For each byte offset in the critical region, it
-+# provides the number of bytes which have already been popped from the
-+# interrupted stack frame.
-+critical_region_fixup:
-+ movzbl critical_fixup_table-scrit(%eax),%ecx # %eax contains num bytes popped
-+ cmpb $0xff,%cl # 0xff => vcpu_info critical region
-+ jne 15f
-+ xorl %ecx,%ecx
-+15: leal (%esp,%ecx),%esi # %esi points at end of src region
-+ leal PT_OLDESP(%esp),%edi # %edi points at end of dst region
-+ shrl $2,%ecx # convert words to bytes
-+ je 17f # skip loop if nothing to copy
-+16: subl $4,%esi # pre-decrementing copy loop
-+ subl $4,%edi
-+ movl (%esi),%eax
-+ movl %eax,(%edi)
-+ loop 16b
-+17: movl %edi,%esp # final %edi is top of merged stack
-+ jmp 11b
-+
-+.section .rodata,"a"
-+critical_fixup_table:
-+ .byte 0xff,0xff,0xff # testb $0xff,(%esi) = __TEST_PENDING
-+ .byte 0xff,0xff # jnz 14f
-+ .byte 0x00 # pop %ebx
-+ .byte 0x04 # pop %ecx
-+ .byte 0x08 # pop %edx
-+ .byte 0x0c # pop %esi
-+ .byte 0x10 # pop %edi
-+ .byte 0x14 # pop %ebp
-+ .byte 0x18 # pop %eax
-+ .byte 0x1c # pop %ds
-+ .byte 0x20 # pop %es
-+ .byte 0x24,0x24 # pop %fs
-+ .byte 0x28,0x28,0x28 # add $4,%esp
-+ .byte 0x2c # iret
-+ .byte 0xff,0xff,0xff,0xff # movb $1,1(%esi)
-+ .byte 0x00,0x00 # jmp 11b
-+.previous
-+
-+# Hypervisor uses this for application faults while it executes.
-+# We get here for two reasons:
-+# 1. Fault while reloading DS, ES, FS or GS
-+# 2. Fault while executing IRET
-+# Category 1 we fix up by reattempting the load, and zeroing the segment
-+# register if the load fails.
-+# Category 2 we fix up by jumping to do_iret_error. We cannot use the
-+# normal Linux return path in this case because if we use the IRET hypercall
-+# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
-+# We distinguish between categories by maintaining a status value in EAX.
-+ENTRY(failsafe_callback)
-+ pushl %eax
-+ movl $1,%eax
-+1: mov 4(%esp),%ds
-+2: mov 8(%esp),%es
-+3: mov 12(%esp),%fs
-+4: mov 16(%esp),%gs
-+ testl %eax,%eax
-+ popl %eax
-+ jz 5f
-+ addl $16,%esp # EAX != 0 => Category 2 (Bad IRET)
-+ jmp iret_exc
-+5: addl $16,%esp # EAX == 0 => Category 1 (Bad segment)
-+ RING0_INT_FRAME
-+ pushl $0
-+ SAVE_ALL
-+ jmp ret_from_exception
-+.section .fixup,"ax"; \
-+6: xorl %eax,%eax; \
-+ movl %eax,4(%esp); \
-+ jmp 1b; \
-+7: xorl %eax,%eax; \
-+ movl %eax,8(%esp); \
-+ jmp 2b; \
-+8: xorl %eax,%eax; \
-+ movl %eax,12(%esp); \
-+ jmp 3b; \
-+9: xorl %eax,%eax; \
-+ movl %eax,16(%esp); \
-+ jmp 4b; \
-+.previous; \
-+.section __ex_table,"a"; \
-+ .align 4; \
-+ .long 1b,6b; \
-+ .long 2b,7b; \
-+ .long 3b,8b; \
-+ .long 4b,9b; \
-+.previous
-+#endif
-+ CFI_ENDPROC
-+
-+ENTRY(coprocessor_error)
-+ RING0_INT_FRAME
-+ pushl $0
-+ CFI_ADJUST_CFA_OFFSET 4
-+ pushl $do_coprocessor_error
-+ CFI_ADJUST_CFA_OFFSET 4
-+ jmp error_code
-+ CFI_ENDPROC
-+END(coprocessor_error)
-+
-+ENTRY(simd_coprocessor_error)
-+ RING0_INT_FRAME
-+ pushl $0
-+ CFI_ADJUST_CFA_OFFSET 4
-+ pushl $do_simd_coprocessor_error
-+ CFI_ADJUST_CFA_OFFSET 4
-+ jmp error_code
-+ CFI_ENDPROC
-+END(simd_coprocessor_error)
-+
-+ENTRY(device_not_available)
-+ RING0_INT_FRAME
-+ pushl $-1 # mark this as an int
-+ CFI_ADJUST_CFA_OFFSET 4
-+ SAVE_ALL
-+#ifndef CONFIG_XEN
-+ GET_CR0_INTO_EAX
-+ testl $0x4, %eax # EM (math emulation bit)
-+ je device_available_emulate
-+ pushl $0 # temporary storage for ORIG_EIP
-+ CFI_ADJUST_CFA_OFFSET 4
-+ call math_emulate
-+ addl $4, %esp
-+ CFI_ADJUST_CFA_OFFSET -4
-+ jmp ret_from_exception
-+device_available_emulate:
-+#endif
-+ preempt_stop(CLBR_ANY)
-+ call math_state_restore
-+ jmp ret_from_exception
-+ CFI_ENDPROC
-+END(device_not_available)
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Debug traps and NMI can happen at the one SYSENTER instruction
-+ * that sets up the real kernel stack. Check here, since we can't
-+ * allow the wrong stack to be used.
-+ *
-+ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
-+ * already pushed 3 words if it hits on the sysenter instruction:
-+ * eflags, cs and eip.
-+ *
-+ * We just load the right stack, and push the three (known) values
-+ * by hand onto the new stack - while updating the return eip past
-+ * the instruction that would have done it for sysenter.
-+ */
-+#define FIX_STACK(offset, ok, label) \
-+ cmpw $__KERNEL_CS,4(%esp); \
-+ jne ok; \
-+label: \
-+ movl SYSENTER_stack_esp0+offset(%esp),%esp; \
-+ CFI_DEF_CFA esp, 0; \
-+ CFI_UNDEFINED eip; \
-+ pushfl; \
-+ CFI_ADJUST_CFA_OFFSET 4; \
-+ pushl $__KERNEL_CS; \
-+ CFI_ADJUST_CFA_OFFSET 4; \
-+ pushl $sysenter_past_esp; \
-+ CFI_ADJUST_CFA_OFFSET 4; \
-+ CFI_REL_OFFSET eip, 0
-+#endif /* CONFIG_XEN */
-+
-+KPROBE_ENTRY(debug)
-+ RING0_INT_FRAME
-+#ifndef CONFIG_XEN
-+ cmpl $sysenter_entry,(%esp)
-+ jne debug_stack_correct
-+ FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
-+debug_stack_correct:
-+#endif /* !CONFIG_XEN */
-+ pushl $-1 # mark this as an int
-+ CFI_ADJUST_CFA_OFFSET 4
-+ SAVE_ALL
-+ xorl %edx,%edx # error code 0
-+ movl %esp,%eax # pt_regs pointer
-+ call do_debug
-+ jmp ret_from_exception
-+ CFI_ENDPROC
-+KPROBE_END(debug)
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * NMI is doubly nasty. It can happen _while_ we're handling
-+ * a debug fault, and the debug fault hasn't yet been able to
-+ * clear up the stack. So we first check whether we got an
-+ * NMI on the sysenter entry path, but after that we need to
-+ * check whether we got an NMI on the debug path where the debug
-+ * fault happened on the sysenter path.
-+ */
-+KPROBE_ENTRY(nmi)
-+ RING0_INT_FRAME
-+ pushl %eax
-+ CFI_ADJUST_CFA_OFFSET 4
-+ movl %ss, %eax
-+ cmpw $__ESPFIX_SS, %ax
-+ popl %eax
-+ CFI_ADJUST_CFA_OFFSET -4
-+ je nmi_espfix_stack
-+ cmpl $sysenter_entry,(%esp)
-+ je nmi_stack_fixup
-+ pushl %eax
-+ CFI_ADJUST_CFA_OFFSET 4
-+ movl %esp,%eax
-+ /* Do not access memory above the end of our stack page,
-+ * it might not exist.
-+ */
-+ andl $(THREAD_SIZE-1),%eax
-+ cmpl $(THREAD_SIZE-20),%eax
-+ popl %eax
-+ CFI_ADJUST_CFA_OFFSET -4
-+ jae nmi_stack_correct
-+ cmpl $sysenter_entry,12(%esp)
-+ je nmi_debug_stack_check
-+nmi_stack_correct:
-+ /* We have a RING0_INT_FRAME here */
-+ pushl %eax
-+ CFI_ADJUST_CFA_OFFSET 4
-+ SAVE_ALL
-+ xorl %edx,%edx # zero error code
-+ movl %esp,%eax # pt_regs pointer
-+ call do_nmi
-+ jmp restore_nocheck_notrace
-+ CFI_ENDPROC
-+
-+nmi_stack_fixup:
-+ RING0_INT_FRAME
-+ FIX_STACK(12,nmi_stack_correct, 1)
-+ jmp nmi_stack_correct
-+
-+nmi_debug_stack_check:
-+ /* We have a RING0_INT_FRAME here */
-+ cmpw $__KERNEL_CS,16(%esp)
-+ jne nmi_stack_correct
-+ cmpl $debug,(%esp)
-+ jb nmi_stack_correct
-+ cmpl $debug_esp_fix_insn,(%esp)
-+ ja nmi_stack_correct
-+ FIX_STACK(24,nmi_stack_correct, 1)
-+ jmp nmi_stack_correct
-+
-+nmi_espfix_stack:
-+ /* We have a RING0_INT_FRAME here.
-+ *
-+ * create the pointer to lss back
-+ */
-+ pushl %ss
-+ CFI_ADJUST_CFA_OFFSET 4
-+ pushl %esp
-+ CFI_ADJUST_CFA_OFFSET 4
-+ addw $4, (%esp)
-+ /* copy the iret frame of 12 bytes */
-+ .rept 3
-+ pushl 16(%esp)
-+ CFI_ADJUST_CFA_OFFSET 4
-+ .endr
-+ pushl %eax
-+ CFI_ADJUST_CFA_OFFSET 4
-+ SAVE_ALL
-+ FIXUP_ESPFIX_STACK # %eax == %esp
-+ xorl %edx,%edx # zero error code
-+ call do_nmi
-+ RESTORE_REGS
-+ lss 12+4(%esp), %esp # back to espfix stack
-+ CFI_ADJUST_CFA_OFFSET -24
-+1: INTERRUPT_RETURN
-+ CFI_ENDPROC
-+.section __ex_table,"a"
-+ .align 4
-+ .long 1b,iret_exc
-+.previous
-+#else
-+KPROBE_ENTRY(nmi)
-+ RING0_INT_FRAME
-+ pushl %eax
-+ CFI_ADJUST_CFA_OFFSET 4
-+ SAVE_ALL
-+ xorl %edx,%edx # zero error code
-+ movl %esp,%eax # pt_regs pointer
-+ call do_nmi
-+ orl $NMI_MASK, PT_EFLAGS(%esp)
-+ jmp restore_all
-+ CFI_ENDPROC
-+#endif
-+KPROBE_END(nmi)
-+
-+#ifdef CONFIG_PARAVIRT
-+ENTRY(native_iret)
-+1: iret
-+.section __ex_table,"a"
-+ .align 4
-+ .long 1b,iret_exc
-+.previous
-+END(native_iret)
-+
-+ENTRY(native_irq_enable_sysexit)
-+ sti
-+ sysexit
-+END(native_irq_enable_sysexit)
-+#endif
-+
-+KPROBE_ENTRY(int3)
-+ RING0_INT_FRAME
-+ pushl $-1 # mark this as an int
-+ CFI_ADJUST_CFA_OFFSET 4
-+ SAVE_ALL
-+ xorl %edx,%edx # zero error code
-+ movl %esp,%eax # pt_regs pointer
-+ call do_int3
-+ jmp ret_from_exception
-+ CFI_ENDPROC
-+KPROBE_END(int3)
-+
-+ENTRY(overflow)
-+ RING0_INT_FRAME
-+ pushl $0
-+ CFI_ADJUST_CFA_OFFSET 4
-+ pushl $do_overflow
-+ CFI_ADJUST_CFA_OFFSET 4
-+ jmp error_code
-+ CFI_ENDPROC
-+END(overflow)
-+
-+ENTRY(bounds)
-+ RING0_INT_FRAME
-+ pushl $0
-+ CFI_ADJUST_CFA_OFFSET 4
-+ pushl $do_bounds
-+ CFI_ADJUST_CFA_OFFSET 4
-+ jmp error_code
-+ CFI_ENDPROC
-+END(bounds)
-+
-+ENTRY(invalid_op)
-+ RING0_INT_FRAME
-+ pushl $0
-+ CFI_ADJUST_CFA_OFFSET 4
-+ pushl $do_invalid_op
-+ CFI_ADJUST_CFA_OFFSET 4
-+ jmp error_code
-+ CFI_ENDPROC
-+END(invalid_op)
-+
-+ENTRY(coprocessor_segment_overrun)
-+ RING0_INT_FRAME
-+ pushl $0
-+ CFI_ADJUST_CFA_OFFSET 4
-+ pushl $do_coprocessor_segment_overrun
-+ CFI_ADJUST_CFA_OFFSET 4
-+ jmp error_code
-+ CFI_ENDPROC
-+END(coprocessor_segment_overrun)
-+
-+ENTRY(invalid_TSS)
-+ RING0_EC_FRAME
-+ pushl $do_invalid_TSS
-+ CFI_ADJUST_CFA_OFFSET 4
-+ jmp error_code
-+ CFI_ENDPROC
-+END(invalid_TSS)
-+
-+ENTRY(segment_not_present)
-+ RING0_EC_FRAME
-+ pushl $do_segment_not_present
-+ CFI_ADJUST_CFA_OFFSET 4
-+ jmp error_code
-+ CFI_ENDPROC
-+END(segment_not_present)
-+
-+ENTRY(stack_segment)
-+ RING0_EC_FRAME
-+ pushl $do_stack_segment
-+ CFI_ADJUST_CFA_OFFSET 4
-+ jmp error_code
-+ CFI_ENDPROC
-+END(stack_segment)
-+
-+KPROBE_ENTRY(general_protection)
-+ RING0_EC_FRAME
-+ pushl $do_general_protection
-+ CFI_ADJUST_CFA_OFFSET 4
-+ jmp error_code
-+ CFI_ENDPROC
-+KPROBE_END(general_protection)
-+
-+ENTRY(alignment_check)
-+ RING0_EC_FRAME
-+ pushl $do_alignment_check
-+ CFI_ADJUST_CFA_OFFSET 4
-+ jmp error_code
-+ CFI_ENDPROC
-+END(alignment_check)
-+
-+ENTRY(divide_error)
-+ RING0_INT_FRAME
-+ pushl $0 # no error code
-+ CFI_ADJUST_CFA_OFFSET 4
-+ pushl $do_divide_error
-+ CFI_ADJUST_CFA_OFFSET 4
-+ jmp error_code
-+ CFI_ENDPROC
-+END(divide_error)
-+
-+#ifdef CONFIG_X86_MCE
-+ENTRY(machine_check)
-+ RING0_INT_FRAME
-+ pushl $0
-+ CFI_ADJUST_CFA_OFFSET 4
-+ pushl machine_check_vector
-+ CFI_ADJUST_CFA_OFFSET 4
-+ jmp error_code
-+ CFI_ENDPROC
-+END(machine_check)
-+#endif
-+
-+#ifndef CONFIG_XEN
-+ENTRY(spurious_interrupt_bug)
-+ RING0_INT_FRAME
-+ pushl $0
-+ CFI_ADJUST_CFA_OFFSET 4
-+ pushl $do_spurious_interrupt_bug
-+ CFI_ADJUST_CFA_OFFSET 4
-+ jmp error_code
-+ CFI_ENDPROC
-+#endif /* !CONFIG_XEN */
-+
-+ENTRY(fixup_4gb_segment)
-+ RING0_EC_FRAME
-+ pushl $do_fixup_4gb_segment
-+ CFI_ADJUST_CFA_OFFSET 4
-+ jmp error_code
-+ CFI_ENDPROC
-+END(spurious_interrupt_bug)
-+
-+ENTRY(kernel_thread_helper)
-+ pushl $0 # fake return address for unwinder
-+ CFI_STARTPROC
-+ movl %edx,%eax
-+ push %edx
-+ CFI_ADJUST_CFA_OFFSET 4
-+ call *%ebx
-+ push %eax
-+ CFI_ADJUST_CFA_OFFSET 4
-+ call do_exit
-+ CFI_ENDPROC
-+ENDPROC(kernel_thread_helper)
-+
-+.section .rodata,"a"
-+.align 4
-+#include "syscall_table.S"
-+
-+syscall_table_size=(.-sys_call_table)
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/fixup.c ubuntu-gutsy-xen/arch/i386/kernel/fixup.c
---- ubuntu-gutsy/arch/i386/kernel/fixup.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/fixup.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,88 @@
-+/******************************************************************************
-+ * fixup.c
-+ *
-+ * Binary-rewriting of certain IA32 instructions, on notification by Xen.
-+ * Used to avoid repeated slow emulation of common instructions used by the
-+ * user-space TLS (Thread-Local Storage) libraries.
-+ *
-+ * **** NOTE ****
-+ * Issues with the binary rewriting have caused it to be removed. Instead
-+ * we rely on Xen's emulator to boot the kernel, and then print a banner
-+ * message recommending that the user disables /lib/tls.
-+ *
-+ * Copyright (c) 2004, K A Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/kernel.h>
-+#include <linux/delay.h>
-+#include <linux/version.h>
-+
-+#define DP(_f, _args...) printk(KERN_ALERT " " _f "\n" , ## _args )
-+
-+fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
-+{
-+ static unsigned long printed = 0;
-+ char info[100];
-+ int i;
-+
-+ /* Ignore statically-linked init. */
-+ if (current->tgid == 1)
-+ return;
-+
-+ HYPERVISOR_vm_assist(
-+ VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify);
-+
-+ if (test_and_set_bit(0, &printed))
-+ return;
-+
-+ sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
-+
-+ DP("");
-+ DP("***************************************************************");
-+ DP("***************************************************************");
-+ DP("** WARNING: Currently emulating unsupported memory accesses **");
-+ DP("** in /lib/tls glibc libraries. The emulation is **");
-+ DP("** slow. To ensure full performance you should **");
-+ DP("** install a 'xen-friendly' (nosegneg) version of **");
-+ DP("** the library, or disable tls support by executing **");
-+ DP("** the following as root: **");
-+ DP("** mv /lib/tls /lib/tls.disabled **");
-+ DP("** Offending process: %-38.38s **", info);
-+ DP("***************************************************************");
-+ DP("***************************************************************");
-+ DP("");
-+
-+ for (i = 5; i > 0; i--) {
-+ touch_softlockup_watchdog();
-+ printk("Pausing... %d", i);
-+ mdelay(1000);
-+ printk("\b\b\b\b\b\b\b\b\b\b\b\b");
-+ }
-+
-+ printk("Continuing...\n\n");
-+}
-+
-+static int __init fixup_init(void)
-+{
-+ HYPERVISOR_vm_assist(
-+ VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
-+ return 0;
-+}
-+__initcall(fixup_init);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/head-xen.S ubuntu-gutsy-xen/arch/i386/kernel/head-xen.S
---- ubuntu-gutsy/arch/i386/kernel/head-xen.S 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/head-xen.S 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,160 @@
-+
-+
-+.text
-+#include <linux/elfnote.h>
-+#include <linux/threads.h>
-+#include <linux/linkage.h>
-+#include <asm/segment.h>
-+#include <asm/page.h>
-+#include <asm/cache.h>
-+#include <asm/thread_info.h>
-+#include <asm/asm-offsets.h>
-+#include <asm/boot.h>
-+#include <asm/dwarf2.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/elfnote.h>
-+
-+/*
-+ * References to members of the new_cpu_data structure.
-+ */
-+
-+#define X86 new_cpu_data+CPUINFO_x86
-+#define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
-+#define X86_MODEL new_cpu_data+CPUINFO_x86_model
-+#define X86_MASK new_cpu_data+CPUINFO_x86_mask
-+#define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
-+#define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
-+#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
-+#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
-+
-+.section .text.head,"ax",@progbits
-+#define VIRT_ENTRY_OFFSET 0x0
-+.org VIRT_ENTRY_OFFSET
-+ENTRY(startup_32)
-+ movl %esi,xen_start_info
-+ cld
-+
-+ /* Set up the stack pointer */
-+ movl $(init_thread_union+THREAD_SIZE),%esp
-+
-+ movl %ss,%eax
-+ movl %eax,%fs # gets reset once there's real percpu
-+
-+ /* get vendor info */
-+ xorl %eax,%eax # call CPUID with 0 -> return vendor ID
-+ XEN_CPUID
-+ movl %eax,X86_CPUID # save CPUID level
-+ movl %ebx,X86_VENDOR_ID # lo 4 chars
-+ movl %edx,X86_VENDOR_ID+4 # next 4 chars
-+ movl %ecx,X86_VENDOR_ID+8 # last 4 chars
-+
-+ movl $1,%eax # Use the CPUID instruction to get CPU type
-+ XEN_CPUID
-+ movb %al,%cl # save reg for future use
-+ andb $0x0f,%ah # mask processor family
-+ movb %ah,X86
-+ andb $0xf0,%al # mask model
-+ shrb $4,%al
-+ movb %al,X86_MODEL
-+ andb $0x0f,%cl # mask mask revision
-+ movb %cl,X86_MASK
-+ movl %edx,X86_CAPABILITY
-+
-+ movb $1,X86_HARD_MATH
-+
-+ xorl %eax,%eax # Clear GS
-+ movl %eax,%gs
-+
-+ cld # gcc2 wants the direction flag cleared at all times
-+
-+ pushl $0 # fake return address for unwinder
-+ jmp start_kernel
-+
-+#define HYPERCALL_PAGE_OFFSET 0x1000
-+.org HYPERCALL_PAGE_OFFSET
-+ENTRY(hypercall_page)
-+ CFI_STARTPROC
-+.skip 0x1000
-+ CFI_ENDPROC
-+
-+/*
-+ * Real beginning of normal "text" segment
-+ */
-+ENTRY(stext)
-+ENTRY(_stext)
-+
-+/*
-+ * BSS section
-+ */
-+.section ".bss.page_aligned","w"
-+ENTRY(empty_zero_page)
-+ .fill 4096,1,0
-+
-+/*
-+ * This starts the data section.
-+ */
-+.data
-+
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+/*
-+ * __xen_guest information
-+ */
-+.macro utoa value
-+ .if (\value) < 0 || (\value) >= 0x10
-+ utoa (((\value)>>4)&0x0fffffff)
-+ .endif
-+ .if ((\value) & 0xf) < 10
-+ .byte '0' + ((\value) & 0xf)
-+ .else
-+ .byte 'A' + ((\value) & 0xf) - 10
-+ .endif
-+.endm
-+
-+.section __xen_guest
-+ .ascii "GUEST_OS=linux,GUEST_VER=2.6"
-+ .ascii ",XEN_VER=xen-3.0"
-+ .ascii ",VIRT_BASE=0x"
-+ utoa __PAGE_OFFSET
-+ .ascii ",ELF_PADDR_OFFSET=0x"
-+ utoa __PAGE_OFFSET
-+ .ascii ",VIRT_ENTRY=0x"
-+ utoa (__PAGE_OFFSET + LOAD_PHYSICAL_ADDR + VIRT_ENTRY_OFFSET)
-+ .ascii ",HYPERCALL_PAGE=0x"
-+ utoa ((LOAD_PHYSICAL_ADDR+HYPERCALL_PAGE_OFFSET)>>PAGE_SHIFT)
-+ .ascii ",FEATURES=writable_page_tables"
-+ .ascii "|writable_descriptor_tables"
-+ .ascii "|auto_translated_physmap"
-+ .ascii "|pae_pgdir_above_4gb"
-+ .ascii "|supervisor_mode_kernel"
-+#ifdef CONFIG_X86_PAE
-+ .ascii ",PAE=yes[extended-cr3]"
-+#else
-+ .ascii ",PAE=no"
-+#endif
-+ .ascii ",LOADER=generic"
-+ .byte 0
-+#endif /* CONFIG_XEN_COMPAT <= 0x030002 */
-+
-+
-+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "linux")
-+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "2.6")
-+ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
-+ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .long, __PAGE_OFFSET)
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, __PAGE_OFFSET)
-+#else
-+ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, 0)
-+#endif
-+ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long, startup_32)
-+ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long, hypercall_page)
-+ ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW, .long, HYPERVISOR_VIRT_START)
-+ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
-+#ifdef CONFIG_X86_PAE
-+ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "yes")
-+ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT)
-+#else
-+ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "no")
-+ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .long, _PAGE_PRESENT,_PAGE_PRESENT)
-+#endif
-+ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
-+ ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1)
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/init_task-xen.c ubuntu-gutsy-xen/arch/i386/kernel/init_task-xen.c
---- ubuntu-gutsy/arch/i386/kernel/init_task-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/init_task-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,51 @@
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/init.h>
-+#include <linux/init_task.h>
-+#include <linux/fs.h>
-+#include <linux/mqueue.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/desc.h>
-+
-+static struct fs_struct init_fs = INIT_FS;
-+static struct files_struct init_files = INIT_FILES;
-+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-+
-+#define swapper_pg_dir ((pgd_t *)NULL)
-+struct mm_struct init_mm = INIT_MM(init_mm);
-+#undef swapper_pg_dir
-+
-+EXPORT_SYMBOL(init_mm);
-+
-+/*
-+ * Initial thread structure.
-+ *
-+ * We need to make sure that this is THREAD_SIZE aligned due to the
-+ * way process stacks are handled. This is done by having a special
-+ * "init_task" linker map entry..
-+ */
-+union thread_union init_thread_union
-+ __attribute__((__section__(".data.init_task"))) =
-+ { INIT_THREAD_INFO(init_task) };
-+
-+/*
-+ * Initial task structure.
-+ *
-+ * All other task structs will be allocated on slabs in fork.c
-+ */
-+struct task_struct init_task = INIT_TASK(init_task);
-+
-+EXPORT_SYMBOL(init_task);
-+
-+#ifndef CONFIG_X86_NO_TSS
-+/*
-+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
-+ * no more per-task TSS's.
-+ */
-+DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
-+#endif
-+
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/io_apic-xen.c ubuntu-gutsy-xen/arch/i386/kernel/io_apic-xen.c
---- ubuntu-gutsy/arch/i386/kernel/io_apic-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/io_apic-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,2949 @@
-+/*
-+ * Intel IO-APIC support for multi-Pentium hosts.
-+ *
-+ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
-+ *
-+ * Many thanks to Stig Venaas for trying out countless experimental
-+ * patches and reporting/debugging problems patiently!
-+ *
-+ * (c) 1999, Multiple IO-APIC support, developed by
-+ * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
-+ * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
-+ * further tested and cleaned up by Zach Brown <zab@redhat.com>
-+ * and Ingo Molnar <mingo@redhat.com>
-+ *
-+ * Fixes
-+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
-+ * thanks to Eric Gilmore
-+ * and Rolf G. Tews
-+ * for testing these extensively
-+ * Paul Diefenbaugh : Added full ACPI support
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/sched.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/compiler.h>
-+#include <linux/acpi.h>
-+#include <linux/module.h>
-+#include <linux/sysdev.h>
-+#include <linux/pci.h>
-+#include <linux/msi.h>
-+#include <linux/htirq.h>
-+#include <linux/freezer.h>
-+#include <linux/kthread.h>
-+
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/desc.h>
-+#include <asm/timer.h>
-+#include <asm/i8259.h>
-+#include <asm/nmi.h>
-+#include <asm/msidef.h>
-+#include <asm/hypertransport.h>
-+
-+#include <mach_apic.h>
-+#include <mach_apicdef.h>
-+
-+#include "io_ports.h"
-+
-+#ifdef CONFIG_XEN
-+#include <xen/interface/xen.h>
-+#include <xen/interface/physdev.h>
-+
-+/* Fake i8259 */
-+#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
-+#define disable_8259A_irq(_irq) ((void)0)
-+#define i8259A_irq_pending(_irq) (0)
-+
-+unsigned long io_apic_irqs;
-+
-+#define clear_IO_APIC() ((void)0)
-+#endif /* CONFIG_XEN */
-+
-+int (*ioapic_renumber_irq)(int ioapic, int irq);
-+atomic_t irq_mis_count;
-+
-+/* Where if anywhere is the i8259 connect in external int mode */
-+static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
-+
-+static DEFINE_SPINLOCK(ioapic_lock);
-+static DEFINE_SPINLOCK(vector_lock);
-+
-+#ifndef CONFIG_XEN
-+int timer_over_8254 __initdata = 1;
-+#endif
-+
-+/*
-+ * Is the SiS APIC rmw bug present ?
-+ * -1 = don't know, 0 = no, 1 = yes
-+ */
-+int sis_apic_bug = -1;
-+
-+/*
-+ * # of IRQ routing registers
-+ */
-+int nr_ioapic_registers[MAX_IO_APICS];
-+
-+#ifndef CONFIG_XEN
-+static int disable_timer_pin_1 __initdata;
-+#endif
-+
-+/*
-+ * Rough estimation of how many shared IRQs there are, can
-+ * be changed anytime.
-+ */
-+#define MAX_PLUS_SHARED_IRQS NR_IRQS
-+#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
-+
-+/*
-+ * This is performance-critical, we want to do it O(1)
-+ *
-+ * the indexing order of this array favors 1:1 mappings
-+ * between pins and IRQs.
-+ */
-+
-+static struct irq_pin_list {
-+ int apic, pin, next;
-+} irq_2_pin[PIN_MAP_SIZE];
-+
-+#ifndef CONFIG_XEN
-+struct io_apic {
-+ unsigned int index;
-+ unsigned int unused[3];
-+ unsigned int data;
-+};
-+
-+static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
-+{
-+ return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
-+ + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK);
-+}
-+#endif
-+
-+static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
-+{
-+#ifndef CONFIG_XEN
-+ struct io_apic __iomem *io_apic = io_apic_base(apic);
-+ writel(reg, &io_apic->index);
-+ return readl(&io_apic->data);
-+#else
-+ struct physdev_apic apic_op;
-+ int ret;
-+
-+ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
-+ apic_op.reg = reg;
-+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
-+ if (ret)
-+ return ret;
-+ return apic_op.value;
-+#endif
-+}
-+
-+static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
-+{
-+#ifndef CONFIG_XEN
-+ struct io_apic __iomem *io_apic = io_apic_base(apic);
-+ writel(reg, &io_apic->index);
-+ writel(value, &io_apic->data);
-+#else
-+ struct physdev_apic apic_op;
-+
-+ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
-+ apic_op.reg = reg;
-+ apic_op.value = value;
-+ HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
-+#endif
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Re-write a value: to be used for read-modify-write
-+ * cycles where the read already set up the index register.
-+ *
-+ * Older SiS APIC requires we rewrite the index register
-+ */
-+static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
-+{
-+ volatile struct io_apic __iomem *io_apic = io_apic_base(apic);
-+ if (sis_apic_bug)
-+ writel(reg, &io_apic->index);
-+ writel(value, &io_apic->data);
-+}
-+#else
-+#define io_apic_modify io_apic_write
-+#endif
-+
-+union entry_union {
-+ struct { u32 w1, w2; };
-+ struct IO_APIC_route_entry entry;
-+};
-+
-+static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
-+{
-+ union entry_union eu;
-+ unsigned long flags;
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
-+ eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ return eu.entry;
-+}
-+
-+/*
-+ * When we write a new IO APIC routing entry, we need to write the high
-+ * word first! If the mask bit in the low word is clear, we will enable
-+ * the interrupt, and we need to make sure the entry is fully populated
-+ * before that happens.
-+ */
-+static void
-+__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
-+{
-+ union entry_union eu;
-+ eu.entry = e;
-+ io_apic_write(apic, 0x11 + 2*pin, eu.w2);
-+ io_apic_write(apic, 0x10 + 2*pin, eu.w1);
-+}
-+
-+static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
-+{
-+ unsigned long flags;
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ __ioapic_write_entry(apic, pin, e);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * When we mask an IO APIC routing entry, we need to write the low
-+ * word first, in order to set the mask bit before we change the
-+ * high bits!
-+ */
-+static void ioapic_mask_entry(int apic, int pin)
-+{
-+ unsigned long flags;
-+ union entry_union eu = { .entry.mask = 1 };
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0x10 + 2*pin, eu.w1);
-+ io_apic_write(apic, 0x11 + 2*pin, eu.w2);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+#endif
-+
-+/*
-+ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
-+ * shared ISA-space IRQs, so we have to support them. We are super
-+ * fast in the common case, and fast for shared ISA-space IRQs.
-+ */
-+static void add_pin_to_irq(unsigned int irq, int apic, int pin)
-+{
-+ static int first_free_entry = NR_IRQS;
-+ struct irq_pin_list *entry = irq_2_pin + irq;
-+
-+ while (entry->next)
-+ entry = irq_2_pin + entry->next;
-+
-+ if (entry->pin != -1) {
-+ entry->next = first_free_entry;
-+ entry = irq_2_pin + entry->next;
-+ if (++first_free_entry >= PIN_MAP_SIZE)
-+ panic("io_apic.c: whoops");
-+ }
-+ entry->apic = apic;
-+ entry->pin = pin;
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Reroute an IRQ to a different pin.
-+ */
-+static void __init replace_pin_at_irq(unsigned int irq,
-+ int oldapic, int oldpin,
-+ int newapic, int newpin)
-+{
-+ struct irq_pin_list *entry = irq_2_pin + irq;
-+
-+ while (1) {
-+ if (entry->apic == oldapic && entry->pin == oldpin) {
-+ entry->apic = newapic;
-+ entry->pin = newpin;
-+ }
-+ if (!entry->next)
-+ break;
-+ entry = irq_2_pin + entry->next;
-+ }
-+}
-+
-+static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
-+{
-+ struct irq_pin_list *entry = irq_2_pin + irq;
-+ unsigned int pin, reg;
-+
-+ for (;;) {
-+ pin = entry->pin;
-+ if (pin == -1)
-+ break;
-+ reg = io_apic_read(entry->apic, 0x10 + pin*2);
-+ reg &= ~disable;
-+ reg |= enable;
-+ io_apic_modify(entry->apic, 0x10 + pin*2, reg);
-+ if (!entry->next)
-+ break;
-+ entry = irq_2_pin + entry->next;
-+ }
-+}
-+
-+/* mask = 1 */
-+static void __mask_IO_APIC_irq (unsigned int irq)
-+{
-+ __modify_IO_APIC_irq(irq, 0x00010000, 0);
-+}
-+
-+/* mask = 0 */
-+static void __unmask_IO_APIC_irq (unsigned int irq)
-+{
-+ __modify_IO_APIC_irq(irq, 0, 0x00010000);
-+}
-+
-+/* mask = 1, trigger = 0 */
-+static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
-+{
-+ __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000);
-+}
-+
-+/* mask = 0, trigger = 1 */
-+static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
-+{
-+ __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000);
-+}
-+
-+static void mask_IO_APIC_irq (unsigned int irq)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ __mask_IO_APIC_irq(irq);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void unmask_IO_APIC_irq (unsigned int irq)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ __unmask_IO_APIC_irq(irq);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
-+{
-+ struct IO_APIC_route_entry entry;
-+
-+ /* Check delivery_mode to be sure we're not clearing an SMI pin */
-+ entry = ioapic_read_entry(apic, pin);
-+ if (entry.delivery_mode == dest_SMI)
-+ return;
-+
-+ /*
-+ * Disable it in the IO-APIC irq-routing table:
-+ */
-+ ioapic_mask_entry(apic, pin);
-+}
-+
-+static void clear_IO_APIC (void)
-+{
-+ int apic, pin;
-+
-+ for (apic = 0; apic < nr_ioapics; apic++)
-+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
-+ clear_IO_APIC_pin(apic, pin);
-+}
-+
-+#ifdef CONFIG_SMP
-+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
-+{
-+ unsigned long flags;
-+ int pin;
-+ struct irq_pin_list *entry = irq_2_pin + irq;
-+ unsigned int apicid_value;
-+ cpumask_t tmp;
-+
-+ cpus_and(tmp, cpumask, cpu_online_map);
-+ if (cpus_empty(tmp))
-+ tmp = TARGET_CPUS;
-+
-+ cpus_and(cpumask, tmp, CPU_MASK_ALL);
-+
-+ apicid_value = cpu_mask_to_apicid(cpumask);
-+ /* Prepare to do the io_apic_write */
-+ apicid_value = apicid_value << 24;
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ for (;;) {
-+ pin = entry->pin;
-+ if (pin == -1)
-+ break;
-+ io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
-+ if (!entry->next)
-+ break;
-+ entry = irq_2_pin + entry->next;
-+ }
-+ irq_desc[irq].affinity = cpumask;
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+#if defined(CONFIG_IRQBALANCE)
-+# include <asm/processor.h> /* kernel_thread() */
-+# include <linux/kernel_stat.h> /* kstat */
-+# include <linux/slab.h> /* kmalloc() */
-+# include <linux/timer.h> /* time_after() */
-+
-+#ifdef CONFIG_BALANCED_IRQ_DEBUG
-+# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
-+# define Dprintk(x...) do { TDprintk(x); } while (0)
-+# else
-+# define TDprintk(x...)
-+# define Dprintk(x...)
-+# endif
-+
-+#define IRQBALANCE_CHECK_ARCH -999
-+#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
-+#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
-+#define BALANCED_IRQ_MORE_DELTA (HZ/10)
-+#define BALANCED_IRQ_LESS_DELTA (HZ)
-+
-+static int irqbalance_disabled __read_mostly = IRQBALANCE_CHECK_ARCH;
-+static int physical_balance __read_mostly;
-+static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;
-+
-+static struct irq_cpu_info {
-+ unsigned long * last_irq;
-+ unsigned long * irq_delta;
-+ unsigned long irq;
-+} irq_cpu_data[NR_CPUS];
-+
-+#define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq)
-+#define LAST_CPU_IRQ(cpu,irq) (irq_cpu_data[cpu].last_irq[irq])
-+#define IRQ_DELTA(cpu,irq) (irq_cpu_data[cpu].irq_delta[irq])
-+
-+#define IDLE_ENOUGH(cpu,now) \
-+ (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
-+
-+#define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask)
-+
-+#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
-+
-+static cpumask_t balance_irq_affinity[NR_IRQS] = {
-+ [0 ... NR_IRQS-1] = CPU_MASK_ALL
-+};
-+
-+void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
-+{
-+ balance_irq_affinity[irq] = mask;
-+}
-+
-+static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
-+ unsigned long now, int direction)
-+{
-+ int search_idle = 1;
-+ int cpu = curr_cpu;
-+
-+ goto inside;
-+
-+ do {
-+ if (unlikely(cpu == curr_cpu))
-+ search_idle = 0;
-+inside:
-+ if (direction == 1) {
-+ cpu++;
-+ if (cpu >= NR_CPUS)
-+ cpu = 0;
-+ } else {
-+ cpu--;
-+ if (cpu == -1)
-+ cpu = NR_CPUS-1;
-+ }
-+ } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
-+ (search_idle && !IDLE_ENOUGH(cpu,now)));
-+
-+ return cpu;
-+}
-+
-+static inline void balance_irq(int cpu, int irq)
-+{
-+ unsigned long now = jiffies;
-+ cpumask_t allowed_mask;
-+ unsigned int new_cpu;
-+
-+ if (irqbalance_disabled)
-+ return;
-+
-+ cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
-+ new_cpu = move(cpu, allowed_mask, now, 1);
-+ if (cpu != new_cpu) {
-+ set_pending_irq(irq, cpumask_of_cpu(new_cpu));
-+ }
-+}
-+
-+static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
-+{
-+ int i, j;
-+ Dprintk("Rotating IRQs among CPUs.\n");
-+ for_each_online_cpu(i) {
-+ for (j = 0; j < NR_IRQS; j++) {
-+ if (!irq_desc[j].action)
-+ continue;
-+ /* Is it a significant load ? */
-+ if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
-+ useful_load_threshold)
-+ continue;
-+ balance_irq(i, j);
-+ }
-+ }
-+ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
-+ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
-+ return;
-+}
-+
-+static void do_irq_balance(void)
-+{
-+ int i, j;
-+ unsigned long max_cpu_irq = 0, min_cpu_irq = (~0);
-+ unsigned long move_this_load = 0;
-+ int max_loaded = 0, min_loaded = 0;
-+ int load;
-+ unsigned long useful_load_threshold = balanced_irq_interval + 10;
-+ int selected_irq;
-+ int tmp_loaded, first_attempt = 1;
-+ unsigned long tmp_cpu_irq;
-+ unsigned long imbalance = 0;
-+ cpumask_t allowed_mask, target_cpu_mask, tmp;
-+
-+ for_each_possible_cpu(i) {
-+ int package_index;
-+ CPU_IRQ(i) = 0;
-+ if (!cpu_online(i))
-+ continue;
-+ package_index = CPU_TO_PACKAGEINDEX(i);
-+ for (j = 0; j < NR_IRQS; j++) {
-+ unsigned long value_now, delta;
-+ /* Is this an active IRQ or balancing disabled ? */
-+ if (!irq_desc[j].action || irq_balancing_disabled(j))
-+ continue;
-+ if ( package_index == i )
-+ IRQ_DELTA(package_index,j) = 0;
-+ /* Determine the total count per processor per IRQ */
-+ value_now = (unsigned long) kstat_cpu(i).irqs[j];
-+
-+ /* Determine the activity per processor per IRQ */
-+ delta = value_now - LAST_CPU_IRQ(i,j);
-+
-+ /* Update last_cpu_irq[][] for the next time */
-+ LAST_CPU_IRQ(i,j) = value_now;
-+
-+ /* Ignore IRQs whose rate is less than the clock */
-+ if (delta < useful_load_threshold)
-+ continue;
-+ /* update the load for the processor or package total */
-+ IRQ_DELTA(package_index,j) += delta;
-+
-+ /* Keep track of the higher numbered sibling as well */
-+ if (i != package_index)
-+ CPU_IRQ(i) += delta;
-+ /*
-+ * We have sibling A and sibling B in the package
-+ *
-+ * cpu_irq[A] = load for cpu A + load for cpu B
-+ * cpu_irq[B] = load for cpu B
-+ */
-+ CPU_IRQ(package_index) += delta;
-+ }
-+ }
-+ /* Find the least loaded processor package */
-+ for_each_online_cpu(i) {
-+ if (i != CPU_TO_PACKAGEINDEX(i))
-+ continue;
-+ if (min_cpu_irq > CPU_IRQ(i)) {
-+ min_cpu_irq = CPU_IRQ(i);
-+ min_loaded = i;
-+ }
-+ }
-+ max_cpu_irq = ULONG_MAX;
-+
-+tryanothercpu:
-+ /* Look for heaviest loaded processor.
-+ * We may come back to get the next heaviest loaded processor.
-+ * Skip processors with trivial loads.
-+ */
-+ tmp_cpu_irq = 0;
-+ tmp_loaded = -1;
-+ for_each_online_cpu(i) {
-+ if (i != CPU_TO_PACKAGEINDEX(i))
-+ continue;
-+ if (max_cpu_irq <= CPU_IRQ(i))
-+ continue;
-+ if (tmp_cpu_irq < CPU_IRQ(i)) {
-+ tmp_cpu_irq = CPU_IRQ(i);
-+ tmp_loaded = i;
-+ }
-+ }
-+
-+ if (tmp_loaded == -1) {
-+ /* In the case of small number of heavy interrupt sources,
-+ * loading some of the cpus too much. We use Ingo's original
-+ * approach to rotate them around.
-+ */
-+ if (!first_attempt && imbalance >= useful_load_threshold) {
-+ rotate_irqs_among_cpus(useful_load_threshold);
-+ return;
-+ }
-+ goto not_worth_the_effort;
-+ }
-+
-+ first_attempt = 0; /* heaviest search */
-+ max_cpu_irq = tmp_cpu_irq; /* load */
-+ max_loaded = tmp_loaded; /* processor */
-+ imbalance = (max_cpu_irq - min_cpu_irq) / 2;
-+
-+ Dprintk("max_loaded cpu = %d\n", max_loaded);
-+ Dprintk("min_loaded cpu = %d\n", min_loaded);
-+ Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq);
-+ Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq);
-+ Dprintk("load imbalance = %lu\n", imbalance);
-+
-+ /* if imbalance is less than approx 10% of max load, then
-+ * observe diminishing returns action. - quit
-+ */
-+ if (imbalance < (max_cpu_irq >> 3)) {
-+ Dprintk("Imbalance too trivial\n");
-+ goto not_worth_the_effort;
-+ }
-+
-+tryanotherirq:
-+ /* if we select an IRQ to move that can't go where we want, then
-+ * see if there is another one to try.
-+ */
-+ move_this_load = 0;
-+ selected_irq = -1;
-+ for (j = 0; j < NR_IRQS; j++) {
-+ /* Is this an active IRQ? */
-+ if (!irq_desc[j].action)
-+ continue;
-+ if (imbalance <= IRQ_DELTA(max_loaded,j))
-+ continue;
-+ /* Try to find the IRQ that is closest to the imbalance
-+ * without going over.
-+ */
-+ if (move_this_load < IRQ_DELTA(max_loaded,j)) {
-+ move_this_load = IRQ_DELTA(max_loaded,j);
-+ selected_irq = j;
-+ }
-+ }
-+ if (selected_irq == -1) {
-+ goto tryanothercpu;
-+ }
-+
-+ imbalance = move_this_load;
-+
-+ /* For physical_balance case, we accumlated both load
-+ * values in the one of the siblings cpu_irq[],
-+ * to use the same code for physical and logical processors
-+ * as much as possible.
-+ *
-+ * NOTE: the cpu_irq[] array holds the sum of the load for
-+ * sibling A and sibling B in the slot for the lowest numbered
-+ * sibling (A), _AND_ the load for sibling B in the slot for
-+ * the higher numbered sibling.
-+ *
-+ * We seek the least loaded sibling by making the comparison
-+ * (A+B)/2 vs B
-+ */
-+ load = CPU_IRQ(min_loaded) >> 1;
-+ for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
-+ if (load > CPU_IRQ(j)) {
-+ /* This won't change cpu_sibling_map[min_loaded] */
-+ load = CPU_IRQ(j);
-+ min_loaded = j;
-+ }
-+ }
-+
-+ cpus_and(allowed_mask,
-+ cpu_online_map,
-+ balance_irq_affinity[selected_irq]);
-+ target_cpu_mask = cpumask_of_cpu(min_loaded);
-+ cpus_and(tmp, target_cpu_mask, allowed_mask);
-+
-+ if (!cpus_empty(tmp)) {
-+
-+ Dprintk("irq = %d moved to cpu = %d\n",
-+ selected_irq, min_loaded);
-+ /* mark for change destination */
-+ set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
-+
-+ /* Since we made a change, come back sooner to
-+ * check for more variation.
-+ */
-+ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
-+ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
-+ return;
-+ }
-+ goto tryanotherirq;
-+
-+not_worth_the_effort:
-+ /*
-+ * if we did not find an IRQ to move, then adjust the time interval
-+ * upward
-+ */
-+ balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
-+ balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
-+ Dprintk("IRQ worth rotating not found\n");
-+ return;
-+}
-+
-+static int balanced_irq(void *unused)
-+{
-+ int i;
-+ unsigned long prev_balance_time = jiffies;
-+ long time_remaining = balanced_irq_interval;
-+
-+ /* push everything to CPU 0 to give us a starting point. */
-+ for (i = 0 ; i < NR_IRQS ; i++) {
-+ irq_desc[i].pending_mask = cpumask_of_cpu(0);
-+ set_pending_irq(i, cpumask_of_cpu(0));
-+ }
-+
-+ for ( ; ; ) {
-+ time_remaining = schedule_timeout_interruptible(time_remaining);
-+ try_to_freeze();
-+ if (time_after(jiffies,
-+ prev_balance_time+balanced_irq_interval)) {
-+ preempt_disable();
-+ do_irq_balance();
-+ prev_balance_time = jiffies;
-+ time_remaining = balanced_irq_interval;
-+ preempt_enable();
-+ }
-+ }
-+ return 0;
-+}
-+
-+static int __init balanced_irq_init(void)
-+{
-+ int i;
-+ struct cpuinfo_x86 *c;
-+ cpumask_t tmp;
-+
-+ cpus_shift_right(tmp, cpu_online_map, 2);
-+ c = &boot_cpu_data;
-+ /* When not overwritten by the command line ask subarchitecture. */
-+ if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
-+ irqbalance_disabled = NO_BALANCE_IRQ;
-+ if (irqbalance_disabled)
-+ return 0;
-+
-+ /* disable irqbalance completely if there is only one processor online */
-+ if (num_online_cpus() < 2) {
-+ irqbalance_disabled = 1;
-+ return 0;
-+ }
-+ /*
-+ * Enable physical balance only if more than 1 physical processor
-+ * is present
-+ */
-+ if (smp_num_siblings > 1 && !cpus_empty(tmp))
-+ physical_balance = 1;
-+
-+ for_each_online_cpu(i) {
-+ irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
-+ irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
-+ if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
-+ printk(KERN_ERR "balanced_irq_init: out of memory");
-+ goto failed;
-+ }
-+ memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS);
-+ memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS);
-+ }
-+
-+ printk(KERN_INFO "Starting balanced_irq\n");
-+ if (!IS_ERR(kthread_run(balanced_irq, NULL, "kirqd")))
-+ return 0;
-+ printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
-+failed:
-+ for_each_possible_cpu(i) {
-+ kfree(irq_cpu_data[i].irq_delta);
-+ irq_cpu_data[i].irq_delta = NULL;
-+ kfree(irq_cpu_data[i].last_irq);
-+ irq_cpu_data[i].last_irq = NULL;
-+ }
-+ return 0;
-+}
-+
-+int __devinit irqbalance_disable(char *str)
-+{
-+ irqbalance_disabled = 1;
-+ return 1;
-+}
-+
-+__setup("noirqbalance", irqbalance_disable);
-+
-+late_initcall(balanced_irq_init);
-+#endif /* CONFIG_IRQBALANCE */
-+#endif /* CONFIG_SMP */
-+#endif
-+
-+#ifndef CONFIG_SMP
-+void fastcall send_IPI_self(int vector)
-+{
-+#ifndef CONFIG_XEN
-+ unsigned int cfg;
-+
-+ /*
-+ * Wait for idle.
-+ */
-+ apic_wait_icr_idle();
-+ cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
-+ /*
-+ * Send the IPI. The write to APIC_ICR fires this off.
-+ */
-+ apic_write_around(APIC_ICR, cfg);
-+#endif
-+}
-+#endif /* !CONFIG_SMP */
-+
-+
-+/*
-+ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
-+ * specific CPU-side IRQs.
-+ */
-+
-+#define MAX_PIRQS 8
-+static int pirq_entries [MAX_PIRQS];
-+static int pirqs_enabled;
-+int skip_ioapic_setup;
-+
-+static int __init ioapic_setup(char *str)
-+{
-+ skip_ioapic_setup = 1;
-+ return 1;
-+}
-+
-+__setup("noapic", ioapic_setup);
-+
-+static int __init ioapic_pirq_setup(char *str)
-+{
-+ int i, max;
-+ int ints[MAX_PIRQS+1];
-+
-+ get_options(str, ARRAY_SIZE(ints), ints);
-+
-+ for (i = 0; i < MAX_PIRQS; i++)
-+ pirq_entries[i] = -1;
-+
-+ pirqs_enabled = 1;
-+ apic_printk(APIC_VERBOSE, KERN_INFO
-+ "PIRQ redirection, working around broken MP-BIOS.\n");
-+ max = MAX_PIRQS;
-+ if (ints[0] < MAX_PIRQS)
-+ max = ints[0];
-+
-+ for (i = 0; i < max; i++) {
-+ apic_printk(APIC_VERBOSE, KERN_DEBUG
-+ "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
-+ /*
-+ * PIRQs are mapped upside down, usually.
-+ */
-+ pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
-+ }
-+ return 1;
-+}
-+
-+__setup("pirq=", ioapic_pirq_setup);
-+
-+/*
-+ * Find the IRQ entry number of a certain pin.
-+ */
-+static int find_irq_entry(int apic, int pin, int type)
-+{
-+ int i;
-+
-+ for (i = 0; i < mp_irq_entries; i++)
-+ if (mp_irqs[i].mpc_irqtype == type &&
-+ (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
-+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
-+ mp_irqs[i].mpc_dstirq == pin)
-+ return i;
-+
-+ return -1;
-+}
-+
-+/*
-+ * Find the pin to which IRQ[irq] (ISA) is connected
-+ */
-+static int __init find_isa_irq_pin(int irq, int type)
-+{
-+ int i;
-+
-+ for (i = 0; i < mp_irq_entries; i++) {
-+ int lbus = mp_irqs[i].mpc_srcbus;
-+
-+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
-+ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-+ mp_bus_id_to_type[lbus] == MP_BUS_MCA
-+ ) &&
-+ (mp_irqs[i].mpc_irqtype == type) &&
-+ (mp_irqs[i].mpc_srcbusirq == irq))
-+
-+ return mp_irqs[i].mpc_dstirq;
-+ }
-+ return -1;
-+}
-+
-+static int __init find_isa_irq_apic(int irq, int type)
-+{
-+ int i;
-+
-+ for (i = 0; i < mp_irq_entries; i++) {
-+ int lbus = mp_irqs[i].mpc_srcbus;
-+
-+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
-+ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-+ mp_bus_id_to_type[lbus] == MP_BUS_MCA
-+ ) &&
-+ (mp_irqs[i].mpc_irqtype == type) &&
-+ (mp_irqs[i].mpc_srcbusirq == irq))
-+ break;
-+ }
-+ if (i < mp_irq_entries) {
-+ int apic;
-+ for(apic = 0; apic < nr_ioapics; apic++) {
-+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
-+ return apic;
-+ }
-+ }
-+
-+ return -1;
-+}
-+
-+/*
-+ * Find a specific PCI IRQ entry.
-+ * Not an __init, possibly needed by modules
-+ */
-+static int pin_2_irq(int idx, int apic, int pin);
-+
-+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
-+{
-+ int apic, i, best_guess = -1;
-+
-+ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
-+ "slot:%d, pin:%d.\n", bus, slot, pin);
-+ if (mp_bus_id_to_pci_bus[bus] == -1) {
-+ printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
-+ return -1;
-+ }
-+ for (i = 0; i < mp_irq_entries; i++) {
-+ int lbus = mp_irqs[i].mpc_srcbus;
-+
-+ for (apic = 0; apic < nr_ioapics; apic++)
-+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
-+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
-+ break;
-+
-+ if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
-+ !mp_irqs[i].mpc_irqtype &&
-+ (bus == lbus) &&
-+ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
-+ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
-+
-+ if (!(apic || IO_APIC_IRQ(irq)))
-+ continue;
-+
-+ if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
-+ return irq;
-+ /*
-+ * Use the first all-but-pin matching entry as a
-+ * best-guess fuzzy result for broken mptables.
-+ */
-+ if (best_guess < 0)
-+ best_guess = irq;
-+ }
-+ }
-+ return best_guess;
-+}
-+EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
-+
-+/*
-+ * This function currently is only a helper for the i386 smp boot process where
-+ * we need to reprogram the ioredtbls to cater for the cpus which have come online
-+ * so mask in all cases should simply be TARGET_CPUS
-+ */
-+#ifdef CONFIG_SMP
-+#ifndef CONFIG_XEN
-+void __init setup_ioapic_dest(void)
-+{
-+ int pin, ioapic, irq, irq_entry;
-+
-+ if (skip_ioapic_setup == 1)
-+ return;
-+
-+ for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
-+ for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
-+ irq_entry = find_irq_entry(ioapic, pin, mp_INT);
-+ if (irq_entry == -1)
-+ continue;
-+ irq = pin_2_irq(irq_entry, ioapic, pin);
-+ set_ioapic_affinity_irq(irq, TARGET_CPUS);
-+ }
-+
-+ }
-+}
-+#endif /* !CONFIG_XEN */
-+#endif
-+
-+/*
-+ * EISA Edge/Level control register, ELCR
-+ */
-+static int EISA_ELCR(unsigned int irq)
-+{
-+ if (irq < 16) {
-+ unsigned int port = 0x4d0 + (irq >> 3);
-+ return (inb(port) >> (irq & 7)) & 1;
-+ }
-+ apic_printk(APIC_VERBOSE, KERN_INFO
-+ "Broken MPtable reports ISA irq %d\n", irq);
-+ return 0;
-+}
-+
-+/* EISA interrupts are always polarity zero and can be edge or level
-+ * trigger depending on the ELCR value. If an interrupt is listed as
-+ * EISA conforming in the MP table, that means its trigger type must
-+ * be read in from the ELCR */
-+
-+#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
-+#define default_EISA_polarity(idx) (0)
-+
-+/* ISA interrupts are always polarity zero edge triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_ISA_trigger(idx) (0)
-+#define default_ISA_polarity(idx) (0)
-+
-+/* PCI interrupts are always polarity one level triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_PCI_trigger(idx) (1)
-+#define default_PCI_polarity(idx) (1)
-+
-+/* MCA interrupts are always polarity zero level triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_MCA_trigger(idx) (1)
-+#define default_MCA_polarity(idx) (0)
-+
-+static int __init MPBIOS_polarity(int idx)
-+{
-+ int bus = mp_irqs[idx].mpc_srcbus;
-+ int polarity;
-+
-+ /*
-+ * Determine IRQ line polarity (high active or low active):
-+ */
-+ switch (mp_irqs[idx].mpc_irqflag & 3)
-+ {
-+ case 0: /* conforms, ie. bus-type dependent polarity */
-+ {
-+ switch (mp_bus_id_to_type[bus])
-+ {
-+ case MP_BUS_ISA: /* ISA pin */
-+ {
-+ polarity = default_ISA_polarity(idx);
-+ break;
-+ }
-+ case MP_BUS_EISA: /* EISA pin */
-+ {
-+ polarity = default_EISA_polarity(idx);
-+ break;
-+ }
-+ case MP_BUS_PCI: /* PCI pin */
-+ {
-+ polarity = default_PCI_polarity(idx);
-+ break;
-+ }
-+ case MP_BUS_MCA: /* MCA pin */
-+ {
-+ polarity = default_MCA_polarity(idx);
-+ break;
-+ }
-+ default:
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ polarity = 1;
-+ break;
-+ }
-+ }
-+ break;
-+ }
-+ case 1: /* high active */
-+ {
-+ polarity = 0;
-+ break;
-+ }
-+ case 2: /* reserved */
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ polarity = 1;
-+ break;
-+ }
-+ case 3: /* low active */
-+ {
-+ polarity = 1;
-+ break;
-+ }
-+ default: /* invalid */
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ polarity = 1;
-+ break;
-+ }
-+ }
-+ return polarity;
-+}
-+
-+static int MPBIOS_trigger(int idx)
-+{
-+ int bus = mp_irqs[idx].mpc_srcbus;
-+ int trigger;
-+
-+ /*
-+ * Determine IRQ trigger mode (edge or level sensitive):
-+ */
-+ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
-+ {
-+ case 0: /* conforms, ie. bus-type dependent */
-+ {
-+ switch (mp_bus_id_to_type[bus])
-+ {
-+ case MP_BUS_ISA: /* ISA pin */
-+ {
-+ trigger = default_ISA_trigger(idx);
-+ break;
-+ }
-+ case MP_BUS_EISA: /* EISA pin */
-+ {
-+ trigger = default_EISA_trigger(idx);
-+ break;
-+ }
-+ case MP_BUS_PCI: /* PCI pin */
-+ {
-+ trigger = default_PCI_trigger(idx);
-+ break;
-+ }
-+ case MP_BUS_MCA: /* MCA pin */
-+ {
-+ trigger = default_MCA_trigger(idx);
-+ break;
-+ }
-+ default:
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ trigger = 1;
-+ break;
-+ }
-+ }
-+ break;
-+ }
-+ case 1: /* edge */
-+ {
-+ trigger = 0;
-+ break;
-+ }
-+ case 2: /* reserved */
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ trigger = 1;
-+ break;
-+ }
-+ case 3: /* level */
-+ {
-+ trigger = 1;
-+ break;
-+ }
-+ default: /* invalid */
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ trigger = 0;
-+ break;
-+ }
-+ }
-+ return trigger;
-+}
-+
-+static inline int irq_polarity(int idx)
-+{
-+ return MPBIOS_polarity(idx);
-+}
-+
-+static inline int irq_trigger(int idx)
-+{
-+ return MPBIOS_trigger(idx);
-+}
-+
-+static int pin_2_irq(int idx, int apic, int pin)
-+{
-+ int irq, i;
-+ int bus = mp_irqs[idx].mpc_srcbus;
-+
-+ /*
-+ * Debugging check, we are in big trouble if this message pops up!
-+ */
-+ if (mp_irqs[idx].mpc_dstirq != pin)
-+ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
-+
-+ switch (mp_bus_id_to_type[bus])
-+ {
-+ case MP_BUS_ISA: /* ISA pin */
-+ case MP_BUS_EISA:
-+ case MP_BUS_MCA:
-+ {
-+ irq = mp_irqs[idx].mpc_srcbusirq;
-+ break;
-+ }
-+ case MP_BUS_PCI: /* PCI pin */
-+ {
-+ /*
-+ * PCI IRQs are mapped in order
-+ */
-+ i = irq = 0;
-+ while (i < apic)
-+ irq += nr_ioapic_registers[i++];
-+ irq += pin;
-+
-+ /*
-+ * For MPS mode, so far only needed by ES7000 platform
-+ */
-+ if (ioapic_renumber_irq)
-+ irq = ioapic_renumber_irq(apic, irq);
-+
-+ break;
-+ }
-+ default:
-+ {
-+ printk(KERN_ERR "unknown bus type %d.\n",bus);
-+ irq = 0;
-+ break;
-+ }
-+ }
-+
-+ /*
-+ * PCI IRQ command line redirection. Yes, limits are hardcoded.
-+ */
-+ if ((pin >= 16) && (pin <= 23)) {
-+ if (pirq_entries[pin-16] != -1) {
-+ if (!pirq_entries[pin-16]) {
-+ apic_printk(APIC_VERBOSE, KERN_DEBUG
-+ "disabling PIRQ%d\n", pin-16);
-+ } else {
-+ irq = pirq_entries[pin-16];
-+ apic_printk(APIC_VERBOSE, KERN_DEBUG
-+ "using PIRQ%d -> IRQ %d\n",
-+ pin-16, irq);
-+ }
-+ }
-+ }
-+ return irq;
-+}
-+
-+static inline int IO_APIC_irq_trigger(int irq)
-+{
-+ int apic, idx, pin;
-+
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+ idx = find_irq_entry(apic,pin,mp_INT);
-+ if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
-+ return irq_trigger(idx);
-+ }
-+ }
-+ /*
-+ * nonexistent IRQs are edge default
-+ */
-+ return 0;
-+}
-+
-+/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-+static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly; /* = { FIRST_DEVICE_VECTOR , 0 }; */
-+
-+static int __assign_irq_vector(int irq)
-+{
-+ int vector;
-+ struct physdev_irq irq_op;
-+
-+ BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
-+
-+ if (irq_vector[irq] > 0)
-+ return irq_vector[irq];
-+
-+ irq_op.irq = irq;
-+ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
-+ return -ENOSPC;
-+
-+ vector = irq_op.vector;
-+ irq_vector[irq] = vector;
-+
-+ return vector;
-+}
-+
-+static int assign_irq_vector(int irq)
-+{
-+ unsigned long flags;
-+ int vector;
-+
-+ spin_lock_irqsave(&vector_lock, flags);
-+ vector = __assign_irq_vector(irq);
-+ spin_unlock_irqrestore(&vector_lock, flags);
-+
-+ return vector;
-+}
-+
-+#ifndef CONFIG_XEN
-+static struct irq_chip ioapic_chip;
-+
-+#define IOAPIC_AUTO -1
-+#define IOAPIC_EDGE 0
-+#define IOAPIC_LEVEL 1
-+
-+static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
-+{
-+ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-+ trigger == IOAPIC_LEVEL)
-+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
-+ handle_fasteoi_irq, "fasteoi");
-+ else
-+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
-+ handle_edge_irq, "edge");
-+ set_intr_gate(vector, interrupt[irq]);
-+}
-+#else
-+#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
-+#endif
-+
-+static void __init setup_IO_APIC_irqs(void)
-+{
-+ struct IO_APIC_route_entry entry;
-+ int apic, pin, idx, irq, first_notcon = 1, vector;
-+ unsigned long flags;
-+
-+ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
-+
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+
-+ /*
-+ * add it to the IO-APIC irq-routing table:
-+ */
-+ memset(&entry,0,sizeof(entry));
-+
-+ entry.delivery_mode = INT_DELIVERY_MODE;
-+ entry.dest_mode = INT_DEST_MODE;
-+ entry.mask = 0; /* enable IRQ */
-+ entry.dest.logical.logical_dest =
-+ cpu_mask_to_apicid(TARGET_CPUS);
-+
-+ idx = find_irq_entry(apic,pin,mp_INT);
-+ if (idx == -1) {
-+ if (first_notcon) {
-+ apic_printk(APIC_VERBOSE, KERN_DEBUG
-+ " IO-APIC (apicid-pin) %d-%d",
-+ mp_ioapics[apic].mpc_apicid,
-+ pin);
-+ first_notcon = 0;
-+ } else
-+ apic_printk(APIC_VERBOSE, ", %d-%d",
-+ mp_ioapics[apic].mpc_apicid, pin);
-+ continue;
-+ }
-+
-+ entry.trigger = irq_trigger(idx);
-+ entry.polarity = irq_polarity(idx);
-+
-+ if (irq_trigger(idx)) {
-+ entry.trigger = 1;
-+ entry.mask = 1;
-+ }
-+
-+ irq = pin_2_irq(idx, apic, pin);
-+ /*
-+ * skip adding the timer int on secondary nodes, which causes
-+ * a small but painful rift in the time-space continuum
-+ */
-+ if (multi_timer_check(apic, irq))
-+ continue;
-+ else
-+ add_pin_to_irq(irq, apic, pin);
-+
-+ if (/*!apic &&*/ !IO_APIC_IRQ(irq))
-+ continue;
-+
-+ if (IO_APIC_IRQ(irq)) {
-+ vector = assign_irq_vector(irq);
-+ entry.vector = vector;
-+ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
-+
-+ if (!apic && (irq < 16))
-+ disable_8259A_irq(irq);
-+ }
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ __ioapic_write_entry(apic, pin, entry);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ }
-+ }
-+
-+ if (!first_notcon)
-+ apic_printk(APIC_VERBOSE, " not connected.\n");
-+}
-+
-+/*
-+ * Set up the 8259A-master output pin:
-+ */
-+#ifndef CONFIG_XEN
-+static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
-+{
-+ struct IO_APIC_route_entry entry;
-+
-+ memset(&entry,0,sizeof(entry));
-+
-+ disable_8259A_irq(0);
-+
-+ /* mask LVT0 */
-+ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-+
-+ /*
-+ * We use logical delivery to get the timer IRQ
-+ * to the first CPU.
-+ */
-+ entry.dest_mode = INT_DEST_MODE;
-+ entry.mask = 0; /* unmask IRQ now */
-+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+ entry.delivery_mode = INT_DELIVERY_MODE;
-+ entry.polarity = 0;
-+ entry.trigger = 0;
-+ entry.vector = vector;
-+
-+ /*
-+ * The timer IRQ doesn't have to know that behind the
-+ * scene we have a 8259A-master in AEOI mode ...
-+ */
-+ irq_desc[0].chip = &ioapic_chip;
-+ set_irq_handler(0, handle_edge_irq);
-+
-+ /*
-+ * Add it to the IO-APIC irq-routing table:
-+ */
-+ ioapic_write_entry(apic, pin, entry);
-+
-+ enable_8259A_irq(0);
-+}
-+
-+void __init print_IO_APIC(void)
-+{
-+ int apic, i;
-+ union IO_APIC_reg_00 reg_00;
-+ union IO_APIC_reg_01 reg_01;
-+ union IO_APIC_reg_02 reg_02;
-+ union IO_APIC_reg_03 reg_03;
-+ unsigned long flags;
-+
-+ if (apic_verbosity == APIC_QUIET)
-+ return;
-+
-+ printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
-+ for (i = 0; i < nr_ioapics; i++)
-+ printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
-+ mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
-+
-+ /*
-+ * We are a bit conservative about what we expect. We have to
-+ * know about every hardware change ASAP.
-+ */
-+ printk(KERN_INFO "testing the IO APIC.......................\n");
-+
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_00.raw = io_apic_read(apic, 0);
-+ reg_01.raw = io_apic_read(apic, 1);
-+ if (reg_01.bits.version >= 0x10)
-+ reg_02.raw = io_apic_read(apic, 2);
-+ if (reg_01.bits.version >= 0x20)
-+ reg_03.raw = io_apic_read(apic, 3);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
-+ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
-+ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
-+ printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
-+ printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
-+
-+ printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
-+ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
-+
-+ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
-+ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
-+
-+ /*
-+ * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
-+ * but the value of reg_02 is read as the previous read register
-+ * value, so ignore it if reg_02 == reg_01.
-+ */
-+ if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
-+ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
-+ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
-+ }
-+
-+ /*
-+ * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
-+ * or reg_03, but the value of reg_0[23] is read as the previous read
-+ * register value, so ignore it if reg_03 == reg_0[12].
-+ */
-+ if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
-+ reg_03.raw != reg_01.raw) {
-+ printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
-+ printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
-+ }
-+
-+ printk(KERN_DEBUG ".... IRQ redirection table:\n");
-+
-+ printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
-+ " Stat Dest Deli Vect: \n");
-+
-+ for (i = 0; i <= reg_01.bits.entries; i++) {
-+ struct IO_APIC_route_entry entry;
-+
-+ entry = ioapic_read_entry(apic, i);
-+
-+ printk(KERN_DEBUG " %02x %03X %02X ",
-+ i,
-+ entry.dest.logical.logical_dest,
-+ entry.dest.physical.physical_dest
-+ );
-+
-+ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
-+ entry.mask,
-+ entry.trigger,
-+ entry.irr,
-+ entry.polarity,
-+ entry.delivery_status,
-+ entry.dest_mode,
-+ entry.delivery_mode,
-+ entry.vector
-+ );
-+ }
-+ }
-+ printk(KERN_DEBUG "IRQ to pin mappings:\n");
-+ for (i = 0; i < NR_IRQS; i++) {
-+ struct irq_pin_list *entry = irq_2_pin + i;
-+ if (entry->pin < 0)
-+ continue;
-+ printk(KERN_DEBUG "IRQ%d ", i);
-+ for (;;) {
-+ printk("-> %d:%d", entry->apic, entry->pin);
-+ if (!entry->next)
-+ break;
-+ entry = irq_2_pin + entry->next;
-+ }
-+ printk("\n");
-+ }
-+
-+ printk(KERN_INFO ".................................... done.\n");
-+
-+ return;
-+}
-+
-+#if 0
-+
-+static void print_APIC_bitfield (int base)
-+{
-+ unsigned int v;
-+ int i, j;
-+
-+ if (apic_verbosity == APIC_QUIET)
-+ return;
-+
-+ printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
-+ for (i = 0; i < 8; i++) {
-+ v = apic_read(base + i*0x10);
-+ for (j = 0; j < 32; j++) {
-+ if (v & (1<<j))
-+ printk("1");
-+ else
-+ printk("0");
-+ }
-+ printk("\n");
-+ }
-+}
-+
-+void /*__init*/ print_local_APIC(void * dummy)
-+{
-+ unsigned int v, ver, maxlvt;
-+
-+ if (apic_verbosity == APIC_QUIET)
-+ return;
-+
-+ printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
-+ smp_processor_id(), hard_smp_processor_id());
-+ v = apic_read(APIC_ID);
-+ printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
-+ v = apic_read(APIC_LVR);
-+ printk(KERN_INFO "... APIC VERSION: %08x\n", v);
-+ ver = GET_APIC_VERSION(v);
-+ maxlvt = lapic_get_maxlvt();
-+
-+ v = apic_read(APIC_TASKPRI);
-+ printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
-+
-+ if (APIC_INTEGRATED(ver)) { /* !82489DX */
-+ v = apic_read(APIC_ARBPRI);
-+ printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
-+ v & APIC_ARBPRI_MASK);
-+ v = apic_read(APIC_PROCPRI);
-+ printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
-+ }
-+
-+ v = apic_read(APIC_EOI);
-+ printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
-+ v = apic_read(APIC_RRR);
-+ printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
-+ v = apic_read(APIC_LDR);
-+ printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
-+ v = apic_read(APIC_DFR);
-+ printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
-+ v = apic_read(APIC_SPIV);
-+ printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
-+
-+ printk(KERN_DEBUG "... APIC ISR field:\n");
-+ print_APIC_bitfield(APIC_ISR);
-+ printk(KERN_DEBUG "... APIC TMR field:\n");
-+ print_APIC_bitfield(APIC_TMR);
-+ printk(KERN_DEBUG "... APIC IRR field:\n");
-+ print_APIC_bitfield(APIC_IRR);
-+
-+ if (APIC_INTEGRATED(ver)) { /* !82489DX */
-+ if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
-+ apic_write(APIC_ESR, 0);
-+ v = apic_read(APIC_ESR);
-+ printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
-+ }
-+
-+ v = apic_read(APIC_ICR);
-+ printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
-+ v = apic_read(APIC_ICR2);
-+ printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
-+
-+ v = apic_read(APIC_LVTT);
-+ printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
-+
-+ if (maxlvt > 3) { /* PC is LVT#4. */
-+ v = apic_read(APIC_LVTPC);
-+ printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
-+ }
-+ v = apic_read(APIC_LVT0);
-+ printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
-+ v = apic_read(APIC_LVT1);
-+ printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
-+
-+ if (maxlvt > 2) { /* ERR is LVT#3. */
-+ v = apic_read(APIC_LVTERR);
-+ printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
-+ }
-+
-+ v = apic_read(APIC_TMICT);
-+ printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
-+ v = apic_read(APIC_TMCCT);
-+ printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
-+ v = apic_read(APIC_TDCR);
-+ printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
-+ printk("\n");
-+}
-+
-+void print_all_local_APICs (void)
-+{
-+ on_each_cpu(print_local_APIC, NULL, 1, 1);
-+}
-+
-+void /*__init*/ print_PIC(void)
-+{
-+ unsigned int v;
-+ unsigned long flags;
-+
-+ if (apic_verbosity == APIC_QUIET)
-+ return;
-+
-+ printk(KERN_DEBUG "\nprinting PIC contents\n");
-+
-+ spin_lock_irqsave(&i8259A_lock, flags);
-+
-+ v = inb(0xa1) << 8 | inb(0x21);
-+ printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
-+
-+ v = inb(0xa0) << 8 | inb(0x20);
-+ printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
-+
-+ outb(0x0b,0xa0);
-+ outb(0x0b,0x20);
-+ v = inb(0xa0) << 8 | inb(0x20);
-+ outb(0x0a,0xa0);
-+ outb(0x0a,0x20);
-+
-+ spin_unlock_irqrestore(&i8259A_lock, flags);
-+
-+ printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
-+
-+ v = inb(0x4d1) << 8 | inb(0x4d0);
-+ printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
-+}
-+
-+#endif /* 0 */
-+
-+#else
-+void __init print_IO_APIC(void) { }
-+#endif /* !CONFIG_XEN */
-+
-+static void __init enable_IO_APIC(void)
-+{
-+ union IO_APIC_reg_01 reg_01;
-+ int i8259_apic, i8259_pin;
-+ int i, apic;
-+ unsigned long flags;
-+
-+ for (i = 0; i < PIN_MAP_SIZE; i++) {
-+ irq_2_pin[i].pin = -1;
-+ irq_2_pin[i].next = 0;
-+ }
-+ if (!pirqs_enabled)
-+ for (i = 0; i < MAX_PIRQS; i++)
-+ pirq_entries[i] = -1;
-+
-+ /*
-+ * The number of IO-APIC IRQ registers (== #pins):
-+ */
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_01.raw = io_apic_read(apic, 1);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
-+ }
-+ for(apic = 0; apic < nr_ioapics; apic++) {
-+ int pin;
-+ /* See if any of the pins is in ExtINT mode */
-+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+ struct IO_APIC_route_entry entry;
-+ entry = ioapic_read_entry(apic, pin);
-+
-+
-+ /* If the interrupt line is enabled and in ExtInt mode
-+ * I have found the pin where the i8259 is connected.
-+ */
-+ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
-+ ioapic_i8259.apic = apic;
-+ ioapic_i8259.pin = pin;
-+ goto found_i8259;
-+ }
-+ }
-+ }
-+ found_i8259:
-+ /* Look to see what if the MP table has reported the ExtINT */
-+ /* If we could not find the appropriate pin by looking at the ioapic
-+ * the i8259 probably is not connected the ioapic but give the
-+ * mptable a chance anyway.
-+ */
-+ i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
-+ i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
-+ /* Trust the MP table if nothing is setup in the hardware */
-+ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
-+ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
-+ ioapic_i8259.pin = i8259_pin;
-+ ioapic_i8259.apic = i8259_apic;
-+ }
-+ /* Complain if the MP table and the hardware disagree */
-+ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
-+ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
-+ {
-+ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
-+ }
-+
-+ /*
-+ * Do not trust the IO-APIC being empty at bootup
-+ */
-+ clear_IO_APIC();
-+}
-+
-+/*
-+ * Not an __init, needed by the reboot code
-+ */
-+void disable_IO_APIC(void)
-+{
-+ /*
-+ * Clear the IO-APIC before rebooting:
-+ */
-+ clear_IO_APIC();
-+
-+#ifndef CONFIG_XEN
-+ /*
-+ * If the i8259 is routed through an IOAPIC
-+ * Put that IOAPIC in virtual wire mode
-+ * so legacy interrupts can be delivered.
-+ */
-+ if (ioapic_i8259.pin != -1) {
-+ struct IO_APIC_route_entry entry;
-+
-+ memset(&entry, 0, sizeof(entry));
-+ entry.mask = 0; /* Enabled */
-+ entry.trigger = 0; /* Edge */
-+ entry.irr = 0;
-+ entry.polarity = 0; /* High */
-+ entry.delivery_status = 0;
-+ entry.dest_mode = 0; /* Physical */
-+ entry.delivery_mode = dest_ExtINT; /* ExtInt */
-+ entry.vector = 0;
-+ entry.dest.physical.physical_dest =
-+ GET_APIC_ID(apic_read(APIC_ID));
-+
-+ /*
-+ * Add it to the IO-APIC irq-routing table:
-+ */
-+ ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
-+ }
-+ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
-+#endif
-+}
-+
-+/*
-+ * function to set the IO-APIC physical IDs based on the
-+ * values stored in the MPC table.
-+ *
-+ * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
-+ */
-+
-+#if !defined(CONFIG_XEN) && !defined(CONFIG_X86_NUMAQ)
-+static void __init setup_ioapic_ids_from_mpc(void)
-+{
-+ union IO_APIC_reg_00 reg_00;
-+ physid_mask_t phys_id_present_map;
-+ int apic;
-+ int i;
-+ unsigned char old_id;
-+ unsigned long flags;
-+
-+ /*
-+ * Don't check I/O APIC IDs for xAPIC systems. They have
-+ * no meaning without the serial APIC bus.
-+ */
-+ if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-+ || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
-+ return;
-+ /*
-+ * This is broken; anything with a real cpu count has to
-+ * circumvent this idiocy regardless.
-+ */
-+ phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
-+
-+ /*
-+ * Set the IOAPIC ID to the value stored in the MPC table.
-+ */
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+
-+ /* Read the register 0 value */
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_00.raw = io_apic_read(apic, 0);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ old_id = mp_ioapics[apic].mpc_apicid;
-+
-+ if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
-+ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
-+ apic, mp_ioapics[apic].mpc_apicid);
-+ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
-+ reg_00.bits.ID);
-+ mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
-+ }
-+
-+ /*
-+ * Sanity check, is the ID really free? Every APIC in a
-+ * system must have a unique ID or we get lots of nice
-+ * 'stuck on smp_invalidate_needed IPI wait' messages.
-+ */
-+ if (check_apicid_used(phys_id_present_map,
-+ mp_ioapics[apic].mpc_apicid)) {
-+ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
-+ apic, mp_ioapics[apic].mpc_apicid);
-+ for (i = 0; i < get_physical_broadcast(); i++)
-+ if (!physid_isset(i, phys_id_present_map))
-+ break;
-+ if (i >= get_physical_broadcast())
-+ panic("Max APIC ID exceeded!\n");
-+ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
-+ i);
-+ physid_set(i, phys_id_present_map);
-+ mp_ioapics[apic].mpc_apicid = i;
-+ } else {
-+ physid_mask_t tmp;
-+ tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
-+ apic_printk(APIC_VERBOSE, "Setting %d in the "
-+ "phys_id_present_map\n",
-+ mp_ioapics[apic].mpc_apicid);
-+ physids_or(phys_id_present_map, phys_id_present_map, tmp);
-+ }
-+
-+
-+ /*
-+ * We need to adjust the IRQ routing table
-+ * if the ID changed.
-+ */
-+ if (old_id != mp_ioapics[apic].mpc_apicid)
-+ for (i = 0; i < mp_irq_entries; i++)
-+ if (mp_irqs[i].mpc_dstapic == old_id)
-+ mp_irqs[i].mpc_dstapic
-+ = mp_ioapics[apic].mpc_apicid;
-+
-+ /*
-+ * Read the right value from the MPC table and
-+ * write it into the ID register.
-+ */
-+ apic_printk(APIC_VERBOSE, KERN_INFO
-+ "...changing IO-APIC physical APIC ID to %d ...",
-+ mp_ioapics[apic].mpc_apicid);
-+
-+ reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0, reg_00.raw);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ /*
-+ * Sanity check
-+ */
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_00.raw = io_apic_read(apic, 0);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
-+ printk("could not set ID!\n");
-+ else
-+ apic_printk(APIC_VERBOSE, " ok.\n");
-+ }
-+}
-+#else
-+static void __init setup_ioapic_ids_from_mpc(void) { }
-+#endif
-+
-+#ifndef CONFIG_XEN
-+int no_timer_check __initdata;
-+
-+static int __init notimercheck(char *s)
-+{
-+ no_timer_check = 1;
-+ return 1;
-+}
-+__setup("no_timer_check", notimercheck);
-+
-+/*
-+ * There is a nasty bug in some older SMP boards, their mptable lies
-+ * about the timer IRQ. We do the following to work around the situation:
-+ *
-+ * - timer IRQ defaults to IO-APIC IRQ
-+ * - if this function detects that timer IRQs are defunct, then we fall
-+ * back to ISA timer IRQs
-+ */
-+int __init timer_irq_works(void)
-+{
-+ unsigned long t1 = jiffies;
-+
-+ if (no_timer_check)
-+ return 1;
-+
-+ local_irq_enable();
-+ /* Let ten ticks pass... */
-+ mdelay((10 * 1000) / HZ);
-+
-+ /*
-+ * Expect a few ticks at least, to be sure some possible
-+ * glue logic does not lock up after one or two first
-+ * ticks in a non-ExtINT mode. Also the local APIC
-+ * might have cached one ExtINT interrupt. Finally, at
-+ * least one tick may be lost due to delays.
-+ */
-+ if (jiffies - t1 > 4)
-+ return 1;
-+
-+ return 0;
-+}
-+
-+/*
-+ * In the SMP+IOAPIC case it might happen that there are an unspecified
-+ * number of pending IRQ events unhandled. These cases are very rare,
-+ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
-+ * better to do it this way as thus we do not have to be aware of
-+ * 'pending' interrupts in the IRQ path, except at this point.
-+ */
-+/*
-+ * Edge triggered needs to resend any interrupt
-+ * that was delayed but this is now handled in the device
-+ * independent code.
-+ */
-+
-+/*
-+ * Startup quirk:
-+ *
-+ * Starting up an edge-triggered IO-APIC interrupt is
-+ * nasty - we need to make sure that we get the edge.
-+ * If it is already asserted for some reason, we need to
-+ * return 1 to indicate that it was pending.
-+ *
-+ * This is not complete - we should be able to fake
-+ * an edge even if it isn't on the 8259A...
-+ *
-+ * (We do this for level-triggered IRQs too - it cannot hurt.)
-+ */
-+static unsigned int startup_ioapic_irq(unsigned int irq)
-+{
-+ int was_pending = 0;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ if (irq < 16) {
-+ disable_8259A_irq(irq);
-+ if (i8259A_irq_pending(irq))
-+ was_pending = 1;
-+ }
-+ __unmask_IO_APIC_irq(irq);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return was_pending;
-+}
-+
-+static void ack_ioapic_irq(unsigned int irq)
-+{
-+ move_native_irq(irq);
-+ ack_APIC_irq();
-+}
-+
-+static void ack_ioapic_quirk_irq(unsigned int irq)
-+{
-+ unsigned long v;
-+ int i;
-+
-+ move_native_irq(irq);
-+/*
-+ * It appears there is an erratum which affects at least version 0x11
-+ * of I/O APIC (that's the 82093AA and cores integrated into various
-+ * chipsets). Under certain conditions a level-triggered interrupt is
-+ * erroneously delivered as edge-triggered one but the respective IRR
-+ * bit gets set nevertheless. As a result the I/O unit expects an EOI
-+ * message but it will never arrive and further interrupts are blocked
-+ * from the source. The exact reason is so far unknown, but the
-+ * phenomenon was observed when two consecutive interrupt requests
-+ * from a given source get delivered to the same CPU and the source is
-+ * temporarily disabled in between.
-+ *
-+ * A workaround is to simulate an EOI message manually. We achieve it
-+ * by setting the trigger mode to edge and then to level when the edge
-+ * trigger mode gets detected in the TMR of a local APIC for a
-+ * level-triggered interrupt. We mask the source for the time of the
-+ * operation to prevent an edge-triggered interrupt escaping meanwhile.
-+ * The idea is from Manfred Spraul. --macro
-+ */
-+ i = irq_vector[irq];
-+
-+ v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
-+
-+ ack_APIC_irq();
-+
-+ if (!(v & (1 << (i & 0x1f)))) {
-+ atomic_inc(&irq_mis_count);
-+ spin_lock(&ioapic_lock);
-+ __mask_and_edge_IO_APIC_irq(irq);
-+ __unmask_and_level_IO_APIC_irq(irq);
-+ spin_unlock(&ioapic_lock);
-+ }
-+}
-+
-+static int ioapic_retrigger_irq(unsigned int irq)
-+{
-+ send_IPI_self(irq_vector[irq]);
-+
-+ return 1;
-+}
-+
-+static struct irq_chip ioapic_chip __read_mostly = {
-+ .name = "IO-APIC",
-+ .startup = startup_ioapic_irq,
-+ .mask = mask_IO_APIC_irq,
-+ .unmask = unmask_IO_APIC_irq,
-+ .ack = ack_ioapic_irq,
-+ .eoi = ack_ioapic_quirk_irq,
-+#ifdef CONFIG_SMP
-+ .set_affinity = set_ioapic_affinity_irq,
-+#endif
-+ .retrigger = ioapic_retrigger_irq,
-+};
-+#endif /* !CONFIG_XEN */
-+
-+static inline void init_IO_APIC_traps(void)
-+{
-+ int irq;
-+
-+ /*
-+ * NOTE! The local APIC isn't very good at handling
-+ * multiple interrupts at the same interrupt level.
-+ * As the interrupt level is determined by taking the
-+ * vector number and shifting that right by 4, we
-+ * want to spread these out a bit so that they don't
-+ * all fall in the same interrupt level.
-+ *
-+ * Also, we've got to be careful not to trash gate
-+ * 0x80, because int 0x80 is hm, kind of importantish. ;)
-+ */
-+ for (irq = 0; irq < NR_IRQS ; irq++) {
-+ int tmp = irq;
-+ if (IO_APIC_IRQ(tmp) && !irq_vector[tmp]) {
-+ /*
-+ * Hmm.. We don't have an entry for this,
-+ * so default to an old-fashioned 8259
-+ * interrupt if we can..
-+ */
-+ if (irq < 16)
-+ make_8259A_irq(irq);
-+#ifndef CONFIG_XEN
-+ else
-+ /* Strange. Oh, well.. */
-+ irq_desc[irq].chip = &no_irq_chip;
-+#endif
-+ }
-+ }
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * The local APIC irq-chip implementation:
-+ */
-+
-+static void ack_apic(unsigned int irq)
-+{
-+ ack_APIC_irq();
-+}
-+
-+static void mask_lapic_irq (unsigned int irq)
-+{
-+ unsigned long v;
-+
-+ v = apic_read(APIC_LVT0);
-+ apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
-+}
-+
-+static void unmask_lapic_irq (unsigned int irq)
-+{
-+ unsigned long v;
-+
-+ v = apic_read(APIC_LVT0);
-+ apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
-+}
-+
-+static struct irq_chip lapic_chip __read_mostly = {
-+ .name = "local-APIC-edge",
-+ .mask = mask_lapic_irq,
-+ .unmask = unmask_lapic_irq,
-+ .eoi = ack_apic,
-+};
-+
-+static void setup_nmi (void)
-+{
-+ /*
-+ * Dirty trick to enable the NMI watchdog ...
-+ * We put the 8259A master into AEOI mode and
-+ * unmask on all local APICs LVT0 as NMI.
-+ *
-+ * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
-+ * is from Maciej W. Rozycki - so we do not have to EOI from
-+ * the NMI handler or the timer interrupt.
-+ */
-+ apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
-+
-+ on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
-+
-+ apic_printk(APIC_VERBOSE, " done.\n");
-+}
-+
-+/*
-+ * This looks a bit hackish but it's about the only way of sending
-+ * a few INTA cycles to 8259As and any associated glue logic. ICR does
-+ * not support the ExtINT mode, unfortunately. We need to send these
-+ * cycles as some i82489DX-based boards have glue logic that keeps the
-+ * 8259A interrupt line asserted until INTA. --macro
-+ */
-+static inline void unlock_ExtINT_logic(void)
-+{
-+ int apic, pin, i;
-+ struct IO_APIC_route_entry entry0, entry1;
-+ unsigned char save_control, save_freq_select;
-+
-+ pin = find_isa_irq_pin(8, mp_INT);
-+ if (pin == -1) {
-+ WARN_ON_ONCE(1);
-+ return;
-+ }
-+ apic = find_isa_irq_apic(8, mp_INT);
-+ if (apic == -1) {
-+ WARN_ON_ONCE(1);
-+ return;
-+ }
-+
-+ entry0 = ioapic_read_entry(apic, pin);
-+ clear_IO_APIC_pin(apic, pin);
-+
-+ memset(&entry1, 0, sizeof(entry1));
-+
-+ entry1.dest_mode = 0; /* physical delivery */
-+ entry1.mask = 0; /* unmask IRQ now */
-+ entry1.dest.physical.physical_dest = hard_smp_processor_id();
-+ entry1.delivery_mode = dest_ExtINT;
-+ entry1.polarity = entry0.polarity;
-+ entry1.trigger = 0;
-+ entry1.vector = 0;
-+
-+ ioapic_write_entry(apic, pin, entry1);
-+
-+ save_control = CMOS_READ(RTC_CONTROL);
-+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-+ CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
-+ RTC_FREQ_SELECT);
-+ CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
-+
-+ i = 100;
-+ while (i-- > 0) {
-+ mdelay(10);
-+ if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
-+ i -= 10;
-+ }
-+
-+ CMOS_WRITE(save_control, RTC_CONTROL);
-+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-+ clear_IO_APIC_pin(apic, pin);
-+
-+ ioapic_write_entry(apic, pin, entry0);
-+}
-+
-+int timer_uses_ioapic_pin_0;
-+
-+/*
-+ * This code may look a bit paranoid, but it's supposed to cooperate with
-+ * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
-+ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
-+ * fanatically on his truly buggy board.
-+ */
-+static inline void __init check_timer(void)
-+{
-+ int apic1, pin1, apic2, pin2;
-+ int vector;
-+
-+ /*
-+ * get/set the timer IRQ vector:
-+ */
-+ disable_8259A_irq(0);
-+ vector = assign_irq_vector(0);
-+ set_intr_gate(vector, interrupt[0]);
-+
-+ /*
-+ * Subtle, code in do_timer_interrupt() expects an AEOI
-+ * mode for the 8259A whenever interrupts are routed
-+ * through I/O APICs. Also IRQ0 has to be enabled in
-+ * the 8259A which implies the virtual wire has to be
-+ * disabled in the local APIC.
-+ */
-+ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-+ init_8259A(1);
-+ timer_ack = 1;
-+ if (timer_over_8254 > 0)
-+ enable_8259A_irq(0);
-+
-+ pin1 = find_isa_irq_pin(0, mp_INT);
-+ apic1 = find_isa_irq_apic(0, mp_INT);
-+ pin2 = ioapic_i8259.pin;
-+ apic2 = ioapic_i8259.apic;
-+
-+ if (pin1 == 0)
-+ timer_uses_ioapic_pin_0 = 1;
-+
-+ printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
-+ vector, apic1, pin1, apic2, pin2);
-+
-+ if (pin1 != -1) {
-+ /*
-+ * Ok, does IRQ0 through the IOAPIC work?
-+ */
-+ unmask_IO_APIC_irq(0);
-+ if (timer_irq_works()) {
-+ if (nmi_watchdog == NMI_IO_APIC) {
-+ disable_8259A_irq(0);
-+ setup_nmi();
-+ enable_8259A_irq(0);
-+ }
-+ if (disable_timer_pin_1 > 0)
-+ clear_IO_APIC_pin(0, pin1);
-+ return;
-+ }
-+ clear_IO_APIC_pin(apic1, pin1);
-+ printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
-+ "IO-APIC\n");
-+ }
-+
-+ printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
-+ if (pin2 != -1) {
-+ printk("\n..... (found pin %d) ...", pin2);
-+ /*
-+ * legacy devices should be connected to IO APIC #0
-+ */
-+ setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
-+ if (timer_irq_works()) {
-+ printk("works.\n");
-+ if (pin1 != -1)
-+ replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
-+ else
-+ add_pin_to_irq(0, apic2, pin2);
-+ if (nmi_watchdog == NMI_IO_APIC) {
-+ setup_nmi();
-+ }
-+ return;
-+ }
-+ /*
-+ * Cleanup, just in case ...
-+ */
-+ clear_IO_APIC_pin(apic2, pin2);
-+ }
-+ printk(" failed.\n");
-+
-+ if (nmi_watchdog == NMI_IO_APIC) {
-+ printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
-+ nmi_watchdog = 0;
-+ }
-+
-+ printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
-+
-+ disable_8259A_irq(0);
-+ set_irq_chip_and_handler_name(0, &lapic_chip, handle_fasteoi_irq,
-+ "fasteoi");
-+ apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
-+ enable_8259A_irq(0);
-+
-+ if (timer_irq_works()) {
-+ printk(" works.\n");
-+ return;
-+ }
-+ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
-+ printk(" failed.\n");
-+
-+ printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
-+
-+ timer_ack = 0;
-+ init_8259A(0);
-+ make_8259A_irq(0);
-+ apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
-+
-+ unlock_ExtINT_logic();
-+
-+ if (timer_irq_works()) {
-+ printk(" works.\n");
-+ return;
-+ }
-+ printk(" failed :(.\n");
-+ panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
-+ "report. Then try booting with the 'noapic' option");
-+}
-+#else
-+int timer_uses_ioapic_pin_0 = 0;
-+#define check_timer() ((void)0)
-+#endif
-+
-+/*
-+ *
-+ * IRQ's that are handled by the PIC in the MPS IOAPIC case.
-+ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
-+ * Linux doesn't really care, as it's not actually used
-+ * for any interrupt handling anyway.
-+ */
-+#define PIC_IRQS (1 << PIC_CASCADE_IR)
-+
-+void __init setup_IO_APIC(void)
-+{
-+ enable_IO_APIC();
-+
-+ if (acpi_ioapic)
-+ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
-+ else
-+ io_apic_irqs = ~PIC_IRQS;
-+
-+ printk("ENABLING IO-APIC IRQs\n");
-+
-+ /*
-+ * Set up IO-APIC IRQ routing.
-+ */
-+ if (!acpi_ioapic)
-+ setup_ioapic_ids_from_mpc();
-+#ifndef CONFIG_XEN
-+ sync_Arb_IDs();
-+#endif
-+ setup_IO_APIC_irqs();
-+ init_IO_APIC_traps();
-+ check_timer();
-+ if (!acpi_ioapic)
-+ print_IO_APIC();
-+}
-+
-+#ifndef CONFIG_XEN
-+static int __init setup_disable_8254_timer(char *s)
-+{
-+ timer_over_8254 = -1;
-+ return 1;
-+}
-+static int __init setup_enable_8254_timer(char *s)
-+{
-+ timer_over_8254 = 2;
-+ return 1;
-+}
-+
-+__setup("disable_8254_timer", setup_disable_8254_timer);
-+__setup("enable_8254_timer", setup_enable_8254_timer);
-+#endif
-+
-+/*
-+ * Called after all the initialization is done. If we didn't find any
-+ * APIC bugs then we can allow the modify fast path
-+ */
-+
-+static int __init io_apic_bug_finalize(void)
-+{
-+ if (sis_apic_bug == -1)
-+ sis_apic_bug = 0;
-+ if (is_initial_xendomain()) {
-+ struct xen_platform_op op = { .cmd = XENPF_platform_quirk };
-+ op.u.platform_quirk.quirk_id = sis_apic_bug ?
-+ QUIRK_IOAPIC_BAD_REGSEL : QUIRK_IOAPIC_GOOD_REGSEL;
-+ HYPERVISOR_platform_op(&op);
-+ }
-+ return 0;
-+}
-+
-+late_initcall(io_apic_bug_finalize);
-+
-+struct sysfs_ioapic_data {
-+ struct sys_device dev;
-+ struct IO_APIC_route_entry entry[0];
-+};
-+static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
-+
-+static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
-+{
-+ struct IO_APIC_route_entry *entry;
-+ struct sysfs_ioapic_data *data;
-+ int i;
-+
-+ data = container_of(dev, struct sysfs_ioapic_data, dev);
-+ entry = data->entry;
-+ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++)
-+ entry[i] = ioapic_read_entry(dev->id, i);
-+
-+ return 0;
-+}
-+
-+static int ioapic_resume(struct sys_device *dev)
-+{
-+ struct IO_APIC_route_entry *entry;
-+ struct sysfs_ioapic_data *data;
-+ unsigned long flags;
-+ union IO_APIC_reg_00 reg_00;
-+ int i;
-+
-+ data = container_of(dev, struct sysfs_ioapic_data, dev);
-+ entry = data->entry;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_00.raw = io_apic_read(dev->id, 0);
-+ if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
-+ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
-+ io_apic_write(dev->id, 0, reg_00.raw);
-+ }
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++)
-+ ioapic_write_entry(dev->id, i, entry[i]);
-+
-+ return 0;
-+}
-+
-+static struct sysdev_class ioapic_sysdev_class = {
-+ set_kset_name("ioapic"),
-+ .suspend = ioapic_suspend,
-+ .resume = ioapic_resume,
-+};
-+
-+static int __init ioapic_init_sysfs(void)
-+{
-+ struct sys_device * dev;
-+ int i, size, error = 0;
-+
-+ error = sysdev_class_register(&ioapic_sysdev_class);
-+ if (error)
-+ return error;
-+
-+ for (i = 0; i < nr_ioapics; i++ ) {
-+ size = sizeof(struct sys_device) + nr_ioapic_registers[i]
-+ * sizeof(struct IO_APIC_route_entry);
-+ mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
-+ if (!mp_ioapic_data[i]) {
-+ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-+ continue;
-+ }
-+ memset(mp_ioapic_data[i], 0, size);
-+ dev = &mp_ioapic_data[i]->dev;
-+ dev->id = i;
-+ dev->cls = &ioapic_sysdev_class;
-+ error = sysdev_register(dev);
-+ if (error) {
-+ kfree(mp_ioapic_data[i]);
-+ mp_ioapic_data[i] = NULL;
-+ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-+ continue;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+device_initcall(ioapic_init_sysfs);
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Dynamic irq allocate and deallocation
-+ */
-+int create_irq(void)
-+{
-+ /* Allocate an unused irq */
-+ int irq, new, vector = 0;
-+ unsigned long flags;
-+
-+ irq = -ENOSPC;
-+ spin_lock_irqsave(&vector_lock, flags);
-+ for (new = (NR_IRQS - 1); new >= 0; new--) {
-+ if (platform_legacy_irq(new))
-+ continue;
-+ if (irq_vector[new] != 0)
-+ continue;
-+ vector = __assign_irq_vector(new);
-+ if (likely(vector > 0))
-+ irq = new;
-+ break;
-+ }
-+ spin_unlock_irqrestore(&vector_lock, flags);
-+
-+ if (irq >= 0) {
-+#ifndef CONFIG_XEN
-+ set_intr_gate(vector, interrupt[irq]);
-+#endif
-+ dynamic_irq_init(irq);
-+ }
-+ return irq;
-+}
-+
-+void destroy_irq(unsigned int irq)
-+{
-+ unsigned long flags;
-+
-+ dynamic_irq_cleanup(irq);
-+
-+ spin_lock_irqsave(&vector_lock, flags);
-+ irq_vector[irq] = 0;
-+ spin_unlock_irqrestore(&vector_lock, flags);
-+}
-+#endif
-+
-+/*
-+ * MSI message composition
-+ */
-+#ifdef CONFIG_PCI_MSI
-+static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
-+{
-+ int vector;
-+ unsigned dest;
-+
-+ vector = assign_irq_vector(irq);
-+ if (vector >= 0) {
-+ dest = cpu_mask_to_apicid(TARGET_CPUS);
-+
-+ msg->address_hi = MSI_ADDR_BASE_HI;
-+ msg->address_lo =
-+ MSI_ADDR_BASE_LO |
-+ ((INT_DEST_MODE == 0) ?
-+ MSI_ADDR_DEST_MODE_PHYSICAL:
-+ MSI_ADDR_DEST_MODE_LOGICAL) |
-+ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
-+ MSI_ADDR_REDIRECTION_CPU:
-+ MSI_ADDR_REDIRECTION_LOWPRI) |
-+ MSI_ADDR_DEST_ID(dest);
-+
-+ msg->data =
-+ MSI_DATA_TRIGGER_EDGE |
-+ MSI_DATA_LEVEL_ASSERT |
-+ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
-+ MSI_DATA_DELIVERY_FIXED:
-+ MSI_DATA_DELIVERY_LOWPRI) |
-+ MSI_DATA_VECTOR(vector);
-+ }
-+ return vector;
-+}
-+
-+#ifdef CONFIG_SMP
-+static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
-+{
-+ struct msi_msg msg;
-+ unsigned int dest;
-+ cpumask_t tmp;
-+ int vector;
-+
-+ cpus_and(tmp, mask, cpu_online_map);
-+ if (cpus_empty(tmp))
-+ tmp = TARGET_CPUS;
-+
-+ vector = assign_irq_vector(irq);
-+ if (vector < 0)
-+ return;
-+
-+ dest = cpu_mask_to_apicid(mask);
-+
-+ read_msi_msg(irq, &msg);
-+
-+ msg.data &= ~MSI_DATA_VECTOR_MASK;
-+ msg.data |= MSI_DATA_VECTOR(vector);
-+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
-+ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-+
-+ write_msi_msg(irq, &msg);
-+ irq_desc[irq].affinity = mask;
-+}
-+#endif /* CONFIG_SMP */
-+
-+/*
-+ * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
-+ * which implement the MSI or MSI-X Capability Structure.
-+ */
-+static struct irq_chip msi_chip = {
-+ .name = "PCI-MSI",
-+ .unmask = unmask_msi_irq,
-+ .mask = mask_msi_irq,
-+ .ack = ack_ioapic_irq,
-+#ifdef CONFIG_SMP
-+ .set_affinity = set_msi_irq_affinity,
-+#endif
-+ .retrigger = ioapic_retrigger_irq,
-+};
-+
-+int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
-+{
-+ struct msi_msg msg;
-+ int irq, ret;
-+ irq = create_irq();
-+ if (irq < 0)
-+ return irq;
-+
-+ ret = msi_compose_msg(dev, irq, &msg);
-+ if (ret < 0) {
-+ destroy_irq(irq);
-+ return ret;
-+ }
-+
-+ set_irq_msi(irq, desc);
-+ write_msi_msg(irq, &msg);
-+
-+ set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
-+ "edge");
-+
-+ return 0;
-+}
-+
-+void arch_teardown_msi_irq(unsigned int irq)
-+{
-+ destroy_irq(irq);
-+}
-+
-+#endif /* CONFIG_PCI_MSI */
-+
-+/*
-+ * Hypertransport interrupt support
-+ */
-+#ifdef CONFIG_HT_IRQ
-+
-+#ifdef CONFIG_SMP
-+
-+static void target_ht_irq(unsigned int irq, unsigned int dest)
-+{
-+ struct ht_irq_msg msg;
-+ fetch_ht_irq_msg(irq, &msg);
-+
-+ msg.address_lo &= ~(HT_IRQ_LOW_DEST_ID_MASK);
-+ msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
-+
-+ msg.address_lo |= HT_IRQ_LOW_DEST_ID(dest);
-+ msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
-+
-+ write_ht_irq_msg(irq, &msg);
-+}
-+
-+static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
-+{
-+ unsigned int dest;
-+ cpumask_t tmp;
-+
-+ cpus_and(tmp, mask, cpu_online_map);
-+ if (cpus_empty(tmp))
-+ tmp = TARGET_CPUS;
-+
-+ cpus_and(mask, tmp, CPU_MASK_ALL);
-+
-+ dest = cpu_mask_to_apicid(mask);
-+
-+ target_ht_irq(irq, dest);
-+ irq_desc[irq].affinity = mask;
-+}
-+#endif
-+
-+static struct irq_chip ht_irq_chip = {
-+ .name = "PCI-HT",
-+ .mask = mask_ht_irq,
-+ .unmask = unmask_ht_irq,
-+ .ack = ack_ioapic_irq,
-+#ifdef CONFIG_SMP
-+ .set_affinity = set_ht_irq_affinity,
-+#endif
-+ .retrigger = ioapic_retrigger_irq,
-+};
-+
-+int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
-+{
-+ int vector;
-+
-+ vector = assign_irq_vector(irq);
-+ if (vector >= 0) {
-+ struct ht_irq_msg msg;
-+ unsigned dest;
-+ cpumask_t tmp;
-+
-+ cpus_clear(tmp);
-+ cpu_set(vector >> 8, tmp);
-+ dest = cpu_mask_to_apicid(tmp);
-+
-+ msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
-+
-+ msg.address_lo =
-+ HT_IRQ_LOW_BASE |
-+ HT_IRQ_LOW_DEST_ID(dest) |
-+ HT_IRQ_LOW_VECTOR(vector) |
-+ ((INT_DEST_MODE == 0) ?
-+ HT_IRQ_LOW_DM_PHYSICAL :
-+ HT_IRQ_LOW_DM_LOGICAL) |
-+ HT_IRQ_LOW_RQEOI_EDGE |
-+ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
-+ HT_IRQ_LOW_MT_FIXED :
-+ HT_IRQ_LOW_MT_ARBITRATED) |
-+ HT_IRQ_LOW_IRQ_MASKED;
-+
-+ write_ht_irq_msg(irq, &msg);
-+
-+ set_irq_chip_and_handler_name(irq, &ht_irq_chip,
-+ handle_edge_irq, "edge");
-+ }
-+ return vector;
-+}
-+#endif /* CONFIG_HT_IRQ */
-+
-+/* --------------------------------------------------------------------------
-+ ACPI-based IOAPIC Configuration
-+ -------------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_ACPI
-+
-+int __init io_apic_get_unique_id (int ioapic, int apic_id)
-+{
-+#ifndef CONFIG_XEN
-+ union IO_APIC_reg_00 reg_00;
-+ static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
-+ physid_mask_t tmp;
-+ unsigned long flags;
-+ int i = 0;
-+
-+ /*
-+ * The P4 platform supports up to 256 APIC IDs on two separate APIC
-+ * buses (one for LAPICs, one for IOAPICs), where predecessors only
-+ * supports up to 16 on one shared APIC bus.
-+ *
-+ * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
-+ * advantage of new APIC bus architecture.
-+ */
-+
-+ if (physids_empty(apic_id_map))
-+ apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_00.raw = io_apic_read(ioapic, 0);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ if (apic_id >= get_physical_broadcast()) {
-+ printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
-+ "%d\n", ioapic, apic_id, reg_00.bits.ID);
-+ apic_id = reg_00.bits.ID;
-+ }
-+
-+ /*
-+ * Every APIC in a system must have a unique ID or we get lots of nice
-+ * 'stuck on smp_invalidate_needed IPI wait' messages.
-+ */
-+ if (check_apicid_used(apic_id_map, apic_id)) {
-+
-+ for (i = 0; i < get_physical_broadcast(); i++) {
-+ if (!check_apicid_used(apic_id_map, i))
-+ break;
-+ }
-+
-+ if (i == get_physical_broadcast())
-+ panic("Max apic_id exceeded!\n");
-+
-+ printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
-+ "trying %d\n", ioapic, apic_id, i);
-+
-+ apic_id = i;
-+ }
-+
-+ tmp = apicid_to_cpu_present(apic_id);
-+ physids_or(apic_id_map, apic_id_map, tmp);
-+
-+ if (reg_00.bits.ID != apic_id) {
-+ reg_00.bits.ID = apic_id;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(ioapic, 0, reg_00.raw);
-+ reg_00.raw = io_apic_read(ioapic, 0);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ /* Sanity check */
-+ if (reg_00.bits.ID != apic_id) {
-+ printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
-+ return -1;
-+ }
-+ }
-+
-+ apic_printk(APIC_VERBOSE, KERN_INFO
-+ "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
-+#endif /* !CONFIG_XEN */
-+
-+ return apic_id;
-+}
-+
-+
-+int __init io_apic_get_version (int ioapic)
-+{
-+ union IO_APIC_reg_01 reg_01;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_01.raw = io_apic_read(ioapic, 1);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return reg_01.bits.version;
-+}
-+
-+
-+int __init io_apic_get_redir_entries (int ioapic)
-+{
-+ union IO_APIC_reg_01 reg_01;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_01.raw = io_apic_read(ioapic, 1);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return reg_01.bits.entries;
-+}
-+
-+
-+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
-+{
-+ struct IO_APIC_route_entry entry;
-+ unsigned long flags;
-+
-+ if (!IO_APIC_IRQ(irq)) {
-+ printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
-+ ioapic);
-+ return -EINVAL;
-+ }
-+
-+ /*
-+ * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
-+ * Note that we mask (disable) IRQs now -- these get enabled when the
-+ * corresponding device driver registers for this IRQ.
-+ */
-+
-+ memset(&entry,0,sizeof(entry));
-+
-+ entry.delivery_mode = INT_DELIVERY_MODE;
-+ entry.dest_mode = INT_DEST_MODE;
-+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+ entry.trigger = edge_level;
-+ entry.polarity = active_high_low;
-+ entry.mask = 1;
-+
-+ /*
-+ * IRQs < 16 are already in the irq_2_pin[] map
-+ */
-+ if (irq >= 16)
-+ add_pin_to_irq(irq, ioapic, pin);
-+
-+ entry.vector = assign_irq_vector(irq);
-+
-+ apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
-+ "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
-+ mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
-+ edge_level, active_high_low);
-+
-+ ioapic_register_intr(irq, entry.vector, edge_level);
-+
-+ if (!ioapic && (irq < 16))
-+ disable_8259A_irq(irq);
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ __ioapic_write_entry(ioapic, pin, entry);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return 0;
-+}
-+
-+#endif /* CONFIG_ACPI */
-+
-+#ifndef CONFIG_XEN
-+static int __init parse_disable_timer_pin_1(char *arg)
-+{
-+ disable_timer_pin_1 = 1;
-+ return 0;
-+}
-+early_param("disable_timer_pin_1", parse_disable_timer_pin_1);
-+
-+static int __init parse_enable_timer_pin_1(char *arg)
-+{
-+ disable_timer_pin_1 = -1;
-+ return 0;
-+}
-+early_param("enable_timer_pin_1", parse_enable_timer_pin_1);
-+#endif
-+
-+static int __init parse_noapic(char *arg)
-+{
-+ /* disable IO-APIC */
-+ disable_ioapic_setup();
-+ return 0;
-+}
-+early_param("noapic", parse_noapic);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/ioport-xen.c ubuntu-gutsy-xen/arch/i386/kernel/ioport-xen.c
---- ubuntu-gutsy/arch/i386/kernel/ioport-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/ioport-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,122 @@
-+/*
-+ * linux/arch/i386/kernel/ioport.c
-+ *
-+ * This contains the io-permission bitmap code - written by obz, with changes
-+ * by Linus.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/capability.h>
-+#include <linux/errno.h>
-+#include <linux/types.h>
-+#include <linux/ioport.h>
-+#include <linux/smp.h>
-+#include <linux/stddef.h>
-+#include <linux/slab.h>
-+#include <linux/thread_info.h>
-+#include <linux/syscalls.h>
-+#include <xen/interface/physdev.h>
-+
-+/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
-+static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
-+{
-+ unsigned long mask;
-+ unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG);
-+ unsigned int low_index = base & (BITS_PER_LONG-1);
-+ int length = low_index + extent;
-+
-+ if (low_index != 0) {
-+ mask = (~0UL << low_index);
-+ if (length < BITS_PER_LONG)
-+ mask &= ~(~0UL << length);
-+ if (new_value)
-+ *bitmap_base++ |= mask;
-+ else
-+ *bitmap_base++ &= ~mask;
-+ length -= BITS_PER_LONG;
-+ }
-+
-+ mask = (new_value ? ~0UL : 0UL);
-+ while (length >= BITS_PER_LONG) {
-+ *bitmap_base++ = mask;
-+ length -= BITS_PER_LONG;
-+ }
-+
-+ if (length > 0) {
-+ mask = ~(~0UL << length);
-+ if (new_value)
-+ *bitmap_base++ |= mask;
-+ else
-+ *bitmap_base++ &= ~mask;
-+ }
-+}
-+
-+
-+/*
-+ * this changes the io permissions bitmap in the current task.
-+ */
-+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
-+{
-+ struct thread_struct * t = &current->thread;
-+ unsigned long *bitmap;
-+ struct physdev_set_iobitmap set_iobitmap;
-+
-+ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
-+ return -EINVAL;
-+ if (turn_on && !capable(CAP_SYS_RAWIO))
-+ return -EPERM;
-+
-+ /*
-+ * If it's the first ioperm() call in this thread's lifetime, set the
-+ * IO bitmap up. ioperm() is much less timing critical than clone(),
-+ * this is why we delay this operation until now:
-+ */
-+ if (!t->io_bitmap_ptr) {
-+ bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-+ if (!bitmap)
-+ return -ENOMEM;
-+
-+ memset(bitmap, 0xff, IO_BITMAP_BYTES);
-+ t->io_bitmap_ptr = bitmap;
-+ set_thread_flag(TIF_IO_BITMAP);
-+
-+ set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
-+ set_iobitmap.nr_ports = IO_BITMAP_BITS;
-+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
-+ }
-+
-+ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
-+
-+ return 0;
-+}
-+
-+/*
-+ * sys_iopl has to be used when you want to access the IO ports
-+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
-+ * you'd need 8kB of bitmaps/process, which is a bit excessive.
-+ *
-+ * Here we just change the eflags value on the stack: we allow
-+ * only the super-user to do it. This depends on the stack-layout
-+ * on system-call entry - see also fork() and the signal handling
-+ * code.
-+ */
-+
-+asmlinkage long sys_iopl(unsigned long unused)
-+{
-+ volatile struct pt_regs * regs = (struct pt_regs *) &unused;
-+ unsigned int level = regs->ebx;
-+ struct thread_struct *t = &current->thread;
-+ unsigned int old = (t->iopl >> 12) & 3;
-+
-+ if (level > 3)
-+ return -EINVAL;
-+ /* Trying to gain more privileges? */
-+ if (level > old) {
-+ if (!capable(CAP_SYS_RAWIO))
-+ return -EPERM;
-+ }
-+ t->iopl = level << 12;
-+ set_iopl_mask(t->iopl);
-+ return 0;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/irq-xen.c ubuntu-gutsy-xen/arch/i386/kernel/irq-xen.c
---- ubuntu-gutsy/arch/i386/kernel/irq-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/irq-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,346 @@
-+/*
-+ * linux/arch/i386/kernel/irq.c
-+ *
-+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
-+ *
-+ * This file contains the lowest level x86-specific interrupt
-+ * entry, irq-stacks and irq statistics code. All the remaining
-+ * irq logic is done by the generic kernel/irq/ code and
-+ * by the x86-specific irq controller code. (e.g. i8259.c and
-+ * io_apic.c.)
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/seq_file.h>
-+#include <linux/interrupt.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/notifier.h>
-+#include <linux/cpu.h>
-+#include <linux/delay.h>
-+
-+#include <asm/apic.h>
-+#include <asm/uaccess.h>
-+
-+DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
-+EXPORT_PER_CPU_SYMBOL(irq_stat);
-+
-+DEFINE_PER_CPU(struct pt_regs *, irq_regs);
-+EXPORT_PER_CPU_SYMBOL(irq_regs);
-+
-+/*
-+ * 'what should we do if we get a hw irq event on an illegal vector'.
-+ * Each architecture has to answer this itself.
-+ */
-+void ack_bad_irq(unsigned int irq)
-+{
-+ printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
-+
-+#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
-+ /*
-+ * Currently unexpected vectors happen only on SMP and APIC.
-+ * We _must_ ack these because every local APIC has only N
-+ * irq slots per priority level, and a 'hanging, unacked' IRQ
-+ * holds up an irq slot - in excessive cases (when multiple
-+ * unexpected vectors occur) that might lock up the APIC
-+ * completely.
-+ * But only ack when the APIC is enabled -AK
-+ */
-+ if (cpu_has_apic)
-+ ack_APIC_irq();
-+#endif
-+}
-+
-+#ifdef CONFIG_4KSTACKS
-+/*
-+ * per-CPU IRQ handling contexts (thread information and stack)
-+ */
-+union irq_ctx {
-+ struct thread_info tinfo;
-+ u32 stack[THREAD_SIZE/sizeof(u32)];
-+};
-+
-+static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
-+static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
-+#endif
-+
-+/*
-+ * do_IRQ handles all normal device IRQ's (the special
-+ * SMP cross-CPU interrupts have their own specific
-+ * handlers).
-+ */
-+fastcall unsigned int do_IRQ(struct pt_regs *regs)
-+{
-+ struct pt_regs *old_regs;
-+ /* high bit used in ret_from_ code */
-+ int irq = ~regs->orig_eax;
-+ struct irq_desc *desc = irq_desc + irq;
-+#ifdef CONFIG_4KSTACKS
-+ union irq_ctx *curctx, *irqctx;
-+ u32 *isp;
-+#endif
-+
-+ if (unlikely((unsigned)irq >= NR_IRQS)) {
-+ printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-+ __FUNCTION__, irq);
-+ BUG();
-+ }
-+
-+ old_regs = set_irq_regs(regs);
-+ irq_enter();
-+#ifdef CONFIG_DEBUG_STACKOVERFLOW
-+ /* Debugging check for stack overflow: is there less than 1KB free? */
-+ {
-+ long esp;
-+
-+ __asm__ __volatile__("andl %%esp,%0" :
-+ "=r" (esp) : "0" (THREAD_SIZE - 1));
-+ if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
-+ printk("do_IRQ: stack overflow: %ld\n",
-+ esp - sizeof(struct thread_info));
-+ dump_stack();
-+ }
-+ }
-+#endif
-+
-+#ifdef CONFIG_4KSTACKS
-+
-+ curctx = (union irq_ctx *) current_thread_info();
-+ irqctx = hardirq_ctx[smp_processor_id()];
-+
-+ /*
-+ * this is where we switch to the IRQ stack. However, if we are
-+ * already using the IRQ stack (because we interrupted a hardirq
-+ * handler) we can't do that and just have to keep using the
-+ * current stack (which is the irq stack already after all)
-+ */
-+ if (curctx != irqctx) {
-+ int arg1, arg2, ebx;
-+
-+ /* build the stack frame on the IRQ stack */
-+ isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-+ irqctx->tinfo.task = curctx->tinfo.task;
-+ irqctx->tinfo.previous_esp = current_stack_pointer;
-+
-+ /*
-+ * Copy the softirq bits in preempt_count so that the
-+ * softirq checks work in the hardirq context.
-+ */
-+ irqctx->tinfo.preempt_count =
-+ (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
-+ (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
-+
-+ asm volatile(
-+ " xchgl %%ebx,%%esp \n"
-+ " call *%%edi \n"
-+ " movl %%ebx,%%esp \n"
-+ : "=a" (arg1), "=d" (arg2), "=b" (ebx)
-+ : "0" (irq), "1" (desc), "2" (isp),
-+ "D" (desc->handle_irq)
-+ : "memory", "cc"
-+ );
-+ } else
-+#endif
-+ desc->handle_irq(irq, desc);
-+
-+ irq_exit();
-+ set_irq_regs(old_regs);
-+ return 1;
-+}
-+
-+#ifdef CONFIG_4KSTACKS
-+
-+/*
-+ * These should really be __section__(".bss.page_aligned") as well, but
-+ * gcc's 3.0 and earlier don't handle that correctly.
-+ */
-+static char softirq_stack[NR_CPUS * THREAD_SIZE]
-+ __attribute__((__aligned__(THREAD_SIZE)));
-+
-+static char hardirq_stack[NR_CPUS * THREAD_SIZE]
-+ __attribute__((__aligned__(THREAD_SIZE)));
-+
-+/*
-+ * allocate per-cpu stacks for hardirq and for softirq processing
-+ */
-+void irq_ctx_init(int cpu)
-+{
-+ union irq_ctx *irqctx;
-+
-+ if (hardirq_ctx[cpu])
-+ return;
-+
-+ irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
-+ irqctx->tinfo.task = NULL;
-+ irqctx->tinfo.exec_domain = NULL;
-+ irqctx->tinfo.cpu = cpu;
-+ irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
-+ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
-+
-+ hardirq_ctx[cpu] = irqctx;
-+
-+ irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
-+ irqctx->tinfo.task = NULL;
-+ irqctx->tinfo.exec_domain = NULL;
-+ irqctx->tinfo.cpu = cpu;
-+ irqctx->tinfo.preempt_count = 0;
-+ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
-+
-+ softirq_ctx[cpu] = irqctx;
-+
-+ printk("CPU %u irqstacks, hard=%p soft=%p\n",
-+ cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
-+}
-+
-+void irq_ctx_exit(int cpu)
-+{
-+ hardirq_ctx[cpu] = NULL;
-+}
-+
-+extern asmlinkage void __do_softirq(void);
-+
-+asmlinkage void do_softirq(void)
-+{
-+ unsigned long flags;
-+ struct thread_info *curctx;
-+ union irq_ctx *irqctx;
-+ u32 *isp;
-+
-+ if (in_interrupt())
-+ return;
-+
-+ local_irq_save(flags);
-+
-+ if (local_softirq_pending()) {
-+ curctx = current_thread_info();
-+ irqctx = softirq_ctx[smp_processor_id()];
-+ irqctx->tinfo.task = curctx->task;
-+ irqctx->tinfo.previous_esp = current_stack_pointer;
-+
-+ /* build the stack frame on the softirq stack */
-+ isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-+
-+ asm volatile(
-+ " xchgl %%ebx,%%esp \n"
-+ " call __do_softirq \n"
-+ " movl %%ebx,%%esp \n"
-+ : "=b"(isp)
-+ : "0"(isp)
-+ : "memory", "cc", "edx", "ecx", "eax"
-+ );
-+ /*
-+ * Shouldn't happen, we returned above if in_interrupt():
-+ */
-+ WARN_ON_ONCE(softirq_count());
-+ }
-+
-+ local_irq_restore(flags);
-+}
-+
-+EXPORT_SYMBOL(do_softirq);
-+#endif
-+
-+/*
-+ * Interrupt statistics:
-+ */
-+
-+atomic_t irq_err_count;
-+
-+/*
-+ * /proc/interrupts printing:
-+ */
-+
-+int show_interrupts(struct seq_file *p, void *v)
-+{
-+ int i = *(loff_t *) v, j;
-+ struct irqaction * action;
-+ unsigned long flags;
-+
-+ if (i == 0) {
-+ seq_printf(p, " ");
-+ for_each_online_cpu(j)
-+ seq_printf(p, "CPU%-8d",j);
-+ seq_putc(p, '\n');
-+ }
-+
-+ if (i < NR_IRQS) {
-+ spin_lock_irqsave(&irq_desc[i].lock, flags);
-+ action = irq_desc[i].action;
-+ if (!action)
-+ goto skip;
-+ seq_printf(p, "%3d: ",i);
-+#ifndef CONFIG_SMP
-+ seq_printf(p, "%10u ", kstat_irqs(i));
-+#else
-+ for_each_online_cpu(j)
-+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-+#endif
-+ seq_printf(p, " %8s", irq_desc[i].chip->name);
-+ seq_printf(p, "-%-8s", irq_desc[i].name);
-+ seq_printf(p, " %s", action->name);
-+
-+ for (action=action->next; action; action = action->next)
-+ seq_printf(p, ", %s", action->name);
-+
-+ seq_putc(p, '\n');
-+skip:
-+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-+ } else if (i == NR_IRQS) {
-+ seq_printf(p, "NMI: ");
-+ for_each_online_cpu(j)
-+ seq_printf(p, "%10u ", nmi_count(j));
-+ seq_putc(p, '\n');
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ seq_printf(p, "LOC: ");
-+ for_each_online_cpu(j)
-+ seq_printf(p, "%10u ",
-+ per_cpu(irq_stat,j).apic_timer_irqs);
-+ seq_putc(p, '\n');
-+#endif
-+ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-+#if defined(CONFIG_X86_IO_APIC)
-+ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
-+#endif
-+ }
-+ return 0;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+
-+void fixup_irqs(cpumask_t map)
-+{
-+ unsigned int irq;
-+ static int warned;
-+
-+ for (irq = 0; irq < NR_IRQS; irq++) {
-+ cpumask_t mask;
-+ if (irq == 2)
-+ continue;
-+
-+ cpus_and(mask, irq_desc[irq].affinity, map);
-+ if (any_online_cpu(mask) == NR_CPUS) {
-+ /*printk("Breaking affinity for irq %i\n", irq);*/
-+ mask = map;
-+ }
-+ if (irq_desc[irq].chip->set_affinity)
-+ irq_desc[irq].chip->set_affinity(irq, mask);
-+ else if (irq_desc[irq].action && !(warned++))
-+ printk("Cannot set affinity for irq %i\n", irq);
-+ }
-+
-+#if 0
-+ barrier();
-+ /* Ingo Molnar says: "after the IO-APIC masks have been redirected
-+ [note the nop - the interrupt-enable boundary on x86 is two
-+ instructions from sti] - to flush out pending hardirqs and
-+ IPIs. After this point nothing is supposed to reach this CPU." */
-+ __asm__ __volatile__("sti; nop; cli");
-+ barrier();
-+#else
-+ /* That doesn't seem sufficient. Give it 1ms. */
-+ local_irq_enable();
-+ mdelay(1);
-+ local_irq_disable();
-+#endif
-+}
-+#endif
-+
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/ldt-xen.c ubuntu-gutsy-xen/arch/i386/kernel/ldt-xen.c
---- ubuntu-gutsy/arch/i386/kernel/ldt-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/ldt-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,267 @@
-+/*
-+ * linux/arch/i386/kernel/ldt.c
-+ *
-+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
-+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
-+ */
-+
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/vmalloc.h>
-+#include <linux/slab.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/system.h>
-+#include <asm/ldt.h>
-+#include <asm/desc.h>
-+#include <asm/mmu_context.h>
-+
-+#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
-+static void flush_ldt(void *null)
-+{
-+ if (current->active_mm)
-+ load_LDT(&current->active_mm->context);
-+}
-+#endif
-+
-+static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
-+{
-+ void *oldldt;
-+ void *newldt;
-+ int oldsize;
-+
-+ if (mincount <= pc->size)
-+ return 0;
-+ oldsize = pc->size;
-+ mincount = (mincount+511)&(~511);
-+ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
-+ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
-+ else
-+ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
-+
-+ if (!newldt)
-+ return -ENOMEM;
-+
-+ if (oldsize)
-+ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
-+ oldldt = pc->ldt;
-+ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
-+ pc->ldt = newldt;
-+ wmb();
-+ pc->size = mincount;
-+ wmb();
-+
-+ if (reload) {
-+#ifdef CONFIG_SMP
-+ cpumask_t mask;
-+ preempt_disable();
-+#endif
-+ make_pages_readonly(
-+ pc->ldt,
-+ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+ XENFEAT_writable_descriptor_tables);
-+ load_LDT(pc);
-+#ifdef CONFIG_SMP
-+ mask = cpumask_of_cpu(smp_processor_id());
-+ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-+ smp_call_function(flush_ldt, NULL, 1, 1);
-+ preempt_enable();
-+#endif
-+ }
-+ if (oldsize) {
-+ make_pages_writable(
-+ oldldt,
-+ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+ XENFEAT_writable_descriptor_tables);
-+ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
-+ vfree(oldldt);
-+ else
-+ kfree(oldldt);
-+ }
-+ return 0;
-+}
-+
-+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
-+{
-+ int err = alloc_ldt(new, old->size, 0);
-+ if (err < 0)
-+ return err;
-+ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
-+ make_pages_readonly(
-+ new->ldt,
-+ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+ XENFEAT_writable_descriptor_tables);
-+ return 0;
-+}
-+
-+/*
-+ * we do not have to muck with descriptors here, that is
-+ * done in switch_mm() as needed.
-+ */
-+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-+{
-+ struct mm_struct * old_mm;
-+ int retval = 0;
-+
-+ init_MUTEX(&mm->context.sem);
-+ mm->context.size = 0;
-+ mm->context.has_foreign_mappings = 0;
-+ old_mm = current->mm;
-+ if (old_mm && old_mm->context.size > 0) {
-+ down(&old_mm->context.sem);
-+ retval = copy_ldt(&mm->context, &old_mm->context);
-+ up(&old_mm->context.sem);
-+ }
-+ return retval;
-+}
-+
-+/*
-+ * No need to lock the MM as we are the last user
-+ */
-+void destroy_context(struct mm_struct *mm)
-+{
-+ if (mm->context.size) {
-+ if (mm == current->active_mm)
-+ clear_LDT();
-+ make_pages_writable(
-+ mm->context.ldt,
-+ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+ XENFEAT_writable_descriptor_tables);
-+ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
-+ vfree(mm->context.ldt);
-+ else
-+ kfree(mm->context.ldt);
-+ mm->context.size = 0;
-+ }
-+}
-+
-+static int read_ldt(void __user * ptr, unsigned long bytecount)
-+{
-+ int err;
-+ unsigned long size;
-+ struct mm_struct * mm = current->mm;
-+
-+ if (!mm->context.size)
-+ return 0;
-+ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
-+ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
-+
-+ down(&mm->context.sem);
-+ size = mm->context.size*LDT_ENTRY_SIZE;
-+ if (size > bytecount)
-+ size = bytecount;
-+
-+ err = 0;
-+ if (copy_to_user(ptr, mm->context.ldt, size))
-+ err = -EFAULT;
-+ up(&mm->context.sem);
-+ if (err < 0)
-+ goto error_return;
-+ if (size != bytecount) {
-+ /* zero-fill the rest */
-+ if (clear_user(ptr+size, bytecount-size) != 0) {
-+ err = -EFAULT;
-+ goto error_return;
-+ }
-+ }
-+ return bytecount;
-+error_return:
-+ return err;
-+}
-+
-+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
-+{
-+ int err;
-+ unsigned long size;
-+
-+ err = 0;
-+ size = 5*sizeof(struct desc_struct);
-+ if (size > bytecount)
-+ size = bytecount;
-+
-+ err = size;
-+ if (clear_user(ptr, size))
-+ err = -EFAULT;
-+
-+ return err;
-+}
-+
-+static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
-+{
-+ struct mm_struct * mm = current->mm;
-+ __u32 entry_1, entry_2;
-+ int error;
-+ struct user_desc ldt_info;
-+
-+ error = -EINVAL;
-+ if (bytecount != sizeof(ldt_info))
-+ goto out;
-+ error = -EFAULT;
-+ if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
-+ goto out;
-+
-+ error = -EINVAL;
-+ if (ldt_info.entry_number >= LDT_ENTRIES)
-+ goto out;
-+ if (ldt_info.contents == 3) {
-+ if (oldmode)
-+ goto out;
-+ if (ldt_info.seg_not_present == 0)
-+ goto out;
-+ }
-+
-+ down(&mm->context.sem);
-+ if (ldt_info.entry_number >= mm->context.size) {
-+ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
-+ if (error < 0)
-+ goto out_unlock;
-+ }
-+
-+ /* Allow LDTs to be cleared by the user. */
-+ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
-+ if (oldmode || LDT_empty(&ldt_info)) {
-+ entry_1 = 0;
-+ entry_2 = 0;
-+ goto install;
-+ }
-+ }
-+
-+ entry_1 = LDT_entry_a(&ldt_info);
-+ entry_2 = LDT_entry_b(&ldt_info);
-+ if (oldmode)
-+ entry_2 &= ~(1 << 20);
-+
-+ /* Install the new entry ... */
-+install:
-+ error = write_ldt_entry(mm->context.ldt, ldt_info.entry_number,
-+ entry_1, entry_2);
-+
-+out_unlock:
-+ up(&mm->context.sem);
-+out:
-+ return error;
-+}
-+
-+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
-+{
-+ int ret = -ENOSYS;
-+
-+ switch (func) {
-+ case 0:
-+ ret = read_ldt(ptr, bytecount);
-+ break;
-+ case 1:
-+ ret = write_ldt(ptr, bytecount, 1);
-+ break;
-+ case 2:
-+ ret = read_default_ldt(ptr, bytecount);
-+ break;
-+ case 0x11:
-+ ret = write_ldt(ptr, bytecount, 0);
-+ break;
-+ }
-+ return ret;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/machine_kexec.c ubuntu-gutsy-xen/arch/i386/kernel/machine_kexec.c
---- ubuntu-gutsy/arch/i386/kernel/machine_kexec.c 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/machine_kexec.c 2007-08-18 12:38:02.000000000 -0400
-@@ -20,6 +20,10 @@
- #include <asm/desc.h>
- #include <asm/system.h>
-
-+#ifdef CONFIG_XEN
-+#include <xen/interface/kexec.h>
-+#endif
-+
- #define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
- static u32 kexec_pgd[1024] PAGE_ALIGNED;
- #ifdef CONFIG_X86_PAE
-@@ -29,48 +33,40 @@
- static u32 kexec_pte0[1024] PAGE_ALIGNED;
- static u32 kexec_pte1[1024] PAGE_ALIGNED;
-
--static void set_idt(void *newidt, __u16 limit)
--{
-- struct Xgt_desc_struct curidt;
-+#ifdef CONFIG_XEN
-
-- /* ia32 supports unaligned loads & stores */
-- curidt.size = limit;
-- curidt.address = (unsigned long)newidt;
-+#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
-
-- load_idt(&curidt);
--};
-+#if PAGES_NR > KEXEC_XEN_NO_PAGES
-+#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
-+#endif
-
-+#if PA_CONTROL_PAGE != 0
-+#error PA_CONTROL_PAGE is non zero - Xen support will break
-+#endif
-
--static void set_gdt(void *newgdt, __u16 limit)
-+void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
- {
-- struct Xgt_desc_struct curgdt;
-+ void *control_page;
-
-- /* ia32 supports unaligned loads & stores */
-- curgdt.size = limit;
-- curgdt.address = (unsigned long)newgdt;
-+ memset(xki->page_list, 0, sizeof(xki->page_list));
-
-- load_gdt(&curgdt);
--};
-+ control_page = page_address(image->control_code_page);
-+ memcpy(control_page, relocate_kernel, PAGE_SIZE);
-
--static void load_segments(void)
--{
--#define __STR(X) #X
--#define STR(X) __STR(X)
-+ xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
-+ xki->page_list[PA_PGD] = __ma(kexec_pgd);
-+#ifdef CONFIG_X86_PAE
-+ xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
-+ xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
-+#endif
-+ xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
-+ xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
-
-- __asm__ __volatile__ (
-- "\tljmp $"STR(__KERNEL_CS)",$1f\n"
-- "\t1:\n"
-- "\tmovl $"STR(__KERNEL_DS)",%%eax\n"
-- "\tmovl %%eax,%%ds\n"
-- "\tmovl %%eax,%%es\n"
-- "\tmovl %%eax,%%fs\n"
-- "\tmovl %%eax,%%gs\n"
-- "\tmovl %%eax,%%ss\n"
-- ::: "eax", "memory");
--#undef STR
--#undef __STR
- }
-
-+#endif /* CONFIG_XEN */
-+
- /*
- * A architecture hook called to validate the
- * proposed image and prepare the control pages
-@@ -97,6 +93,7 @@
- {
- }
-
-+#ifndef CONFIG_XEN
- /*
- * Do not allocate memory (or fail in any way) in machine_kexec().
- * We are past the point of no return, committed to rebooting now.
-@@ -127,26 +124,10 @@
- page_list[PA_PTE_1] = __pa(kexec_pte1);
- page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
-
-- /* The segment registers are funny things, they have both a
-- * visible and an invisible part. Whenever the visible part is
-- * set to a specific selector, the invisible part is loaded
-- * with from a table in memory. At no other time is the
-- * descriptor table in memory accessed.
-- *
-- * I take advantage of this here by force loading the
-- * segments, before I zap the gdt with an invalid value.
-- */
-- load_segments();
-- /* The gdt & idt are now invalid.
-- * If you want to load them you must set up your own idt & gdt.
-- */
-- set_gdt(phys_to_virt(0),0);
-- set_idt(phys_to_virt(0),0);
--
-- /* now call it */
- relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
- image->start, cpu_has_pae);
- }
-+#endif
-
- /* crashkernel=size@addr specifies the location to reserve for
- * a crash kernel. By reserving this memory we guarantee
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/Makefile ubuntu-gutsy-xen/arch/i386/kernel/Makefile
---- ubuntu-gutsy/arch/i386/kernel/Makefile 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -47,12 +47,18 @@
-
- obj-$(CONFIG_SCx200) += scx200.o
-
-+ifdef CONFIG_XEN
-+vsyscall_note := vsyscall-note-xen.o
-+else
-+vsyscall_note := vsyscall-note.o
-+endif
-+
- # vsyscall.o contains the vsyscall DSO images as __initdata.
- # We must build both images before we can assemble it.
- # Note: kbuild does not track this dependency due to usage of .incbin
- $(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so
- targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so)
--targets += vsyscall-note.o vsyscall.lds
-+targets += $(vsyscall_note) vsyscall.lds
-
- # The DSO images are built using a special linker script.
- quiet_cmd_syscall = SYSCALL $@
-@@ -68,7 +74,7 @@
-
- $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
- $(obj)/vsyscall-%.so: $(src)/vsyscall.lds \
-- $(obj)/vsyscall-%.o $(obj)/vsyscall-note.o FORCE
-+ $(obj)/vsyscall-%.o $(obj)/$(vsyscall_note) FORCE
- $(call if_changed,syscall)
-
- # We also create a special relocatable object that should mirror the symbol
-@@ -80,9 +86,20 @@
-
- SYSCFLAGS_vsyscall-syms.o = -r
- $(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
-- $(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
-+ $(obj)/vsyscall-sysenter.o $(obj)/$(vsyscall_note) FORCE
- $(call if_changed,syscall)
-
- k8-y += ../../x86_64/kernel/k8.o
- stacktrace-y += ../../x86_64/kernel/stacktrace.o
-
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+
-+obj-y += fixup.o
-+microcode-$(subst m,y,$(CONFIG_MICROCODE)) := microcode-xen.o
-+n-obj-xen := i8253.o i8259.o reboot.o smpboot.o trampoline.o tsc.o tsc_sync.o
-+
-+obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
-+obj-y := $(call cherrypickxen, $(obj-y))
-+extra-y := $(call cherrypickxen, $(extra-y))
-+endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/microcode-xen.c ubuntu-gutsy-xen/arch/i386/kernel/microcode-xen.c
---- ubuntu-gutsy/arch/i386/kernel/microcode-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/microcode-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,209 @@
-+/*
-+ * Intel CPU Microcode Update Driver for Linux
-+ *
-+ * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
-+ * 2006 Shaohua Li <shaohua.li@intel.com>
-+ *
-+ * This driver allows upgrading microcode on Intel processors
-+ * belonging to IA-32 family - PentiumPro, Pentium II,
-+ * Pentium III, Xeon, Pentium 4, etc.
-+ *
-+ * Reference: Section 8.10 of Volume III, Intel Pentium 4 Manual,
-+ * Order Number 245472 or free download from:
-+ *
-+ * http://developer.intel.com/design/pentium4/manuals/245472.htm
-+ *
-+ * For more information, go to http://www.urbanmyth.org/microcode
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version
-+ * 2 of the License, or (at your option) any later version.
-+ */
-+
-+//#define DEBUG /* pr_debug */
-+#include <linux/capability.h>
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/sched.h>
-+#include <linux/cpumask.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/miscdevice.h>
-+#include <linux/spinlock.h>
-+#include <linux/mm.h>
-+#include <linux/mutex.h>
-+#include <linux/cpu.h>
-+#include <linux/firmware.h>
-+#include <linux/platform_device.h>
-+
-+#include <asm/msr.h>
-+#include <asm/uaccess.h>
-+#include <asm/processor.h>
-+
-+MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
-+MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
-+MODULE_LICENSE("GPL");
-+
-+static int verbose;
-+module_param(verbose, int, 0644);
-+
-+#define MICROCODE_VERSION "1.14a-xen"
-+
-+#define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */
-+#define MC_HEADER_SIZE (sizeof (microcode_header_t)) /* 48 bytes */
-+#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) /* 2048 bytes */
-+
-+/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
-+static DEFINE_MUTEX(microcode_mutex);
-+
-+#ifdef CONFIG_MICROCODE_OLD_INTERFACE
-+static int do_microcode_update (const void __user *ubuf, size_t len)
-+{
-+ int err;
-+ void *kbuf;
-+
-+ kbuf = vmalloc(len);
-+ if (!kbuf)
-+ return -ENOMEM;
-+
-+ if (copy_from_user(kbuf, ubuf, len) == 0) {
-+ struct xen_platform_op op;
-+
-+ op.cmd = XENPF_microcode_update;
-+ set_xen_guest_handle(op.u.microcode.data, kbuf);
-+ op.u.microcode.length = len;
-+ err = HYPERVISOR_platform_op(&op);
-+ } else
-+ err = -EFAULT;
-+
-+ vfree(kbuf);
-+
-+ return err;
-+}
-+
-+static int microcode_open (struct inode *unused1, struct file *unused2)
-+{
-+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
-+}
-+
-+static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
-+{
-+ ssize_t ret;
-+
-+ if (len < MC_HEADER_SIZE) {
-+ printk(KERN_ERR "microcode: not enough data\n");
-+ return -EINVAL;
-+ }
-+
-+ mutex_lock(&microcode_mutex);
-+
-+ ret = do_microcode_update(buf, len);
-+ if (!ret)
-+ ret = (ssize_t)len;
-+
-+ mutex_unlock(&microcode_mutex);
-+
-+ return ret;
-+}
-+
-+static const struct file_operations microcode_fops = {
-+ .owner = THIS_MODULE,
-+ .write = microcode_write,
-+ .open = microcode_open,
-+};
-+
-+static struct miscdevice microcode_dev = {
-+ .minor = MICROCODE_MINOR,
-+ .name = "microcode",
-+ .fops = &microcode_fops,
-+};
-+
-+static int __init microcode_dev_init (void)
-+{
-+ int error;
-+
-+ error = misc_register(&microcode_dev);
-+ if (error) {
-+ printk(KERN_ERR
-+ "microcode: can't misc_register on minor=%d\n",
-+ MICROCODE_MINOR);
-+ return error;
-+ }
-+
-+ return 0;
-+}
-+
-+static void microcode_dev_exit (void)
-+{
-+ misc_deregister(&microcode_dev);
-+}
-+
-+MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
-+#else
-+#define microcode_dev_init() 0
-+#define microcode_dev_exit() do { } while(0)
-+#endif
-+
-+/* fake device for request_firmware */
-+static struct platform_device *microcode_pdev;
-+
-+static int request_microcode(void)
-+{
-+ char name[30];
-+ const struct cpuinfo_x86 *c = &boot_cpu_data;
-+ const struct firmware *firmware;
-+ int error;
-+ struct xen_platform_op op;
-+
-+ sprintf(name,"intel-ucode/%02x-%02x-%02x",
-+ c->x86, c->x86_model, c->x86_mask);
-+ error = request_firmware(&firmware, name, &microcode_pdev->dev);
-+ if (error) {
-+ pr_debug("ucode data file %s load failed\n", name);
-+ return error;
-+ }
-+
-+ op.cmd = XENPF_microcode_update;
-+ set_xen_guest_handle(op.u.microcode.data, (void *)firmware->data);
-+ op.u.microcode.length = firmware->size;
-+ error = HYPERVISOR_platform_op(&op);
-+
-+ release_firmware(firmware);
-+
-+ if (error)
-+ pr_debug("ucode load failed\n");
-+
-+ return error;
-+}
-+
-+static int __init microcode_init (void)
-+{
-+ int error;
-+
-+ error = microcode_dev_init();
-+ if (error)
-+ return error;
-+ microcode_pdev = platform_device_register_simple("microcode", -1,
-+ NULL, 0);
-+ if (IS_ERR(microcode_pdev)) {
-+ microcode_dev_exit();
-+ return PTR_ERR(microcode_pdev);
-+ }
-+
-+ request_microcode();
-+
-+ printk(KERN_INFO
-+ "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@aivazian.fsnet.co.uk>\n");
-+ return 0;
-+}
-+
-+static void __exit microcode_exit (void)
-+{
-+ microcode_dev_exit();
-+ platform_device_unregister(microcode_pdev);
-+}
-+
-+module_init(microcode_init)
-+module_exit(microcode_exit)
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/mpparse-xen.c ubuntu-gutsy-xen/arch/i386/kernel/mpparse-xen.c
---- ubuntu-gutsy/arch/i386/kernel/mpparse-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/mpparse-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,1160 @@
-+/*
-+ * Intel Multiprocessor Specification 1.1 and 1.4
-+ * compliant MP-table parsing routines.
-+ *
-+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
-+ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
-+ *
-+ * Fixes
-+ * Erich Boleyn : MP v1.4 and additional changes.
-+ * Alan Cox : Added EBDA scanning
-+ * Ingo Molnar : various cleanups and rewrites
-+ * Maciej W. Rozycki: Bits for default MP configurations
-+ * Paul Diefenbaugh: Added full ACPI support
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/acpi.h>
-+#include <linux/delay.h>
-+#include <linux/bootmem.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/bitops.h>
-+
-+#include <asm/smp.h>
-+#include <asm/acpi.h>
-+#include <asm/mtrr.h>
-+#include <asm/mpspec.h>
-+#include <asm/io_apic.h>
-+
-+#include <mach_apic.h>
-+#include <mach_apicdef.h>
-+#include <mach_mpparse.h>
-+#include <bios_ebda.h>
-+
-+/* Have we found an MP table */
-+int smp_found_config;
-+unsigned int __cpuinitdata maxcpus = NR_CPUS;
-+
-+/*
-+ * Various Linux-internal data structures created from the
-+ * MP-table.
-+ */
-+int apic_version [MAX_APICS];
-+int mp_bus_id_to_type [MAX_MP_BUSSES];
-+int mp_bus_id_to_node [MAX_MP_BUSSES];
-+int mp_bus_id_to_local [MAX_MP_BUSSES];
-+int quad_local_to_mp_bus_id [NR_CPUS/4][4];
-+int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-+static int mp_current_pci_id;
-+
-+/* I/O APIC entries */
-+struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
-+
-+/* # of MP IRQ source entries */
-+struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
-+
-+/* MP IRQ source entries */
-+int mp_irq_entries;
-+
-+int nr_ioapics;
-+
-+int pic_mode;
-+#ifndef CONFIG_XEN
-+unsigned long mp_lapic_addr;
-+#endif
-+
-+unsigned int def_to_bigsmp = 0;
-+
-+/* Processor that is doing the boot up */
-+unsigned int boot_cpu_physical_apicid = -1U;
-+/* Internal processor count */
-+unsigned int __cpuinitdata num_processors;
-+
-+/* Bitmask of physically existing CPUs */
-+physid_mask_t phys_cpu_present_map;
-+
-+u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-+
-+/*
-+ * Intel MP BIOS table parsing routines:
-+ */
-+
-+
-+/*
-+ * Checksum an MP configuration block.
-+ */
-+
-+static int __init mpf_checksum(unsigned char *mp, int len)
-+{
-+ int sum = 0;
-+
-+ while (len--)
-+ sum += *mp++;
-+
-+ return sum & 0xFF;
-+}
-+
-+/*
-+ * Have to match translation table entries to main table entries by counter
-+ * hence the mpc_record variable .... can't see a less disgusting way of
-+ * doing this ....
-+ */
-+
-+static int mpc_record;
-+static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinitdata;
-+
-+#ifndef CONFIG_XEN
-+static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
-+{
-+ int ver, apicid;
-+ physid_mask_t phys_cpu;
-+
-+ if (!(m->mpc_cpuflag & CPU_ENABLED))
-+ return;
-+
-+ apicid = mpc_apic_id(m, translation_table[mpc_record]);
-+
-+ if (m->mpc_featureflag&(1<<0))
-+ Dprintk(" Floating point unit present.\n");
-+ if (m->mpc_featureflag&(1<<7))
-+ Dprintk(" Machine Exception supported.\n");
-+ if (m->mpc_featureflag&(1<<8))
-+ Dprintk(" 64 bit compare & exchange supported.\n");
-+ if (m->mpc_featureflag&(1<<9))
-+ Dprintk(" Internal APIC present.\n");
-+ if (m->mpc_featureflag&(1<<11))
-+ Dprintk(" SEP present.\n");
-+ if (m->mpc_featureflag&(1<<12))
-+ Dprintk(" MTRR present.\n");
-+ if (m->mpc_featureflag&(1<<13))
-+ Dprintk(" PGE present.\n");
-+ if (m->mpc_featureflag&(1<<14))
-+ Dprintk(" MCA present.\n");
-+ if (m->mpc_featureflag&(1<<15))
-+ Dprintk(" CMOV present.\n");
-+ if (m->mpc_featureflag&(1<<16))
-+ Dprintk(" PAT present.\n");
-+ if (m->mpc_featureflag&(1<<17))
-+ Dprintk(" PSE present.\n");
-+ if (m->mpc_featureflag&(1<<18))
-+ Dprintk(" PSN present.\n");
-+ if (m->mpc_featureflag&(1<<19))
-+ Dprintk(" Cache Line Flush Instruction present.\n");
-+ /* 20 Reserved */
-+ if (m->mpc_featureflag&(1<<21))
-+ Dprintk(" Debug Trace and EMON Store present.\n");
-+ if (m->mpc_featureflag&(1<<22))
-+ Dprintk(" ACPI Thermal Throttle Registers present.\n");
-+ if (m->mpc_featureflag&(1<<23))
-+ Dprintk(" MMX present.\n");
-+ if (m->mpc_featureflag&(1<<24))
-+ Dprintk(" FXSR present.\n");
-+ if (m->mpc_featureflag&(1<<25))
-+ Dprintk(" XMM present.\n");
-+ if (m->mpc_featureflag&(1<<26))
-+ Dprintk(" Willamette New Instructions present.\n");
-+ if (m->mpc_featureflag&(1<<27))
-+ Dprintk(" Self Snoop present.\n");
-+ if (m->mpc_featureflag&(1<<28))
-+ Dprintk(" HT present.\n");
-+ if (m->mpc_featureflag&(1<<29))
-+ Dprintk(" Thermal Monitor present.\n");
-+ /* 30, 31 Reserved */
-+
-+
-+ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-+ Dprintk(" Bootup CPU\n");
-+ boot_cpu_physical_apicid = m->mpc_apicid;
-+ }
-+
-+ ver = m->mpc_apicver;
-+
-+ /*
-+ * Validate version
-+ */
-+ if (ver == 0x0) {
-+ printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
-+ "fixing up to 0x10. (tell your hw vendor)\n",
-+ m->mpc_apicid);
-+ ver = 0x10;
-+ }
-+ apic_version[m->mpc_apicid] = ver;
-+
-+ phys_cpu = apicid_to_cpu_present(apicid);
-+ physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
-+
-+ if (num_processors >= NR_CPUS) {
-+ printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
-+ " Processor ignored.\n", NR_CPUS);
-+ return;
-+ }
-+
-+ if (num_processors >= maxcpus) {
-+ printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
-+ " Processor ignored.\n", maxcpus);
-+ return;
-+ }
-+
-+ cpu_set(num_processors, cpu_possible_map);
-+ num_processors++;
-+
-+ /*
-+ * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
-+ * but we need to work out other dependencies like SMP_SUSPEND etc
-+ * before this can be done without some confusion.
-+ * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
-+ * - Ashok Raj <ashok.raj@intel.com>
-+ */
-+ if (num_processors > 8) {
-+ switch (boot_cpu_data.x86_vendor) {
-+ case X86_VENDOR_INTEL:
-+ if (!APIC_XAPIC(ver)) {
-+ def_to_bigsmp = 0;
-+ break;
-+ }
-+ /* If P4 and above fall through */
-+ case X86_VENDOR_AMD:
-+ def_to_bigsmp = 1;
-+ }
-+ }
-+ bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
-+}
-+#else
-+static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
-+{
-+ num_processors++;
-+}
-+#endif /* CONFIG_XEN */
-+
-+static void __init MP_bus_info (struct mpc_config_bus *m)
-+{
-+ char str[7];
-+
-+ memcpy(str, m->mpc_bustype, 6);
-+ str[6] = 0;
-+
-+ mpc_oem_bus_info(m, str, translation_table[mpc_record]);
-+
-+#if MAX_MP_BUSSES < 256
-+ if (m->mpc_busid >= MAX_MP_BUSSES) {
-+ printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
-+ " is too large, max. supported is %d\n",
-+ m->mpc_busid, str, MAX_MP_BUSSES - 1);
-+ return;
-+ }
-+#endif
-+
-+ if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
-+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
-+ } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
-+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
-+ } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
-+ mpc_oem_pci_bus(m, translation_table[mpc_record]);
-+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
-+ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
-+ mp_current_pci_id++;
-+ } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
-+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
-+ } else {
-+ printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
-+ }
-+}
-+
-+static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
-+{
-+ if (!(m->mpc_flags & MPC_APIC_USABLE))
-+ return;
-+
-+ printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n",
-+ m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
-+ if (nr_ioapics >= MAX_IO_APICS) {
-+ printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
-+ MAX_IO_APICS, nr_ioapics);
-+ panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
-+ }
-+ if (!m->mpc_apicaddr) {
-+ printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
-+ " found in MP table, skipping!\n");
-+ return;
-+ }
-+ mp_ioapics[nr_ioapics] = *m;
-+ nr_ioapics++;
-+}
-+
-+static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
-+{
-+ mp_irqs [mp_irq_entries] = *m;
-+ Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
-+ " IRQ %02x, APIC ID %x, APIC INT %02x\n",
-+ m->mpc_irqtype, m->mpc_irqflag & 3,
-+ (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
-+ m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
-+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+ panic("Max # of irq sources exceeded!!\n");
-+}
-+
-+static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
-+{
-+ Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
-+ " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
-+ m->mpc_irqtype, m->mpc_irqflag & 3,
-+ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
-+ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
-+}
-+
-+#ifdef CONFIG_X86_NUMAQ
-+static void __init MP_translation_info (struct mpc_config_translation *m)
-+{
-+ printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
-+
-+ if (mpc_record >= MAX_MPC_ENTRY)
-+ printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
-+ else
-+ translation_table[mpc_record] = m; /* stash this for later */
-+ if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
-+ node_set_online(m->trans_quad);
-+}
-+
-+/*
-+ * Read/parse the MPC oem tables
-+ */
-+
-+static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
-+ unsigned short oemsize)
-+{
-+ int count = sizeof (*oemtable); /* the header size */
-+ unsigned char *oemptr = ((unsigned char *)oemtable)+count;
-+
-+ mpc_record = 0;
-+ printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
-+ if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
-+ {
-+ printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
-+ oemtable->oem_signature[0],
-+ oemtable->oem_signature[1],
-+ oemtable->oem_signature[2],
-+ oemtable->oem_signature[3]);
-+ return;
-+ }
-+ if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
-+ {
-+ printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
-+ return;
-+ }
-+ while (count < oemtable->oem_length) {
-+ switch (*oemptr) {
-+ case MP_TRANSLATION:
-+ {
-+ struct mpc_config_translation *m=
-+ (struct mpc_config_translation *)oemptr;
-+ MP_translation_info(m);
-+ oemptr += sizeof(*m);
-+ count += sizeof(*m);
-+ ++mpc_record;
-+ break;
-+ }
-+ default:
-+ {
-+ printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
-+ return;
-+ }
-+ }
-+ }
-+}
-+
-+static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
-+ char *productid)
-+{
-+ if (strncmp(oem, "IBM NUMA", 8))
-+ printk("Warning! May not be a NUMA-Q system!\n");
-+ if (mpc->mpc_oemptr)
-+ smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
-+ mpc->mpc_oemsize);
-+}
-+#endif /* CONFIG_X86_NUMAQ */
-+
-+/*
-+ * Read/parse the MPC
-+ */
-+
-+static int __init smp_read_mpc(struct mp_config_table *mpc)
-+{
-+ char str[16];
-+ char oem[10];
-+ int count=sizeof(*mpc);
-+ unsigned char *mpt=((unsigned char *)mpc)+count;
-+
-+ if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
-+ printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
-+ *(u32 *)mpc->mpc_signature);
-+ return 0;
-+ }
-+ if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
-+ printk(KERN_ERR "SMP mptable: checksum error!\n");
-+ return 0;
-+ }
-+ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
-+ printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
-+ mpc->mpc_spec);
-+ return 0;
-+ }
-+ if (!mpc->mpc_lapic) {
-+ printk(KERN_ERR "SMP mptable: null local APIC address!\n");
-+ return 0;
-+ }
-+ memcpy(oem,mpc->mpc_oem,8);
-+ oem[8]=0;
-+ printk(KERN_INFO "OEM ID: %s ",oem);
-+
-+ memcpy(str,mpc->mpc_productid,12);
-+ str[12]=0;
-+ printk("Product ID: %s ",str);
-+
-+ mps_oem_check(mpc, oem, str);
-+
-+#ifndef CONFIG_XEN
-+ printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
-+
-+ /*
-+ * Save the local APIC address (it might be non-default) -- but only
-+ * if we're not using ACPI.
-+ */
-+ if (!acpi_lapic)
-+ mp_lapic_addr = mpc->mpc_lapic;
-+#endif
-+
-+ /*
-+ * Now process the configuration blocks.
-+ */
-+ mpc_record = 0;
-+ while (count < mpc->mpc_length) {
-+ switch(*mpt) {
-+ case MP_PROCESSOR:
-+ {
-+ struct mpc_config_processor *m=
-+ (struct mpc_config_processor *)mpt;
-+ /* ACPI may have already provided this data */
-+ if (!acpi_lapic)
-+ MP_processor_info(m);
-+ mpt += sizeof(*m);
-+ count += sizeof(*m);
-+ break;
-+ }
-+ case MP_BUS:
-+ {
-+ struct mpc_config_bus *m=
-+ (struct mpc_config_bus *)mpt;
-+ MP_bus_info(m);
-+ mpt += sizeof(*m);
-+ count += sizeof(*m);
-+ break;
-+ }
-+ case MP_IOAPIC:
-+ {
-+ struct mpc_config_ioapic *m=
-+ (struct mpc_config_ioapic *)mpt;
-+ MP_ioapic_info(m);
-+ mpt+=sizeof(*m);
-+ count+=sizeof(*m);
-+ break;
-+ }
-+ case MP_INTSRC:
-+ {
-+ struct mpc_config_intsrc *m=
-+ (struct mpc_config_intsrc *)mpt;
-+
-+ MP_intsrc_info(m);
-+ mpt+=sizeof(*m);
-+ count+=sizeof(*m);
-+ break;
-+ }
-+ case MP_LINTSRC:
-+ {
-+ struct mpc_config_lintsrc *m=
-+ (struct mpc_config_lintsrc *)mpt;
-+ MP_lintsrc_info(m);
-+ mpt+=sizeof(*m);
-+ count+=sizeof(*m);
-+ break;
-+ }
-+ default:
-+ {
-+ count = mpc->mpc_length;
-+ break;
-+ }
-+ }
-+ ++mpc_record;
-+ }
-+ setup_apic_routing();
-+ if (!num_processors)
-+ printk(KERN_ERR "SMP mptable: no processors registered!\n");
-+ return num_processors;
-+}
-+
-+static int __init ELCR_trigger(unsigned int irq)
-+{
-+ unsigned int port;
-+
-+ port = 0x4d0 + (irq >> 3);
-+ return (inb(port) >> (irq & 7)) & 1;
-+}
-+
-+static void __init construct_default_ioirq_mptable(int mpc_default_type)
-+{
-+ struct mpc_config_intsrc intsrc;
-+ int i;
-+ int ELCR_fallback = 0;
-+
-+ intsrc.mpc_type = MP_INTSRC;
-+ intsrc.mpc_irqflag = 0; /* conforming */
-+ intsrc.mpc_srcbus = 0;
-+ intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
-+
-+ intsrc.mpc_irqtype = mp_INT;
-+
-+ /*
-+ * If true, we have an ISA/PCI system with no IRQ entries
-+ * in the MP table. To prevent the PCI interrupts from being set up
-+ * incorrectly, we try to use the ELCR. The sanity check to see if
-+ * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
-+ * never be level sensitive, so we simply see if the ELCR agrees.
-+ * If it does, we assume it's valid.
-+ */
-+ if (mpc_default_type == 5) {
-+ printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
-+
-+ if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
-+ printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
-+ else {
-+ printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
-+ ELCR_fallback = 1;
-+ }
-+ }
-+
-+ for (i = 0; i < 16; i++) {
-+ switch (mpc_default_type) {
-+ case 2:
-+ if (i == 0 || i == 13)
-+ continue; /* IRQ0 & IRQ13 not connected */
-+ /* fall through */
-+ default:
-+ if (i == 2)
-+ continue; /* IRQ2 is never connected */
-+ }
-+
-+ if (ELCR_fallback) {
-+ /*
-+ * If the ELCR indicates a level-sensitive interrupt, we
-+ * copy that information over to the MP table in the
-+ * irqflag field (level sensitive, active high polarity).
-+ */
-+ if (ELCR_trigger(i))
-+ intsrc.mpc_irqflag = 13;
-+ else
-+ intsrc.mpc_irqflag = 0;
-+ }
-+
-+ intsrc.mpc_srcbusirq = i;
-+ intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
-+ MP_intsrc_info(&intsrc);
-+ }
-+
-+ intsrc.mpc_irqtype = mp_ExtINT;
-+ intsrc.mpc_srcbusirq = 0;
-+ intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
-+ MP_intsrc_info(&intsrc);
-+}
-+
-+static inline void __init construct_default_ISA_mptable(int mpc_default_type)
-+{
-+ struct mpc_config_processor processor;
-+ struct mpc_config_bus bus;
-+ struct mpc_config_ioapic ioapic;
-+ struct mpc_config_lintsrc lintsrc;
-+ int linttypes[2] = { mp_ExtINT, mp_NMI };
-+ int i;
-+
-+#ifndef CONFIG_XEN
-+ /*
-+ * local APIC has default address
-+ */
-+ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-+#endif
-+
-+ /*
-+ * 2 CPUs, numbered 0 & 1.
-+ */
-+ processor.mpc_type = MP_PROCESSOR;
-+ /* Either an integrated APIC or a discrete 82489DX. */
-+ processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-+ processor.mpc_cpuflag = CPU_ENABLED;
-+ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
-+ (boot_cpu_data.x86_model << 4) |
-+ boot_cpu_data.x86_mask;
-+ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-+ processor.mpc_reserved[0] = 0;
-+ processor.mpc_reserved[1] = 0;
-+ for (i = 0; i < 2; i++) {
-+ processor.mpc_apicid = i;
-+ MP_processor_info(&processor);
-+ }
-+
-+ bus.mpc_type = MP_BUS;
-+ bus.mpc_busid = 0;
-+ switch (mpc_default_type) {
-+ default:
-+ printk("???\n");
-+ printk(KERN_ERR "Unknown standard configuration %d\n",
-+ mpc_default_type);
-+ /* fall through */
-+ case 1:
-+ case 5:
-+ memcpy(bus.mpc_bustype, "ISA ", 6);
-+ break;
-+ case 2:
-+ case 6:
-+ case 3:
-+ memcpy(bus.mpc_bustype, "EISA ", 6);
-+ break;
-+ case 4:
-+ case 7:
-+ memcpy(bus.mpc_bustype, "MCA ", 6);
-+ }
-+ MP_bus_info(&bus);
-+ if (mpc_default_type > 4) {
-+ bus.mpc_busid = 1;
-+ memcpy(bus.mpc_bustype, "PCI ", 6);
-+ MP_bus_info(&bus);
-+ }
-+
-+ ioapic.mpc_type = MP_IOAPIC;
-+ ioapic.mpc_apicid = 2;
-+ ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-+ ioapic.mpc_flags = MPC_APIC_USABLE;
-+ ioapic.mpc_apicaddr = 0xFEC00000;
-+ MP_ioapic_info(&ioapic);
-+
-+ /*
-+ * We set up most of the low 16 IO-APIC pins according to MPS rules.
-+ */
-+ construct_default_ioirq_mptable(mpc_default_type);
-+
-+ lintsrc.mpc_type = MP_LINTSRC;
-+ lintsrc.mpc_irqflag = 0; /* conforming */
-+ lintsrc.mpc_srcbusid = 0;
-+ lintsrc.mpc_srcbusirq = 0;
-+ lintsrc.mpc_destapic = MP_APIC_ALL;
-+ for (i = 0; i < 2; i++) {
-+ lintsrc.mpc_irqtype = linttypes[i];
-+ lintsrc.mpc_destapiclint = i;
-+ MP_lintsrc_info(&lintsrc);
-+ }
-+}
-+
-+static struct intel_mp_floating *mpf_found;
-+
-+/*
-+ * Scan the memory blocks for an SMP configuration block.
-+ */
-+void __init get_smp_config (void)
-+{
-+ struct intel_mp_floating *mpf = mpf_found;
-+
-+ /*
-+ * ACPI supports both logical (e.g. Hyper-Threading) and physical
-+ * processors, where MPS only supports physical.
-+ */
-+ if (acpi_lapic && acpi_ioapic) {
-+ printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
-+ return;
-+ }
-+ else if (acpi_lapic)
-+ printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
-+
-+ printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
-+ if (mpf->mpf_feature2 & (1<<7)) {
-+ printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
-+ pic_mode = 1;
-+ } else {
-+ printk(KERN_INFO " Virtual Wire compatibility mode.\n");
-+ pic_mode = 0;
-+ }
-+
-+ /*
-+ * Now see if we need to read further.
-+ */
-+ if (mpf->mpf_feature1 != 0) {
-+
-+ printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
-+ construct_default_ISA_mptable(mpf->mpf_feature1);
-+
-+ } else if (mpf->mpf_physptr) {
-+
-+ /*
-+ * Read the physical hardware table. Anything here will
-+ * override the defaults.
-+ */
-+ if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
-+ smp_found_config = 0;
-+ printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
-+ printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
-+ return;
-+ }
-+ /*
-+ * If there are no explicit MP IRQ entries, then we are
-+ * broken. We set up most of the low 16 IO-APIC pins to
-+ * ISA defaults and hope it will work.
-+ */
-+ if (!mp_irq_entries) {
-+ struct mpc_config_bus bus;
-+
-+ printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
-+
-+ bus.mpc_type = MP_BUS;
-+ bus.mpc_busid = 0;
-+ memcpy(bus.mpc_bustype, "ISA ", 6);
-+ MP_bus_info(&bus);
-+
-+ construct_default_ioirq_mptable(0);
-+ }
-+
-+ } else
-+ BUG();
-+
-+ printk(KERN_INFO "Processors: %d\n", num_processors);
-+ /*
-+ * Only use the first configuration found.
-+ */
-+}
-+
-+static int __init smp_scan_config (unsigned long base, unsigned long length)
-+{
-+ unsigned long *bp = isa_bus_to_virt(base);
-+ struct intel_mp_floating *mpf;
-+
-+ Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
-+ if (sizeof(*mpf) != 16)
-+ printk("Error: MPF size\n");
-+
-+ while (length > 0) {
-+ mpf = (struct intel_mp_floating *)bp;
-+ if ((*bp == SMP_MAGIC_IDENT) &&
-+ (mpf->mpf_length == 1) &&
-+ !mpf_checksum((unsigned char *)bp, 16) &&
-+ ((mpf->mpf_specification == 1)
-+ || (mpf->mpf_specification == 4)) ) {
-+
-+ smp_found_config = 1;
-+#ifndef CONFIG_XEN
-+ printk(KERN_INFO "found SMP MP-table at %08lx\n",
-+ virt_to_phys(mpf));
-+ reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
-+ if (mpf->mpf_physptr) {
-+ /*
-+ * We cannot access the MPC table to compute
-+ * its size yet, as only a few megabytes from
-+ * the bottom are mapped now.
-+ * The PC-9800's MPC table is placed at the very
-+ * end of physical memory, so simply reserving
-+ * PAGE_SIZE from mpf->mpf_physptr yields BUG()
-+ * in reserve_bootmem.
-+ */
-+ unsigned long size = PAGE_SIZE;
-+ unsigned long end = max_low_pfn * PAGE_SIZE;
-+ if (mpf->mpf_physptr + size > end)
-+ size = end - mpf->mpf_physptr;
-+ reserve_bootmem(mpf->mpf_physptr, size);
-+ }
-+#else
-+ printk(KERN_INFO "found SMP MP-table at %08lx\n",
-+ ((unsigned long)bp - (unsigned long)isa_bus_to_virt(base)) + base);
-+#endif
-+
-+ mpf_found = mpf;
-+ return 1;
-+ }
-+ bp += 4;
-+ length -= 16;
-+ }
-+ return 0;
-+}
-+
-+void __init find_smp_config (void)
-+{
-+#ifndef CONFIG_XEN
-+ unsigned int address;
-+#endif
-+
-+ /*
-+ * FIXME: Linux assumes you have 640K of base ram..
-+ * this continues the error...
-+ *
-+ * 1) Scan the bottom 1K for a signature
-+ * 2) Scan the top 1K of base RAM
-+ * 3) Scan the 64K of bios
-+ */
-+ if (smp_scan_config(0x0,0x400) ||
-+ smp_scan_config(639*0x400,0x400) ||
-+ smp_scan_config(0xF0000,0x10000))
-+ return;
-+ /*
-+ * If it is an SMP machine we should know now, unless the
-+ * configuration is in an EISA/MCA bus machine with an
-+ * extended bios data area.
-+ *
-+ * there is a real-mode segmented pointer pointing to the
-+ * 4K EBDA area at 0x40E, calculate and scan it here.
-+ *
-+ * NOTE! There are Linux loaders that will corrupt the EBDA
-+ * area, and as such this kind of SMP config may be less
-+ * trustworthy, simply because the SMP table may have been
-+ * stomped on during early boot. These loaders are buggy and
-+ * should be fixed.
-+ *
-+ * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
-+ */
-+
-+#ifndef CONFIG_XEN
-+ address = get_bios_ebda();
-+ if (address)
-+ smp_scan_config(address, 0x400);
-+#endif
-+}
-+
-+int es7000_plat;
-+
-+/* --------------------------------------------------------------------------
-+ ACPI-based MP Configuration
-+ -------------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_ACPI
-+
-+#ifndef CONFIG_XEN
-+void __init mp_register_lapic_address(u64 address)
-+{
-+ mp_lapic_addr = (unsigned long) address;
-+
-+ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
-+
-+ if (boot_cpu_physical_apicid == -1U)
-+ boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
-+
-+ Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
-+}
-+#endif
-+
-+void __cpuinit mp_register_lapic (u8 id, u8 enabled)
-+{
-+ struct mpc_config_processor processor;
-+ int boot_cpu = 0;
-+
-+ if (MAX_APICS - id <= 0) {
-+ printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
-+ id, MAX_APICS);
-+ return;
-+ }
-+
-+ if (id == boot_cpu_physical_apicid)
-+ boot_cpu = 1;
-+
-+#ifndef CONFIG_XEN
-+ processor.mpc_type = MP_PROCESSOR;
-+ processor.mpc_apicid = id;
-+ processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
-+ processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
-+ processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
-+ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
-+ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
-+ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-+ processor.mpc_reserved[0] = 0;
-+ processor.mpc_reserved[1] = 0;
-+#endif
-+
-+ MP_processor_info(&processor);
-+}
-+
-+#ifdef CONFIG_X86_IO_APIC
-+
-+#define MP_ISA_BUS 0
-+#define MP_MAX_IOAPIC_PIN 127
-+
-+static struct mp_ioapic_routing {
-+ int apic_id;
-+ int gsi_base;
-+ int gsi_end;
-+ u32 pin_programmed[4];
-+} mp_ioapic_routing[MAX_IO_APICS];
-+
-+static int mp_find_ioapic (int gsi)
-+{
-+ int i = 0;
-+
-+ /* Find the IOAPIC that manages this GSI. */
-+ for (i = 0; i < nr_ioapics; i++) {
-+ if ((gsi >= mp_ioapic_routing[i].gsi_base)
-+ && (gsi <= mp_ioapic_routing[i].gsi_end))
-+ return i;
-+ }
-+
-+ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
-+
-+ return -1;
-+}
-+
-+void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
-+{
-+ int idx = 0;
-+ int tmpid;
-+
-+ if (nr_ioapics >= MAX_IO_APICS) {
-+ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
-+ "(found %d)\n", MAX_IO_APICS, nr_ioapics);
-+ panic("Recompile kernel with bigger MAX_IO_APICS!\n");
-+ }
-+ if (!address) {
-+ printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
-+ " found in MADT table, skipping!\n");
-+ return;
-+ }
-+
-+ idx = nr_ioapics++;
-+
-+ mp_ioapics[idx].mpc_type = MP_IOAPIC;
-+ mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
-+ mp_ioapics[idx].mpc_apicaddr = address;
-+
-+#ifndef CONFIG_XEN
-+ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-+ && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
-+ tmpid = io_apic_get_unique_id(idx, id);
-+ else
-+#endif
-+ tmpid = id;
-+ if (tmpid == -1) {
-+ nr_ioapics--;
-+ return;
-+ }
-+ mp_ioapics[idx].mpc_apicid = tmpid;
-+ mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
-+
-+ /*
-+ * Build basic GSI lookup table to facilitate gsi->io_apic lookups
-+ * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
-+ */
-+ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
-+ mp_ioapic_routing[idx].gsi_base = gsi_base;
-+ mp_ioapic_routing[idx].gsi_end = gsi_base +
-+ io_apic_get_redir_entries(idx);
-+
-+ printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
-+ "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
-+ mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
-+ mp_ioapic_routing[idx].gsi_base,
-+ mp_ioapic_routing[idx].gsi_end);
-+}
-+
-+void __init
-+mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
-+{
-+ struct mpc_config_intsrc intsrc;
-+ int ioapic = -1;
-+ int pin = -1;
-+
-+ /*
-+ * Convert 'gsi' to 'ioapic.pin'.
-+ */
-+ ioapic = mp_find_ioapic(gsi);
-+ if (ioapic < 0)
-+ return;
-+ pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
-+
-+ /*
-+ * TBD: This check is for faulty timer entries, where the override
-+ * erroneously sets the trigger to level, resulting in a HUGE
-+ * increase of timer interrupts!
-+ */
-+ if ((bus_irq == 0) && (trigger == 3))
-+ trigger = 1;
-+
-+ intsrc.mpc_type = MP_INTSRC;
-+ intsrc.mpc_irqtype = mp_INT;
-+ intsrc.mpc_irqflag = (trigger << 2) | polarity;
-+ intsrc.mpc_srcbus = MP_ISA_BUS;
-+ intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
-+ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
-+ intsrc.mpc_dstirq = pin; /* INTIN# */
-+
-+ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
-+ intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
-+ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
-+ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
-+
-+ mp_irqs[mp_irq_entries] = intsrc;
-+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+ panic("Max # of irq sources exceeded!\n");
-+}
-+
-+void __init mp_config_acpi_legacy_irqs (void)
-+{
-+ struct mpc_config_intsrc intsrc;
-+ int i = 0;
-+ int ioapic = -1;
-+
-+ /*
-+ * Fabricate the legacy ISA bus (bus #31).
-+ */
-+ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
-+ Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
-+
-+ /*
-+ * Older generations of ES7000 have no legacy identity mappings
-+ */
-+ if (es7000_plat == 1)
-+ return;
-+
-+ /*
-+ * Locate the IOAPIC that manages the ISA IRQs (0-15).
-+ */
-+ ioapic = mp_find_ioapic(0);
-+ if (ioapic < 0)
-+ return;
-+
-+ intsrc.mpc_type = MP_INTSRC;
-+ intsrc.mpc_irqflag = 0; /* Conforming */
-+ intsrc.mpc_srcbus = MP_ISA_BUS;
-+ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
-+
-+ /*
-+ * Use the default configuration for the IRQs 0-15, unless
-+ * overridden by (MADT) interrupt source override entries.
-+ */
-+ for (i = 0; i < 16; i++) {
-+ int idx;
-+
-+ for (idx = 0; idx < mp_irq_entries; idx++) {
-+ struct mpc_config_intsrc *irq = mp_irqs + idx;
-+
-+ /* Do we already have a mapping for this ISA IRQ? */
-+ if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
-+ break;
-+
-+ /* Do we already have a mapping for this IOAPIC pin */
-+ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
-+ (irq->mpc_dstirq == i))
-+ break;
-+ }
-+
-+ if (idx != mp_irq_entries) {
-+ printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
-+ continue; /* IRQ already used */
-+ }
-+
-+ intsrc.mpc_irqtype = mp_INT;
-+ intsrc.mpc_srcbusirq = i; /* Identity mapped */
-+ intsrc.mpc_dstirq = i;
-+
-+ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
-+ "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
-+ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
-+ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
-+ intsrc.mpc_dstirq);
-+
-+ mp_irqs[mp_irq_entries] = intsrc;
-+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+ panic("Max # of irq sources exceeded!\n");
-+ }
-+}
-+
-+#define MAX_GSI_NUM 4096
-+
-+int mp_register_gsi(u32 gsi, int triggering, int polarity)
-+{
-+ int ioapic = -1;
-+ int ioapic_pin = 0;
-+ int idx, bit = 0;
-+ static int pci_irq = 16;
-+ /*
-+ * Mapping between Global System Interrupts, which
-+ * represent all possible interrupts, and IRQs
-+ * assigned to actual devices.
-+ */
-+ static int gsi_to_irq[MAX_GSI_NUM];
-+
-+ /* Don't set up the ACPI SCI because it's already set up */
-+ if (acpi_gbl_FADT.sci_interrupt == gsi)
-+ return gsi;
-+
-+ ioapic = mp_find_ioapic(gsi);
-+ if (ioapic < 0) {
-+ printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
-+ return gsi;
-+ }
-+
-+ ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
-+
-+ if (ioapic_renumber_irq)
-+ gsi = ioapic_renumber_irq(ioapic, gsi);
-+
-+ /*
-+ * Avoid pin reprogramming. PRTs typically include entries
-+ * with redundant pin->gsi mappings (but unique PCI devices);
-+ * we only program the IOAPIC on the first.
-+ */
-+ bit = ioapic_pin % 32;
-+ idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
-+ if (idx > 3) {
-+ printk(KERN_ERR "Invalid reference to IOAPIC pin "
-+ "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
-+ ioapic_pin);
-+ return gsi;
-+ }
-+ if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
-+ Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
-+ mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
-+ return gsi_to_irq[gsi];
-+ }
-+
-+ mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
-+
-+ if (triggering == ACPI_LEVEL_SENSITIVE) {
-+ /*
-+ * For PCI devices assign IRQs in order, avoiding gaps
-+ * due to unused I/O APIC pins.
-+ */
-+ int irq = gsi;
-+ if (gsi < MAX_GSI_NUM) {
-+ /*
-+ * Retain the VIA chipset work-around (gsi > 15), but
-+ * avoid a problem where the 8254 timer (IRQ0) is set up
-+ * via an override (so it's not on pin 0 of the ioapic),
-+ * and at the same time, the pin 0 interrupt is a PCI
-+ * type. The gsi > 15 test could cause these two pins
-+ * to be shared as IRQ0, and they are not shareable.
-+ * So test for this condition, and if necessary, avoid
-+ * the pin collision.
-+ */
-+ if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
-+ gsi = pci_irq++;
-+ /*
-+ * Don't assign IRQ used by ACPI SCI
-+ */
-+ if (gsi == acpi_gbl_FADT.sci_interrupt)
-+ gsi = pci_irq++;
-+ gsi_to_irq[irq] = gsi;
-+ } else {
-+ printk(KERN_ERR "GSI %u is too high\n", gsi);
-+ return gsi;
-+ }
-+ }
-+
-+ io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-+ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
-+ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
-+ return gsi;
-+}
-+
-+#endif /* CONFIG_X86_IO_APIC */
-+#endif /* CONFIG_ACPI */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/nmi.c ubuntu-gutsy-xen/arch/i386/kernel/nmi.c
---- ubuntu-gutsy/arch/i386/kernel/nmi.c 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/nmi.c 2007-08-18 12:38:02.000000000 -0400
-@@ -30,7 +30,15 @@
-
- #include "mach_traps.h"
-
-+#ifdef CONFIG_SYSCTL
- int unknown_nmi_panic;
-+static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
-+#endif
-+
-+extern void die_nmi(struct pt_regs *, const char *msg);
-+
-+#ifndef CONFIG_XEN
-+
- int nmi_watchdog_enabled;
-
- static cpumask_t backtrace_mask = CPU_MASK_NONE;
-@@ -48,9 +56,6 @@
-
- static DEFINE_PER_CPU(short, wd_enabled);
-
--/* local prototypes */
--static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
--
- static int endflag __initdata = 0;
-
- #ifdef CONFIG_SMP
-@@ -315,8 +320,6 @@
- }
- EXPORT_SYMBOL(touch_nmi_watchdog);
-
--extern void die_nmi(struct pt_regs *, const char *msg);
--
- __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
- {
-
-@@ -387,6 +390,8 @@
- return rc;
- }
-
-+#endif /* CONFIG_XEN */
-+
- int do_nmi_callback(struct pt_regs * regs, int cpu)
- {
- #ifdef CONFIG_SYSCTL
-@@ -408,6 +413,7 @@
- return 0;
- }
-
-+#ifndef CONFIG_XEN
- /*
- * proc handler for /proc/sys/kernel/nmi
- */
-@@ -446,9 +452,11 @@
- }
- return 0;
- }
-+#endif
-
- #endif
-
-+#ifndef CONFIG_XEN
- void __trigger_all_cpu_backtrace(void)
- {
- int i;
-@@ -464,3 +472,4 @@
-
- EXPORT_SYMBOL(nmi_active);
- EXPORT_SYMBOL(nmi_watchdog);
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/pci-dma-xen.c ubuntu-gutsy-xen/arch/i386/kernel/pci-dma-xen.c
---- ubuntu-gutsy/arch/i386/kernel/pci-dma-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/pci-dma-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,405 @@
-+/*
-+ * Dynamic DMA mapping support.
-+ *
-+ * On i386 there is no hardware dynamic DMA address translation,
-+ * so consistent alloc/free are merely page allocation/freeing.
-+ * The rest of the dynamic DMA mapping interface is implemented
-+ * in asm/pci.h.
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/mm.h>
-+#include <linux/string.h>
-+#include <linux/pci.h>
-+#include <linux/module.h>
-+#include <linux/version.h>
-+#include <linux/pci.h>
-+#include <asm/io.h>
-+#include <xen/balloon.h>
-+#include <xen/gnttab.h>
-+#include <asm/swiotlb.h>
-+#include <asm/tlbflush.h>
-+#include <asm-i386/mach-xen/asm/swiotlb.h>
-+#include <asm-i386/mach-xen/asm/gnttab_dma.h>
-+#include <asm/bug.h>
-+
-+#ifdef __x86_64__
-+#include <asm/proto.h>
-+
-+int iommu_merge __read_mostly = 0;
-+EXPORT_SYMBOL(iommu_merge);
-+
-+dma_addr_t bad_dma_address __read_mostly;
-+EXPORT_SYMBOL(bad_dma_address);
-+
-+/* This tells the BIO block layer to assume merging. Default to off
-+ because we cannot guarantee merging later. */
-+int iommu_bio_merge __read_mostly = 0;
-+EXPORT_SYMBOL(iommu_bio_merge);
-+
-+int force_iommu __read_mostly= 0;
-+
-+__init int iommu_setup(char *p)
-+{
-+ return 1;
-+}
-+
-+void __init pci_iommu_alloc(void)
-+{
-+#ifdef CONFIG_SWIOTLB
-+ pci_swiotlb_init();
-+#endif
-+}
-+
-+static int __init pci_iommu_init(void)
-+{
-+ no_iommu_init();
-+ return 0;
-+}
-+
-+/* Must execute after PCI subsystem */
-+fs_initcall(pci_iommu_init);
-+#endif
-+
-+struct dma_coherent_mem {
-+ void *virt_base;
-+ u32 device_base;
-+ int size;
-+ int flags;
-+ unsigned long *bitmap;
-+};
-+
-+#define IOMMU_BUG_ON(test) \
-+do { \
-+ if (unlikely(test)) { \
-+ printk(KERN_ALERT "Fatal DMA error! " \
-+ "Please use 'swiotlb=force'\n"); \
-+ BUG(); \
-+ } \
-+} while (0)
-+
-+int
-+dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-+ enum dma_data_direction direction)
-+{
-+ int i, rc;
-+
-+ BUG_ON(!valid_dma_direction(direction));
-+ WARN_ON(nents == 0 || sg[0].length == 0);
-+
-+ if (swiotlb) {
-+ rc = swiotlb_map_sg(hwdev, sg, nents, direction);
-+ } else {
-+ for (i = 0; i < nents; i++ ) {
-+ sg[i].dma_address =
-+ gnttab_dma_map_page(sg[i].page) + sg[i].offset;
-+ sg[i].dma_length = sg[i].length;
-+ BUG_ON(!sg[i].page);
-+ IOMMU_BUG_ON(address_needs_mapping(
-+ hwdev, sg[i].dma_address));
-+ IOMMU_BUG_ON(range_straddles_page_boundary(
-+ page_to_pseudophys(sg[i].page) + sg[i].offset,
-+ sg[i].length));
-+ }
-+ rc = nents;
-+ }
-+
-+ flush_write_buffers();
-+ return rc;
-+}
-+EXPORT_SYMBOL(dma_map_sg);
-+
-+void
-+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-+ enum dma_data_direction direction)
-+{
-+ int i;
-+
-+ BUG_ON(!valid_dma_direction(direction));
-+ if (swiotlb)
-+ swiotlb_unmap_sg(hwdev, sg, nents, direction);
-+ else {
-+ for (i = 0; i < nents; i++ )
-+ gnttab_dma_unmap_page(sg[i].dma_address);
-+ }
-+}
-+EXPORT_SYMBOL(dma_unmap_sg);
-+
-+#ifdef CONFIG_HIGHMEM
-+dma_addr_t
-+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-+ size_t size, enum dma_data_direction direction)
-+{
-+ dma_addr_t dma_addr;
-+
-+ BUG_ON(!valid_dma_direction(direction));
-+ if (swiotlb) {
-+ dma_addr = swiotlb_map_page(
-+ dev, page, offset, size, direction);
-+ } else {
-+ dma_addr = gnttab_dma_map_page(page) + offset;
-+ IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
-+ }
-+
-+ return dma_addr;
-+}
-+EXPORT_SYMBOL(dma_map_page);
-+
-+void
-+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-+ enum dma_data_direction direction)
-+{
-+ BUG_ON(!valid_dma_direction(direction));
-+ if (swiotlb)
-+ swiotlb_unmap_page(dev, dma_address, size, direction);
-+ else
-+ gnttab_dma_unmap_page(dma_address);
-+}
-+EXPORT_SYMBOL(dma_unmap_page);
-+#endif /* CONFIG_HIGHMEM */
-+
-+int
-+dma_mapping_error(dma_addr_t dma_addr)
-+{
-+ if (swiotlb)
-+ return swiotlb_dma_mapping_error(dma_addr);
-+ return 0;
-+}
-+EXPORT_SYMBOL(dma_mapping_error);
-+
-+int
-+dma_supported(struct device *dev, u64 mask)
-+{
-+ if (swiotlb)
-+ return swiotlb_dma_supported(dev, mask);
-+ /*
-+ * By default we'll BUG when an infeasible DMA is requested, and
-+ * request swiotlb=force (see IOMMU_BUG_ON).
-+ */
-+ return 1;
-+}
-+EXPORT_SYMBOL(dma_supported);
-+
-+void *dma_alloc_coherent(struct device *dev, size_t size,
-+ dma_addr_t *dma_handle, gfp_t gfp)
-+{
-+ void *ret;
-+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-+ unsigned int order = get_order(size);
-+ unsigned long vstart;
-+ u64 mask;
-+
-+ /* ignore region specifiers */
-+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-+
-+ if (mem) {
-+ int page = bitmap_find_free_region(mem->bitmap, mem->size,
-+ order);
-+ if (page >= 0) {
-+ *dma_handle = mem->device_base + (page << PAGE_SHIFT);
-+ ret = mem->virt_base + (page << PAGE_SHIFT);
-+ memset(ret, 0, size);
-+ return ret;
-+ }
-+ if (mem->flags & DMA_MEMORY_EXCLUSIVE)
-+ return NULL;
-+ }
-+
-+ if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-+ gfp |= GFP_DMA;
-+
-+ vstart = __get_free_pages(gfp, order);
-+ ret = (void *)vstart;
-+
-+ if (dev != NULL && dev->coherent_dma_mask)
-+ mask = dev->coherent_dma_mask;
-+ else
-+ mask = 0xffffffff;
-+
-+ if (ret != NULL) {
-+ if (xen_create_contiguous_region(vstart, order,
-+ fls64(mask)) != 0) {
-+ free_pages(vstart, order);
-+ return NULL;
-+ }
-+ memset(ret, 0, size);
-+ *dma_handle = virt_to_bus(ret);
-+ }
-+ return ret;
-+}
-+EXPORT_SYMBOL(dma_alloc_coherent);
-+
-+void dma_free_coherent(struct device *dev, size_t size,
-+ void *vaddr, dma_addr_t dma_handle)
-+{
-+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-+ int order = get_order(size);
-+
-+ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-+ int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-+
-+ bitmap_release_region(mem->bitmap, page, order);
-+ } else {
-+ xen_destroy_contiguous_region((unsigned long)vaddr, order);
-+ free_pages((unsigned long)vaddr, order);
-+ }
-+}
-+EXPORT_SYMBOL(dma_free_coherent);
-+
-+#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-+ dma_addr_t device_addr, size_t size, int flags)
-+{
-+ void __iomem *mem_base = NULL;
-+ int pages = size >> PAGE_SHIFT;
-+ int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
-+
-+ if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
-+ goto out;
-+ if (!size)
-+ goto out;
-+ if (dev->dma_mem)
-+ goto out;
-+
-+ /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
-+
-+ mem_base = ioremap(bus_addr, size);
-+ if (!mem_base)
-+ goto out;
-+
-+ dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-+ if (!dev->dma_mem)
-+ goto out;
-+ dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-+ if (!dev->dma_mem->bitmap)
-+ goto free1_out;
-+
-+ dev->dma_mem->virt_base = mem_base;
-+ dev->dma_mem->device_base = device_addr;
-+ dev->dma_mem->size = pages;
-+ dev->dma_mem->flags = flags;
-+
-+ if (flags & DMA_MEMORY_MAP)
-+ return DMA_MEMORY_MAP;
-+
-+ return DMA_MEMORY_IO;
-+
-+ free1_out:
-+ kfree(dev->dma_mem);
-+ out:
-+ if (mem_base)
-+ iounmap(mem_base);
-+ return 0;
-+}
-+EXPORT_SYMBOL(dma_declare_coherent_memory);
-+
-+void dma_release_declared_memory(struct device *dev)
-+{
-+ struct dma_coherent_mem *mem = dev->dma_mem;
-+
-+ if(!mem)
-+ return;
-+ dev->dma_mem = NULL;
-+ iounmap(mem->virt_base);
-+ kfree(mem->bitmap);
-+ kfree(mem);
-+}
-+EXPORT_SYMBOL(dma_release_declared_memory);
-+
-+void *dma_mark_declared_memory_occupied(struct device *dev,
-+ dma_addr_t device_addr, size_t size)
-+{
-+ struct dma_coherent_mem *mem = dev->dma_mem;
-+ int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-+ int pos, err;
-+
-+ if (!mem)
-+ return ERR_PTR(-EINVAL);
-+
-+ pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
-+ err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
-+ if (err != 0)
-+ return ERR_PTR(err);
-+ return mem->virt_base + (pos << PAGE_SHIFT);
-+}
-+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-+#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
-+
-+#if defined(CONFIG_PCI) && !defined(CONFIG_XEN)
-+/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
-+
-+int forbid_dac;
-+EXPORT_SYMBOL(forbid_dac);
-+
-+static __devinit void via_no_dac(struct pci_dev *dev)
-+{
-+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
-+ printk(KERN_INFO "PCI: VIA PCI bridge detected. Disabling DAC.\n");
-+ forbid_dac = 1;
-+ }
-+}
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
-+
-+static int check_iommu(char *s)
-+{
-+ if (!strcmp(s, "usedac")) {
-+ forbid_dac = -1;
-+ return 1;
-+ }
-+ return 0;
-+}
-+__setup("iommu=", check_iommu);
-+#endif
-+
-+dma_addr_t
-+dma_map_single(struct device *dev, void *ptr, size_t size,
-+ enum dma_data_direction direction)
-+{
-+ dma_addr_t dma;
-+
-+ BUG_ON(!valid_dma_direction(direction));
-+ WARN_ON(size == 0);
-+
-+ if (swiotlb) {
-+ dma = swiotlb_map_single(dev, ptr, size, direction);
-+ } else {
-+ dma = gnttab_dma_map_page(virt_to_page(ptr)) +
-+ offset_in_page(ptr);
-+ IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
-+ IOMMU_BUG_ON(address_needs_mapping(dev, dma));
-+ }
-+
-+ flush_write_buffers();
-+ return dma;
-+}
-+EXPORT_SYMBOL(dma_map_single);
-+
-+void
-+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-+ enum dma_data_direction direction)
-+{
-+ BUG_ON(!valid_dma_direction(direction));
-+ if (swiotlb)
-+ swiotlb_unmap_single(dev, dma_addr, size, direction);
-+ else
-+ gnttab_dma_unmap_page(dma_addr);
-+}
-+EXPORT_SYMBOL(dma_unmap_single);
-+
-+void
-+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-+ enum dma_data_direction direction)
-+{
-+ if (swiotlb)
-+ swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
-+}
-+EXPORT_SYMBOL(dma_sync_single_for_cpu);
-+
-+void
-+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-+ enum dma_data_direction direction)
-+{
-+ if (swiotlb)
-+ swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
-+}
-+EXPORT_SYMBOL(dma_sync_single_for_device);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/pcspeaker.c ubuntu-gutsy-xen/arch/i386/kernel/pcspeaker.c
---- ubuntu-gutsy/arch/i386/kernel/pcspeaker.c 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/pcspeaker.c 2007-08-18 12:38:02.000000000 -0400
-@@ -7,6 +7,11 @@
- struct platform_device *pd;
- int ret;
-
-+#ifdef CONFIG_XEN
-+ if (!is_initial_xendomain())
-+ return 0;
-+#endif
-+
- pd = platform_device_alloc("pcspkr", -1);
- if (!pd)
- return -ENOMEM;
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/process-xen.c ubuntu-gutsy-xen/arch/i386/kernel/process-xen.c
---- ubuntu-gutsy/arch/i386/kernel/process-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/process-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,869 @@
-+/*
-+ * linux/arch/i386/kernel/process.c
-+ *
-+ * Copyright (C) 1995 Linus Torvalds
-+ *
-+ * Pentium III FXSR, SSE support
-+ * Gareth Hughes <gareth@valinux.com>, May 2000
-+ */
-+
-+/*
-+ * This file handles the architecture-dependent parts of process handling..
-+ */
-+
-+#include <stdarg.h>
-+
-+#include <linux/cpu.h>
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/elfcore.h>
-+#include <linux/smp.h>
-+#include <linux/stddef.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/user.h>
-+#include <linux/a.out.h>
-+#include <linux/interrupt.h>
-+#include <linux/utsname.h>
-+#include <linux/delay.h>
-+#include <linux/reboot.h>
-+#include <linux/init.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/module.h>
-+#include <linux/kallsyms.h>
-+#include <linux/ptrace.h>
-+#include <linux/random.h>
-+#include <linux/personality.h>
-+#include <linux/tick.h>
-+#include <linux/percpu.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/system.h>
-+#include <asm/io.h>
-+#include <asm/ldt.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/desc.h>
-+#include <asm/vm86.h>
-+#ifdef CONFIG_MATH_EMULATION
-+#include <asm/math_emu.h>
-+#endif
-+
-+#include <xen/interface/physdev.h>
-+#include <xen/interface/vcpu.h>
-+#include <xen/cpu_hotplug.h>
-+
-+#include <linux/err.h>
-+
-+#include <asm/tlbflush.h>
-+#include <asm/cpu.h>
-+
-+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
-+
-+static int hlt_counter;
-+
-+unsigned long boot_option_idle_override = 0;
-+EXPORT_SYMBOL(boot_option_idle_override);
-+
-+DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
-+EXPORT_PER_CPU_SYMBOL(current_task);
-+
-+DEFINE_PER_CPU(int, cpu_number);
-+EXPORT_PER_CPU_SYMBOL(cpu_number);
-+
-+/*
-+ * Return saved PC of a blocked thread.
-+ */
-+unsigned long thread_saved_pc(struct task_struct *tsk)
-+{
-+ return ((unsigned long *)tsk->thread.esp)[3];
-+}
-+
-+/*
-+ * Powermanagement idle function, if any..
-+ */
-+void (*pm_idle)(void);
-+EXPORT_SYMBOL(pm_idle);
-+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
-+
-+void disable_hlt(void)
-+{
-+ hlt_counter++;
-+}
-+
-+EXPORT_SYMBOL(disable_hlt);
-+
-+void enable_hlt(void)
-+{
-+ hlt_counter--;
-+}
-+
-+EXPORT_SYMBOL(enable_hlt);
-+
-+/*
-+ * On SMP it's slightly faster (but much more power-consuming!)
-+ * to poll the ->work.need_resched flag instead of waiting for the
-+ * cross-CPU IPI to arrive. Use this option with caution.
-+ */
-+static void poll_idle (void)
-+{
-+ cpu_relax();
-+}
-+
-+static void xen_idle(void)
-+{
-+ current_thread_info()->status &= ~TS_POLLING;
-+ /*
-+ * TS_POLLING-cleared state must be visible before we
-+ * test NEED_RESCHED:
-+ */
-+ smp_mb();
-+
-+ local_irq_disable();
-+ if (!need_resched())
-+ safe_halt(); /* enables interrupts racelessly */
-+ else
-+ local_irq_enable();
-+ current_thread_info()->status |= TS_POLLING;
-+}
-+#ifdef CONFIG_APM_MODULE
-+EXPORT_SYMBOL(default_idle);
-+#endif
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+extern cpumask_t cpu_initialized;
-+static inline void play_dead(void)
-+{
-+ idle_task_exit();
-+ local_irq_disable();
-+ cpu_clear(smp_processor_id(), cpu_initialized);
-+ preempt_enable_no_resched();
-+ HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
-+ cpu_bringup();
-+}
-+#else
-+static inline void play_dead(void)
-+{
-+ BUG();
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+/*
-+ * The idle thread. There's no useful work to be
-+ * done, so just try to conserve power and have a
-+ * low exit latency (ie sit in a loop waiting for
-+ * somebody to say that they'd like to reschedule)
-+ */
-+void cpu_idle(void)
-+{
-+ int cpu = smp_processor_id();
-+
-+ current_thread_info()->status |= TS_POLLING;
-+
-+ /* endless idle loop with no priority at all */
-+ while (1) {
-+ tick_nohz_stop_sched_tick();
-+ while (!need_resched()) {
-+ void (*idle)(void);
-+
-+ if (__get_cpu_var(cpu_idle_state))
-+ __get_cpu_var(cpu_idle_state) = 0;
-+
-+ check_pgt_cache();
-+ rmb();
-+ idle = xen_idle; /* no alternatives */
-+
-+ if (cpu_is_offline(cpu))
-+ play_dead();
-+
-+ __get_cpu_var(irq_stat).idle_timestamp = jiffies;
-+ idle();
-+ }
-+ tick_nohz_restart_sched_tick();
-+ preempt_enable_no_resched();
-+ schedule();
-+ preempt_disable();
-+ }
-+}
-+
-+void cpu_idle_wait(void)
-+{
-+ unsigned int cpu, this_cpu = get_cpu();
-+ cpumask_t map, tmp = current->cpus_allowed;
-+
-+ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-+ put_cpu();
-+
-+ cpus_clear(map);
-+ for_each_online_cpu(cpu) {
-+ per_cpu(cpu_idle_state, cpu) = 1;
-+ cpu_set(cpu, map);
-+ }
-+
-+ __get_cpu_var(cpu_idle_state) = 0;
-+
-+ wmb();
-+ do {
-+ ssleep(1);
-+ for_each_online_cpu(cpu) {
-+ if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
-+ cpu_clear(cpu, map);
-+ }
-+ cpus_and(map, map, cpu_online_map);
-+ } while (!cpus_empty(map));
-+
-+ set_cpus_allowed(current, tmp);
-+}
-+EXPORT_SYMBOL_GPL(cpu_idle_wait);
-+
-+void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
-+{
-+}
-+
-+static int __init idle_setup(char *str)
-+{
-+ if (!strcmp(str, "poll")) {
-+ printk("using polling idle threads.\n");
-+ pm_idle = poll_idle;
-+ }
-+ else
-+ return -1;
-+
-+ boot_option_idle_override = 1;
-+ return 0;
-+}
-+early_param("idle", idle_setup);
-+
-+void show_regs(struct pt_regs * regs)
-+{
-+ unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
-+
-+ printk("\n");
-+ printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
-+ printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
-+ print_symbol("EIP is at %s\n", regs->eip);
-+
-+ if (user_mode_vm(regs))
-+ printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
-+ printk(" EFLAGS: %08lx %s (%s %.*s)\n",
-+ regs->eflags, print_tainted(), init_utsname()->release,
-+ (int)strcspn(init_utsname()->version, " "),
-+ init_utsname()->version);
-+ printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
-+ regs->eax,regs->ebx,regs->ecx,regs->edx);
-+ printk("ESI: %08lx EDI: %08lx EBP: %08lx",
-+ regs->esi, regs->edi, regs->ebp);
-+ printk(" DS: %04x ES: %04x FS: %04x\n",
-+ 0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xfs);
-+
-+ cr0 = read_cr0();
-+ cr2 = read_cr2();
-+ cr3 = read_cr3();
-+ cr4 = read_cr4_safe();
-+ printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
-+ show_trace(NULL, regs, &regs->esp);
-+}
-+
-+/*
-+ * This gets run with %ebx containing the
-+ * function to call, and %edx containing
-+ * the "args".
-+ */
-+extern void kernel_thread_helper(void);
-+
-+/*
-+ * Create a kernel thread
-+ */
-+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-+{
-+ struct pt_regs regs;
-+
-+ memset(&regs, 0, sizeof(regs));
-+
-+ regs.ebx = (unsigned long) fn;
-+ regs.edx = (unsigned long) arg;
-+
-+ regs.xds = __USER_DS;
-+ regs.xes = __USER_DS;
-+ regs.xfs = __KERNEL_PERCPU;
-+ regs.orig_eax = -1;
-+ regs.eip = (unsigned long) kernel_thread_helper;
-+ regs.xcs = __KERNEL_CS | get_kernel_rpl();
-+ regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
-+
-+ /* Ok, create the new process.. */
-+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-+}
-+EXPORT_SYMBOL(kernel_thread);
-+
-+/*
-+ * Free current thread data structures etc..
-+ */
-+void exit_thread(void)
-+{
-+ /* The process may have allocated an io port bitmap... nuke it. */
-+ if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
-+ struct task_struct *tsk = current;
-+ struct thread_struct *t = &tsk->thread;
-+ struct physdev_set_iobitmap set_iobitmap;
-+ memset(&set_iobitmap, 0, sizeof(set_iobitmap));
-+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
-+ kfree(t->io_bitmap_ptr);
-+ t->io_bitmap_ptr = NULL;
-+ clear_thread_flag(TIF_IO_BITMAP);
-+ }
-+}
-+
-+void flush_thread(void)
-+{
-+ struct task_struct *tsk = current;
-+
-+ memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
-+ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
-+ clear_tsk_thread_flag(tsk, TIF_DEBUG);
-+ /*
-+ * Forget coprocessor state..
-+ */
-+ clear_fpu(tsk);
-+ clear_used_math();
-+}
-+
-+void release_thread(struct task_struct *dead_task)
-+{
-+ BUG_ON(dead_task->mm);
-+ release_vm86_irqs(dead_task);
-+}
-+
-+/*
-+ * This gets called before we allocate a new thread and copy
-+ * the current task into it.
-+ */
-+void prepare_to_copy(struct task_struct *tsk)
-+{
-+ unlazy_fpu(tsk);
-+}
-+
-+int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
-+ unsigned long unused,
-+ struct task_struct * p, struct pt_regs * regs)
-+{
-+ struct pt_regs * childregs;
-+ struct task_struct *tsk;
-+ int err;
-+
-+ childregs = task_pt_regs(p);
-+ *childregs = *regs;
-+ childregs->eax = 0;
-+ childregs->esp = esp;
-+
-+ p->thread.esp = (unsigned long) childregs;
-+ p->thread.esp0 = (unsigned long) (childregs+1);
-+
-+ p->thread.eip = (unsigned long) ret_from_fork;
-+
-+ savesegment(gs,p->thread.gs);
-+
-+ tsk = current;
-+ if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
-+ p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
-+ IO_BITMAP_BYTES, GFP_KERNEL);
-+ if (!p->thread.io_bitmap_ptr) {
-+ p->thread.io_bitmap_max = 0;
-+ return -ENOMEM;
-+ }
-+ set_tsk_thread_flag(p, TIF_IO_BITMAP);
-+ }
-+
-+ /*
-+ * Set a new TLS for the child thread?
-+ */
-+ if (clone_flags & CLONE_SETTLS) {
-+ struct desc_struct *desc;
-+ struct user_desc info;
-+ int idx;
-+
-+ err = -EFAULT;
-+ if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
-+ goto out;
-+ err = -EINVAL;
-+ if (LDT_empty(&info))
-+ goto out;
-+
-+ idx = info.entry_number;
-+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-+ goto out;
-+
-+ desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-+ desc->a = LDT_entry_a(&info);
-+ desc->b = LDT_entry_b(&info);
-+ }
-+
-+ p->thread.iopl = current->thread.iopl;
-+
-+ err = 0;
-+ out:
-+ if (err && p->thread.io_bitmap_ptr) {
-+ kfree(p->thread.io_bitmap_ptr);
-+ p->thread.io_bitmap_max = 0;
-+ }
-+ return err;
-+}
-+
-+/*
-+ * fill in the user structure for a core dump..
-+ */
-+void dump_thread(struct pt_regs * regs, struct user * dump)
-+{
-+ int i;
-+
-+/* changed the size calculations - should hopefully work better. lbt */
-+ dump->magic = CMAGIC;
-+ dump->start_code = 0;
-+ dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
-+ dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
-+ dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
-+ dump->u_dsize -= dump->u_tsize;
-+ dump->u_ssize = 0;
-+ for (i = 0; i < 8; i++)
-+ dump->u_debugreg[i] = current->thread.debugreg[i];
-+
-+ if (dump->start_stack < TASK_SIZE)
-+ dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
-+
-+ dump->regs.ebx = regs->ebx;
-+ dump->regs.ecx = regs->ecx;
-+ dump->regs.edx = regs->edx;
-+ dump->regs.esi = regs->esi;
-+ dump->regs.edi = regs->edi;
-+ dump->regs.ebp = regs->ebp;
-+ dump->regs.eax = regs->eax;
-+ dump->regs.ds = regs->xds;
-+ dump->regs.es = regs->xes;
-+ dump->regs.fs = regs->xfs;
-+ savesegment(gs,dump->regs.gs);
-+ dump->regs.orig_eax = regs->orig_eax;
-+ dump->regs.eip = regs->eip;
-+ dump->regs.cs = regs->xcs;
-+ dump->regs.eflags = regs->eflags;
-+ dump->regs.esp = regs->esp;
-+ dump->regs.ss = regs->xss;
-+
-+ dump->u_fpvalid = dump_fpu (regs, &dump->i387);
-+}
-+EXPORT_SYMBOL(dump_thread);
-+
-+/*
-+ * Capture the user space registers if the task is not running (in user space)
-+ */
-+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-+{
-+ struct pt_regs ptregs = *task_pt_regs(tsk);
-+ ptregs.xcs &= 0xffff;
-+ ptregs.xds &= 0xffff;
-+ ptregs.xes &= 0xffff;
-+ ptregs.xss &= 0xffff;
-+
-+ elf_core_copy_regs(regs, &ptregs);
-+
-+ return 1;
-+}
-+
-+static noinline void __switch_to_xtra(struct task_struct *next_p)
-+{
-+ struct thread_struct *next;
-+
-+ next = &next_p->thread;
-+
-+ if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
-+ set_debugreg(next->debugreg[0], 0);
-+ set_debugreg(next->debugreg[1], 1);
-+ set_debugreg(next->debugreg[2], 2);
-+ set_debugreg(next->debugreg[3], 3);
-+ /* no 4 and 5 */
-+ set_debugreg(next->debugreg[6], 6);
-+ set_debugreg(next->debugreg[7], 7);
-+ }
-+}
-+
-+/*
-+ * This function selects if the context switch from prev to next
-+ * has to tweak the TSC disable bit in the cr4.
-+ */
-+static inline void disable_tsc(struct task_struct *prev_p,
-+ struct task_struct *next_p)
-+{
-+ struct thread_info *prev, *next;
-+
-+ /*
-+ * gcc should eliminate the ->thread_info dereference if
-+ * has_secure_computing returns 0 at compile time (SECCOMP=n).
-+ */
-+ prev = task_thread_info(prev_p);
-+ next = task_thread_info(next_p);
-+
-+ if (has_secure_computing(prev) || has_secure_computing(next)) {
-+ /* slow path here */
-+ if (has_secure_computing(prev) &&
-+ !has_secure_computing(next)) {
-+ write_cr4(read_cr4() & ~X86_CR4_TSD);
-+ } else if (!has_secure_computing(prev) &&
-+ has_secure_computing(next))
-+ write_cr4(read_cr4() | X86_CR4_TSD);
-+ }
-+}
-+
-+/*
-+ * switch_to(x,y) should switch tasks from x to y.
-+ *
-+ * We fsave/fwait so that an exception goes off at the right time
-+ * (as a call from the fsave or fwait in effect) rather than to
-+ * the wrong process. Lazy FP saving no longer makes any sense
-+ * with modern CPUs, and this simplifies a lot of things (SMP
-+ * and UP become the same).
-+ *
-+ * NOTE! We used to use the x86 hardware context switching. The
-+ * reason for not using it any more becomes apparent when you
-+ * try to recover gracefully from saved state that is no longer
-+ * valid (stale segment register values in particular). With the
-+ * hardware task-switch, there is no way to fix up bad state in
-+ * a reasonable manner.
-+ *
-+ * The fact that Intel documents the hardware task-switching to
-+ * be slow is a fairly red herring - this code is not noticeably
-+ * faster. However, there _is_ some room for improvement here,
-+ * so the performance issues may eventually be a valid point.
-+ * More important, however, is the fact that this allows us much
-+ * more flexibility.
-+ *
-+ * The return value (in %eax) will be the "prev" task after
-+ * the task-switch, and shows up in ret_from_fork in entry.S,
-+ * for example.
-+ */
-+struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+ struct thread_struct *prev = &prev_p->thread,
-+ *next = &next_p->thread;
-+ int cpu = smp_processor_id();
-+#ifndef CONFIG_X86_NO_TSS
-+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
-+#endif
-+ struct physdev_set_iopl iopl_op;
-+ struct physdev_set_iobitmap iobmp_op;
-+ multicall_entry_t _mcl[8], *mcl = _mcl;
-+
-+ /* XEN NOTE: FS/GS saved in switch_mm(), not here. */
-+
-+ /*
-+ * This is basically '__unlazy_fpu', except that we queue a
-+ * multicall to indicate FPU task switch, rather than
-+ * synchronously trapping to Xen.
-+ */
-+ if (task_thread_info(prev_p)->status & TS_USEDFPU) {
-+ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
-+ mcl->op = __HYPERVISOR_fpu_taskswitch;
-+ mcl->args[0] = 1;
-+ mcl++;
-+ }
-+#if 0 /* lazy fpu sanity check */
-+ else BUG_ON(!(read_cr0() & 8));
-+#endif
-+
-+ /*
-+ * Reload esp0.
-+ * This is load_esp0(tss, next) with a multicall.
-+ */
-+ mcl->op = __HYPERVISOR_stack_switch;
-+ mcl->args[0] = __KERNEL_DS;
-+ mcl->args[1] = next->esp0;
-+ mcl++;
-+
-+ /*
-+ * Load the per-thread Thread-Local Storage descriptor.
-+ * This is load_TLS(next, cpu) with multicalls.
-+ */
-+#define C(i) do { \
-+ if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
-+ next->tls_array[i].b != prev->tls_array[i].b)) { \
-+ mcl->op = __HYPERVISOR_update_descriptor; \
-+ *(u64 *)&mcl->args[0] = virt_to_machine( \
-+ &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
-+ *(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i]; \
-+ mcl++; \
-+ } \
-+} while (0)
-+ C(0); C(1); C(2);
-+#undef C
-+
-+ if (unlikely(prev->iopl != next->iopl)) {
-+ iopl_op.iopl = (next->iopl == 0) ? 1 : (next->iopl >> 12) & 3;
-+ mcl->op = __HYPERVISOR_physdev_op;
-+ mcl->args[0] = PHYSDEVOP_set_iopl;
-+ mcl->args[1] = (unsigned long)&iopl_op;
-+ mcl++;
-+ }
-+
-+ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
-+ set_xen_guest_handle(iobmp_op.bitmap,
-+ (char *)next->io_bitmap_ptr);
-+ iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
-+ mcl->op = __HYPERVISOR_physdev_op;
-+ mcl->args[0] = PHYSDEVOP_set_iobitmap;
-+ mcl->args[1] = (unsigned long)&iobmp_op;
-+ mcl++;
-+ }
-+
-+ (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
-+
-+ /* we're going to use this soon, after a few expensive things */
-+ if (next_p->fpu_counter > 5)
-+ prefetch(&next->i387.fxsave);
-+
-+ /*
-+ * Now maybe handle debug registers
-+ */
-+ if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
-+ __switch_to_xtra(next_p);
-+
-+ disable_tsc(prev_p, next_p);
-+
-+ /*
-+ * Leave lazy mode, flushing any hypercalls made here.
-+ * This must be done before restoring TLS segments so
-+ * the GDT and LDT are properly updated, and must be
-+ * done before math_state_restore, so the TS bit is up
-+ * to date.
-+ */
-+ arch_leave_lazy_cpu_mode();
-+
-+ /* If the task has used fpu the last 5 timeslices, just do a full
-+ * restore of the math state immediately to avoid the trap; the
-+ * chances of needing FPU soon are obviously high now
-+ */
-+ if (next_p->fpu_counter > 5)
-+ math_state_restore();
-+
-+ /*
-+ * Restore %gs if needed (which is common)
-+ */
-+ if (prev->gs | next->gs)
-+ loadsegment(gs, next->gs);
-+
-+ x86_write_percpu(current_task, next_p);
-+
-+ return prev_p;
-+}
-+
-+asmlinkage int sys_fork(struct pt_regs regs)
-+{
-+ return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
-+}
-+
-+asmlinkage int sys_clone(struct pt_regs regs)
-+{
-+ unsigned long clone_flags;
-+ unsigned long newsp;
-+ int __user *parent_tidptr, *child_tidptr;
-+
-+ clone_flags = regs.ebx;
-+ newsp = regs.ecx;
-+ parent_tidptr = (int __user *)regs.edx;
-+ child_tidptr = (int __user *)regs.edi;
-+ if (!newsp)
-+ newsp = regs.esp;
-+ return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
-+}
-+
-+/*
-+ * This is trivial, and on the face of it looks like it
-+ * could equally well be done in user mode.
-+ *
-+ * Not so, for quite unobvious reasons - register pressure.
-+ * In user mode vfork() cannot have a stack frame, and if
-+ * done by calling the "clone()" system call directly, you
-+ * do not have enough call-clobbered registers to hold all
-+ * the information you need.
-+ */
-+asmlinkage int sys_vfork(struct pt_regs regs)
-+{
-+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
-+}
-+
-+/*
-+ * sys_execve() executes a new program.
-+ */
-+asmlinkage int sys_execve(struct pt_regs regs)
-+{
-+ int error;
-+ char * filename;
-+
-+ filename = getname((char __user *) regs.ebx);
-+ error = PTR_ERR(filename);
-+ if (IS_ERR(filename))
-+ goto out;
-+ error = do_execve(filename,
-+ (char __user * __user *) regs.ecx,
-+ (char __user * __user *) regs.edx,
-+ &regs);
-+ if (error == 0) {
-+ task_lock(current);
-+ current->ptrace &= ~PT_DTRACE;
-+ task_unlock(current);
-+ /* Make sure we don't return using sysenter.. */
-+ set_thread_flag(TIF_IRET);
-+ }
-+ putname(filename);
-+out:
-+ return error;
-+}
-+
-+#define top_esp (THREAD_SIZE - sizeof(unsigned long))
-+#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
-+
-+unsigned long get_wchan(struct task_struct *p)
-+{
-+ unsigned long ebp, esp, eip;
-+ unsigned long stack_page;
-+ int count = 0;
-+ if (!p || p == current || p->state == TASK_RUNNING)
-+ return 0;
-+ stack_page = (unsigned long)task_stack_page(p);
-+ esp = p->thread.esp;
-+ if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
-+ return 0;
-+ /* include/asm-i386/system.h:switch_to() pushes ebp last. */
-+ ebp = *(unsigned long *) esp;
-+ do {
-+ if (ebp < stack_page || ebp > top_ebp+stack_page)
-+ return 0;
-+ eip = *(unsigned long *) (ebp+4);
-+ if (!in_sched_functions(eip))
-+ return eip;
-+ ebp = *(unsigned long *) ebp;
-+ } while (count++ < 16);
-+ return 0;
-+}
-+
-+/*
-+ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
-+ */
-+static int get_free_idx(void)
-+{
-+ struct thread_struct *t = &current->thread;
-+ int idx;
-+
-+ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
-+ if (desc_empty(t->tls_array + idx))
-+ return idx + GDT_ENTRY_TLS_MIN;
-+ return -ESRCH;
-+}
-+
-+/*
-+ * Set a given TLS descriptor:
-+ */
-+asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
-+{
-+ struct thread_struct *t = &current->thread;
-+ struct user_desc info;
-+ struct desc_struct *desc;
-+ int cpu, idx;
-+
-+ if (copy_from_user(&info, u_info, sizeof(info)))
-+ return -EFAULT;
-+ idx = info.entry_number;
-+
-+ /*
-+ * index -1 means the kernel should try to find and
-+ * allocate an empty descriptor:
-+ */
-+ if (idx == -1) {
-+ idx = get_free_idx();
-+ if (idx < 0)
-+ return idx;
-+ if (put_user(idx, &u_info->entry_number))
-+ return -EFAULT;
-+ }
-+
-+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-+ return -EINVAL;
-+
-+ desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
-+
-+ /*
-+ * We must not get preempted while modifying the TLS.
-+ */
-+ cpu = get_cpu();
-+
-+ if (LDT_empty(&info)) {
-+ desc->a = 0;
-+ desc->b = 0;
-+ } else {
-+ desc->a = LDT_entry_a(&info);
-+ desc->b = LDT_entry_b(&info);
-+ }
-+ load_TLS(t, cpu);
-+
-+ put_cpu();
-+
-+ return 0;
-+}
-+
-+/*
-+ * Get the current Thread-Local Storage area:
-+ */
-+
-+#define GET_BASE(desc) ( \
-+ (((desc)->a >> 16) & 0x0000ffff) | \
-+ (((desc)->b << 16) & 0x00ff0000) | \
-+ ( (desc)->b & 0xff000000) )
-+
-+#define GET_LIMIT(desc) ( \
-+ ((desc)->a & 0x0ffff) | \
-+ ((desc)->b & 0xf0000) )
-+
-+#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
-+#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
-+#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
-+#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
-+#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
-+#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
-+
-+asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
-+{
-+ struct user_desc info;
-+ struct desc_struct *desc;
-+ int idx;
-+
-+ if (get_user(idx, &u_info->entry_number))
-+ return -EFAULT;
-+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-+ return -EINVAL;
-+
-+ memset(&info, 0, sizeof(info));
-+
-+ desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-+
-+ info.entry_number = idx;
-+ info.base_addr = GET_BASE(desc);
-+ info.limit = GET_LIMIT(desc);
-+ info.seg_32bit = GET_32BIT(desc);
-+ info.contents = GET_CONTENTS(desc);
-+ info.read_exec_only = !GET_WRITABLE(desc);
-+ info.limit_in_pages = GET_LIMIT_PAGES(desc);
-+ info.seg_not_present = !GET_PRESENT(desc);
-+ info.useable = GET_USEABLE(desc);
-+
-+ if (copy_to_user(u_info, &info, sizeof(info)))
-+ return -EFAULT;
-+ return 0;
-+}
-+
-+unsigned long arch_align_stack(unsigned long sp)
-+{
-+ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-+ sp -= get_random_int() % 8192;
-+ return sp & ~0xf;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/quirks-xen.c ubuntu-gutsy-xen/arch/i386/kernel/quirks-xen.c
---- ubuntu-gutsy/arch/i386/kernel/quirks-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/quirks-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,46 @@
-+/*
-+ * This file contains work-arounds for x86 and x86_64 platform bugs.
-+ */
-+#include <linux/pci.h>
-+#include <linux/irq.h>
-+
-+#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI)
-+
-+static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
-+{
-+ u8 config, rev;
-+ u32 word;
-+
-+ /* BIOS may enable hardware IRQ balancing for
-+ * E7520/E7320/E7525(revision ID 0x9 and below)
-+ * based platforms.
-+ * Disable SW irqbalance/affinity on those platforms.
-+ */
-+ pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
-+ if (rev > 0x9)
-+ return;
-+
-+ /* enable access to config space*/
-+ pci_read_config_byte(dev, 0xf4, &config);
-+ pci_write_config_byte(dev, 0xf4, config|0x2);
-+
-+ /* read xTPR register */
-+ raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
-+
-+ if (!(word & (1 << 13))) {
-+ struct xen_platform_op op;
-+ printk(KERN_INFO "Intel E7520/7320/7525 detected. "
-+ "Disabling irq balancing and affinity\n");
-+ op.cmd = XENPF_platform_quirk;
-+ op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING;
-+ (void)HYPERVISOR_platform_op(&op);
-+ }
-+
-+ /* put back the original value for config space*/
-+ if (!(config & 0x2))
-+ pci_write_config_byte(dev, 0xf4, config);
-+}
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance);
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/relocate_kernel.S ubuntu-gutsy-xen/arch/i386/kernel/relocate_kernel.S
---- ubuntu-gutsy/arch/i386/kernel/relocate_kernel.S 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/relocate_kernel.S 2007-08-18 12:38:02.000000000 -0400
-@@ -154,14 +154,45 @@
- movl PTR(PA_PGD)(%ebp), %eax
- movl %eax, %cr3
-
-+ /* setup idt */
-+ movl %edi, %eax
-+ addl $(idt_48 - relocate_kernel), %eax
-+ lidtl (%eax)
-+
-+ /* setup gdt */
-+ movl %edi, %eax
-+ addl $(gdt - relocate_kernel), %eax
-+ movl %edi, %esi
-+ addl $((gdt_48 - relocate_kernel) + 2), %esi
-+ movl %eax, (%esi)
-+
-+ movl %edi, %eax
-+ addl $(gdt_48 - relocate_kernel), %eax
-+ lgdtl (%eax)
-+
-+ /* setup data segment registers */
-+ mov $(gdt_ds - gdt), %eax
-+ mov %eax, %ds
-+ mov %eax, %es
-+ mov %eax, %fs
-+ mov %eax, %gs
-+ mov %eax, %ss
-+
- /* setup a new stack at the end of the physical control page */
- lea 4096(%edi), %esp
-
-- /* jump to identity mapped page */
-- movl %edi, %eax
-- addl $(identity_mapped - relocate_kernel), %eax
-- pushl %eax
-- ret
-+ /* load new code segment and jump to identity mapped page */
-+ movl %edi, %esi
-+ xorl %eax, %eax
-+ pushl %eax
-+ pushl %esi
-+ pushl %eax
-+ movl $(gdt_cs - gdt), %eax
-+ pushl %eax
-+ movl %edi, %eax
-+ addl $(identity_mapped - relocate_kernel),%eax
-+ pushl %eax
-+ iretl
-
- identity_mapped:
- /* store the start address on the stack */
-@@ -250,3 +281,20 @@
- xorl %edi, %edi
- xorl %ebp, %ebp
- ret
-+
-+ .align 16
-+gdt:
-+ .quad 0x0000000000000000 /* NULL descriptor */
-+gdt_cs:
-+ .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
-+gdt_ds:
-+ .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
-+gdt_end:
-+
-+gdt_48:
-+ .word gdt_end - gdt - 1 /* limit */
-+ .long 0 /* base - filled in by code above */
-+
-+idt_48:
-+ .word 0 /* limit */
-+ .long 0 /* base */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/setup-xen.c ubuntu-gutsy-xen/arch/i386/kernel/setup-xen.c
---- ubuntu-gutsy/arch/i386/kernel/setup-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/setup-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,838 @@
-+/*
-+ * linux/arch/i386/kernel/setup.c
-+ *
-+ * Copyright (C) 1995 Linus Torvalds
-+ *
-+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
-+ *
-+ * Memory region support
-+ * David Parsons <orc@pell.chi.il.us>, July-August 1999
-+ *
-+ * Added E820 sanitization routine (removes overlapping memory regions);
-+ * Brian Moyle <bmoyle@mvista.com>, February 2001
-+ *
-+ * Moved CPU detection code to cpu/${cpu}.c
-+ * Patrick Mochel <mochel@osdl.org>, March 2002
-+ *
-+ * Provisions for empty E820 memory regions (reported by certain BIOSes).
-+ * Alex Achenbach <xela@slit.de>, December 2002.
-+ *
-+ */
-+
-+/*
-+ * This file handles the architecture-dependent parts of initialization
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/mmzone.h>
-+#include <linux/screen_info.h>
-+#include <linux/ioport.h>
-+#include <linux/acpi.h>
-+#include <linux/apm_bios.h>
-+#include <linux/initrd.h>
-+#include <linux/bootmem.h>
-+#include <linux/seq_file.h>
-+#include <linux/console.h>
-+#include <linux/mca.h>
-+#include <linux/root_dev.h>
-+#include <linux/highmem.h>
-+#include <linux/module.h>
-+#include <linux/efi.h>
-+#include <linux/init.h>
-+#include <linux/edd.h>
-+#include <linux/nodemask.h>
-+#include <linux/kernel.h>
-+#include <linux/percpu.h>
-+#include <linux/notifier.h>
-+#include <linux/kexec.h>
-+#include <linux/crash_dump.h>
-+#include <linux/dmi.h>
-+#include <linux/pfn.h>
-+
-+#include <video/edid.h>
-+
-+#include <asm/apic.h>
-+#include <asm/e820.h>
-+#include <asm/mpspec.h>
-+#include <asm/mmzone.h>
-+#include <asm/setup.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/sections.h>
-+#include <asm/io_apic.h>
-+#include <asm/ist.h>
-+#include <asm/io.h>
-+#include <asm/hypervisor.h>
-+#include <xen/interface/physdev.h>
-+#include <xen/interface/memory.h>
-+#include <xen/features.h>
-+#include <xen/firmware.h>
-+#include <xen/xencons.h>
-+#include <setup_arch.h>
-+#include <bios_ebda.h>
-+
-+#ifdef CONFIG_XEN
-+#include <xen/interface/kexec.h>
-+#endif
-+
-+static int xen_panic_event(struct notifier_block *, unsigned long, void *);
-+static struct notifier_block xen_panic_block = {
-+ xen_panic_event, NULL, 0 /* try to go last */
-+};
-+
-+extern char hypercall_page[PAGE_SIZE];
-+EXPORT_SYMBOL(hypercall_page);
-+
-+int disable_pse __devinitdata = 0;
-+
-+/*
-+ * Machine setup..
-+ */
-+extern struct resource code_resource;
-+extern struct resource data_resource;
-+
-+/* cpu data as detected by the assembly code in head.S */
-+struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
-+/* common cpu data for all cpus */
-+struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
-+EXPORT_SYMBOL(boot_cpu_data);
-+
-+unsigned long mmu_cr4_features;
-+
-+/* for MCA, but anyone else can use it if they want */
-+unsigned int machine_id;
-+#ifdef CONFIG_MCA
-+EXPORT_SYMBOL(machine_id);
-+#endif
-+unsigned int machine_submodel_id;
-+unsigned int BIOS_revision;
-+unsigned int mca_pentium_flag;
-+
-+/* Boot loader ID as an integer, for the benefit of proc_dointvec */
-+int bootloader_type;
-+
-+/* user-defined highmem size */
-+static unsigned int highmem_pages = -1;
-+
-+/*
-+ * Setup options
-+ */
-+struct drive_info_struct { char dummy[32]; } drive_info;
-+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \
-+ defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
-+EXPORT_SYMBOL(drive_info);
-+#endif
-+struct screen_info screen_info;
-+EXPORT_SYMBOL(screen_info);
-+struct apm_info apm_info;
-+EXPORT_SYMBOL(apm_info);
-+struct sys_desc_table_struct {
-+ unsigned short length;
-+ unsigned char table[0];
-+};
-+struct edid_info edid_info;
-+EXPORT_SYMBOL_GPL(edid_info);
-+#ifndef CONFIG_XEN
-+#define copy_edid() (edid_info = EDID_INFO)
-+#endif
-+struct ist_info ist_info;
-+#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
-+ defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
-+EXPORT_SYMBOL(ist_info);
-+#endif
-+
-+extern void early_cpu_init(void);
-+extern int root_mountflags;
-+
-+unsigned long saved_videomode;
-+
-+#define RAMDISK_IMAGE_START_MASK 0x07FF
-+#define RAMDISK_PROMPT_FLAG 0x8000
-+#define RAMDISK_LOAD_FLAG 0x4000
-+
-+static char __initdata command_line[COMMAND_LINE_SIZE];
-+
-+unsigned char __initdata boot_params[PARAM_SIZE];
-+
-+/*
-+ * Point at the empty zero page to start with. We map the real shared_info
-+ * page as soon as fixmap is up and running.
-+ */
-+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-+EXPORT_SYMBOL(HYPERVISOR_shared_info);
-+
-+unsigned long *phys_to_machine_mapping;
-+unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[16];
-+EXPORT_SYMBOL(phys_to_machine_mapping);
-+
-+/* Raw start-of-day parameters from the hypervisor. */
-+start_info_t *xen_start_info;
-+EXPORT_SYMBOL(xen_start_info);
-+
-+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
-+struct edd edd;
-+#ifdef CONFIG_EDD_MODULE
-+EXPORT_SYMBOL(edd);
-+#endif
-+#ifndef CONFIG_XEN
-+/**
-+ * copy_edd() - Copy the BIOS EDD information
-+ * from boot_params into a safe place.
-+ *
-+ */
-+static inline void copy_edd(void)
-+{
-+ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
-+ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
-+ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
-+ edd.edd_info_nr = EDD_NR;
-+}
-+#endif
-+#else
-+static inline void copy_edd(void)
-+{
-+}
-+#endif
-+
-+int __initdata user_defined_memmap = 0;
-+
-+/*
-+ * "mem=nopentium" disables the 4MB page tables.
-+ * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
-+ * to <mem>, overriding the bios size.
-+ * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
-+ * <start> to <start>+<mem>, overriding the bios size.
-+ *
-+ * HPA tells me bootloaders need to parse mem=, so no new
-+ * option should be mem= [also see Documentation/i386/boot.txt]
-+ */
-+static int __init parse_mem(char *arg)
-+{
-+ if (!arg)
-+ return -EINVAL;
-+
-+ if (strcmp(arg, "nopentium") == 0) {
-+ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
-+ disable_pse = 1;
-+ } else {
-+ /* If the user specifies memory size, we
-+ * limit the BIOS-provided memory map to
-+ * that size. exactmap can be used to specify
-+ * the exact map. mem=number can be used to
-+ * trim the existing memory map.
-+ */
-+ unsigned long long mem_size;
-+
-+ mem_size = memparse(arg, &arg);
-+ limit_regions(mem_size);
-+ user_defined_memmap = 1;
-+ }
-+ return 0;
-+}
-+early_param("mem", parse_mem);
-+
-+#ifdef CONFIG_PROC_VMCORE
-+/* elfcorehdr= specifies the location of elf core header
-+ * stored by the crashed kernel.
-+ */
-+static int __init parse_elfcorehdr(char *arg)
-+{
-+ if (!arg)
-+ return -EINVAL;
-+
-+ elfcorehdr_addr = memparse(arg, &arg);
-+ return 0;
-+}
-+early_param("elfcorehdr", parse_elfcorehdr);
-+#endif /* CONFIG_PROC_VMCORE */
-+
-+/*
-+ * highmem=size forces highmem to be exactly 'size' bytes.
-+ * This works even on boxes that have no highmem otherwise.
-+ * This also works to reduce highmem size on bigger boxes.
-+ */
-+static int __init parse_highmem(char *arg)
-+{
-+ if (!arg)
-+ return -EINVAL;
-+
-+ highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
-+ return 0;
-+}
-+early_param("highmem", parse_highmem);
-+
-+/*
-+ * vmalloc=size forces the vmalloc area to be exactly 'size'
-+ * bytes. This can be used to increase (or decrease) the
-+ * vmalloc area - the default is 128m.
-+ */
-+static int __init parse_vmalloc(char *arg)
-+{
-+ if (!arg)
-+ return -EINVAL;
-+
-+ __VMALLOC_RESERVE = memparse(arg, &arg);
-+ return 0;
-+}
-+early_param("vmalloc", parse_vmalloc);
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * reservetop=size reserves a hole at the top of the kernel address space which
-+ * a hypervisor can load into later. Needed for dynamically loaded hypervisors,
-+ * so relocating the fixmap can be done before paging initialization.
-+ */
-+static int __init parse_reservetop(char *arg)
-+{
-+ unsigned long address;
-+
-+ if (!arg)
-+ return -EINVAL;
-+
-+ address = memparse(arg, &arg);
-+ reserve_top_address(address);
-+ return 0;
-+}
-+early_param("reservetop", parse_reservetop);
-+#endif
-+
-+/*
-+ * Determine low and high memory ranges:
-+ */
-+unsigned long __init find_max_low_pfn(void)
-+{
-+ unsigned long max_low_pfn;
-+
-+ max_low_pfn = max_pfn;
-+ if (max_low_pfn > MAXMEM_PFN) {
-+ if (highmem_pages == -1)
-+ highmem_pages = max_pfn - MAXMEM_PFN;
-+ if (highmem_pages + MAXMEM_PFN < max_pfn)
-+ max_pfn = MAXMEM_PFN + highmem_pages;
-+ if (highmem_pages + MAXMEM_PFN > max_pfn) {
-+ printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
-+ highmem_pages = 0;
-+ }
-+ max_low_pfn = MAXMEM_PFN;
-+#ifndef CONFIG_HIGHMEM
-+ /* Maximum memory usable is what is directly addressable */
-+ printk(KERN_WARNING "Warning only %ldMB will be used.\n",
-+ MAXMEM>>20);
-+ if (max_pfn > MAX_NONPAE_PFN)
-+ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
-+ else
-+ printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
-+ max_pfn = MAXMEM_PFN;
-+#else /* !CONFIG_HIGHMEM */
-+#ifndef CONFIG_X86_PAE
-+ if (max_pfn > MAX_NONPAE_PFN) {
-+ max_pfn = MAX_NONPAE_PFN;
-+ printk(KERN_WARNING "Warning only 4GB will be used.\n");
-+ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
-+ }
-+#endif /* !CONFIG_X86_PAE */
-+#endif /* !CONFIG_HIGHMEM */
-+ } else {
-+ if (highmem_pages == -1)
-+ highmem_pages = 0;
-+#ifdef CONFIG_HIGHMEM
-+ if (highmem_pages >= max_pfn) {
-+ printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
-+ highmem_pages = 0;
-+ }
-+ if (highmem_pages) {
-+ if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
-+ printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
-+ highmem_pages = 0;
-+ }
-+ max_low_pfn -= highmem_pages;
-+ }
-+#else
-+ if (highmem_pages)
-+ printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
-+#endif
-+ }
-+ return max_low_pfn;
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * workaround for Dell systems that neglect to reserve EBDA
-+ */
-+static void __init reserve_ebda_region(void)
-+{
-+ unsigned int addr;
-+ addr = get_bios_ebda();
-+ if (addr)
-+ reserve_bootmem(addr, PAGE_SIZE);
-+}
-+#endif
-+
-+#ifndef CONFIG_NEED_MULTIPLE_NODES
-+void __init setup_bootmem_allocator(void);
-+static unsigned long __init setup_memory(void)
-+{
-+ /*
-+ * partially used pages are not usable - thus
-+ * we are rounding upwards:
-+ */
-+ min_low_pfn = PFN_UP(__pa(xen_start_info->pt_base)) +
-+ xen_start_info->nr_pt_frames;
-+
-+ find_max_pfn();
-+
-+ max_low_pfn = find_max_low_pfn();
-+
-+#ifdef CONFIG_HIGHMEM
-+ highstart_pfn = highend_pfn = max_pfn;
-+ if (max_pfn > max_low_pfn) {
-+ highstart_pfn = max_low_pfn;
-+ }
-+ printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
-+ pages_to_mb(highend_pfn - highstart_pfn));
-+ num_physpages = highend_pfn;
-+ high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
-+#else
-+ num_physpages = max_low_pfn;
-+ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
-+#endif
-+#ifdef CONFIG_FLATMEM
-+ max_mapnr = num_physpages;
-+#endif
-+ printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
-+ pages_to_mb(max_low_pfn));
-+
-+ setup_bootmem_allocator();
-+
-+ return max_low_pfn;
-+}
-+
-+void __init zone_sizes_init(void)
-+{
-+ unsigned long max_zone_pfns[MAX_NR_ZONES];
-+
-+ memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-+ /*
-+ * XEN: Our notion of "DMA memory" is fake when running over Xen.
-+ * We simply put all RAM in the DMA zone so that those drivers which
-+ * needlessly specify GFP_DMA do not get starved of RAM unnecessarily.
-+ * Those drivers that *do* require lowmem are screwed anyway when
-+ * running over Xen!
-+ */
-+ max_zone_pfns[ZONE_DMA] = max_low_pfn;
-+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-+#ifdef CONFIG_HIGHMEM
-+ max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
-+ add_active_range(0, 0, highend_pfn);
-+#else
-+ add_active_range(0, 0, max_low_pfn);
-+#endif
-+
-+ free_area_init_nodes(max_zone_pfns);
-+}
-+#else
-+extern unsigned long __init setup_memory(void);
-+extern void zone_sizes_init(void);
-+#endif /* !CONFIG_NEED_MULTIPLE_NODES */
-+
-+void __init setup_bootmem_allocator(void)
-+{
-+ unsigned long bootmap_size;
-+ /*
-+ * Initialize the boot-time allocator (with low memory only):
-+ */
-+ bootmap_size = init_bootmem(min_low_pfn, max_low_pfn);
-+
-+ register_bootmem_low_pages(max_low_pfn);
-+
-+ /*
-+ * Reserve the bootmem bitmap itself as well. We do this in two
-+ * steps (first step was init_bootmem()) because this catches
-+ * the (very unlikely) case of us accidentally initializing the
-+ * bootmem allocator with an invalid RAM area.
-+ */
-+ reserve_bootmem(__pa_symbol(_text), (PFN_PHYS(min_low_pfn) +
-+ bootmap_size + PAGE_SIZE-1) - __pa_symbol(_text));
-+
-+#ifndef CONFIG_XEN
-+ /*
-+ * reserve physical page 0 - it's a special BIOS page on many boxes,
-+ * enabling clean reboots, SMP operation, laptop functions.
-+ */
-+ reserve_bootmem(0, PAGE_SIZE);
-+
-+ /* reserve EBDA region, it's a 4K region */
-+ reserve_ebda_region();
-+
-+ /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
-+ PCI prefetch into it (errata #56). Usually the page is reserved anyways,
-+ unless you have no PS/2 mouse plugged in. */
-+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-+ boot_cpu_data.x86 == 6)
-+ reserve_bootmem(0xa0000 - 4096, 4096);
-+
-+#ifdef CONFIG_SMP
-+ /*
-+ * But first pinch a few for the stack/trampoline stuff
-+ * FIXME: Don't need the extra page at 4K, but need to fix
-+ * trampoline before removing it. (see the GDT stuff)
-+ */
-+ reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
-+#endif
-+#ifdef CONFIG_ACPI_SLEEP
-+ /*
-+ * Reserve low memory region for sleep support.
-+ */
-+ acpi_reserve_bootmem();
-+#endif
-+ numa_kva_reserve();
-+#endif /* !CONFIG_XEN */
-+
-+#ifdef CONFIG_BLK_DEV_INITRD
-+ if (xen_start_info->mod_start) {
-+ if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
-+ /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
-+ initrd_start = INITRD_START + PAGE_OFFSET;
-+ initrd_end = initrd_start+INITRD_SIZE;
-+ initrd_below_start_ok = 1;
-+ }
-+ else {
-+ printk(KERN_ERR "initrd extends beyond end of memory "
-+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-+ INITRD_START + INITRD_SIZE,
-+ max_low_pfn << PAGE_SHIFT);
-+ initrd_start = 0;
-+ }
-+ }
-+#endif
-+#ifdef CONFIG_KEXEC
-+#ifdef CONFIG_XEN
-+ xen_machine_kexec_setup_resources();
-+#else
-+ if (crashk_res.start != crashk_res.end)
-+ reserve_bootmem(crashk_res.start,
-+ crashk_res.end - crashk_res.start + 1);
-+#endif
-+#endif
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap))
-+ phys_to_machine_mapping =
-+ (unsigned long *)xen_start_info->mfn_list;
-+}
-+
-+/*
-+ * The node 0 pgdat is initialized before all of these because
-+ * it's needed for bootmem. node>0 pgdats have their virtual
-+ * space allocated before the pagetables are in place to access
-+ * them, so they can't be cleared then.
-+ *
-+ * This should all compile down to nothing when NUMA is off.
-+ */
-+void __init remapped_pgdat_init(void)
-+{
-+ int nid;
-+
-+ for_each_online_node(nid) {
-+ if (nid != 0)
-+ memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
-+ }
-+}
-+
-+#ifdef CONFIG_MCA
-+static void set_mca_bus(int x)
-+{
-+ MCA_bus = x;
-+}
-+#else
-+static void set_mca_bus(int x) { }
-+#endif
-+
-+/* Overridden in paravirt.c if CONFIG_PARAVIRT */
-+char * __init __attribute__((weak)) memory_setup(void)
-+{
-+ return machine_specific_memory_setup();
-+}
-+
-+/*
-+ * Determine if we were loaded by an EFI loader. If so, then we have also been
-+ * passed the efi memmap, systab, etc., so we should use these data structures
-+ * for initialization. Note, the efi init code path is determined by the
-+ * global efi_enabled. This allows the same kernel image to be used on existing
-+ * systems (with a traditional BIOS) as well as on EFI systems.
-+ */
-+void __init setup_arch(char **cmdline_p)
-+{
-+ int i, j, k, fpp;
-+ struct physdev_set_iopl set_iopl;
-+ unsigned long max_low_pfn;
-+
-+ /* Force a quick death if the kernel panics (not domain 0). */
-+ extern int panic_timeout;
-+ if (!is_initial_xendomain()) {
-+ if (!panic_timeout)
-+ panic_timeout = 1;
-+
-+ /* Register a call for panic conditions. */
-+ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
-+ }
-+
-+ HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
-+ HYPERVISOR_vm_assist(VMASST_CMD_enable,
-+ VMASST_TYPE_writable_pagetables);
-+
-+ memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
-+ early_cpu_init();
-+#ifdef CONFIG_SMP
-+ prefill_possible_map();
-+#endif
-+
-+ /*
-+ * FIXME: This isn't an official loader_type right
-+ * now but does currently work with elilo.
-+ * If we were configured as an EFI kernel, check to make
-+ * sure that we were loaded correctly from elilo and that
-+ * the system table is valid. If not, then initialize normally.
-+ */
-+#ifdef CONFIG_EFI
-+ if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
-+ efi_enabled = 1;
-+#endif
-+
-+ /* This must be initialized to UNNAMED_MAJOR for ipconfig to work
-+ properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
-+ */
-+ ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
-+ drive_info = DRIVE_INFO;
-+ screen_info = SCREEN_INFO;
-+ copy_edid();
-+ apm_info.bios = APM_BIOS_INFO;
-+ ist_info = IST_INFO;
-+ saved_videomode = VIDEO_MODE;
-+ if( SYS_DESC_TABLE.length != 0 ) {
-+ set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
-+ machine_id = SYS_DESC_TABLE.table[0];
-+ machine_submodel_id = SYS_DESC_TABLE.table[1];
-+ BIOS_revision = SYS_DESC_TABLE.table[2];
-+ }
-+ bootloader_type = LOADER_TYPE;
-+
-+ if (is_initial_xendomain()) {
-+ /* This is drawn from a dump from vgacon:startup in
-+ * standard Linux. */
-+ screen_info.orig_video_mode = 3;
-+ screen_info.orig_video_isVGA = 1;
-+ screen_info.orig_video_lines = 25;
-+ screen_info.orig_video_cols = 80;
-+ screen_info.orig_video_ega_bx = 3;
-+ screen_info.orig_video_points = 16;
-+ screen_info.orig_y = screen_info.orig_video_lines - 1;
-+ if (xen_start_info->console.dom0.info_size >=
-+ sizeof(struct dom0_vga_console_info)) {
-+ const struct dom0_vga_console_info *info =
-+ (struct dom0_vga_console_info *)(
-+ (char *)xen_start_info +
-+ xen_start_info->console.dom0.info_off);
-+ dom0_init_screen_info(info);
-+ }
-+ xen_start_info->console.domU.mfn = 0;
-+ xen_start_info->console.domU.evtchn = 0;
-+ } else
-+ screen_info.orig_video_isVGA = 0;
-+
-+#ifdef CONFIG_BLK_DEV_RAM
-+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
-+ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
-+ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
-+#endif
-+
-+ setup_xen_features();
-+
-+ ARCH_SETUP
-+ if (efi_enabled)
-+ efi_init();
-+ else {
-+ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-+ print_memory_map(memory_setup());
-+ }
-+
-+ copy_edd();
-+
-+ if (!MOUNT_ROOT_RDONLY)
-+ root_mountflags &= ~MS_RDONLY;
-+ init_mm.start_code = (unsigned long) _text;
-+ init_mm.end_code = (unsigned long) _etext;
-+ init_mm.end_data = (unsigned long) _edata;
-+ init_mm.brk = (PFN_UP(__pa(xen_start_info->pt_base)) +
-+ xen_start_info->nr_pt_frames) << PAGE_SHIFT;
-+
-+ code_resource.start = virt_to_phys(_text);
-+ code_resource.end = virt_to_phys(_etext)-1;
-+ data_resource.start = virt_to_phys(_etext);
-+ data_resource.end = virt_to_phys(_edata)-1;
-+
-+ if ((i = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
-+ i = COMMAND_LINE_SIZE;
-+ memcpy(boot_command_line, xen_start_info->cmd_line, i);
-+ boot_command_line[i - 1] = '\0';
-+ parse_early_param();
-+
-+ if (user_defined_memmap) {
-+ printk(KERN_INFO "user-defined physical RAM map:\n");
-+ print_memory_map("user");
-+ }
-+
-+ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
-+ *cmdline_p = command_line;
-+
-+ max_low_pfn = setup_memory();
-+
-+#ifdef CONFIG_VMI
-+ /*
-+ * Must be after max_low_pfn is determined, and before kernel
-+ * pagetables are setup.
-+ */
-+ vmi_init();
-+#endif
-+
-+ /*
-+ * NOTE: before this point _nobody_ is allowed to allocate
-+ * any memory using the bootmem allocator. Although the
-+ * allocator is now initialised, only the first 8Mb of the kernel
-+ * virtual address space has been mapped. All allocations before
-+ * paging_init() has completed must use the alloc_bootmem_low_pages()
-+ * variant (which allocates DMA'able memory) and care must be taken
-+ * not to exceed the 8Mb limit.
-+ */
-+
-+#ifdef CONFIG_SMP
-+ smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
-+#endif
-+ paging_init();
-+ remapped_pgdat_init();
-+ sparse_init();
-+ zone_sizes_init();
-+
-+#ifdef CONFIG_X86_FIND_SMP_CONFIG
-+ /*
-+ * Find and reserve possible boot-time SMP configuration:
-+ */
-+ find_smp_config();
-+#endif
-+
-+ /* Make sure we have a correctly sized P->M table. */
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ phys_to_machine_mapping = alloc_bootmem_low_pages(
-+ max_pfn * sizeof(unsigned long));
-+ memset(phys_to_machine_mapping, ~0,
-+ max_pfn * sizeof(unsigned long));
-+ memcpy(phys_to_machine_mapping,
-+ (unsigned long *)xen_start_info->mfn_list,
-+ xen_start_info->nr_pages * sizeof(unsigned long));
-+ free_bootmem(
-+ __pa(xen_start_info->mfn_list),
-+ PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
-+ sizeof(unsigned long))));
-+
-+ /*
-+ * Initialise the list of the frames that specify the list of
-+ * frames that make up the p2m table. Used by save/restore
-+ */
-+ pfn_to_mfn_frame_list_list = alloc_bootmem_low_pages(PAGE_SIZE);
-+
-+ fpp = PAGE_SIZE/sizeof(unsigned long);
-+ for (i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++) {
-+ if ((j % fpp) == 0) {
-+ k++;
-+ BUG_ON(k>=16);
-+ pfn_to_mfn_frame_list[k] =
-+ alloc_bootmem_low_pages(PAGE_SIZE);
-+ pfn_to_mfn_frame_list_list[k] =
-+ virt_to_mfn(pfn_to_mfn_frame_list[k]);
-+ j=0;
-+ }
-+ pfn_to_mfn_frame_list[k][j] =
-+ virt_to_mfn(&phys_to_machine_mapping[i]);
-+ }
-+ HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
-+ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-+ virt_to_mfn(pfn_to_mfn_frame_list_list);
-+ }
-+
-+ /*
-+ * NOTE: at this point the bootmem allocator is fully available.
-+ */
-+
-+ if (is_initial_xendomain())
-+ dmi_scan_machine();
-+
-+#ifdef CONFIG_X86_GENERICARCH
-+ generic_apic_probe();
-+#endif
-+ if (efi_enabled)
-+ efi_map_memmap();
-+
-+ set_iopl.iopl = 1;
-+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
-+
-+#ifdef CONFIG_ACPI
-+ if (!is_initial_xendomain()) {
-+ printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
-+ acpi_disabled = 1;
-+ acpi_ht = 0;
-+ }
-+
-+ /*
-+ * Parse the ACPI tables for possible boot-time SMP configuration.
-+ */
-+ acpi_boot_table_init();
-+#endif
-+
-+#ifdef CONFIG_PCI
-+#ifdef CONFIG_X86_IO_APIC
-+ check_acpi_pci(); /* Checks more than just ACPI actually */
-+#endif
-+#endif
-+
-+#ifdef CONFIG_ACPI
-+ acpi_boot_init();
-+
-+#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
-+ if (def_to_bigsmp)
-+ printk(KERN_WARNING "More than 8 CPUs detected and "
-+ "CONFIG_X86_PC cannot handle it.\nUse "
-+ "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
-+#endif
-+#endif
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ if (smp_found_config)
-+ get_smp_config();
-+#endif
-+
-+ e820_register_memory();
-+
-+ if (is_initial_xendomain()) {
-+#ifdef CONFIG_VT
-+#if defined(CONFIG_VGA_CONSOLE)
-+ if (!efi_enabled ||
-+ (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
-+ conswitchp = &vga_con;
-+#elif defined(CONFIG_DUMMY_CONSOLE)
-+ conswitchp = &dummy_con;
-+#endif
-+#endif
-+ } else {
-+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
-+ conswitchp = &dummy_con;
-+#endif
-+ }
-+
-+ xencons_early_setup();
-+}
-+
-+static int
-+xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
-+{
-+ HYPERVISOR_shutdown(SHUTDOWN_crash);
-+ /* we're never actually going to get here... */
-+ return NOTIFY_DONE;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/smp-xen.c ubuntu-gutsy-xen/arch/i386/kernel/smp-xen.c
---- ubuntu-gutsy/arch/i386/kernel/smp-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/smp-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,658 @@
-+/*
-+ * Intel SMP support routines.
-+ *
-+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
-+ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
-+ *
-+ * This code is released under the GNU General Public License version 2 or
-+ * later.
-+ */
-+
-+#include <linux/init.h>
-+
-+#include <linux/mm.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/cache.h>
-+#include <linux/interrupt.h>
-+#include <linux/cpu.h>
-+#include <linux/module.h>
-+
-+#include <asm/mtrr.h>
-+#include <asm/tlbflush.h>
-+#if 0
-+#include <mach_apic.h>
-+#endif
-+#include <xen/evtchn.h>
-+
-+/*
-+ * Some notes on x86 processor bugs affecting SMP operation:
-+ *
-+ * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
-+ * The Linux implications for SMP are handled as follows:
-+ *
-+ * Pentium III / [Xeon]
-+ * None of the E1AP-E3AP errata are visible to the user.
-+ *
-+ * E1AP. see PII A1AP
-+ * E2AP. see PII A2AP
-+ * E3AP. see PII A3AP
-+ *
-+ * Pentium II / [Xeon]
-+ * None of the A1AP-A3AP errata are visible to the user.
-+ *
-+ * A1AP. see PPro 1AP
-+ * A2AP. see PPro 2AP
-+ * A3AP. see PPro 7AP
-+ *
-+ * Pentium Pro
-+ * None of 1AP-9AP errata are visible to the normal user,
-+ * except occasional delivery of 'spurious interrupt' as trap #15.
-+ * This is very rare and a non-problem.
-+ *
-+ * 1AP. Linux maps APIC as non-cacheable
-+ * 2AP. worked around in hardware
-+ * 3AP. fixed in C0 and above steppings microcode update.
-+ * Linux does not use excessive STARTUP_IPIs.
-+ * 4AP. worked around in hardware
-+ * 5AP. symmetric IO mode (normal Linux operation) not affected.
-+ * 'noapic' mode has vector 0xf filled out properly.
-+ * 6AP. 'noapic' mode might be affected - fixed in later steppings
-+ * 7AP. We do not assume writes to the LVT deasserting IRQs
-+ * 8AP. We do not enable low power mode (deep sleep) during MP bootup
-+ * 9AP. We do not use mixed mode
-+ *
-+ * Pentium
-+ * There is a marginal case where REP MOVS on 100MHz SMP
-+ * machines with B stepping processors can fail. XXX should provide
-+ * an L1cache=Writethrough or L1cache=off option.
-+ *
-+ * B stepping CPUs may hang. There are hardware workarounds
-+ * for this. We warn about it in case your board doesn't have the
-+ * workarounds. Basically that's so I can tell anyone with a B stepping
-+ * CPU and SMP problems "tough".
-+ *
-+ * Specific items [From Pentium Processor Specification Update]
-+ *
-+ * 1AP. Linux doesn't use remote read
-+ * 2AP. Linux doesn't trust APIC errors
-+ * 3AP. We work around this
-+ * 4AP. Linux never generated 3 interrupts of the same priority
-+ * to cause a lost local interrupt.
-+ * 5AP. Remote read is never used
-+ * 6AP. not affected - worked around in hardware
-+ * 7AP. not affected - worked around in hardware
-+ * 8AP. worked around in hardware - we get explicit CS errors if not
-+ * 9AP. only 'noapic' mode affected. Might generate spurious
-+ * interrupts, we log only the first one and count the
-+ * rest silently.
-+ * 10AP. not affected - worked around in hardware
-+ * 11AP. Linux reads the APIC between writes to avoid this, as per
-+ * the documentation. Make sure you preserve this as it affects
-+ * the C stepping chips too.
-+ * 12AP. not affected - worked around in hardware
-+ * 13AP. not affected - worked around in hardware
-+ * 14AP. we always deassert INIT during bootup
-+ * 15AP. not affected - worked around in hardware
-+ * 16AP. not affected - worked around in hardware
-+ * 17AP. not affected - worked around in hardware
-+ * 18AP. not affected - worked around in hardware
-+ * 19AP. not affected - worked around in BIOS
-+ *
-+ * If this sounds worrying believe me these bugs are either ___RARE___,
-+ * or are signal timing bugs worked around in hardware and there's
-+ * about nothing of note with C stepping upwards.
-+ */
-+
-+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
-+
-+/*
-+ * the following functions deal with sending IPIs between CPUs.
-+ *
-+ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
-+ */
-+
-+#ifndef CONFIG_XEN
-+static inline int __prepare_ICR (unsigned int shortcut, int vector)
-+{
-+ unsigned int icr = shortcut | APIC_DEST_LOGICAL;
-+
-+ switch (vector) {
-+ default:
-+ icr |= APIC_DM_FIXED | vector;
-+ break;
-+ case NMI_VECTOR:
-+ icr |= APIC_DM_NMI;
-+ break;
-+ }
-+ return icr;
-+}
-+
-+static inline int __prepare_ICR2 (unsigned int mask)
-+{
-+ return SET_APIC_DEST_FIELD(mask);
-+}
-+#endif
-+
-+DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
-+
-+static inline void __send_IPI_one(unsigned int cpu, int vector)
-+{
-+ int irq = per_cpu(ipi_to_irq, cpu)[vector];
-+ BUG_ON(irq < 0);
-+ notify_remote_via_irq(irq);
-+}
-+
-+void __send_IPI_shortcut(unsigned int shortcut, int vector)
-+{
-+ int cpu;
-+
-+ switch (shortcut) {
-+ case APIC_DEST_SELF:
-+ __send_IPI_one(smp_processor_id(), vector);
-+ break;
-+ case APIC_DEST_ALLBUT:
-+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+ if (cpu == smp_processor_id())
-+ continue;
-+ if (cpu_isset(cpu, cpu_online_map)) {
-+ __send_IPI_one(cpu, vector);
-+ }
-+ }
-+ break;
-+ default:
-+ printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
-+ vector);
-+ break;
-+ }
-+}
-+
-+void fastcall send_IPI_self(int vector)
-+{
-+ __send_IPI_shortcut(APIC_DEST_SELF, vector);
-+}
-+
-+/*
-+ * This is only used on smaller machines.
-+ */
-+void send_IPI_mask_bitmask(cpumask_t mask, int vector)
-+{
-+ unsigned long flags;
-+ unsigned int cpu;
-+
-+ local_irq_save(flags);
-+ WARN_ON(cpus_addr(mask)[0] & ~cpus_addr(cpu_online_map)[0]);
-+
-+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+ if (cpu_isset(cpu, mask)) {
-+ __send_IPI_one(cpu, vector);
-+ }
-+ }
-+
-+ local_irq_restore(flags);
-+}
-+
-+void send_IPI_mask_sequence(cpumask_t mask, int vector)
-+{
-+
-+ send_IPI_mask_bitmask(mask, vector);
-+}
-+
-+#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
-+
-+#if 0 /* XEN */
-+/*
-+ * Smarter SMP flushing macros.
-+ * c/o Linus Torvalds.
-+ *
-+ * These mean you can really definitely utterly forget about
-+ * writing to user space from interrupts. (It's not allowed anyway).
-+ *
-+ * Optimizations Manfred Spraul <manfred@colorfullife.com>
-+ */
-+
-+static cpumask_t flush_cpumask;
-+static struct mm_struct * flush_mm;
-+static unsigned long flush_va;
-+static DEFINE_SPINLOCK(tlbstate_lock);
-+
-+/*
-+ * We cannot call mmdrop() because we are in interrupt context,
-+ * instead update mm->cpu_vm_mask.
-+ *
-+ * We need to reload %cr3 since the page tables may be going
-+ * away from under us..
-+ */
-+static inline void leave_mm (unsigned long cpu)
-+{
-+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-+ BUG();
-+ cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
-+ load_cr3(swapper_pg_dir);
-+}
-+
-+/*
-+ *
-+ * The flush IPI assumes that a thread switch happens in this order:
-+ * [cpu0: the cpu that switches]
-+ * 1) switch_mm() either 1a) or 1b)
-+ * 1a) thread switch to a different mm
-+ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
-+ * Stop ipi delivery for the old mm. This is not synchronized with
-+ * the other cpus, but smp_invalidate_interrupt ignore flush ipis
-+ * for the wrong mm, and in the worst case we perform a superfluous
-+ * tlb flush.
-+ * 1a2) set cpu_tlbstate to TLBSTATE_OK
-+ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
-+ * was in lazy tlb mode.
-+ * 1a3) update cpu_tlbstate[].active_mm
-+ * Now cpu0 accepts tlb flushes for the new mm.
-+ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
-+ * Now the other cpus will send tlb flush ipis.
-+ * 1a4) change cr3.
-+ * 1b) thread switch without mm change
-+ * cpu_tlbstate[].active_mm is correct, cpu0 already handles
-+ * flush ipis.
-+ * 1b1) set cpu_tlbstate to TLBSTATE_OK
-+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
-+ * Atomically set the bit [other cpus will start sending flush ipis],
-+ * and test the bit.
-+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
-+ * 2) switch %%esp, ie current
-+ *
-+ * The interrupt must handle 2 special cases:
-+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
-+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
-+ * runs in kernel space, the cpu could load tlb entries for user space
-+ * pages.
-+ *
-+ * The good news is that cpu_tlbstate is local to each cpu, no
-+ * write/read ordering problems.
-+ */
-+
-+/*
-+ * TLB flush IPI:
-+ *
-+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
-+ * 2) Leave the mm if we are in the lazy tlb mode.
-+ */
-+
-+irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id)
-+{
-+ unsigned long cpu;
-+
-+ cpu = get_cpu();
-+
-+ if (!cpu_isset(cpu, flush_cpumask))
-+ goto out;
-+ /*
-+ * This was a BUG() but until someone can quote me the
-+ * line from the intel manual that guarantees an IPI to
-+ * multiple CPUs is retried _only_ on the erroring CPUs
-+ * its staying as a return
-+ *
-+ * BUG();
-+ */
-+
-+ if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
-+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
-+ if (flush_va == TLB_FLUSH_ALL)
-+ local_flush_tlb();
-+ else
-+ __flush_tlb_one(flush_va);
-+ } else
-+ leave_mm(cpu);
-+ }
-+ smp_mb__before_clear_bit();
-+ cpu_clear(cpu, flush_cpumask);
-+ smp_mb__after_clear_bit();
-+out:
-+ put_cpu_no_resched();
-+
-+ return IRQ_HANDLED;
-+}
-+
-+void xen_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
-+ unsigned long va)
-+{
-+ cpumask_t cpumask = *cpumaskp;
-+
-+ /*
-+ * A couple of (to be removed) sanity checks:
-+ *
-+ * - current CPU must not be in mask
-+ * - mask must exist :)
-+ */
-+ BUG_ON(cpus_empty(cpumask));
-+ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
-+ BUG_ON(!mm);
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+ /* If a CPU which we ran on has gone down, OK. */
-+ cpus_and(cpumask, cpumask, cpu_online_map);
-+ if (unlikely(cpus_empty(cpumask)))
-+ return;
-+#endif
-+
-+ /*
-+ * I'm not happy about this global shared spinlock in the
-+ * MM hot path, but we'll see how contended it is.
-+ * AK: x86-64 has a faster method that could be ported.
-+ */
-+ spin_lock(&tlbstate_lock);
-+
-+ flush_mm = mm;
-+ flush_va = va;
-+ cpus_or(flush_cpumask, cpumask, flush_cpumask);
-+ /*
-+ * We have to send the IPI only to
-+ * CPUs affected.
-+ */
-+ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
-+
-+ while (!cpus_empty(flush_cpumask))
-+ /* nothing. lockup detection does not belong here */
-+ cpu_relax();
-+
-+ flush_mm = NULL;
-+ flush_va = 0;
-+ spin_unlock(&tlbstate_lock);
-+}
-+
-+void flush_tlb_current_task(void)
-+{
-+ struct mm_struct *mm = current->mm;
-+ cpumask_t cpu_mask;
-+
-+ preempt_disable();
-+ cpu_mask = mm->cpu_vm_mask;
-+ cpu_clear(smp_processor_id(), cpu_mask);
-+
-+ local_flush_tlb();
-+ if (!cpus_empty(cpu_mask))
-+ flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-+ preempt_enable();
-+}
-+
-+void flush_tlb_mm (struct mm_struct * mm)
-+{
-+ cpumask_t cpu_mask;
-+
-+ preempt_disable();
-+ cpu_mask = mm->cpu_vm_mask;
-+ cpu_clear(smp_processor_id(), cpu_mask);
-+
-+ if (current->active_mm == mm) {
-+ if (current->mm)
-+ local_flush_tlb();
-+ else
-+ leave_mm(smp_processor_id());
-+ }
-+ if (!cpus_empty(cpu_mask))
-+ flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-+
-+ preempt_enable();
-+}
-+
-+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
-+{
-+ struct mm_struct *mm = vma->vm_mm;
-+ cpumask_t cpu_mask;
-+
-+ preempt_disable();
-+ cpu_mask = mm->cpu_vm_mask;
-+ cpu_clear(smp_processor_id(), cpu_mask);
-+
-+ if (current->active_mm == mm) {
-+ if(current->mm)
-+ __flush_tlb_one(va);
-+ else
-+ leave_mm(smp_processor_id());
-+ }
-+
-+ if (!cpus_empty(cpu_mask))
-+ flush_tlb_others(cpu_mask, mm, va);
-+
-+ preempt_enable();
-+}
-+EXPORT_SYMBOL(flush_tlb_page);
-+
-+static void do_flush_tlb_all(void* info)
-+{
-+ unsigned long cpu = smp_processor_id();
-+
-+ __flush_tlb_all();
-+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
-+ leave_mm(cpu);
-+}
-+
-+void flush_tlb_all(void)
-+{
-+ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
-+}
-+
-+#else
-+
-+irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id)
-+{ return 0; }
-+void flush_tlb_current_task(void)
-+{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
-+void flush_tlb_mm(struct mm_struct * mm)
-+{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
-+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
-+{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
-+EXPORT_SYMBOL(flush_tlb_page);
-+void flush_tlb_all(void)
-+{ xen_tlb_flush_all(); }
-+
-+#endif /* XEN */
-+
-+/*
-+ * this function sends a 'reschedule' IPI to another CPU.
-+ * it goes straight through and wastes no time serializing
-+ * anything. Worst case is that we lose a reschedule ...
-+ */
-+void xen_smp_send_reschedule(int cpu)
-+{
-+ WARN_ON(cpu_is_offline(cpu));
-+ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
-+}
-+
-+/*
-+ * Structure and data for smp_call_function(). This is designed to minimise
-+ * static memory requirements. It also looks cleaner.
-+ */
-+static DEFINE_SPINLOCK(call_lock);
-+
-+struct call_data_struct {
-+ void (*func) (void *info);
-+ void *info;
-+ atomic_t started;
-+ atomic_t finished;
-+ int wait;
-+};
-+
-+void lock_ipi_call_lock(void)
-+{
-+ spin_lock_irq(&call_lock);
-+}
-+
-+void unlock_ipi_call_lock(void)
-+{
-+ spin_unlock_irq(&call_lock);
-+}
-+
-+static struct call_data_struct *call_data;
-+
-+static void __smp_call_function(void (*func) (void *info), void *info,
-+ int nonatomic, int wait)
-+{
-+ struct call_data_struct data;
-+ int cpus = num_online_cpus() - 1;
-+
-+ if (!cpus)
-+ return;
-+
-+ data.func = func;
-+ data.info = info;
-+ atomic_set(&data.started, 0);
-+ data.wait = wait;
-+ if (wait)
-+ atomic_set(&data.finished, 0);
-+
-+ call_data = &data;
-+ mb();
-+
-+ /* Send a message to all other CPUs and wait for them to respond */
-+ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-+
-+ /* Wait for response */
-+ while (atomic_read(&data.started) != cpus)
-+ cpu_relax();
-+
-+ if (wait)
-+ while (atomic_read(&data.finished) != cpus)
-+ cpu_relax();
-+}
-+
-+
-+/**
-+ * smp_call_function_mask(): Run a function on a set of other CPUs.
-+ * @mask: The set of cpus to run on. Must not include the current cpu.
-+ * @func: The function to run. This must be fast and non-blocking.
-+ * @info: An arbitrary pointer to pass to the function.
-+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
-+ *
-+ * Returns 0 on success, else a negative status code.
-+ *
-+ * If @wait is true, then returns once @func has returned; otherwise
-+ * it returns just before the target cpu calls @func.
-+ *
-+ * You must not call this function with disabled interrupts or from a
-+ * hardware interrupt handler or from a bottom half handler.
-+ */
-+int
-+xen_smp_call_function_mask(cpumask_t mask,
-+ void (*func)(void *), void *info,
-+ int wait)
-+{
-+ struct call_data_struct data;
-+ cpumask_t allbutself;
-+ int cpus;
-+
-+ /* Can deadlock when called with interrupts disabled */
-+ WARN_ON(irqs_disabled());
-+
-+ /* Holding any lock stops cpus from going down. */
-+ spin_lock(&call_lock);
-+
-+ allbutself = cpu_online_map;
-+ cpu_clear(smp_processor_id(), allbutself);
-+
-+ cpus_and(mask, mask, allbutself);
-+ cpus = cpus_weight(mask);
-+
-+ if (!cpus) {
-+ spin_unlock(&call_lock);
-+ return 0;
-+ }
-+
-+ data.func = func;
-+ data.info = info;
-+ atomic_set(&data.started, 0);
-+ data.wait = wait;
-+ if (wait)
-+ atomic_set(&data.finished, 0);
-+
-+ call_data = &data;
-+ mb();
-+
-+ /* Send a message to other CPUs */
-+ if (cpus_equal(mask, allbutself))
-+ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-+ else
-+ send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-+
-+ /* Wait for response */
-+ while (atomic_read(&data.started) != cpus)
-+ barrier();
-+
-+ if (wait)
-+ while (atomic_read(&data.finished) != cpus)
-+ barrier();
-+ spin_unlock(&call_lock);
-+
-+ return 0;
-+}
-+
-+static void stop_this_cpu (void * dummy)
-+{
-+ local_irq_disable();
-+ /*
-+ * Remove this CPU:
-+ */
-+ cpu_clear(smp_processor_id(), cpu_online_map);
-+ mask_evtchn_local();
-+ if (cpu_data[smp_processor_id()].hlt_works_ok)
-+ for(;;) halt();
-+ for (;;);
-+}
-+
-+/*
-+ * this function calls the 'stop' function on all other CPUs in the system.
-+ */
-+
-+void xen_smp_send_stop(void)
-+{
-+ /* Don't deadlock on the call lock in panic */
-+ int nolock = !spin_trylock(&call_lock);
-+ unsigned long flags;
-+
-+ local_irq_save(flags);
-+ __smp_call_function(stop_this_cpu, NULL, 0, 0);
-+ if (!nolock)
-+ spin_unlock(&call_lock);
-+ mask_evtchn_local();
-+ local_irq_restore(flags);
-+}
-+
-+/*
-+ * Reschedule call back. Nothing to do,
-+ * all the work is done automatically when
-+ * we return from the interrupt.
-+ */
-+irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
-+{
-+
-+ return IRQ_HANDLED;
-+}
-+
-+#include <linux/kallsyms.h>
-+irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
-+{
-+ void (*func) (void *info) = call_data->func;
-+ void *info = call_data->info;
-+ int wait = call_data->wait;
-+
-+ /*
-+ * Notify initiating CPU that I've grabbed the data and am
-+ * about to execute the function
-+ */
-+ mb();
-+ atomic_inc(&call_data->started);
-+ /*
-+ * At this point the info structure may be out of scope unless wait==1
-+ */
-+ irq_enter();
-+ (*func)(info);
-+ irq_exit();
-+
-+ if (wait) {
-+ mb();
-+ atomic_inc(&call_data->finished);
-+ }
-+
-+ return IRQ_HANDLED;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/swiotlb.c ubuntu-gutsy-xen/arch/i386/kernel/swiotlb.c
---- ubuntu-gutsy/arch/i386/kernel/swiotlb.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/swiotlb.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,741 @@
-+/*
-+ * Dynamic DMA mapping support.
-+ *
-+ * This implementation is a fallback for platforms that do not support
-+ * I/O TLBs (aka DMA address translation hardware).
-+ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
-+ * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
-+ * Copyright (C) 2000, 2003 Hewlett-Packard Co
-+ * David Mosberger-Tang <davidm@hpl.hp.com>
-+ * Copyright (C) 2005 Keir Fraser <keir@xensource.com>
-+ */
-+
-+#include <linux/cache.h>
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ctype.h>
-+#include <linux/init.h>
-+#include <linux/bootmem.h>
-+#include <linux/highmem.h>
-+#include <asm/io.h>
-+#include <asm/pci.h>
-+#include <asm/dma.h>
-+#include <asm/uaccess.h>
-+#include <xen/gnttab.h>
-+#include <xen/interface/memory.h>
-+#include <asm-i386/mach-xen/asm/gnttab_dma.h>
-+
-+int swiotlb;
-+EXPORT_SYMBOL(swiotlb);
-+
-+#define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))
-+
-+/*
-+ * Maximum allowable number of contiguous slabs to map,
-+ * must be a power of 2. What is the appropriate value ?
-+ * The complexity of {map,unmap}_single is linearly dependent on this value.
-+ */
-+#define IO_TLB_SEGSIZE 128
-+
-+/*
-+ * log of the size of each IO TLB slab. The number of slabs is command line
-+ * controllable.
-+ */
-+#define IO_TLB_SHIFT 11
-+
-+int swiotlb_force;
-+
-+static char *iotlb_virt_start;
-+static unsigned long iotlb_nslabs;
-+
-+/*
-+ * Used to do a quick range check in swiotlb_unmap_single and
-+ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
-+ * API.
-+ */
-+static unsigned long iotlb_pfn_start, iotlb_pfn_end;
-+
-+/* Does the given dma address reside within the swiotlb aperture? */
-+static inline int in_swiotlb_aperture(dma_addr_t dev_addr)
-+{
-+ unsigned long pfn = mfn_to_local_pfn(dev_addr >> PAGE_SHIFT);
-+ return (pfn_valid(pfn)
-+ && (pfn >= iotlb_pfn_start)
-+ && (pfn < iotlb_pfn_end));
-+}
-+
-+/*
-+ * When the IOMMU overflows we return a fallback buffer. This sets the size.
-+ */
-+static unsigned long io_tlb_overflow = 32*1024;
-+
-+void *io_tlb_overflow_buffer;
-+
-+/*
-+ * This is a free list describing the number of free entries available from
-+ * each index
-+ */
-+static unsigned int *io_tlb_list;
-+static unsigned int io_tlb_index;
-+
-+/*
-+ * We need to save away the original address corresponding to a mapped entry
-+ * for the sync operations.
-+ */
-+static struct phys_addr {
-+ struct page *page;
-+ unsigned int offset;
-+} *io_tlb_orig_addr;
-+
-+/*
-+ * Protect the above data structures in the map and unmap calls
-+ */
-+static DEFINE_SPINLOCK(io_tlb_lock);
-+
-+static unsigned int dma_bits;
-+static unsigned int __initdata max_dma_bits = 32;
-+static int __init
-+setup_dma_bits(char *str)
-+{
-+ max_dma_bits = simple_strtoul(str, NULL, 0);
-+ return 0;
-+}
-+__setup("dma_bits=", setup_dma_bits);
-+
-+static int __init
-+setup_io_tlb_npages(char *str)
-+{
-+ /* Unlike ia64, the size is the aperture in megabytes, not 'slabs'! */
-+ if (isdigit(*str)) {
-+ iotlb_nslabs = simple_strtoul(str, &str, 0) <<
-+ (20 - IO_TLB_SHIFT);
-+ iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
-+ /* Round up to power of two (xen_create_contiguous_region). */
-+ while (iotlb_nslabs & (iotlb_nslabs-1))
-+ iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
-+ }
-+ if (*str == ',')
-+ ++str;
-+ /*
-+ * NB. 'force' enables the swiotlb, but doesn't force its use for
-+ * every DMA like it does on native Linux. 'off' forcibly disables
-+ * use of the swiotlb.
-+ */
-+ if (!strcmp(str, "force"))
-+ swiotlb_force = 1;
-+ else if (!strcmp(str, "off"))
-+ swiotlb_force = -1;
-+ return 1;
-+}
-+__setup("swiotlb=", setup_io_tlb_npages);
-+/* make io_tlb_overflow tunable too? */
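Editorial illustration of the conversion above; the numbers assume IO_TLB_SHIFT == 11 and IO_TLB_SEGSIZE == 128, as defined earlier in this file.

/*
 * Worked example of setup_io_tlb_npages():
 *
 *   swiotlb=64  ->  64 << (20 - 11) = 32768 slabs
 *               ->  ALIGN(32768, 128) = 32768 (already aligned)
 *               ->  already a power of two, so the rounding loop is a no-op
 *               ->  aperture = 32768 * 2KB = 64MB
 *
 *   swiotlb=24  ->  24 << 9 = 12288 slabs
 *               ->  ALIGN(12288, 128) = 12288
 *               ->  rounded up to the next power of two: 16384 slabs
 *               ->  aperture = 16384 * 2KB = 32MB
 */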
-+
-+/*
-+ * Statically reserve bounce buffer space and initialize bounce buffer data
-+ * structures for the software IO TLB used to implement the PCI DMA API.
-+ */
-+void __init
-+swiotlb_init_with_default_size(size_t default_size)
-+{
-+ unsigned long i, bytes;
-+ int rc;
-+
-+ if (!iotlb_nslabs) {
-+ iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
-+ iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
-+ /* Round up to power of two (xen_create_contiguous_region). */
-+ while (iotlb_nslabs & (iotlb_nslabs-1))
-+ iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
-+ }
-+
-+ bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
-+
-+ /*
-+ * Get IO TLB memory from the low pages
-+ */
-+ iotlb_virt_start = alloc_bootmem_low_pages(bytes);
-+ if (!iotlb_virt_start)
-+ panic("Cannot allocate SWIOTLB buffer!\n");
-+
-+ dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
-+ for (i = 0; i < iotlb_nslabs; i += IO_TLB_SEGSIZE) {
-+ do {
-+ rc = xen_create_contiguous_region(
-+ (unsigned long)iotlb_virt_start + (i << IO_TLB_SHIFT),
-+ get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
-+ dma_bits);
-+ } while (rc && dma_bits++ < max_dma_bits);
-+ if (rc) {
-+ if (i == 0)
-+ panic("No suitable physical memory available for SWIOTLB buffer!\n"
-+ "Use dom0_mem Xen boot parameter to reserve\n"
-+ "some DMA memory (e.g., dom0_mem=-128M).\n");
-+ iotlb_nslabs = i;
-+ i <<= IO_TLB_SHIFT;
-+ free_bootmem(__pa(iotlb_virt_start + i), bytes - i);
-+ bytes = i;
-+ for (dma_bits = 0; i > 0; i -= IO_TLB_SEGSIZE << IO_TLB_SHIFT) {
-+ unsigned int bits = fls64(virt_to_bus(iotlb_virt_start + i - 1));
-+
-+ if (bits > dma_bits)
-+ dma_bits = bits;
-+ }
-+ break;
-+ }
-+ }
-+
-+ /*
-+ * Allocate and initialize the free list array. This array is used
-+ * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
-+ */
-+ io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
-+ for (i = 0; i < iotlb_nslabs; i++)
-+ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-+ io_tlb_index = 0;
-+ io_tlb_orig_addr = alloc_bootmem(
-+ iotlb_nslabs * sizeof(*io_tlb_orig_addr));
-+
-+ /*
-+ * Get the overflow emergency buffer
-+ */
-+ io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
-+ if (!io_tlb_overflow_buffer)
-+ panic("Cannot allocate SWIOTLB overflow buffer!\n");
-+
-+ do {
-+ rc = xen_create_contiguous_region(
-+ (unsigned long)io_tlb_overflow_buffer,
-+ get_order(io_tlb_overflow),
-+ dma_bits);
-+ } while (rc && dma_bits++ < max_dma_bits);
-+ if (rc)
-+ panic("No suitable physical memory available for SWIOTLB overflow buffer!\n");
-+
-+ iotlb_pfn_start = __pa(iotlb_virt_start) >> PAGE_SHIFT;
-+ iotlb_pfn_end = iotlb_pfn_start + (bytes >> PAGE_SHIFT);
-+
-+ printk(KERN_INFO "Software IO TLB enabled: \n"
-+ " Aperture: %lu megabytes\n"
-+ " Kernel range: %p - %p\n"
-+ " Address size: %u bits\n",
-+ bytes >> 20,
-+ iotlb_virt_start, iotlb_virt_start + bytes,
-+ dma_bits);
-+}
-+
-+void __init
-+swiotlb_init(void)
-+{
-+ long ram_end;
-+ size_t defsz = 64 * (1 << 20); /* 64MB default size */
-+
-+ if (swiotlb_force == 1) {
-+ swiotlb = 1;
-+ } else if ((swiotlb_force != -1) &&
-+ is_running_on_xen() &&
-+ is_initial_xendomain()) {
-+ /* Domain 0 always has a swiotlb. */
-+ ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
-+ if (ram_end <= 0x7ffff)
-+ defsz = 2 * (1 << 20); /* 2MB on systems with <2GB of RAM. */
-+ swiotlb = 1;
-+ }
-+
-+ if (swiotlb)
-+ swiotlb_init_with_default_size(defsz);
-+ else
-+ printk(KERN_INFO "Software IO TLB disabled\n");
-+}
-+
-+/*
-+ * We use __copy_to_user_inatomic to transfer to the host buffer because the
-+ * buffer may be mapped read-only (e.g., in the blkback driver) but lower-level
-+ * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an
-+ * unnecessary copy from the aperture to the host buffer, and a page fault.
-+ */
-+static void
-+__sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
-+{
-+ if (PageHighMem(buffer.page)) {
-+ size_t len, bytes;
-+ char *dev, *host, *kmp;
-+ len = size;
-+ while (len != 0) {
-+ unsigned long flags;
-+
-+ if (((bytes = len) + buffer.offset) > PAGE_SIZE)
-+ bytes = PAGE_SIZE - buffer.offset;
-+ local_irq_save(flags); /* protects KM_BOUNCE_READ */
-+ kmp = kmap_atomic(buffer.page, KM_BOUNCE_READ);
-+ dev = dma_addr + size - len;
-+ host = kmp + buffer.offset;
-+ if (dir == DMA_FROM_DEVICE) {
-+ if (__copy_to_user_inatomic(host, dev, bytes))
-+ /* inaccessible */;
-+ } else
-+ memcpy(dev, host, bytes);
-+ kunmap_atomic(kmp, KM_BOUNCE_READ);
-+ local_irq_restore(flags);
-+ len -= bytes;
-+ buffer.page++;
-+ buffer.offset = 0;
-+ }
-+ } else {
-+ char *host = (char *)phys_to_virt(
-+ page_to_pseudophys(buffer.page)) + buffer.offset;
-+ if (dir == DMA_FROM_DEVICE) {
-+ if (__copy_to_user_inatomic(host, dma_addr, size))
-+ /* inaccessible */;
-+ } else if (dir == DMA_TO_DEVICE)
-+ memcpy(dma_addr, host, size);
-+ }
-+}
-+
-+/*
-+ * Allocates bounce buffer and returns its kernel virtual address.
-+ */
-+static void *
-+map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
-+{
-+ unsigned long flags;
-+ char *dma_addr;
-+ unsigned int nslots, stride, index, wrap;
-+ struct phys_addr slot_buf;
-+ int i;
-+
-+ /*
-+ * For mappings greater than a page, we limit the stride (and
-+ * hence alignment) to a page size.
-+ */
-+ nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-+ if (size > PAGE_SIZE)
-+ stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
-+ else
-+ stride = 1;
-+
-+ BUG_ON(!nslots);
-+
-+ /*
-+ * Find suitable number of IO TLB entries size that will fit this
-+ * request and allocate a buffer from that IO TLB pool.
-+ */
-+ spin_lock_irqsave(&io_tlb_lock, flags);
-+ {
-+ wrap = index = ALIGN(io_tlb_index, stride);
-+
-+ if (index >= iotlb_nslabs)
-+ wrap = index = 0;
-+
-+ do {
-+ /*
-+ * If we find a slot that indicates we have 'nslots'
-+ * number of contiguous buffers, we allocate the
-+ * buffers from that slot and mark the entries as '0'
-+ * indicating unavailable.
-+ */
-+ if (io_tlb_list[index] >= nslots) {
-+ int count = 0;
-+
-+ for (i = index; i < (int)(index + nslots); i++)
-+ io_tlb_list[i] = 0;
-+ for (i = index - 1;
-+ (OFFSET(i, IO_TLB_SEGSIZE) !=
-+ IO_TLB_SEGSIZE -1) && io_tlb_list[i];
-+ i--)
-+ io_tlb_list[i] = ++count;
-+ dma_addr = iotlb_virt_start +
-+ (index << IO_TLB_SHIFT);
-+
-+ /*
-+ * Update the indices to avoid searching in
-+ * the next round.
-+ */
-+ io_tlb_index =
-+ ((index + nslots) < iotlb_nslabs
-+ ? (index + nslots) : 0);
-+
-+ goto found;
-+ }
-+ index += stride;
-+ if (index >= iotlb_nslabs)
-+ index = 0;
-+ } while (index != wrap);
-+
-+ spin_unlock_irqrestore(&io_tlb_lock, flags);
-+ return NULL;
-+ }
-+ found:
-+ spin_unlock_irqrestore(&io_tlb_lock, flags);
-+
-+ /*
-+ * Save away the mapping from the original address to the DMA address.
-+ * This is needed when we sync the memory. Then we sync the buffer if
-+ * needed.
-+ */
-+ slot_buf = buffer;
-+ for (i = 0; i < nslots; i++) {
-+ slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
-+ slot_buf.offset &= PAGE_SIZE - 1;
-+ io_tlb_orig_addr[index+i] = slot_buf;
-+ slot_buf.offset += 1 << IO_TLB_SHIFT;
-+ }
-+ if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
-+ __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
-+
-+ return dma_addr;
-+}
-+
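Editorial sketch: the free-list bookkeeping used by map_single() above is easier to follow with a small worked example. A segment size of 8 is used purely for brevity; the real IO_TLB_SEGSIZE is 128.

/*
 * io_tlb_list[i] holds the number of contiguous free slots starting at
 * index i, counted up to the next segment boundary.  With an 8-slot
 * segment:
 *
 *   initial state:            { 8, 7, 6, 5, 4, 3, 2, 1 }
 *   map 3 slots at index 2:   { 2, 1, 0, 0, 0, 3, 2, 1 }
 *
 * Slots 2..4 are zeroed (allocated) and the entries below the allocation
 * are re-counted downwards, which is exactly what the two loops in the
 * allocation path above do.
 */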
-+struct phys_addr dma_addr_to_phys_addr(char *dma_addr)
-+{
-+ int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
-+ struct phys_addr buffer = io_tlb_orig_addr[index];
-+ buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
-+ buffer.page += buffer.offset >> PAGE_SHIFT;
-+ buffer.offset &= PAGE_SIZE - 1;
-+ return buffer;
-+}
-+
-+/*
-+ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
-+ */
-+static void
-+unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
-+{
-+ unsigned long flags;
-+ int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-+ int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
-+ struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
-+
-+ /*
-+ * First, sync the memory before unmapping the entry
-+ */
-+ if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
-+ __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
-+
-+ /*
-+ * Return the buffer to the free list by setting the corresponding
-+ * entries to indicate the number of contiguous entries available.
-+ * While returning the entries to the free list, we merge the entries
-+ * with slots below and above the pool being returned.
-+ */
-+ spin_lock_irqsave(&io_tlb_lock, flags);
-+ {
-+ count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
-+ io_tlb_list[index + nslots] : 0);
-+ /*
-+ * Step 1: return the slots to the free list, merging the
-+ * slots with the succeeding slots.
-+ */
-+ for (i = index + nslots - 1; i >= index; i--)
-+ io_tlb_list[i] = ++count;
-+ /*
-+ * Step 2: merge the returned slots with the preceding slots,
-+ * if available (non-zero).
-+ */
-+ for (i = index - 1;
-+ (OFFSET(i, IO_TLB_SEGSIZE) !=
-+ IO_TLB_SEGSIZE -1) && io_tlb_list[i];
-+ i--)
-+ io_tlb_list[i] = ++count;
-+ }
-+ spin_unlock_irqrestore(&io_tlb_lock, flags);
-+}
-+
-+static void
-+sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
-+{
-+ struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
-+ BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
-+ __sync_single(buffer, dma_addr, size, dir);
-+}
-+
-+static void
-+swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
-+{
-+ /*
-+ * Ran out of IOMMU space for this operation. This is very bad.
-+ * Unfortunately the drivers cannot handle this operation properly
-+ * unless they check for pci_dma_mapping_error() (most don't).
-+ * When the mapping is small enough, return a static buffer to limit
-+ * the damage, or panic when the transfer is too big.
-+ */
-+ printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %zu bytes at "
-+ "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");
-+
-+ if (size > io_tlb_overflow && do_panic) {
-+ if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-+ panic("PCI-DMA: Memory would be corrupted\n");
-+ if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-+ panic("PCI-DMA: Random memory would be DMAed\n");
-+ }
-+}
-+
-+/*
-+ * Map a single buffer of the indicated size for DMA in streaming mode. The
-+ * PCI address to use is returned.
-+ *
-+ * Once the device is given the dma address, the device owns this memory until
-+ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
-+ */
-+dma_addr_t
-+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
-+{
-+ dma_addr_t dev_addr = gnttab_dma_map_page(virt_to_page(ptr)) +
-+ offset_in_page(ptr);
-+ void *map;
-+ struct phys_addr buffer;
-+
-+ BUG_ON(dir == DMA_NONE);
-+
-+ /*
-+ * If the pointer passed in happens to be in the device's DMA window,
-+ * we can safely return the device addr and not worry about bounce
-+ * buffering it.
-+ */
-+ if (!range_straddles_page_boundary(__pa(ptr), size) &&
-+ !address_needs_mapping(hwdev, dev_addr))
-+ return dev_addr;
-+
-+ /*
-+ * Oh well, have to allocate and map a bounce buffer.
-+ */
-+ gnttab_dma_unmap_page(dev_addr);
-+ buffer.page = virt_to_page(ptr);
-+ buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
-+ map = map_single(hwdev, buffer, size, dir);
-+ if (!map) {
-+ swiotlb_full(hwdev, size, dir, 1);
-+ map = io_tlb_overflow_buffer;
-+ }
-+
-+ dev_addr = virt_to_bus(map);
-+ return dev_addr;
-+}
-+
-+/*
-+ * Unmap a single streaming mode DMA translation. The dma_addr and size must
-+ * match what was provided for in a previous swiotlb_map_single call. All
-+ * other usages are undefined.
-+ *
-+ * After this call, reads by the cpu to the buffer are guaranteed to see
-+ * whatever the device wrote there.
-+ */
-+void
-+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
-+ int dir)
-+{
-+ BUG_ON(dir == DMA_NONE);
-+ if (in_swiotlb_aperture(dev_addr))
-+ unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
-+ else
-+ gnttab_dma_unmap_page(dev_addr);
-+}
-+
-+/*
-+ * Make physical memory consistent for a single streaming mode DMA translation
-+ * after a transfer.
-+ *
-+ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
-+ * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
-+ * call this function before doing so. At the next point you give the PCI dma
-+ * address back to the card, you must first perform a
-+ * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
-+ */
-+void
-+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-+ size_t size, int dir)
-+{
-+ BUG_ON(dir == DMA_NONE);
-+ if (in_swiotlb_aperture(dev_addr))
-+ sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
-+}
-+
-+void
-+swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
-+ size_t size, int dir)
-+{
-+ BUG_ON(dir == DMA_NONE);
-+ if (in_swiotlb_aperture(dev_addr))
-+ sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
-+}
-+
-+/*
-+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
-+ * This is the scatter-gather version of the above swiotlb_map_single
-+ * interface. Here the scatter gather list elements are each tagged with the
-+ * appropriate dma address and length. They are obtained via
-+ * sg_dma_{address,length}(SG).
-+ *
-+ * NOTE: An implementation may be able to use a smaller number of
-+ * DMA address/length pairs than there are SG table elements.
-+ * (for example via virtual mapping capabilities)
-+ * The routine returns the number of addr/length pairs actually
-+ * used, at most nelems.
-+ *
-+ * Device ownership issues as mentioned above for swiotlb_map_single are the
-+ * same here.
-+ */
-+int
-+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
-+ int dir)
-+{
-+ struct phys_addr buffer;
-+ dma_addr_t dev_addr;
-+ char *map;
-+ int i;
-+
-+ BUG_ON(dir == DMA_NONE);
-+
-+ for (i = 0; i < nelems; i++, sg++) {
-+ dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
-+
-+ if (range_straddles_page_boundary(page_to_pseudophys(sg->page)
-+ + sg->offset, sg->length)
-+ || address_needs_mapping(hwdev, dev_addr)) {
-+ gnttab_dma_unmap_page(dev_addr);
-+ buffer.page = sg->page;
-+ buffer.offset = sg->offset;
-+ map = map_single(hwdev, buffer, sg->length, dir);
-+ if (!map) {
-+ /* Don't panic here, we expect map_sg users
-+ to do proper error handling. */
-+ swiotlb_full(hwdev, sg->length, dir, 0);
-+ swiotlb_unmap_sg(hwdev, sg - i, i, dir);
-+ sg[0].dma_length = 0;
-+ return 0;
-+ }
-+ sg->dma_address = virt_to_bus(map);
-+ } else
-+ sg->dma_address = dev_addr;
-+ sg->dma_length = sg->length;
-+ }
-+ return nelems;
-+}
-+
-+/*
-+ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
-+ * concerning calls here are the same as for swiotlb_unmap_single() above.
-+ */
-+void
-+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
-+ int dir)
-+{
-+ int i;
-+
-+ BUG_ON(dir == DMA_NONE);
-+
-+ for (i = 0; i < nelems; i++, sg++)
-+ if (in_swiotlb_aperture(sg->dma_address))
-+ unmap_single(hwdev, bus_to_virt(sg->dma_address),
-+ sg->dma_length, dir);
-+ else
-+ gnttab_dma_unmap_page(sg->dma_address);
-+}
-+
-+/*
-+ * Make physical memory consistent for a set of streaming mode DMA translations
-+ * after a transfer.
-+ *
-+ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
-+ * and usage.
-+ */
-+void
-+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-+ int nelems, int dir)
-+{
-+ int i;
-+
-+ BUG_ON(dir == DMA_NONE);
-+
-+ for (i = 0; i < nelems; i++, sg++)
-+ if (in_swiotlb_aperture(sg->dma_address))
-+ sync_single(hwdev, bus_to_virt(sg->dma_address),
-+ sg->dma_length, dir);
-+}
-+
-+void
-+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-+ int nelems, int dir)
-+{
-+ int i;
-+
-+ BUG_ON(dir == DMA_NONE);
-+
-+ for (i = 0; i < nelems; i++, sg++)
-+ if (in_swiotlb_aperture(sg->dma_address))
-+ sync_single(hwdev, bus_to_virt(sg->dma_address),
-+ sg->dma_length, dir);
-+}
-+
-+#ifdef CONFIG_HIGHMEM
-+
-+dma_addr_t
-+swiotlb_map_page(struct device *hwdev, struct page *page,
-+ unsigned long offset, size_t size,
-+ enum dma_data_direction direction)
-+{
-+ struct phys_addr buffer;
-+ dma_addr_t dev_addr;
-+ char *map;
-+
-+ dev_addr = gnttab_dma_map_page(page) + offset;
-+ if (address_needs_mapping(hwdev, dev_addr)) {
-+ gnttab_dma_unmap_page(dev_addr);
-+ buffer.page = page;
-+ buffer.offset = offset;
-+ map = map_single(hwdev, buffer, size, direction);
-+ if (!map) {
-+ swiotlb_full(hwdev, size, direction, 1);
-+ map = io_tlb_overflow_buffer;
-+ }
-+ dev_addr = (dma_addr_t)virt_to_bus(map);
-+ }
-+
-+ return dev_addr;
-+}
-+
-+void
-+swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
-+ size_t size, enum dma_data_direction direction)
-+{
-+ BUG_ON(direction == DMA_NONE);
-+ if (in_swiotlb_aperture(dma_address))
-+ unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
-+ else
-+ gnttab_dma_unmap_page(dma_address);
-+}
-+
-+#endif
-+
-+int
-+swiotlb_dma_mapping_error(dma_addr_t dma_addr)
-+{
-+ return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
-+}
-+
-+/*
-+ * Return whether the given PCI device DMA address mask can be supported
-+ * properly. For example, if your device can only drive the low 24-bits
-+ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
-+ * this function.
-+ */
-+int
-+swiotlb_dma_supported (struct device *hwdev, u64 mask)
-+{
-+ return (mask >= ((1UL << dma_bits) - 1));
-+}
-+
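Editorial illustration of the check above; the value 30 for dma_bits is hypothetical, the real value is computed during swiotlb_init_with_default_size().

/*
 * Example: suppose dma_bits ended up as 30, i.e. every bounce slab sits
 * below the 1GB boundary.  Then:
 *
 *   swiotlb_dma_supported(dev, 0x00ffffffULL)  -> 0
 *       (a 24-bit mask cannot reach all bounce buffers)
 *   swiotlb_dma_supported(dev, 0xffffffffULL)  -> 1
 *       (a 32-bit mask covers the whole aperture)
 */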
-+EXPORT_SYMBOL(swiotlb_map_single);
-+EXPORT_SYMBOL(swiotlb_unmap_single);
-+EXPORT_SYMBOL(swiotlb_map_sg);
-+EXPORT_SYMBOL(swiotlb_unmap_sg);
-+EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
-+EXPORT_SYMBOL(swiotlb_sync_single_for_device);
-+EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
-+EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
-+EXPORT_SYMBOL(swiotlb_dma_mapping_error);
-+EXPORT_SYMBOL(swiotlb_dma_supported);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/sysenter.c ubuntu-gutsy-xen/arch/i386/kernel/sysenter.c
---- ubuntu-gutsy/arch/i386/kernel/sysenter.c 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/sysenter.c 2007-08-18 12:38:02.000000000 -0400
-@@ -37,6 +37,10 @@
- #define VDSO_DEFAULT VDSO_ENABLED
- #endif
-
-+#ifdef CONFIG_XEN
-+#include <xen/interface/callback.h>
-+#endif
-+
- /*
- * Should the kernel map a VDSO page into processes and pass its
- * address down to glibc upon exec()?
-@@ -175,6 +179,7 @@
-
- void enable_sep_cpu(void)
- {
-+#ifndef CONFIG_X86_NO_TSS
- int cpu = get_cpu();
- struct tss_struct *tss = &per_cpu(init_tss, cpu);
-
-@@ -189,6 +194,7 @@
- wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.esp1, 0);
- wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
- put_cpu();
-+#endif
- }
-
- static struct vm_area_struct gate_vma;
-@@ -242,6 +248,18 @@
-
- syscall_pages[0] = virt_to_page(syscall_page);
-
-+#ifdef CONFIG_XEN
-+ if (boot_cpu_has(X86_FEATURE_SEP)) {
-+ static struct callback_register __initdata sysenter = {
-+ .type = CALLBACKTYPE_sysenter,
-+ .address = { __KERNEL_CS, (unsigned long)sysenter_entry },
-+ };
-+
-+ if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) < 0)
-+ clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
-+ }
-+#endif
-+
- gate_vma_init();
-
- printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/time-xen.c ubuntu-gutsy-xen/arch/i386/kernel/time-xen.c
---- ubuntu-gutsy/arch/i386/kernel/time-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/time-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,1041 @@
-+/*
-+ * linux/arch/i386/kernel/time.c
-+ *
-+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
-+ *
-+ * This file contains the PC-specific time handling details:
-+ * reading the RTC at bootup, etc..
-+ * 1994-07-02 Alan Modra
-+ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
-+ * 1995-03-26 Markus Kuhn
-+ * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
-+ * precision CMOS clock update
-+ * 1996-05-03 Ingo Molnar
-+ * fixed time warps in do_[slow|fast]_gettimeoffset()
-+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
-+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
-+ * 1998-09-05 (Various)
-+ * More robust do_fast_gettimeoffset() algorithm implemented
-+ * (works with APM, Cyrix 6x86MX and Centaur C6),
-+ * monotonic gettimeofday() with fast_get_timeoffset(),
-+ * drift-proof precision TSC calibration on boot
-+ * (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
-+ * Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
-+ * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
-+ * 1998-12-16 Andrea Arcangeli
-+ * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
-+ * because was not accounting lost_ticks.
-+ * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
-+ * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
-+ * serialize accesses to xtime/lost_ticks).
-+ */
-+
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/param.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/interrupt.h>
-+#include <linux/time.h>
-+#include <linux/delay.h>
-+#include <linux/init.h>
-+#include <linux/smp.h>
-+#include <linux/module.h>
-+#include <linux/sysdev.h>
-+#include <linux/bcd.h>
-+#include <linux/efi.h>
-+#include <linux/mca.h>
-+#include <linux/sysctl.h>
-+#include <linux/percpu.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/posix-timers.h>
-+
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/irq.h>
-+#include <asm/msr.h>
-+#include <asm/delay.h>
-+#include <asm/mpspec.h>
-+#include <asm/uaccess.h>
-+#include <asm/processor.h>
-+#include <asm/timer.h>
-+#include <asm/time.h>
-+#include <asm/sections.h>
-+
-+#include "mach_time.h"
-+
-+#include <linux/timex.h>
-+#include <linux/clocksource.h>
-+
-+#include <asm/hpet.h>
-+
-+#include <asm/arch_hooks.h>
-+
-+#include <xen/evtchn.h>
-+#include <xen/interface/vcpu.h>
-+
-+#ifdef CONFIG_X86_32
-+#include <asm/i8253.h>
-+DEFINE_SPINLOCK(i8253_lock);
-+EXPORT_SYMBOL(i8253_lock);
-+#else
-+volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
-+#endif
-+
-+#define XEN_SHIFT 22
-+
-+unsigned int cpu_khz; /* Detected as we calibrate the TSC */
-+EXPORT_SYMBOL(cpu_khz);
-+
-+DEFINE_SPINLOCK(rtc_lock);
-+EXPORT_SYMBOL(rtc_lock);
-+
-+/* These are periodically updated in shared_info, and then copied here. */
-+struct shadow_time_info {
-+ u64 tsc_timestamp; /* TSC at last update of time vals. */
-+ u64 system_timestamp; /* Time, in nanosecs, since boot. */
-+ u32 tsc_to_nsec_mul;
-+ u32 tsc_to_usec_mul;
-+ int tsc_shift;
-+ u32 version;
-+};
-+static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
-+static struct timespec shadow_tv;
-+static u32 shadow_tv_version;
-+
-+/* Keep track of last time we did processing/updating of jiffies and xtime. */
-+static u64 processed_system_time; /* System time (ns) at last processing. */
-+static DEFINE_PER_CPU(u64, processed_system_time);
-+
-+/* How much CPU time was spent blocked and how much was 'stolen'? */
-+static DEFINE_PER_CPU(u64, processed_stolen_time);
-+static DEFINE_PER_CPU(u64, processed_blocked_time);
-+
-+/* Current runstate of each CPU (updated automatically by the hypervisor). */
-+static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
-+
-+/* Must be signed, as it's compared with s64 quantities which can be -ve. */
-+#define NS_PER_TICK (1000000000LL/HZ)
-+
-+static void __clock_was_set(struct work_struct *unused)
-+{
-+ clock_was_set();
-+}
-+static DECLARE_WORK(clock_was_set_work, __clock_was_set);
-+
-+static inline void __normalize_time(time_t *sec, s64 *nsec)
-+{
-+ while (*nsec >= NSEC_PER_SEC) {
-+ (*nsec) -= NSEC_PER_SEC;
-+ (*sec)++;
-+ }
-+ while (*nsec < 0) {
-+ (*nsec) += NSEC_PER_SEC;
-+ (*sec)--;
-+ }
-+}
-+
-+/* Does this guest OS track Xen time, or set its wall clock independently? */
-+static int independent_wallclock = 0;
-+static int __init __independent_wallclock(char *str)
-+{
-+ independent_wallclock = 1;
-+ return 1;
-+}
-+__setup("independent_wallclock", __independent_wallclock);
-+
-+/* Permitted clock jitter, in nsecs, beyond which a warning will be printed. */
-+static unsigned long permitted_clock_jitter = 10000000UL; /* 10ms */
-+static int __init __permitted_clock_jitter(char *str)
-+{
-+ permitted_clock_jitter = simple_strtoul(str, NULL, 0);
-+ return 1;
-+}
-+__setup("permitted_clock_jitter=", __permitted_clock_jitter);
-+
-+#if 0
-+static void delay_tsc(unsigned long loops)
-+{
-+ unsigned long bclock, now;
-+
-+ rdtscl(bclock);
-+ do {
-+ rep_nop();
-+ rdtscl(now);
-+ } while ((now - bclock) < loops);
-+}
-+
-+struct timer_opts timer_tsc = {
-+ .name = "tsc",
-+ .delay = delay_tsc,
-+};
-+#endif
-+
-+/*
-+ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
-+ * yielding a 64-bit result.
-+ */
-+static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
-+{
-+ u64 product;
-+#ifdef __i386__
-+ u32 tmp1, tmp2;
-+#endif
-+
-+ if (shift < 0)
-+ delta >>= -shift;
-+ else
-+ delta <<= shift;
-+
-+#ifdef __i386__
-+ __asm__ (
-+ "mul %5 ; "
-+ "mov %4,%%eax ; "
-+ "mov %%edx,%4 ; "
-+ "mul %5 ; "
-+ "xor %5,%5 ; "
-+ "add %4,%%eax ; "
-+ "adc %5,%%edx ; "
-+ : "=A" (product), "=r" (tmp1), "=r" (tmp2)
-+ : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
-+#else
-+ __asm__ (
-+ "mul %%rdx ; shrd $32,%%rdx,%%rax"
-+ : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
-+#endif
-+
-+ return product;
-+}
-+
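The inline assembly above computes (delta * mul_frac) >> 32 through a 96-bit intermediate. A portable C sketch of the same arithmetic is given here purely for clarity; it is not part of the patch and, like the asm, it silently drops anything above 64 bits of result.

static inline u64 scale_delta_portable(u64 delta, u32 mul_frac, int shift)
{
        u64 lo, hi;

        if (shift < 0)
                delta >>= -shift;
        else
                delta <<= shift;

        lo = (delta & 0xffffffffULL) * mul_frac;  /* low half  x fraction */
        hi = (delta >> 32) * mul_frac;            /* high half x fraction */

        /* Keep the integer part: ((hi << 32) + lo) >> 32 */
        return hi + (lo >> 32);
}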
-+#if 0 /* defined (__i386__) */
-+int read_current_timer(unsigned long *timer_val)
-+{
-+ rdtscl(*timer_val);
-+ return 0;
-+}
-+#endif
-+
-+static void init_cpu_khz(void)
-+{
-+ u64 __cpu_khz = 1000000ULL << 32;
-+ struct vcpu_time_info *info = &vcpu_info(0)->time;
-+ do_div(__cpu_khz, info->tsc_to_system_mul);
-+ if (info->tsc_shift < 0)
-+ cpu_khz = __cpu_khz << -info->tsc_shift;
-+ else
-+ cpu_khz = __cpu_khz >> info->tsc_shift;
-+}
-+
-+static u64 get_nsec_offset(struct shadow_time_info *shadow)
-+{
-+ u64 now, delta;
-+ rdtscll(now);
-+ delta = now - shadow->tsc_timestamp;
-+ return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
-+}
-+
-+static void __update_wallclock(time_t sec, long nsec)
-+{
-+ long wtm_nsec, xtime_nsec;
-+ time_t wtm_sec, xtime_sec;
-+ u64 tmp, wc_nsec;
-+
-+ /* Adjust wall-clock time base. */
-+ wc_nsec = processed_system_time;
-+ wc_nsec += sec * (u64)NSEC_PER_SEC;
-+ wc_nsec += nsec;
-+
-+ /* Split wallclock base into seconds and nanoseconds. */
-+ tmp = wc_nsec;
-+ xtime_nsec = do_div(tmp, 1000000000);
-+ xtime_sec = (time_t)tmp;
-+
-+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
-+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
-+
-+ set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
-+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-+
-+ ntp_clear();
-+}
-+
-+static void update_wallclock(void)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+
-+ do {
-+ shadow_tv_version = s->wc_version;
-+ rmb();
-+ shadow_tv.tv_sec = s->wc_sec;
-+ shadow_tv.tv_nsec = s->wc_nsec;
-+ rmb();
-+ } while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version));
-+
-+ if (!independent_wallclock)
-+ __update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec);
-+}
-+
-+/*
-+ * Reads a consistent set of time-base values from Xen, into a shadow data
-+ * area.
-+ */
-+static void get_time_values_from_xen(int cpu)
-+{
-+ struct vcpu_time_info *src;
-+ struct shadow_time_info *dst;
-+
-+ src = &vcpu_info(cpu)->time;
-+ dst = &per_cpu(shadow_time, cpu);
-+
-+ do {
-+ dst->version = src->version;
-+ rmb();
-+ dst->tsc_timestamp = src->tsc_timestamp;
-+ dst->system_timestamp = src->system_time;
-+ dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
-+ dst->tsc_shift = src->tsc_shift;
-+ rmb();
-+ } while ((src->version & 1) | (dst->version ^ src->version));
-+
-+ dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000;
-+}
-+
-+static inline int time_values_up_to_date(int cpu)
-+{
-+ struct vcpu_time_info *src;
-+ struct shadow_time_info *dst;
-+
-+ src = &vcpu_info(cpu)->time;
-+ dst = &per_cpu(shadow_time, cpu);
-+
-+ rmb();
-+ return (dst->version == src->version);
-+}
-+
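Both helpers above rely on the convention that Xen makes src->version odd while it is rewriting the record and bumps it to a new even value when done. A minimal, self-contained sketch of that reader-side protocol follows; it is an editorial illustration, not part of the patch.

struct snapshot {
        u32 version;
        u64 payload_a, payload_b;
};

static void read_consistent(volatile struct snapshot *src,
                            struct snapshot *dst)
{
        do {
                dst->version = src->version;
                rmb();                  /* read version before payload */
                dst->payload_a = src->payload_a;
                dst->payload_b = src->payload_b;
                rmb();                  /* read payload before re-check */
                /* Retry if the writer was mid-update (odd version) or the
                 * version changed while we were copying. */
        } while ((src->version & 1) | (dst->version ^ src->version));
}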
-+/*
-+ * This is a special lock that is owned by the CPU and holds the index
-+ * register we are working with. It is required for NMI access to the
-+ * CMOS/RTC registers. See include/asm-i386/mc146818rtc.h for details.
-+ */
-+volatile unsigned long cmos_lock = 0;
-+EXPORT_SYMBOL(cmos_lock);
-+
-+/* Routines for accessing the CMOS RAM/RTC. */
-+unsigned char rtc_cmos_read(unsigned char addr)
-+{
-+ unsigned char val;
-+ lock_cmos_prefix(addr);
-+ outb_p(addr, RTC_PORT(0));
-+ val = inb_p(RTC_PORT(1));
-+ lock_cmos_suffix(addr);
-+ return val;
-+}
-+EXPORT_SYMBOL(rtc_cmos_read);
-+
-+void rtc_cmos_write(unsigned char val, unsigned char addr)
-+{
-+ lock_cmos_prefix(addr);
-+ outb_p(addr, RTC_PORT(0));
-+ outb_p(val, RTC_PORT(1));
-+ lock_cmos_suffix(addr);
-+}
-+EXPORT_SYMBOL(rtc_cmos_write);
-+
-+static void sync_xen_wallclock(unsigned long dummy);
-+static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
-+static void sync_xen_wallclock(unsigned long dummy)
-+{
-+ time_t sec;
-+ s64 nsec;
-+ struct xen_platform_op op;
-+
-+ if (!ntp_synced() || independent_wallclock || !is_initial_xendomain())
-+ return;
-+
-+ write_seqlock_irq(&xtime_lock);
-+
-+ sec = xtime.tv_sec;
-+ nsec = xtime.tv_nsec;
-+ __normalize_time(&sec, &nsec);
-+
-+ op.cmd = XENPF_settime;
-+ op.u.settime.secs = sec;
-+ op.u.settime.nsecs = nsec;
-+ op.u.settime.system_time = processed_system_time;
-+ HYPERVISOR_platform_op(&op);
-+
-+ update_wallclock();
-+
-+ write_sequnlock_irq(&xtime_lock);
-+
-+ /* Once per minute. */
-+ mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ);
-+}
-+
-+static int set_rtc_mmss(unsigned long nowtime)
-+{
-+ int retval;
-+ unsigned long flags;
-+
-+ if (independent_wallclock || !is_initial_xendomain())
-+ return 0;
-+
-+ /* gets recalled with irq locally disabled */
-+ /* XXX - does irqsave resolve this? -johnstul */
-+ spin_lock_irqsave(&rtc_lock, flags);
-+ retval = set_wallclock(nowtime);
-+ spin_unlock_irqrestore(&rtc_lock, flags);
-+
-+ return retval;
-+}
-+
-+unsigned long long sched_clock(void)
-+{
-+ int cpu = get_cpu();
-+ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
-+ u64 time;
-+ u32 local_time_version;
-+
-+ do {
-+ local_time_version = shadow->version;
-+ barrier();
-+ time = shadow->system_timestamp + get_nsec_offset(shadow);
-+ if (!time_values_up_to_date(cpu))
-+ get_time_values_from_xen(cpu);
-+ barrier();
-+ } while (local_time_version != shadow->version);
-+
-+ put_cpu();
-+
-+ return time;
-+}
-+
-+unsigned long profile_pc(struct pt_regs *regs)
-+{
-+ unsigned long pc = instruction_pointer(regs);
-+
-+#ifdef __x86_64__
-+ /* Assume the lock function has either no stack frame or only a single word.
-+ This checks if the address on the stack looks like a kernel text address.
-+ There is a small window for false hits, but in that case the tick
-+ is just accounted to the spinlock function.
-+ Better would be to write these functions in assembler again
-+ and check exactly. */
-+ if (!user_mode_vm(regs) && in_lock_functions(pc)) {
-+ char *v = *(char **)regs->rsp;
-+ if ((v >= _stext && v <= _etext) ||
-+ (v >= _sinittext && v <= _einittext) ||
-+ (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
-+ return (unsigned long)v;
-+ return ((unsigned long *)regs->rsp)[1];
-+ }
-+#else
-+#ifdef CONFIG_SMP
-+ if (!v8086_mode(regs) && SEGMENT_IS_KERNEL_CODE(regs->xcs) &&
-+ in_lock_functions(pc)) {
-+#ifdef CONFIG_FRAME_POINTER
-+ return *(unsigned long *)(regs->ebp + 4);
-+#else
-+ unsigned long *sp = (unsigned long *)&regs->esp;
-+
-+ /* Return address is either directly at stack pointer
-+ or above a saved eflags. Eflags has bits 22-31 zero,
-+ kernel addresses don't. */
-+ if (sp[0] >> 22)
-+ return sp[0];
-+ if (sp[1] >> 22)
-+ return sp[1];
-+#endif
-+ }
-+#endif
-+#endif
-+
-+ return pc;
-+}
-+EXPORT_SYMBOL(profile_pc);
-+
-+/*
-+ * This is the same as the above, except we _also_ save the current
-+ * Time Stamp Counter value at the time of the timer interrupt, so that
-+ * we later on can estimate the time of day more exactly.
-+ */
-+irqreturn_t timer_interrupt(int irq, void *dev_id)
-+{
-+ s64 delta, delta_cpu, stolen, blocked;
-+ u64 sched_time;
-+ int i, cpu = smp_processor_id();
-+ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
-+ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
-+
-+ /*
-+ * Here we are in the timer irq handler. We just have irqs locally
-+ * disabled but we don't know if the timer_bh is running on the other
-+ * CPU. We need to avoid an SMP race with it. NOTE: we don't need
-+ * the irq version of write_lock because as just said we have irq
-+ * locally disabled. -arca
-+ */
-+ write_seqlock(&xtime_lock);
-+
-+ do {
-+ get_time_values_from_xen(cpu);
-+
-+ /* Obtain a consistent snapshot of elapsed wallclock cycles. */
-+ delta = delta_cpu =
-+ shadow->system_timestamp + get_nsec_offset(shadow);
-+ delta -= processed_system_time;
-+ delta_cpu -= per_cpu(processed_system_time, cpu);
-+
-+ /*
-+ * Obtain a consistent snapshot of stolen/blocked cycles. We
-+ * can use state_entry_time to detect if we get preempted here.
-+ */
-+ do {
-+ sched_time = runstate->state_entry_time;
-+ barrier();
-+ stolen = runstate->time[RUNSTATE_runnable] +
-+ runstate->time[RUNSTATE_offline] -
-+ per_cpu(processed_stolen_time, cpu);
-+ blocked = runstate->time[RUNSTATE_blocked] -
-+ per_cpu(processed_blocked_time, cpu);
-+ barrier();
-+ } while (sched_time != runstate->state_entry_time);
-+ } while (!time_values_up_to_date(cpu));
-+
-+ if ((unlikely(delta < -(s64)permitted_clock_jitter) ||
-+ unlikely(delta_cpu < -(s64)permitted_clock_jitter))
-+ && printk_ratelimit()) {
-+ printk("Timer ISR/%d: Time went backwards: "
-+ "delta=%lld delta_cpu=%lld shadow=%lld "
-+ "off=%lld processed=%lld cpu_processed=%lld\n",
-+ cpu, delta, delta_cpu, shadow->system_timestamp,
-+ (s64)get_nsec_offset(shadow),
-+ processed_system_time,
-+ per_cpu(processed_system_time, cpu));
-+ for (i = 0; i < num_online_cpus(); i++)
-+ printk(" %d: %lld\n", i,
-+ per_cpu(processed_system_time, i));
-+ }
-+
-+ /* System-wide jiffy work. */
-+ if (delta >= NS_PER_TICK) {
-+ do_div(delta, NS_PER_TICK);
-+ processed_system_time += delta * NS_PER_TICK;
-+ while (delta > HZ) {
-+ do_timer(HZ);
-+ delta -= HZ;
-+ }
-+ do_timer(delta);
-+ }
-+
-+ if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
-+ update_wallclock();
-+ if (keventd_up())
-+ schedule_work(&clock_was_set_work);
-+ }
-+
-+ write_sequnlock(&xtime_lock);
-+
-+ /*
-+ * Account stolen ticks.
-+ * HACK: Passing NULL to account_steal_time()
-+ * ensures that the ticks are accounted as stolen.
-+ */
-+ if ((stolen > 0) && (delta_cpu > 0)) {
-+ delta_cpu -= stolen;
-+ if (unlikely(delta_cpu < 0))
-+ stolen += delta_cpu; /* clamp local-time progress */
-+ do_div(stolen, NS_PER_TICK);
-+ per_cpu(processed_stolen_time, cpu) += stolen * NS_PER_TICK;
-+ per_cpu(processed_system_time, cpu) += stolen * NS_PER_TICK;
-+ account_steal_time(NULL, (cputime_t)stolen);
-+ }
-+
-+ /*
-+ * Account blocked ticks.
-+ * HACK: Passing idle_task to account_steal_time()
-+ * ensures that the ticks are accounted as idle/wait.
-+ */
-+ if ((blocked > 0) && (delta_cpu > 0)) {
-+ delta_cpu -= blocked;
-+ if (unlikely(delta_cpu < 0))
-+ blocked += delta_cpu; /* clamp local-time progress */
-+ do_div(blocked, NS_PER_TICK);
-+ per_cpu(processed_blocked_time, cpu) += blocked * NS_PER_TICK;
-+ per_cpu(processed_system_time, cpu) += blocked * NS_PER_TICK;
-+ account_steal_time(idle_task(cpu), (cputime_t)blocked);
-+ }
-+
-+ /* Account user/system ticks. */
-+ if (delta_cpu > 0) {
-+ do_div(delta_cpu, NS_PER_TICK);
-+ per_cpu(processed_system_time, cpu) += delta_cpu * NS_PER_TICK;
-+ if (user_mode_vm(get_irq_regs()))
-+ account_user_time(current, (cputime_t)delta_cpu);
-+ else
-+ account_system_time(current, HARDIRQ_OFFSET,
-+ (cputime_t)delta_cpu);
-+ }
-+
-+ /* Offlined for more than a few seconds? Avoid lockup warnings. */
-+ if (stolen > 5*HZ)
-+ touch_softlockup_watchdog();
-+
-+ /* Local timer processing (see update_process_times()). */
-+ run_local_timers();
-+ if (rcu_pending(cpu))
-+ rcu_check_callbacks(cpu, user_mode_vm(get_irq_regs()));
-+ scheduler_tick();
-+ run_posix_cpu_timers(current);
-+ profile_tick(CPU_PROFILING);
-+
-+ return IRQ_HANDLED;
-+}
-+
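The tick accounting above can be followed with a small worked example (editorial; HZ == 100, so one tick is 10ms, and blocked time is taken to be zero for brevity).

/*
 *   delta_cpu = 30ms of local time since the last processing
 *   stolen    = 10ms spent runnable/offline
 *
 *   stolen accounting:  delta_cpu -= 10ms  -> 20ms
 *                       stolen /= NS_PER_TICK -> 1 tick accounted as stolen
 *   user/system:        delta_cpu /= NS_PER_TICK -> 2 ticks accounted to
 *                       the current task (user or system time)
 *
 * The per-CPU processed_* counters advance only by the whole ticks that
 * were accounted, so fractional remainders carry over to the next interrupt.
 */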
-+void mark_tsc_unstable(char *reason)
-+{
-+#ifndef CONFIG_XEN /* XXX Should tell the hypervisor about this fact. */
-+ tsc_unstable = 1;
-+#endif
-+}
-+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
-+
-+static cycle_t xen_clocksource_read(void)
-+{
-+ int cpu = get_cpu();
-+ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
-+ cycle_t ret;
-+
-+ get_time_values_from_xen(cpu);
-+
-+ ret = shadow->system_timestamp + get_nsec_offset(shadow);
-+
-+ put_cpu();
-+
-+#ifdef CONFIG_SMP
-+ for (;;) {
-+ static cycle_t last_ret;
-+#ifndef CONFIG_64BIT
-+ cycle_t last = cmpxchg64(&last_ret, 0, 0);
-+#else
-+ cycle_t last = last_ret;
-+#define cmpxchg64 cmpxchg
-+#endif
-+
-+ if ((s64)(ret - last) < 0) {
-+ if (last - ret > permitted_clock_jitter
-+ && printk_ratelimit())
-+ printk(KERN_WARNING "clocksource/%d: "
-+ "Time went backwards: "
-+ "delta=%Ld shadow=%Lu offset=%Lu\n",
-+ cpu, ret - last,
-+ shadow->system_timestamp,
-+ get_nsec_offset(shadow));
-+ ret = last;
-+ }
-+ if (cmpxchg64(&last_ret, last, ret) == last)
-+ break;
-+ }
-+#endif
-+
-+ return ret;
-+}
-+
-+static struct clocksource clocksource_xen = {
-+ .name = "xen",
-+ .rating = 400,
-+ .read = xen_clocksource_read,
-+ .mask = CLOCKSOURCE_MASK(64),
-+ .mult = 1 << XEN_SHIFT, /* time directly in nanoseconds */
-+ .shift = XEN_SHIFT,
-+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-+};
-+
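A short editorial note on the mult/shift pair above: the generic clocksource code computes ns = (cycles * mult) >> shift, so choosing mult == 1 << XEN_SHIFT together with shift == XEN_SHIFT makes the conversion an identity, which is why xen_clocksource_read() can return nanoseconds directly.

/* ns = (xen_clocksource_read() * (1 << XEN_SHIFT)) >> XEN_SHIFT
 *    =  xen_clocksource_read()        -- already in nanoseconds */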
-+static void init_missing_ticks_accounting(int cpu)
-+{
-+ struct vcpu_register_runstate_memory_area area;
-+ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
-+
-+ memset(runstate, 0, sizeof(*runstate));
-+
-+ area.addr.v = runstate;
-+ HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
-+
-+ per_cpu(processed_blocked_time, cpu) =
-+ runstate->time[RUNSTATE_blocked];
-+ per_cpu(processed_stolen_time, cpu) =
-+ runstate->time[RUNSTATE_runnable] +
-+ runstate->time[RUNSTATE_offline];
-+}
-+
-+/* not static: needed by APM */
-+unsigned long read_persistent_clock(void)
-+{
-+ unsigned long retval;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&rtc_lock, flags);
-+
-+ retval = get_wallclock();
-+
-+ spin_unlock_irqrestore(&rtc_lock, flags);
-+
-+ return retval;
-+}
-+
-+static void sync_cmos_clock(unsigned long dummy);
-+
-+static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
-+int no_sync_cmos_clock;
-+
-+static void sync_cmos_clock(unsigned long dummy)
-+{
-+ struct timeval now, next;
-+ int fail = 1;
-+
-+ /*
-+ * If we have an externally synchronized Linux clock, then update the
-+ * CMOS clock accordingly every ~11 minutes. set_rtc_mmss() has to be
-+ * called as close as possible to 500 ms before the new second starts.
-+ * This code is run on a timer. If the clock is set, that timer
-+ * may not expire at the correct time. Thus, we adjust...
-+ */
-+ if (!ntp_synced())
-+ /*
-+ * Not synced, exit, do not restart a timer (if one is
-+ * running, let it run out).
-+ */
-+ return;
-+
-+ do_gettimeofday(&now);
-+ if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
-+ now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
-+ fail = set_rtc_mmss(now.tv_sec);
-+
-+ next.tv_usec = USEC_AFTER - now.tv_usec;
-+ if (next.tv_usec <= 0)
-+ next.tv_usec += USEC_PER_SEC;
-+
-+ if (!fail)
-+ next.tv_sec = 659;
-+ else
-+ next.tv_sec = 0;
-+
-+ if (next.tv_usec >= USEC_PER_SEC) {
-+ next.tv_sec++;
-+ next.tv_usec -= USEC_PER_SEC;
-+ }
-+ mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
-+}
-+
-+void notify_arch_cmos_timer(void)
-+{
-+ if (!no_sync_cmos_clock)
-+ mod_timer(&sync_cmos_timer, jiffies + 1);
-+ mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
-+}
-+
-+static long clock_cmos_diff;
-+static unsigned long sleep_start;
-+
-+static int timer_suspend(struct sys_device *dev, pm_message_t state)
-+{
-+ /*
-+ * Estimate time zone so that set_time can update the clock
-+ */
-+ unsigned long ctime = read_persistent_clock();
-+
-+ clock_cmos_diff = -ctime;
-+ clock_cmos_diff += get_seconds();
-+ sleep_start = ctime;
-+ return 0;
-+}
-+
-+static int timer_resume(struct sys_device *dev)
-+{
-+ unsigned long flags;
-+ unsigned long sec;
-+ unsigned long ctime = read_persistent_clock();
-+ long sleep_length = (ctime - sleep_start) * HZ;
-+ struct timespec ts;
-+
-+ if (sleep_length < 0) {
-+ printk(KERN_WARNING "CMOS clock skew detected in timer resume!\n");
-+ /* The time after the resume must not be earlier than the time
-+ * before the suspend or some nasty things will happen
-+ */
-+ sleep_length = 0;
-+ ctime = sleep_start;
-+ }
-+
-+ sec = ctime + clock_cmos_diff;
-+ ts.tv_sec = sec;
-+ ts.tv_nsec = 0;
-+ do_settimeofday(&ts);
-+ write_seqlock_irqsave(&xtime_lock, flags);
-+ jiffies_64 += sleep_length;
-+ write_sequnlock_irqrestore(&xtime_lock, flags);
-+ touch_softlockup_watchdog();
-+ return 0;
-+}
-+
-+static struct sysdev_class timer_sysclass = {
-+ .resume = timer_resume,
-+ .suspend = timer_suspend,
-+ set_kset_name("timer"),
-+};
-+
-+
-+/* XXX this driverfs stuff should probably go elsewhere later -john */
-+static struct sys_device device_timer = {
-+ .id = 0,
-+ .cls = &timer_sysclass,
-+};
-+
-+static int time_init_device(void)
-+{
-+ int error = sysdev_class_register(&timer_sysclass);
-+ if (!error)
-+ error = sysdev_register(&device_timer);
-+ return error;
-+}
-+
-+device_initcall(time_init_device);
-+
-+extern void (*late_time_init)(void);
-+
-+/* Dynamically-mapped IRQ. */
-+DEFINE_PER_CPU(int, timer_irq);
-+
-+static void setup_cpu0_timer_irq(void)
-+{
-+ per_cpu(timer_irq, 0) =
-+ bind_virq_to_irqhandler(
-+ VIRQ_TIMER,
-+ 0,
-+ timer_interrupt,
-+ IRQF_DISABLED,
-+ "timer0",
-+ NULL);
-+ BUG_ON(per_cpu(timer_irq, 0) < 0);
-+}
-+
-+static struct vcpu_set_periodic_timer xen_set_periodic_tick = {
-+ .period_ns = NS_PER_TICK
-+};
-+
-+void __init time_init(void)
-+{
-+ init_cpu_khz();
-+ printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
-+ cpu_khz / 1000, cpu_khz % 1000);
-+
-+ HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, 0,
-+ &xen_set_periodic_tick);
-+
-+ get_time_values_from_xen(0);
-+
-+ processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
-+ per_cpu(processed_system_time, 0) = processed_system_time;
-+ init_missing_ticks_accounting(0);
-+
-+ clocksource_register(&clocksource_xen);
-+
-+ update_wallclock();
-+
-+#ifndef CONFIG_X86_64
-+ use_tsc_delay();
-+#endif
-+
-+ /* Cannot request_irq() until kmem is initialised. */
-+ late_time_init = setup_cpu0_timer_irq;
-+}
-+
-+/* Convert jiffies to system time. */
-+u64 jiffies_to_st(unsigned long j)
-+{
-+ unsigned long seq;
-+ long delta;
-+ u64 st;
-+
-+ do {
-+ seq = read_seqbegin(&xtime_lock);
-+ delta = j - jiffies;
-+ if (delta < 1) {
-+ /* Triggers in some wrap-around cases, but that's okay:
-+ * we just end up with a shorter timeout. */
-+ st = processed_system_time + NS_PER_TICK;
-+ } else if (((unsigned long)delta >> (BITS_PER_LONG-3)) != 0) {
-+ /* Very long timeout means there is no pending timer.
-+ * We indicate this to Xen by passing zero timeout. */
-+ st = 0;
-+ } else {
-+ st = processed_system_time + delta * (u64)NS_PER_TICK;
-+ }
-+ } while (read_seqretry(&xtime_lock, seq));
-+
-+ return st;
-+}
-+EXPORT_SYMBOL(jiffies_to_st);
-+
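A worked example of the three branches above (editorial; assumes HZ == 100, so NS_PER_TICK == 10,000,000, and a 32-bit BITS_PER_LONG).

/*
 *   j == jiffies + 5        ->  delta == 5
 *                           ->  st = processed_system_time + 50ms
 *   j == jiffies - 2        ->  delta < 1 (wrap-around / stale timeout)
 *                           ->  st = processed_system_time + one tick
 *   j == jiffies + (1<<30)  ->  delta >> 29 != 0 ("very long")
 *                           ->  st = 0, telling Xen there is no pending timer
 */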
-+/*
-+ * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
-+ * These functions are based on implementations from arch/s390/kernel/time.c
-+ */
-+static void stop_hz_timer(void)
-+{
-+ struct vcpu_set_singleshot_timer singleshot;
-+ unsigned int cpu = smp_processor_id();
-+ unsigned long j;
-+ int rc;
-+
-+ cpu_set(cpu, nohz_cpu_mask);
-+
-+ /* See matching smp_mb in rcu_start_batch in rcupdate.c. These mbs */
-+ /* ensure that if __rcu_pending (nested in rcu_needs_cpu) fetches a */
-+ /* value of rcp->cur that matches rdp->quiescbatch and allows us to */
-+ /* stop the hz timer then the cpumasks created for subsequent values */
-+ /* of cur in rcu_start_batch are guaranteed to pick up the updated */
-+ /* nohz_cpu_mask and so will not depend on this cpu. */
-+
-+ smp_mb();
-+
-+ /* Leave ourselves in tick mode if rcu or softirq or timer pending. */
-+ if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
-+ (j = next_timer_interrupt(), time_before_eq(j, jiffies))) {
-+ cpu_clear(cpu, nohz_cpu_mask);
-+ j = jiffies + 1;
-+ }
-+
-+ singleshot.timeout_abs_ns = jiffies_to_st(j);
-+ singleshot.flags = 0;
-+ rc = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &singleshot);
-+#if CONFIG_XEN_COMPAT <= 0x030004
-+ if (rc) {
-+ BUG_ON(rc != -ENOSYS);
-+ rc = HYPERVISOR_set_timer_op(singleshot.timeout_abs_ns);
-+ }
-+#endif
-+ BUG_ON(rc);
-+}
-+
-+static void start_hz_timer(void)
-+{
-+ cpu_clear(smp_processor_id(), nohz_cpu_mask);
-+}
-+
-+void xen_safe_halt(void)
-+{
-+ stop_hz_timer();
-+ /* Blocking includes an implicit local_irq_enable(). */
-+ HYPERVISOR_block();
-+ start_hz_timer();
-+}
-+EXPORT_SYMBOL(xen_safe_halt);
-+
-+void xen_halt(void)
-+{
-+ if (irqs_disabled())
-+ HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
-+}
-+EXPORT_SYMBOL(xen_halt);
-+
-+/* No locking required. Interrupts are disabled on all CPUs. */
-+void time_resume(void)
-+{
-+ unsigned int cpu;
-+
-+ init_cpu_khz();
-+
-+ for_each_online_cpu(cpu) {
-+ HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
-+ &xen_set_periodic_tick);
-+ get_time_values_from_xen(cpu);
-+ per_cpu(processed_system_time, cpu) =
-+ per_cpu(shadow_time, 0).system_timestamp;
-+ init_missing_ticks_accounting(cpu);
-+ }
-+
-+ processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
-+
-+ update_wallclock();
-+}
-+
-+#ifdef CONFIG_SMP
-+static char timer_name[NR_CPUS][15];
-+
-+int local_setup_timer(unsigned int cpu)
-+{
-+ int seq, irq;
-+
-+ BUG_ON(cpu == 0);
-+
-+ HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
-+ &xen_set_periodic_tick);
-+
-+ do {
-+ seq = read_seqbegin(&xtime_lock);
-+ /* Use cpu0 timestamp: cpu's shadow is not initialised yet. */
-+ per_cpu(processed_system_time, cpu) =
-+ per_cpu(shadow_time, 0).system_timestamp;
-+ init_missing_ticks_accounting(cpu);
-+ } while (read_seqretry(&xtime_lock, seq));
-+
-+ sprintf(timer_name[cpu], "timer%d", cpu);
-+ irq = bind_virq_to_irqhandler(VIRQ_TIMER,
-+ cpu,
-+ timer_interrupt,
-+ IRQF_DISABLED,
-+ timer_name[cpu],
-+ NULL);
-+ if (irq < 0)
-+ return irq;
-+ per_cpu(timer_irq, cpu) = irq;
-+
-+ return 0;
-+}
-+
-+void local_teardown_timer(unsigned int cpu)
-+{
-+ BUG_ON(cpu == 0);
-+ unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
-+}
-+#endif
-+
-+/*
-+ * /proc/sys/xen: This really belongs in another file. It can stay here for
-+ * now, however.
-+ */
-+static ctl_table xen_subtable[] = {
-+ {
-+ .ctl_name = 1,
-+ .procname = "independent_wallclock",
-+ .data = &independent_wallclock,
-+ .maxlen = sizeof(independent_wallclock),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec
-+ },
-+ {
-+ .ctl_name = 2,
-+ .procname = "permitted_clock_jitter",
-+ .data = &permitted_clock_jitter,
-+ .maxlen = sizeof(permitted_clock_jitter),
-+ .mode = 0644,
-+ .proc_handler = proc_doulongvec_minmax
-+ },
-+ { 0 }
-+};
-+static ctl_table xen_table[] = {
-+ {
-+ .ctl_name = 123,
-+ .procname = "xen",
-+ .mode = 0555,
-+ .child = xen_subtable},
-+ { 0 }
-+};
-+static int __init xen_sysctl_init(void)
-+{
-+ (void)register_sysctl_table(xen_table);
-+ return 0;
-+}
-+__initcall(xen_sysctl_init);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/traps.c ubuntu-gutsy-xen/arch/i386/kernel/traps.c
---- ubuntu-gutsy/arch/i386/kernel/traps.c 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/traps.c 2007-08-18 12:38:02.000000000 -0400
-@@ -647,18 +647,11 @@
- static __kprobes void
- io_check_error(unsigned char reason, struct pt_regs * regs)
- {
-- unsigned long i;
--
- printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
- show_registers(regs);
-
- /* Re-enable the IOCK line, wait for a few seconds */
-- reason = (reason & 0xf) | 8;
-- outb(reason, 0x61);
-- i = 2000;
-- while (--i) udelay(1000);
-- reason &= ~8;
-- outb(reason, 0x61);
-+ clear_io_check_error(reason);
- }
-
- static __kprobes void
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/traps-xen.c ubuntu-gutsy-xen/arch/i386/kernel/traps-xen.c
---- ubuntu-gutsy/arch/i386/kernel/traps-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/traps-xen.c 2007-08-19 06:36:13.000000000 -0400
-@@ -0,0 +1,1153 @@
-+/*
-+ * linux/arch/i386/traps.c
-+ *
-+ * Copyright (C) 1991, 1992 Linus Torvalds
-+ *
-+ * Pentium III FXSR, SSE support
-+ * Gareth Hughes <gareth@valinux.com>, May 2000
-+ */
-+
-+/*
-+ * 'Traps.c' handles hardware traps and faults after we have saved some
-+ * state in 'asm.s'.
-+ */
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/timer.h>
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/interrupt.h>
-+#include <linux/highmem.h>
-+#include <linux/kallsyms.h>
-+#include <linux/ptrace.h>
-+#include <linux/utsname.h>
-+#include <linux/kprobes.h>
-+#include <linux/kexec.h>
-+#include <linux/unwind.h>
-+#include <linux/uaccess.h>
-+#include <linux/nmi.h>
-+#include <linux/bug.h>
-+
-+#ifdef CONFIG_EISA
-+#include <linux/ioport.h>
-+#include <linux/eisa.h>
-+#endif
-+
-+#ifdef CONFIG_MCA
-+#include <linux/mca.h>
-+#endif
-+
-+#include <asm/processor.h>
-+#include <asm/system.h>
-+#include <asm/io.h>
-+#include <asm/atomic.h>
-+#include <asm/debugreg.h>
-+#include <asm/desc.h>
-+#include <asm/i387.h>
-+#include <asm/nmi.h>
-+#include <asm/unwind.h>
-+#include <asm/smp.h>
-+#include <asm/arch_hooks.h>
-+#include <linux/kdebug.h>
-+#include <asm/stacktrace.h>
-+
-+#include <linux/module.h>
-+
-+#include "mach_traps.h"
-+
-+int panic_on_unrecovered_nmi;
-+
-+asmlinkage int system_call(void);
-+
-+/* Do we ignore FPU interrupts ? */
-+char ignore_fpu_irq = 0;
-+
-+#ifndef CONFIG_X86_NO_IDT
-+/*
-+ * The IDT has to be page-aligned to simplify the Pentium
-+ * F0 0F bug workaround.. We have a special link segment
-+ * for this.
-+ */
-+struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
-+#endif
-+
-+asmlinkage void divide_error(void);
-+asmlinkage void debug(void);
-+asmlinkage void nmi(void);
-+asmlinkage void int3(void);
-+asmlinkage void overflow(void);
-+asmlinkage void bounds(void);
-+asmlinkage void invalid_op(void);
-+asmlinkage void device_not_available(void);
-+asmlinkage void coprocessor_segment_overrun(void);
-+asmlinkage void invalid_TSS(void);
-+asmlinkage void segment_not_present(void);
-+asmlinkage void stack_segment(void);
-+asmlinkage void general_protection(void);
-+asmlinkage void page_fault(void);
-+asmlinkage void coprocessor_error(void);
-+asmlinkage void simd_coprocessor_error(void);
-+asmlinkage void alignment_check(void);
-+#ifndef CONFIG_XEN
-+asmlinkage void spurious_interrupt_bug(void);
-+#else
-+asmlinkage void fixup_4gb_segment(void);
-+#endif
-+asmlinkage void machine_check(void);
-+
-+int kstack_depth_to_print = 24;
-+static unsigned int code_bytes = 64;
-+
-+static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
-+{
-+ return p > (void *)tinfo &&
-+ p < (void *)tinfo + THREAD_SIZE - 3;
-+}
-+
-+static inline unsigned long print_context_stack(struct thread_info *tinfo,
-+ unsigned long *stack, unsigned long ebp,
-+ struct stacktrace_ops *ops, void *data)
-+{
-+ unsigned long addr;
-+
-+#ifdef CONFIG_FRAME_POINTER
-+ while (valid_stack_ptr(tinfo, (void *)ebp)) {
-+ unsigned long new_ebp;
-+ addr = *(unsigned long *)(ebp + 4);
-+ ops->address(data, addr);
-+ /*
-+ * break out of recursive entries (such as
-+ * end_of_stack_stop_unwind_function). Also,
-+ * we can never allow a frame pointer to
-+ * move downwards!
-+ */
-+ new_ebp = *(unsigned long *)ebp;
-+ if (new_ebp <= ebp)
-+ break;
-+ ebp = new_ebp;
-+ }
-+#else
-+ while (valid_stack_ptr(tinfo, stack)) {
-+ addr = *stack++;
-+ if (__kernel_text_address(addr))
-+ ops->address(data, addr);
-+ }
-+#endif
-+ return ebp;
-+}
-+
-+#define MSG(msg) ops->warning(data, msg)
-+
-+void dump_trace(struct task_struct *task, struct pt_regs *regs,
-+ unsigned long *stack,
-+ struct stacktrace_ops *ops, void *data)
-+{
-+ unsigned long ebp = 0;
-+
-+ if (!task)
-+ task = current;
-+
-+ if (!stack) {
-+ unsigned long dummy;
-+ stack = &dummy;
-+ if (task && task != current)
-+ stack = (unsigned long *)task->thread.esp;
-+ }
-+
-+#ifdef CONFIG_FRAME_POINTER
-+ if (!ebp) {
-+ if (task == current) {
-+ /* Grab ebp right from our regs */
-+ asm ("movl %%ebp, %0" : "=r" (ebp) : );
-+ } else {
-+ /* ebp is the last reg pushed by switch_to */
-+ ebp = *(unsigned long *) task->thread.esp;
-+ }
-+ }
-+#endif
-+
-+ while (1) {
-+ struct thread_info *context;
-+ context = (struct thread_info *)
-+ ((unsigned long)stack & (~(THREAD_SIZE - 1)));
-+ ebp = print_context_stack(context, stack, ebp, ops, data);
-+ /* Should be after the line below, but somewhere
-+ in early boot context comes out corrupted and we
-+ can't reference it -AK */
-+ if (ops->stack(data, "IRQ") < 0)
-+ break;
-+ stack = (unsigned long*)context->previous_esp;
-+ if (!stack)
-+ break;
-+ touch_nmi_watchdog();
-+ }
-+}
-+EXPORT_SYMBOL(dump_trace);
-+
-+static void
-+print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
-+{
-+ printk(data);
-+ print_symbol(msg, symbol);
-+ printk("\n");
-+}
-+
-+static void print_trace_warning(void *data, char *msg)
-+{
-+ printk("%s%s\n", (char *)data, msg);
-+}
-+
-+static int print_trace_stack(void *data, char *name)
-+{
-+ return 0;
-+}
-+
-+/*
-+ * Print one address/symbol entry per line.
-+ */
-+static void print_trace_address(void *data, unsigned long addr)
-+{
-+ printk("%s [<%08lx>] ", (char *)data, addr);
-+ print_symbol("%s\n", addr);
-+}
-+
-+static struct stacktrace_ops print_trace_ops = {
-+ .warning = print_trace_warning,
-+ .warning_symbol = print_trace_warning_symbol,
-+ .stack = print_trace_stack,
-+ .address = print_trace_address,
-+};
-+
-+static void
-+show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
-+ unsigned long * stack, char *log_lvl)
-+{
-+ dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
-+ printk("%s =======================\n", log_lvl);
-+}
-+
-+void show_trace(struct task_struct *task, struct pt_regs *regs,
-+ unsigned long * stack)
-+{
-+ show_trace_log_lvl(task, regs, stack, "");
-+}
-+
-+static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
-+ unsigned long *esp, char *log_lvl)
-+{
-+ unsigned long *stack;
-+ int i;
-+
-+ if (esp == NULL) {
-+ if (task)
-+ esp = (unsigned long*)task->thread.esp;
-+ else
-+ esp = (unsigned long *)&esp;
-+ }
-+
-+ stack = esp;
-+ for(i = 0; i < kstack_depth_to_print; i++) {
-+ if (kstack_end(stack))
-+ break;
-+ if (i && ((i % 8) == 0))
-+ printk("\n%s ", log_lvl);
-+ printk("%08lx ", *stack++);
-+ }
-+ printk("\n%sCall Trace:\n", log_lvl);
-+ show_trace_log_lvl(task, regs, esp, log_lvl);
-+}
-+
-+void show_stack(struct task_struct *task, unsigned long *esp)
-+{
-+ printk(" ");
-+ show_stack_log_lvl(task, NULL, esp, "");
-+}
-+
-+/*
-+ * The architecture-independent dump_stack generator
-+ */
-+void dump_stack(void)
-+{
-+ unsigned long stack;
-+
-+ show_trace(current, NULL, &stack);
-+}
-+
-+EXPORT_SYMBOL(dump_stack);
-+
-+void show_registers(struct pt_regs *regs)
-+{
-+ int i;
-+ int in_kernel = 1;
-+ unsigned long esp;
-+ unsigned short ss, gs;
-+
-+ esp = (unsigned long) (&regs->esp);
-+ savesegment(ss, ss);
-+ savesegment(gs, gs);
-+ if (user_mode_vm(regs)) {
-+ in_kernel = 0;
-+ esp = regs->esp;
-+ ss = regs->xss & 0xffff;
-+ }
-+ print_modules();
-+ printk(KERN_EMERG "CPU: %d\n"
-+ KERN_EMERG "EIP: %04x:[<%08lx>] %s VLI\n"
-+ KERN_EMERG "EFLAGS: %08lx (%s %.*s)\n",
-+ smp_processor_id(), 0xffff & regs->xcs, regs->eip,
-+ print_tainted(), regs->eflags, init_utsname()->release,
-+ (int)strcspn(init_utsname()->version, " "),
-+ init_utsname()->version);
-+ print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
-+ printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
-+ regs->eax, regs->ebx, regs->ecx, regs->edx);
-+ printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
-+ regs->esi, regs->edi, regs->ebp, esp);
-+ printk(KERN_EMERG "ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
-+ regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss);
-+ printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
-+ TASK_COMM_LEN, current->comm, current->pid,
-+ current_thread_info(), current, task_thread_info(current));
-+ /*
-+ * When in-kernel, we also print out the stack and code at the
-+ * time of the fault..
-+ */
-+ if (in_kernel) {
-+ u8 *eip;
-+ unsigned int code_prologue = code_bytes * 43 / 64;
-+ unsigned int code_len = code_bytes;
-+ unsigned char c;
-+
-+ printk("\n" KERN_EMERG "Stack: ");
-+ show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
-+
-+ printk(KERN_EMERG "Code: ");
-+
-+ eip = (u8 *)regs->eip - code_prologue;
-+ if (eip < (u8 *)PAGE_OFFSET ||
-+ probe_kernel_address(eip, c)) {
-+ /* try starting at EIP */
-+ eip = (u8 *)regs->eip;
-+ code_len = code_len - code_prologue + 1;
-+ }
-+ for (i = 0; i < code_len; i++, eip++) {
-+ if (eip < (u8 *)PAGE_OFFSET ||
-+ probe_kernel_address(eip, c)) {
-+ printk(" Bad EIP value.");
-+ break;
-+ }
-+ if (eip == (u8 *)regs->eip)
-+ printk("<%02x> ", c);
-+ else
-+ printk("%02x ", c);
-+ }
-+ }
-+ printk("\n");
-+}
-+
-+int is_valid_bugaddr(unsigned long eip)
-+{
-+ unsigned short ud2;
-+
-+ if (eip < PAGE_OFFSET)
-+ return 0;
-+ if (probe_kernel_address((unsigned short *)eip, ud2))
-+ return 0;
-+
-+ return ud2 == 0x0b0f;
-+}
-+
-+/*
-+ * This is gone through when something in the kernel has done something bad and
-+ * is about to be terminated.
-+ */
-+void die(const char * str, struct pt_regs * regs, long err)
-+{
-+ static struct {
-+ spinlock_t lock;
-+ u32 lock_owner;
-+ int lock_owner_depth;
-+ } die = {
-+ .lock = __SPIN_LOCK_UNLOCKED(die.lock),
-+ .lock_owner = -1,
-+ .lock_owner_depth = 0
-+ };
-+ static int die_counter;
-+ unsigned long flags;
-+
-+ oops_enter();
-+
-+ if (die.lock_owner != raw_smp_processor_id()) {
-+ console_verbose();
-+ spin_lock_irqsave(&die.lock, flags);
-+ die.lock_owner = smp_processor_id();
-+ die.lock_owner_depth = 0;
-+ bust_spinlocks(1);
-+ }
-+ else
-+ local_save_flags(flags);
-+
-+ if (++die.lock_owner_depth < 3) {
-+ int nl = 0;
-+ unsigned long esp;
-+ unsigned short ss;
-+
-+ report_bug(regs->eip);
-+
-+ printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
-+#ifdef CONFIG_PREEMPT
-+ printk(KERN_EMERG "PREEMPT ");
-+ nl = 1;
-+#endif
-+#ifdef CONFIG_SMP
-+ if (!nl)
-+ printk(KERN_EMERG);
-+ printk("SMP ");
-+ nl = 1;
-+#endif
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+ if (!nl)
-+ printk(KERN_EMERG);
-+ printk("DEBUG_PAGEALLOC");
-+ nl = 1;
-+#endif
-+ if (nl)
-+ printk("\n");
-+ if (notify_die(DIE_OOPS, str, regs, err,
-+ current->thread.trap_no, SIGSEGV) !=
-+ NOTIFY_STOP) {
-+ show_registers(regs);
-+ /* Executive summary in case the oops scrolled away */
-+ esp = (unsigned long) (&regs->esp);
-+ savesegment(ss, ss);
-+ if (user_mode(regs)) {
-+ esp = regs->esp;
-+ ss = regs->xss & 0xffff;
-+ }
-+ printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
-+ print_symbol("%s", regs->eip);
-+ printk(" SS:ESP %04x:%08lx\n", ss, esp);
-+ }
-+ else
-+ regs = NULL;
-+ } else
-+ printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
-+
-+ bust_spinlocks(0);
-+ die.lock_owner = -1;
-+ spin_unlock_irqrestore(&die.lock, flags);
-+
-+ if (!regs)
-+ return;
-+
-+ if (kexec_should_crash(current))
-+ crash_kexec(regs);
-+
-+ if (in_interrupt())
-+ panic("Fatal exception in interrupt");
-+
-+ if (panic_on_oops)
-+ panic("Fatal exception");
-+
-+ oops_exit();
-+ do_exit(SIGSEGV);
-+}
-+
-+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
-+{
-+ if (!user_mode_vm(regs))
-+ die(str, regs, err);
-+}
-+
-+static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
-+ struct pt_regs * regs, long error_code,
-+ siginfo_t *info)
-+{
-+ struct task_struct *tsk = current;
-+
-+ if (regs->eflags & VM_MASK) {
-+ if (vm86)
-+ goto vm86_trap;
-+ goto trap_signal;
-+ }
-+
-+ if (!user_mode(regs))
-+ goto kernel_trap;
-+
-+ trap_signal: {
-+ /*
-+ * We want error_code and trap_no set for userspace faults and
-+ * kernelspace faults which result in die(), but not
-+ * kernelspace faults which are fixed up. die() gives the
-+ * process no chance to handle the signal and notice the
-+ * kernel fault information, so that won't result in polluting
-+ * the information about previously queued, but not yet
-+ * delivered, faults. See also do_general_protection below.
-+ */
-+ tsk->thread.error_code = error_code;
-+ tsk->thread.trap_no = trapnr;
-+
-+ if (info)
-+ force_sig_info(signr, info, tsk);
-+ else
-+ force_sig(signr, tsk);
-+ return;
-+ }
-+
-+ kernel_trap: {
-+ if (!fixup_exception(regs)) {
-+ tsk->thread.error_code = error_code;
-+ tsk->thread.trap_no = trapnr;
-+ die(str, regs, error_code);
-+ }
-+ return;
-+ }
-+
-+ vm86_trap: {
-+ int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
-+ if (ret) goto trap_signal;
-+ return;
-+ }
-+}
-+
-+#define DO_ERROR(trapnr, signr, str, name) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+ == NOTIFY_STOP) \
-+ return; \
-+ do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
-+}
-+
-+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+ siginfo_t info; \
-+ info.si_signo = signr; \
-+ info.si_errno = 0; \
-+ info.si_code = sicode; \
-+ info.si_addr = (void __user *)siaddr; \
-+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+ == NOTIFY_STOP) \
-+ return; \
-+ do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
-+}
-+
-+#define DO_VM86_ERROR(trapnr, signr, str, name) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+ == NOTIFY_STOP) \
-+ return; \
-+ do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
-+}
-+
-+#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+ siginfo_t info; \
-+ info.si_signo = signr; \
-+ info.si_errno = 0; \
-+ info.si_code = sicode; \
-+ info.si_addr = (void __user *)siaddr; \
-+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+ == NOTIFY_STOP) \
-+ return; \
-+ do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
-+}
-+
-+DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
-+#ifndef CONFIG_KPROBES
-+DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
-+#endif
-+DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
-+DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
-+DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
-+DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
-+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-+DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
-+DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
-+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
-+DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
-+
-+fastcall void __kprobes do_general_protection(struct pt_regs * regs,
-+ long error_code)
-+{
-+ if (regs->eflags & VM_MASK)
-+ goto gp_in_vm86;
-+
-+ if (!user_mode(regs))
-+ goto gp_in_kernel;
-+
-+ current->thread.error_code = error_code;
-+ current->thread.trap_no = 13;
-+ force_sig(SIGSEGV, current);
-+ return;
-+
-+gp_in_vm86:
-+ local_irq_enable();
-+ handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
-+ return;
-+
-+gp_in_kernel:
-+ if (!fixup_exception(regs)) {
-+ current->thread.error_code = error_code;
-+ current->thread.trap_no = 13;
-+ if (notify_die(DIE_GPF, "general protection fault", regs,
-+ error_code, 13, SIGSEGV) == NOTIFY_STOP)
-+ return;
-+ die("general protection fault", regs, error_code);
-+ }
-+}
-+
-+static __kprobes void
-+mem_parity_error(unsigned char reason, struct pt_regs * regs)
-+{
-+ printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
-+ "CPU %d.\n", reason, smp_processor_id());
-+ printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
-+ if (panic_on_unrecovered_nmi)
-+ panic("NMI: Not continuing");
-+
-+ printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
-+
-+ /* Clear and disable the memory parity error line. */
-+ clear_mem_error(reason);
-+}
-+
-+static __kprobes void
-+io_check_error(unsigned char reason, struct pt_regs * regs)
-+{
-+ printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
-+ show_registers(regs);
-+
-+ /* Re-enable the IOCK line, wait for a few seconds */
-+ clear_io_check_error(reason);
-+}
-+
-+static __kprobes void
-+unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
-+{
-+#ifdef CONFIG_MCA
-+ /* Might actually be able to figure out what the guilty party
-+ * is. */
-+ if( MCA_bus ) {
-+ mca_handle_nmi();
-+ return;
-+ }
-+#endif
-+ printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
-+ "CPU %d.\n", reason, smp_processor_id());
-+ printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
-+ if (panic_on_unrecovered_nmi)
-+ panic("NMI: Not continuing");
-+
-+ printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
-+}
-+
-+static DEFINE_SPINLOCK(nmi_print_lock);
-+
-+void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
-+{
-+ if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
-+ NOTIFY_STOP)
-+ return;
-+
-+ spin_lock(&nmi_print_lock);
-+ /*
-+ * We are in trouble anyway, lets at least try
-+ * to get a message out.
-+ */
-+ bust_spinlocks(1);
-+ printk(KERN_EMERG "%s", msg);
-+ printk(" on CPU%d, eip %08lx, registers:\n",
-+ smp_processor_id(), regs->eip);
-+ show_registers(regs);
-+ console_silent();
-+ spin_unlock(&nmi_print_lock);
-+ bust_spinlocks(0);
-+
-+ /* If we are in kernel we are probably nested up pretty bad
-+ * and might as well get out now while we still can.
-+ */
-+ if (!user_mode_vm(regs)) {
-+ current->thread.trap_no = 2;
-+ crash_kexec(regs);
-+ }
-+
-+ do_exit(SIGSEGV);
-+}
-+
-+static __kprobes void default_do_nmi(struct pt_regs * regs)
-+{
-+ unsigned char reason = 0;
-+
-+ /* Only the BSP gets external NMIs from the system. */
-+ if (!smp_processor_id())
-+ reason = get_nmi_reason();
-+
-+ if (!(reason & 0xc0)) {
-+ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
-+ == NOTIFY_STOP)
-+ return;
-+#ifdef CONFIG_X86_LOCAL_APIC
-+#ifndef CONFIG_XEN
-+ /*
-+ * Ok, so this is none of the documented NMI sources,
-+ * so it must be the NMI watchdog.
-+ */
-+ if (nmi_watchdog_tick(regs, reason))
-+ return;
-+#endif
-+ if (!do_nmi_callback(regs, smp_processor_id()))
-+#endif
-+ unknown_nmi_error(reason, regs);
-+
-+ return;
-+ }
-+ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
-+ return;
-+ if (reason & 0x80)
-+ mem_parity_error(reason, regs);
-+ if (reason & 0x40)
-+ io_check_error(reason, regs);
-+ /*
-+ * Reassert NMI in case it became active meanwhile
-+ * as it's edge-triggered.
-+ */
-+ reassert_nmi();
-+}
-+
-+fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code)
-+{
-+ int cpu;
-+
-+ nmi_enter();
-+
-+ cpu = smp_processor_id();
-+
-+ ++nmi_count(cpu);
-+
-+ default_do_nmi(regs);
-+
-+ nmi_exit();
-+}
-+
-+#ifdef CONFIG_KPROBES
-+fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
-+{
-+ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
-+ == NOTIFY_STOP)
-+ return;
-+ /* This is an interrupt gate, because kprobes wants interrupts
-+ disabled. Normal trap handlers don't. */
-+ restore_interrupts(regs);
-+ do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
-+}
-+#endif
-+
-+/*
-+ * Our handling of the processor debug registers is non-trivial.
-+ * We do not clear them on entry and exit from the kernel. Therefore
-+ * it is possible to get a watchpoint trap here from inside the kernel.
-+ * However, the code in ./ptrace.c has ensured that the user can
-+ * only set watchpoints on userspace addresses. Therefore the in-kernel
-+ * watchpoint trap can only occur in code which is reading/writing
-+ * from user space. Such code must not hold kernel locks (since it
-+ * can equally take a page fault), therefore it is safe to call
-+ * force_sig_info even though that claims and releases locks.
-+ *
-+ * Code in ./signal.c ensures that the debug control register
-+ * is restored before we deliver any signal, and therefore that
-+ * user code runs with the correct debug control register even though
-+ * we clear it here.
-+ *
-+ * Being careful here means that we don't have to be as careful in a
-+ * lot of more complicated places (task switching can be a bit lazy
-+ * about restoring all the debug state, and ptrace doesn't have to
-+ * find every occurrence of the TF bit that could be saved away even
-+ * by user code)
-+ */
-+fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
-+{
-+ unsigned int condition;
-+ struct task_struct *tsk = current;
-+
-+ get_debugreg(condition, 6);
-+
-+ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
-+ SIGTRAP) == NOTIFY_STOP)
-+ return;
-+ /* It's safe to allow irq's after DR6 has been saved */
-+ if (regs->eflags & X86_EFLAGS_IF)
-+ local_irq_enable();
-+
-+ /* Mask out spurious debug traps due to lazy DR7 setting */
-+ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
-+ if (!tsk->thread.debugreg[7])
-+ goto clear_dr7;
-+ }
-+
-+ if (regs->eflags & VM_MASK)
-+ goto debug_vm86;
-+
-+ /* Save debug status register where ptrace can see it */
-+ tsk->thread.debugreg[6] = condition;
-+
-+ /*
-+ * Single-stepping through TF: make sure we ignore any events in
-+ * kernel space (but re-enable TF when returning to user mode).
-+ */
-+ if (condition & DR_STEP) {
-+ /*
-+ * We already checked v86 mode above, so we can
-+ * check for kernel mode by just checking the CPL
-+ * of CS.
-+ */
-+ if (!user_mode(regs))
-+ goto clear_TF_reenable;
-+ }
-+
-+ /* Ok, finally something we can handle */
-+ send_sigtrap(tsk, regs, error_code);
-+
-+ /* Disable additional traps. They'll be re-enabled when
-+ * the signal is delivered.
-+ */
-+clear_dr7:
-+ set_debugreg(0, 7);
-+ return;
-+
-+debug_vm86:
-+ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
-+ return;
-+
-+clear_TF_reenable:
-+ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
-+ regs->eflags &= ~TF_MASK;
-+ return;
-+}
-+
-+/*
-+ * Note that we play around with the 'TS' bit in an attempt to get
-+ * the correct behaviour even in the presence of the asynchronous
-+ * IRQ13 behaviour
-+ */
-+void math_error(void __user *eip)
-+{
-+ struct task_struct * task;
-+ siginfo_t info;
-+ unsigned short cwd, swd;
-+
-+ /*
-+ * Save the info for the exception handler and clear the error.
-+ */
-+ task = current;
-+ save_init_fpu(task);
-+ task->thread.trap_no = 16;
-+ task->thread.error_code = 0;
-+ info.si_signo = SIGFPE;
-+ info.si_errno = 0;
-+ info.si_code = __SI_FAULT;
-+ info.si_addr = eip;
-+ /*
-+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
-+ * status. 0x3f is the exception bits in these regs, 0x200 is the
-+ * C1 reg you need in case of a stack fault, 0x040 is the stack
-+ * fault bit. We should only be taking one exception at a time,
-+ * so if this combination doesn't produce any single exception,
-+ * then we have a bad program that isn't synchronizing its FPU usage
-+ * and it will suffer the consequences since we won't be able to
-+ * fully reproduce the context of the exception
-+ */
-+ cwd = get_fpu_cwd(task);
-+ swd = get_fpu_swd(task);
-+ switch (swd & ~cwd & 0x3f) {
-+ case 0x000: /* No unmasked exception */
-+ return;
-+ default: /* Multiple exceptions */
-+ break;
-+ case 0x001: /* Invalid Op */
-+ /*
-+ * swd & 0x240 == 0x040: Stack Underflow
-+ * swd & 0x240 == 0x240: Stack Overflow
-+ * User must clear the SF bit (0x40) if set
-+ */
-+ info.si_code = FPE_FLTINV;
-+ break;
-+ case 0x002: /* Denormalize */
-+ case 0x010: /* Underflow */
-+ info.si_code = FPE_FLTUND;
-+ break;
-+ case 0x004: /* Zero Divide */
-+ info.si_code = FPE_FLTDIV;
-+ break;
-+ case 0x008: /* Overflow */
-+ info.si_code = FPE_FLTOVF;
-+ break;
-+ case 0x020: /* Precision */
-+ info.si_code = FPE_FLTRES;
-+ break;
-+ }
-+ force_sig_info(SIGFPE, &info, task);
-+}
-+
-+fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
-+{
-+ ignore_fpu_irq = 1;
-+ math_error((void __user *)regs->eip);
-+}
-+
-+static void simd_math_error(void __user *eip)
-+{
-+ struct task_struct * task;
-+ siginfo_t info;
-+ unsigned short mxcsr;
-+
-+ /*
-+ * Save the info for the exception handler and clear the error.
-+ */
-+ task = current;
-+ save_init_fpu(task);
-+ task->thread.trap_no = 19;
-+ task->thread.error_code = 0;
-+ info.si_signo = SIGFPE;
-+ info.si_errno = 0;
-+ info.si_code = __SI_FAULT;
-+ info.si_addr = eip;
-+ /*
-+ * The SIMD FPU exceptions are handled a little differently, as there
-+ * is only a single status/control register. Thus, to determine which
-+ * unmasked exception was caught we must mask the exception mask bits
-+ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
-+ */
-+ mxcsr = get_fpu_mxcsr(task);
-+ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
-+ case 0x000:
-+ default:
-+ break;
-+ case 0x001: /* Invalid Op */
-+ info.si_code = FPE_FLTINV;
-+ break;
-+ case 0x002: /* Denormalize */
-+ case 0x010: /* Underflow */
-+ info.si_code = FPE_FLTUND;
-+ break;
-+ case 0x004: /* Zero Divide */
-+ info.si_code = FPE_FLTDIV;
-+ break;
-+ case 0x008: /* Overflow */
-+ info.si_code = FPE_FLTOVF;
-+ break;
-+ case 0x020: /* Precision */
-+ info.si_code = FPE_FLTRES;
-+ break;
-+ }
-+ force_sig_info(SIGFPE, &info, task);
-+}
-+
-+fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
-+ long error_code)
-+{
-+ if (cpu_has_xmm) {
-+ /* Handle SIMD FPU exceptions on PIII+ processors. */
-+ ignore_fpu_irq = 1;
-+ simd_math_error((void __user *)regs->eip);
-+ } else {
-+ /*
-+ * Handle strange cache flush from user space exception
-+ * in all other cases. This is undocumented behaviour.
-+ */
-+ if (regs->eflags & VM_MASK) {
-+ handle_vm86_fault((struct kernel_vm86_regs *)regs,
-+ error_code);
-+ return;
-+ }
-+ current->thread.trap_no = 19;
-+ current->thread.error_code = error_code;
-+ die_if_kernel("cache flush denied", regs, error_code);
-+ force_sig(SIGSEGV, current);
-+ }
-+}
-+
-+#ifndef CONFIG_XEN
-+fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
-+ long error_code)
-+{
-+#if 0
-+ /* No need to warn about this any longer. */
-+ printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
-+#endif
-+}
-+
-+fastcall unsigned long patch_espfix_desc(unsigned long uesp,
-+ unsigned long kesp)
-+{
-+ struct desc_struct *gdt = __get_cpu_var(gdt_page).gdt;
-+ unsigned long base = (kesp - uesp) & -THREAD_SIZE;
-+ unsigned long new_kesp = kesp - base;
-+ unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
-+ __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
-+ /* Set up base for espfix segment */
-+ desc &= 0x00f0ff0000000000ULL;
-+ desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
-+ ((((__u64)base) << 32) & 0xff00000000000000ULL) |
-+ ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
-+ (lim_pages & 0xffff);
-+ *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
-+ return new_kesp;
-+}
-+#endif
-+
-+/*
-+ * 'math_state_restore()' saves the current math information in the
-+ * old math state array, and gets the new ones from the current task
-+ *
-+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
-+ * Don't touch unless you *really* know how it works.
-+ *
-+ * Must be called with kernel preemption disabled (in this case,
-+ * local interrupts are disabled at the call-site in entry.S).
-+ */
-+asmlinkage void math_state_restore(void)
-+{
-+ struct thread_info *thread = current_thread_info();
-+ struct task_struct *tsk = thread->task;
-+
-+ /* NB. 'clts' is done for us by Xen during virtual trap. */
-+ if (!tsk_used_math(tsk))
-+ init_fpu(tsk);
-+ restore_fpu(tsk);
-+ thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
-+ tsk->fpu_counter++;
-+}
-+
-+#ifndef CONFIG_MATH_EMULATION
-+
-+asmlinkage void math_emulate(long arg)
-+{
-+ printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
-+ printk(KERN_EMERG "killing %s.\n",current->comm);
-+ force_sig(SIGFPE,current);
-+ schedule();
-+}
-+
-+#endif /* CONFIG_MATH_EMULATION */
-+
-+#ifdef CONFIG_X86_F00F_BUG
-+void __init trap_init_f00f_bug(void)
-+{
-+ __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
-+
-+ /*
-+ * Update the IDT descriptor and reload the IDT so that
-+ * it uses the read-only mapped virtual address.
-+ */
-+ idt_descr.address = fix_to_virt(FIX_F00F_IDT);
-+ load_idt(&idt_descr);
-+}
-+#endif
-+
-+
-+/*
-+ * NB. All these are "trap gates" (i.e. events_mask isn't set) except
-+ * for those that specify <dpl>|4 in the second field.
-+ */
-+static trap_info_t trap_table[] = {
-+ { 0, 0, __KERNEL_CS, (unsigned long)divide_error },
-+ { 1, 0|4, __KERNEL_CS, (unsigned long)debug },
-+ { 3, 3|4, __KERNEL_CS, (unsigned long)int3 },
-+ { 4, 3, __KERNEL_CS, (unsigned long)overflow },
-+ { 5, 0, __KERNEL_CS, (unsigned long)bounds },
-+ { 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
-+ { 7, 0|4, __KERNEL_CS, (unsigned long)device_not_available },
-+ { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
-+ { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
-+ { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
-+ { 12, 0, __KERNEL_CS, (unsigned long)stack_segment },
-+ { 13, 0, __KERNEL_CS, (unsigned long)general_protection },
-+ { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault },
-+ { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment },
-+ { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error },
-+ { 17, 0, __KERNEL_CS, (unsigned long)alignment_check },
-+#ifdef CONFIG_X86_MCE
-+ { 18, 0, __KERNEL_CS, (unsigned long)machine_check },
-+#endif
-+ { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
-+ { SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call },
-+ { 0, 0, 0, 0 }
-+};
-+
-+void __init trap_init(void)
-+{
-+ HYPERVISOR_set_trap_table(trap_table);
-+
-+ if (cpu_has_fxsr) {
-+ /*
-+ * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
-+ * Generates a compile-time "error: zero width for bit-field" if
-+ * the alignment is wrong.
-+ */
-+ struct fxsrAlignAssert {
-+ int _:!(offsetof(struct task_struct,
-+ thread.i387.fxsave) & 15);
-+ };
-+
-+ printk(KERN_INFO "Enabling fast FPU save and restore... ");
-+ set_in_cr4(X86_CR4_OSFXSR);
-+ printk("done.\n");
-+ }
-+ if (cpu_has_xmm) {
-+ printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
-+ "support... ");
-+ set_in_cr4(X86_CR4_OSXMMEXCPT);
-+ printk("done.\n");
-+ }
-+
-+ /*
-+ * Should be a barrier for any external CPU state.
-+ */
-+ cpu_init();
-+}
-+
-+void smp_trap_init(trap_info_t *trap_ctxt)
-+{
-+ trap_info_t *t = trap_table;
-+
-+ for (t = trap_table; t->address; t++) {
-+ trap_ctxt[t->vector].flags = t->flags;
-+ trap_ctxt[t->vector].cs = t->cs;
-+ trap_ctxt[t->vector].address = t->address;
-+ }
-+}
-+
-+static int __init oops_setup(char *s)
-+{
-+ if (!s)
-+ return -EINVAL;
-+ if (!strcmp(s, "panic"))
-+ panic_on_oops = 1;
-+ return 0;
-+}
-+early_param("oops", oops_setup);
-+
-+static int __init kstack_setup(char *s)
-+{
-+ kstack_depth_to_print = simple_strtoul(s, NULL, 0);
-+ return 1;
-+}
-+__setup("kstack=", kstack_setup);
-+
-+static int __init code_bytes_setup(char *s)
-+{
-+ code_bytes = simple_strtoul(s, NULL, 0);
-+ if (code_bytes > 8192)
-+ code_bytes = 8192;
-+
-+ return 1;
-+}
-+__setup("code_bytes=", code_bytes_setup);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/vm86.c ubuntu-gutsy-xen/arch/i386/kernel/vm86.c
---- ubuntu-gutsy/arch/i386/kernel/vm86.c 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/vm86.c 2007-08-18 12:38:02.000000000 -0400
-@@ -125,7 +125,9 @@
- struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
- struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
- {
-+#ifndef CONFIG_X86_NO_TSS
- struct tss_struct *tss;
-+#endif
- struct pt_regs *ret;
- unsigned long tmp;
-
-@@ -148,12 +150,16 @@
- do_exit(SIGSEGV);
- }
-
-+#ifndef CONFIG_X86_NO_TSS
- tss = &per_cpu(init_tss, get_cpu());
-+#endif
- current->thread.esp0 = current->thread.saved_esp0;
- current->thread.sysenter_cs = __KERNEL_CS;
- load_esp0(tss, &current->thread);
- current->thread.saved_esp0 = 0;
-+#ifndef CONFIG_X86_NO_TSS
- put_cpu();
-+#endif
-
- ret = KVM86->regs32;
-
-@@ -279,7 +285,9 @@
-
- static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
- {
-+#ifndef CONFIG_X86_NO_TSS
- struct tss_struct *tss;
-+#endif
- /*
- * make sure the vm86() system call doesn't try to do anything silly
- */
-@@ -324,12 +332,16 @@
- tsk->thread.saved_fs = info->regs32->xfs;
- savesegment(gs, tsk->thread.saved_gs);
-
-+#ifndef CONFIG_X86_NO_TSS
- tss = &per_cpu(init_tss, get_cpu());
-+#endif
- tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
- if (cpu_has_sep)
- tsk->thread.sysenter_cs = 0;
- load_esp0(tss, &tsk->thread);
-+#ifndef CONFIG_X86_NO_TSS
- put_cpu();
-+#endif
-
- tsk->thread.screen_bitmap = info->screen_bitmap;
- if (info->flags & VM86_SCREEN_BITMAP)
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/vmlinux.lds.S ubuntu-gutsy-xen/arch/i386/kernel/vmlinux.lds.S
---- ubuntu-gutsy/arch/i386/kernel/vmlinux.lds.S 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/kernel/vmlinux.lds.S 2007-08-18 12:38:02.000000000 -0400
-@@ -35,6 +35,12 @@
- SECTIONS
- {
- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
-+
-+#if defined(CONFIG_XEN) && CONFIG_XEN_COMPAT <= 0x030002
-+#undef LOAD_OFFSET
-+#define LOAD_OFFSET 0
-+#endif
-+
- phys_startup_32 = startup_32 - LOAD_OFFSET;
-
- .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/kernel/vsyscall-note-xen.S ubuntu-gutsy-xen/arch/i386/kernel/vsyscall-note-xen.S
---- ubuntu-gutsy/arch/i386/kernel/vsyscall-note-xen.S 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/kernel/vsyscall-note-xen.S 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,32 @@
-+/*
-+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
-+ * Here we can supply some information useful to userland.
-+ * First we get the vanilla i386 note that supplies the kernel version info.
-+ */
-+
-+#include "vsyscall-note.S"
-+
-+/*
-+ * Now we add a special note telling glibc's dynamic linker a fake hardware
-+ * flavor that it will use to choose the search path for libraries in the
-+ * same way it uses real hardware capabilities like "mmx".
-+ * We supply "nosegneg" as the fake capability, to indicate that we
-+ * do not like negative offsets in instructions using segment overrides,
-+ * since we implement those inefficiently. This makes it possible to
-+ * install libraries optimized to avoid those access patterns in someplace
-+ * like /lib/i686/tls/nosegneg. Note that an /etc/ld.so.conf.d/file
-+ * corresponding to the bits here is needed to make ldconfig work right.
-+ * It should contain:
-+ * hwcap 0 nosegneg
-+ * to match the mapping of bit to name that we give here.
-+ */
-+#define NOTE_KERNELCAP_BEGIN(ncaps, mask) \
-+ ASM_ELF_NOTE_BEGIN(".note.kernelcap", "a", "GNU", 2) \
-+ .long ncaps, mask
-+#define NOTE_KERNELCAP(bit, name) \
-+ .byte bit; .asciz name
-+#define NOTE_KERNELCAP_END ASM_ELF_NOTE_END
-+
-+NOTE_KERNELCAP_BEGIN(1, 1)
-+NOTE_KERNELCAP(0, "nosegneg")
-+NOTE_KERNELCAP_END
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/mach-xen/Makefile ubuntu-gutsy-xen/arch/i386/mach-xen/Makefile
---- ubuntu-gutsy/arch/i386/mach-xen/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/mach-xen/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,5 @@
-+#
-+# Makefile for the linux kernel.
-+#
-+
-+obj-y := setup.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/mach-xen/setup.c ubuntu-gutsy-xen/arch/i386/mach-xen/setup.c
---- ubuntu-gutsy/arch/i386/mach-xen/setup.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/mach-xen/setup.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,149 @@
-+/*
-+ * Machine specific setup for generic
-+ */
-+
-+#include <linux/smp.h>
-+#include <linux/init.h>
-+#include <linux/interrupt.h>
-+#include <linux/module.h>
-+#include <asm/acpi.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/e820.h>
-+#include <asm/setup.h>
-+#include <asm/fixmap.h>
-+
-+#include <xen/interface/callback.h>
-+#include <xen/interface/memory.h>
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+#define DEFAULT_SEND_IPI (1)
-+#else
-+#define DEFAULT_SEND_IPI (0)
-+#endif
-+
-+int no_broadcast=DEFAULT_SEND_IPI;
-+
-+static __init int no_ipi_broadcast(char *str)
-+{
-+ get_option(&str, &no_broadcast);
-+ printk ("Using %s mode\n", no_broadcast ? "No IPI Broadcast" :
-+ "IPI Broadcast");
-+ return 1;
-+}
-+
-+__setup("no_ipi_broadcast", no_ipi_broadcast);
-+
-+static int __init print_ipi_mode(void)
-+{
-+ printk ("Using IPI %s mode\n", no_broadcast ? "No-Shortcut" :
-+ "Shortcut");
-+ return 0;
-+}
-+
-+late_initcall(print_ipi_mode);
-+
-+/**
-+ * machine_specific_memory_setup - Hook for machine specific memory setup.
-+ *
-+ * Description:
-+ * This is included late in kernel/setup.c so that it can make
-+ * use of all of the static functions.
-+ **/
-+
-+char * __init machine_specific_memory_setup(void)
-+{
-+ int rc;
-+ struct xen_memory_map memmap;
-+ /*
-+ * This is rather large for a stack variable but this early in
-+ * the boot process we know we have plenty of slack space.
-+ */
-+ struct e820entry map[E820MAX];
-+
-+ memmap.nr_entries = E820MAX;
-+ set_xen_guest_handle(memmap.buffer, map);
-+
-+ rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
-+ if ( rc == -ENOSYS ) {
-+ memmap.nr_entries = 1;
-+ map[0].addr = 0ULL;
-+ map[0].size = PFN_PHYS((unsigned long long)xen_start_info->nr_pages);
-+ /* 8MB slack (to balance backend allocations). */
-+ map[0].size += 8ULL << 20;
-+ map[0].type = E820_RAM;
-+ rc = 0;
-+ }
-+ BUG_ON(rc);
-+
-+ sanitize_e820_map(map, (char *)&memmap.nr_entries);
-+
-+ BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
-+
-+ return "Xen";
-+}
-+
-+
-+extern void hypervisor_callback(void);
-+extern void failsafe_callback(void);
-+extern void nmi(void);
-+
-+unsigned long *machine_to_phys_mapping;
-+EXPORT_SYMBOL(machine_to_phys_mapping);
-+unsigned int machine_to_phys_order;
-+EXPORT_SYMBOL(machine_to_phys_order);
-+
-+void __init machine_specific_arch_setup(void)
-+{
-+ int ret;
-+ struct xen_machphys_mapping mapping;
-+ unsigned long machine_to_phys_nr_ents;
-+ struct xen_platform_parameters pp;
-+ static struct callback_register __initdata event = {
-+ .type = CALLBACKTYPE_event,
-+ .address = { __KERNEL_CS, (unsigned long)hypervisor_callback },
-+ };
-+ static struct callback_register __initdata failsafe = {
-+ .type = CALLBACKTYPE_failsafe,
-+ .address = { __KERNEL_CS, (unsigned long)failsafe_callback },
-+ };
-+ static struct callback_register __initdata nmi_cb = {
-+ .type = CALLBACKTYPE_nmi,
-+ .address = { __KERNEL_CS, (unsigned long)nmi },
-+ };
-+
-+ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
-+ if (ret == 0)
-+ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (ret == -ENOSYS)
-+ ret = HYPERVISOR_set_callbacks(
-+ event.address.cs, event.address.eip,
-+ failsafe.address.cs, failsafe.address.eip);
-+#endif
-+ BUG_ON(ret);
-+
-+ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (ret == -ENOSYS) {
-+ static struct xennmi_callback __initdata cb = {
-+ .handler_address = (unsigned long)nmi
-+ };
-+
-+ HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
-+ }
-+#endif
-+
-+ if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) {
-+ hypervisor_virt_start = pp.virt_start;
-+ reserve_top_address(0UL - pp.virt_start);
-+ }
-+
-+ machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
-+ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
-+ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
-+ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
-+ machine_to_phys_nr_ents = mapping.max_mfn + 1;
-+ }
-+ while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
-+ machine_to_phys_order++;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/Makefile ubuntu-gutsy-xen/arch/i386/Makefile
---- ubuntu-gutsy/arch/i386/Makefile 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -60,6 +60,11 @@
-
- CFLAGS += $(cflags-y)
-
-+cppflags-$(CONFIG_XEN) += \
-+ -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION)
-+
-+CPPFLAGS += $(cppflags-y)
-+
- # Default subarch .c files
- mcore-y := mach-default
-
-@@ -83,6 +88,10 @@
- mflags-$(CONFIG_X86_SUMMIT) := -Iinclude/asm-i386/mach-summit
- mcore-$(CONFIG_X86_SUMMIT) := mach-default
-
-+# Xen subarch support
-+mflags-$(CONFIG_X86_XEN) := -Iinclude/asm-i386/mach-xen
-+mcore-$(CONFIG_X86_XEN) := mach-xen
-+
- # generic subarchitecture
- mflags-$(CONFIG_X86_GENERICARCH) := -Iinclude/asm-i386/mach-generic
- mcore-$(CONFIG_X86_GENERICARCH) := mach-default
-@@ -117,6 +126,19 @@
- PHONY += zImage bzImage compressed zlilo bzlilo \
- zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
-
-+ifdef CONFIG_XEN
-+CPPFLAGS := -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
-+head-y := arch/i386/kernel/head-xen.o arch/i386/kernel/init_task-xen.o
-+boot := arch/i386/boot-xen
-+.PHONY: vmlinuz
-+all: vmlinuz
-+
-+vmlinuz: vmlinux
-+ $(Q)$(MAKE) $(build)=$(boot) $@
-+
-+install:
-+ $(Q)$(MAKE) $(build)=$(boot) XENGUEST=$(XENGUEST) $@
-+else
- all: bzImage
-
- # KBUILD_IMAGE specify target image being built
-@@ -139,6 +161,7 @@
-
- install:
- $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
-+endif
-
- archclean:
- $(Q)$(MAKE) $(clean)=arch/i386/boot
-@@ -157,3 +180,4 @@
- CLEAN_FILES += arch/$(ARCH)/boot/fdimage \
- arch/$(ARCH)/boot/image.iso \
- arch/$(ARCH)/boot/mtools.conf
-+CLEAN_FILES += vmlinuz vmlinux-stripped
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/mm/fault-xen.c ubuntu-gutsy-xen/arch/i386/mm/fault-xen.c
---- ubuntu-gutsy/arch/i386/mm/fault-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/mm/fault-xen.c 2007-08-18 12:43:13.000000000 -0400
-@@ -0,0 +1,740 @@
-+/*
-+ * linux/arch/i386/mm/fault.c
-+ *
-+ * Copyright (C) 1995 Linus Torvalds
-+ */
-+
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/tty.h>
-+#include <linux/vt_kern.h> /* For unblank_screen() */
-+#include <linux/highmem.h>
-+#include <linux/bootmem.h> /* for max_low_pfn */
-+#include <linux/vmalloc.h>
-+#include <linux/module.h>
-+#include <linux/kprobes.h>
-+#include <linux/uaccess.h>
-+#include <linux/kdebug.h>
-+
-+#include <asm/system.h>
-+#include <asm/desc.h>
-+#include <asm/segment.h>
-+
-+extern void die(const char *,struct pt_regs *,long);
-+
-+static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-+
-+int register_page_fault_notifier(struct notifier_block *nb)
-+{
-+ vmalloc_sync_all();
-+ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-+}
-+EXPORT_SYMBOL_GPL(register_page_fault_notifier);
-+
-+int unregister_page_fault_notifier(struct notifier_block *nb)
-+{
-+ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
-+}
-+EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
-+
-+static inline int notify_page_fault(struct pt_regs *regs, long err)
-+{
-+ struct die_args args = {
-+ .regs = regs,
-+ .str = "page fault",
-+ .err = err,
-+ .trapnr = 14,
-+ .signr = SIGSEGV
-+ };
-+ return atomic_notifier_call_chain(&notify_page_fault_chain,
-+ DIE_PAGE_FAULT, &args);
-+}
-+
-+/*
-+ * Return EIP plus the CS segment base. The segment limit is also
-+ * adjusted, clamped to the kernel/user address space (whichever is
-+ * appropriate), and returned in *eip_limit.
-+ *
-+ * The segment is checked, because it might have been changed by another
-+ * task between the original faulting instruction and here.
-+ *
-+ * If CS is no longer a valid code segment, or if EIP is beyond the
-+ * limit, or if it is a kernel address when CS is not a kernel segment,
-+ * then the returned value will be greater than *eip_limit.
-+ *
-+ * This is slow, but is very rarely executed.
-+ */
-+static inline unsigned long get_segment_eip(struct pt_regs *regs,
-+ unsigned long *eip_limit)
-+{
-+ unsigned long eip = regs->eip;
-+ unsigned seg = regs->xcs & 0xffff;
-+ u32 seg_ar, seg_limit, base, *desc;
-+
-+ /* Unlikely, but must come before segment checks. */
-+ if (unlikely(regs->eflags & VM_MASK)) {
-+ base = seg << 4;
-+ *eip_limit = base + 0xffff;
-+ return base + (eip & 0xffff);
-+ }
-+
-+ /* The standard kernel/user address space limit. */
-+ *eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;
-+
-+ /* By far the most common cases. */
-+ if (likely(SEGMENT_IS_FLAT_CODE(seg)))
-+ return eip;
-+
-+ /* Check the segment exists, is within the current LDT/GDT size,
-+ that kernel/user (ring 0..3) has the appropriate privilege,
-+ that it's a code segment, and get the limit. */
-+ __asm__ ("larl %3,%0; lsll %3,%1"
-+ : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
-+ if ((~seg_ar & 0x9800) || eip > seg_limit) {
-+ *eip_limit = 0;
-+ return 1; /* So that returned eip > *eip_limit. */
-+ }
-+
-+ /* Get the GDT/LDT descriptor base.
-+ When you look for races in this code remember that
-+ LDT and other horrors are only used in user space. */
-+ if (seg & (1<<2)) {
-+ /* Must lock the LDT while reading it. */
-+ down(&current->mm->context.sem);
-+ desc = current->mm->context.ldt;
-+ desc = (void *)desc + (seg & ~7);
-+ } else {
-+ /* Must disable preemption while reading the GDT. */
-+ desc = (u32 *)get_cpu_gdt_table(get_cpu());
-+ desc = (void *)desc + (seg & ~7);
-+ }
-+
-+ /* Decode the code segment base from the descriptor */
-+ base = get_desc_base((unsigned long *)desc);
-+
-+ if (seg & (1<<2)) {
-+ up(&current->mm->context.sem);
-+ } else
-+ put_cpu();
-+
-+ /* Adjust EIP and segment limit, and clamp at the kernel limit.
-+ It's legitimate for segments to wrap at 0xffffffff. */
-+ seg_limit += base;
-+ if (seg_limit < *eip_limit && seg_limit >= base)
-+ *eip_limit = seg_limit;
-+ return eip + base;
-+}
-+
-+/*
-+ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
-+ * Check that here and ignore it.
-+ */
-+static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
-+{
-+ unsigned long limit;
-+ unsigned char *instr = (unsigned char *)get_segment_eip (regs, &limit);
-+ int scan_more = 1;
-+ int prefetch = 0;
-+ int i;
-+
-+ for (i = 0; scan_more && i < 15; i++) {
-+ unsigned char opcode;
-+ unsigned char instr_hi;
-+ unsigned char instr_lo;
-+
-+ if (instr > (unsigned char *)limit)
-+ break;
-+ if (probe_kernel_address(instr, opcode))
-+ break;
-+
-+ instr_hi = opcode & 0xf0;
-+ instr_lo = opcode & 0x0f;
-+ instr++;
-+
-+ switch (instr_hi) {
-+ case 0x20:
-+ case 0x30:
-+ /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
-+ scan_more = ((instr_lo & 7) == 0x6);
-+ break;
-+
-+ case 0x60:
-+ /* 0x64 thru 0x67 are valid prefixes in all modes. */
-+ scan_more = (instr_lo & 0xC) == 0x4;
-+ break;
-+ case 0xF0:
-+ /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
-+ scan_more = !instr_lo || (instr_lo>>1) == 1;
-+ break;
-+ case 0x00:
-+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
-+ scan_more = 0;
-+ if (instr > (unsigned char *)limit)
-+ break;
-+ if (probe_kernel_address(instr, opcode))
-+ break;
-+ prefetch = (instr_lo == 0xF) &&
-+ (opcode == 0x0D || opcode == 0x18);
-+ break;
-+ default:
-+ scan_more = 0;
-+ break;
-+ }
-+ }
-+ return prefetch;
-+}
-+
-+static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
-+ unsigned long error_code)
-+{
-+ if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-+ boot_cpu_data.x86 >= 6)) {
-+ /* Catch an obscure case of prefetch inside an NX page. */
-+ if (nx_enabled && (error_code & 16))
-+ return 0;
-+ return __is_prefetch(regs, addr);
-+ }
-+ return 0;
-+}
-+
-+static noinline void force_sig_info_fault(int si_signo, int si_code,
-+ unsigned long address, struct task_struct *tsk)
-+{
-+ siginfo_t info;
-+
-+ info.si_signo = si_signo;
-+ info.si_errno = 0;
-+ info.si_code = si_code;
-+ info.si_addr = (void __user *)address;
-+ force_sig_info(si_signo, &info, tsk);
-+}
-+
-+fastcall void do_invalid_op(struct pt_regs *, unsigned long);
-+
-+#ifdef CONFIG_X86_PAE
-+static void dump_fault_path(unsigned long address)
-+{
-+ unsigned long *p, page;
-+ unsigned long mfn;
-+
-+ page = read_cr3();
-+ p = (unsigned long *)__va(page);
-+ p += (address >> 30) * 2;
-+ printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
-+ if (p[0] & 1) {
-+ mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
-+ page = mfn_to_pfn(mfn) << PAGE_SHIFT;
-+ p = (unsigned long *)__va(page);
-+ address &= 0x3fffffff;
-+ p += (address >> 21) * 2;
-+ printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n",
-+ page, p[1], p[0]);
-+ mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
-+#ifdef CONFIG_HIGHPTE
-+ if (mfn_to_pfn(mfn) >= highstart_pfn)
-+ return;
-+#endif
-+ if (p[0] & 1) {
-+ page = mfn_to_pfn(mfn) << PAGE_SHIFT;
-+ p = (unsigned long *) __va(page);
-+ address &= 0x001fffff;
-+ p += (address >> 12) * 2;
-+ printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
-+ page, p[1], p[0]);
-+ }
-+ }
-+}
-+#else
-+static void dump_fault_path(unsigned long address)
-+{
-+ unsigned long page;
-+
-+ page = read_cr3();
-+ page = ((unsigned long *) __va(page))[address >> PGDIR_SHIFT];
-+ printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
-+ machine_to_phys(page));
-+ /*
-+ * We must not directly access the pte in the highpte
-+ * case if the page table is located in highmem.
-+ * And let's rather not kmap-atomic the pte, just in case
-+ * it's allocated already.
-+ */
-+ if ((machine_to_phys(page) >> PAGE_SHIFT) < max_low_pfn
-+ && (page & _PAGE_PRESENT)) {
-+ page = machine_to_phys(page & PAGE_MASK);
-+ page = ((unsigned long *) __va(page))[(address >> PAGE_SHIFT)
-+ & (PTRS_PER_PTE - 1)];
-+ printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
-+ machine_to_phys(page));
-+ }
-+}
-+#endif
-+
-+static int spurious_fault(struct pt_regs *regs,
-+ unsigned long address,
-+ unsigned long error_code)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+
-+ /* Reserved-bit violation or user access to kernel space? */
-+ if (error_code & 0x0c)
-+ return 0;
-+
-+ pgd = init_mm.pgd + pgd_index(address);
-+ if (!pgd_present(*pgd))
-+ return 0;
-+
-+ pud = pud_offset(pgd, address);
-+ if (!pud_present(*pud))
-+ return 0;
-+
-+ pmd = pmd_offset(pud, address);
-+ if (!pmd_present(*pmd))
-+ return 0;
-+
-+ pte = pte_offset_kernel(pmd, address);
-+ if (!pte_present(*pte))
-+ return 0;
-+ if ((error_code & 0x02) && !pte_write(*pte))
-+ return 0;
-+#ifdef CONFIG_X86_PAE
-+ if ((error_code & 0x10) && (pte_val(*pte) & _PAGE_NX))
-+ return 0;
-+#endif
-+
-+ return 1;
-+}
-+
-+static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
-+{
-+ unsigned index = pgd_index(address);
-+ pgd_t *pgd_k;
-+ pud_t *pud, *pud_k;
-+ pmd_t *pmd, *pmd_k;
-+
-+ pgd += index;
-+ pgd_k = init_mm.pgd + index;
-+
-+ if (!pgd_present(*pgd_k))
-+ return NULL;
-+
-+ /*
-+ * set_pgd(pgd, *pgd_k); here would be useless on PAE
-+ * and redundant with the set_pmd() on non-PAE. As would
-+ * set_pud.
-+ */
-+
-+ pud = pud_offset(pgd, address);
-+ pud_k = pud_offset(pgd_k, address);
-+ if (!pud_present(*pud_k))
-+ return NULL;
-+
-+ pmd = pmd_offset(pud, address);
-+ pmd_k = pmd_offset(pud_k, address);
-+ if (!pmd_present(*pmd_k))
-+ return NULL;
-+ if (!pmd_present(*pmd))
-+#ifndef CONFIG_XEN
-+ set_pmd(pmd, *pmd_k);
-+#else
-+ /*
-+ * When running on Xen we must launder *pmd_k through
-+ * pmd_val() to ensure that _PAGE_PRESENT is correctly set.
-+ */
-+ set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
-+#endif
-+ else
-+ BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
-+ return pmd_k;
-+}
-+
-+/*
-+ * Handle a fault on the vmalloc or module mapping area
-+ *
-+ * This assumes no large pages in there.
-+ */
-+static inline int vmalloc_fault(unsigned long address)
-+{
-+ unsigned long pgd_paddr;
-+ pmd_t *pmd_k;
-+ pte_t *pte_k;
-+ /*
-+ * Synchronize this task's top level page-table
-+ * with the 'reference' page table.
-+ *
-+ * Do _not_ use "current" here. We might be inside
-+ * an interrupt in the middle of a task switch..
-+ */
-+ pgd_paddr = read_cr3();
-+ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
-+ if (!pmd_k)
-+ return -1;
-+ pte_k = pte_offset_kernel(pmd_k, address);
-+ if (!pte_present(*pte_k))
-+ return -1;
-+ return 0;
-+}
-+
-+/*
-+ * This routine handles page faults. It determines the address,
-+ * and the problem, and then passes it off to one of the appropriate
-+ * routines.
-+ *
-+ * error_code:
-+ * bit 0 == 0 means no page found, 1 means protection fault
-+ * bit 1 == 0 means read, 1 means write
-+ * bit 2 == 0 means kernel, 1 means user-mode
-+ * bit 3 == 1 means use of reserved bit detected
-+ * bit 4 == 1 means fault was an instruction fetch
-+ */
-+fastcall void __kprobes do_page_fault(struct pt_regs *regs,
-+ unsigned long error_code)
-+{
-+ struct task_struct *tsk;
-+ struct mm_struct *mm;
-+ struct vm_area_struct * vma, * prev_vma;
-+ unsigned long address;
-+ int write, si_code;
-+
-+ /* get the address */
-+ address = read_cr2();
-+
-+ /* Set the "privileged fault" bit to something sane. */
-+ error_code &= ~4;
-+ error_code |= (regs->xcs & 2) << 1;
-+ if (regs->eflags & X86_EFLAGS_VM)
-+ error_code |= 4;
-+
-+ tsk = current;
-+
-+ si_code = SEGV_MAPERR;
-+
-+ /*
-+ * We fault-in kernel-space virtual memory on-demand. The
-+ * 'reference' page table is init_mm.pgd.
-+ *
-+ * NOTE! We MUST NOT take any locks for this case. We may
-+ * be in an interrupt or a critical region, and should
-+ * only copy the information from the master page table,
-+ * nothing more.
-+ *
-+ * This verifies that the fault happens in kernel space
-+ * (error_code & 4) == 0, and that the fault was not a
-+ * protection error (error_code & 9) == 0.
-+ */
-+ if (unlikely(address >= TASK_SIZE)) {
-+#ifdef CONFIG_XEN
-+ /* Faults in hypervisor area can never be patched up. */
-+ if (address >= hypervisor_virt_start)
-+ goto bad_area_nosemaphore;
-+#endif
-+ if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
-+ return;
-+ /* Can take a spurious fault if mapping changes R/O -> R/W. */
-+ if (spurious_fault(regs, address, error_code))
-+ return;
-+ if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
-+ return;
-+ /*
-+ * Don't take the mm semaphore here. If we fixup a prefetch
-+ * fault we could otherwise deadlock.
-+ */
-+ goto bad_area_nosemaphore;
-+ }
-+
-+ if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
-+ return;
-+
-+ /* It's safe to allow irq's after cr2 has been saved and the vmalloc
-+ fault has been handled. */
-+ if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
-+ local_irq_enable();
-+
-+ mm = tsk->mm;
-+
-+ /*
-+ * If we're in an interrupt, have no user context or are running in an
-+ * atomic region then we must not take the fault..
-+ */
-+ if (in_atomic() || !mm)
-+ goto bad_area_nosemaphore;
-+
-+ /* When running in the kernel we expect faults to occur only to
-+ * addresses in user space. All other faults represent errors in the
-+ * kernel and should generate an OOPS. Unfortunately, in the case of an
-+ * erroneous fault occurring in a code path which already holds mmap_sem
-+ * we will deadlock attempting to validate the fault against the
-+ * address space. Luckily the kernel only validly references user
-+ * space from well defined areas of code, which are listed in the
-+ * exceptions table.
-+ *
-+ * As the vast majority of faults will be valid we will only perform
-+ * the source reference check when there is a possibility of a deadlock.
-+ * Attempt to lock the address space, if we cannot we then validate the
-+ * source. If this is invalid we can skip the address space check,
-+ * thus avoiding the deadlock.
-+ */
-+ if (!down_read_trylock(&mm->mmap_sem)) {
-+ if ((error_code & 4) == 0 &&
-+ !search_exception_tables(regs->eip))
-+ goto bad_area_nosemaphore;
-+ down_read(&mm->mmap_sem);
-+ }
-+
-+ vma = find_vma(mm, address);
-+ if (!vma)
-+ goto bad_area;
-+ if (vma->vm_start <= address)
-+ goto good_area;
-+ if (!(vma->vm_flags & VM_GROWSDOWN))
-+ goto bad_area;
-+ if (error_code & 4) {
-+ /*
-+ * Accessing the stack below %esp is always a bug.
-+ * The large cushion allows instructions like enter
-+ * and pusha to work. ("enter $65535,$31" pushes
-+ * 32 pointers and then decrements %esp by 65535.)
-+ */
-+ if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
-+ goto bad_area;
-+ }
-+ /*
-+ * find_vma_prev is just a bit slower, because it cannot
-+ * use the mmap_cache, so we run it only in the growsdown
-+ * slow path and we leave find_vma in the fast path.
-+ */
-+ find_vma_prev(current->mm, address, &prev_vma);
-+ if (expand_stack(vma, address))
-+ goto bad_area;
-+/*
-+ * Ok, we have a good vm_area for this memory access, so
-+ * we can handle it..
-+ */
-+good_area:
-+ si_code = SEGV_ACCERR;
-+ write = 0;
-+ switch (error_code & 3) {
-+ default: /* 3: write, present */
-+ /* fall through */
-+ case 2: /* write, not present */
-+ if (!(vma->vm_flags & VM_WRITE))
-+ goto bad_area;
-+ write++;
-+ break;
-+ case 1: /* read, present */
-+ goto bad_area;
-+ case 0: /* read, not present */
-+ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
-+ goto bad_area;
-+ }
-+
-+ survive:
-+ /*
-+ * If for any reason at all we couldn't handle the fault,
-+ * make sure we exit gracefully rather than endlessly redo
-+ * the fault.
-+ */
-+ switch (handle_mm_fault(mm, vma, address, write)) {
-+ case VM_FAULT_MINOR:
-+ tsk->min_flt++;
-+ break;
-+ case VM_FAULT_MAJOR:
-+ tsk->maj_flt++;
-+ break;
-+ case VM_FAULT_SIGBUS:
-+ goto do_sigbus;
-+ case VM_FAULT_OOM:
-+ goto out_of_memory;
-+ default:
-+ BUG();
-+ }
-+
-+ /*
-+ * Did it hit the DOS screen memory VA from vm86 mode?
-+ */
-+ if (regs->eflags & VM_MASK) {
-+ unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
-+ if (bit < 32)
-+ tsk->thread.screen_bitmap |= 1 << bit;
-+ }
-+ up_read(&mm->mmap_sem);
-+ return;
-+
-+/*
-+ * Something tried to access memory that isn't in our memory map..
-+ * Fix it, but check if it's kernel or user first..
-+ */
-+bad_area:
-+ up_read(&mm->mmap_sem);
-+
-+bad_area_nosemaphore:
-+ /* User mode accesses just cause a SIGSEGV */
-+ if (error_code & 4) {
-+ /*
-+ * It's possible to have interrupts off here.
-+ */
-+ local_irq_enable();
-+
-+ /*
-+ * Valid to do another page fault here because this one came
-+ * from user space.
-+ */
-+ if (is_prefetch(regs, address, error_code))
-+ return;
-+
-+ tsk->thread.cr2 = address;
-+ /* Kernel addresses are always protection faults */
-+ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
-+ tsk->thread.trap_no = 14;
-+ force_sig_info_fault(SIGSEGV, si_code, address, tsk);
-+ return;
-+ }
-+
-+#ifdef CONFIG_X86_F00F_BUG
-+ /*
-+ * Pentium F0 0F C7 C8 bug workaround.
-+ */
-+ if (boot_cpu_data.f00f_bug) {
-+ unsigned long nr;
-+
-+ nr = (address - idt_descr.address) >> 3;
-+
-+ if (nr == 6) {
-+ do_invalid_op(regs, 0);
-+ return;
-+ }
-+ }
-+#endif
-+
-+no_context:
-+ /* Are we prepared to handle this kernel fault? */
-+ if (fixup_exception(regs))
-+ return;
-+
-+ /*
-+ * Valid to do another page fault here, because if this fault
-+ * had been triggered by is_prefetch fixup_exception would have
-+ * handled it.
-+ */
-+ if (is_prefetch(regs, address, error_code))
-+ return;
-+
-+/*
-+ * Oops. The kernel tried to access some bad page. We'll have to
-+ * terminate things with extreme prejudice.
-+ */
-+
-+ bust_spinlocks(1);
-+
-+ if (oops_may_print()) {
-+#ifdef CONFIG_X86_PAE
-+ if (error_code & 16) {
-+ pte_t *pte = lookup_address(address);
-+
-+ if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
-+ printk(KERN_CRIT "kernel tried to execute "
-+ "NX-protected page - exploit attempt? "
-+ "(uid: %d)\n", current->uid);
-+ }
-+#endif
-+ if (address < PAGE_SIZE)
-+ printk(KERN_ALERT "BUG: unable to handle kernel NULL "
-+ "pointer dereference");
-+ else
-+ printk(KERN_ALERT "BUG: unable to handle kernel paging"
-+ " request");
-+ printk(" at virtual address %08lx\n",address);
-+ printk(KERN_ALERT " printing eip:\n");
-+ printk("%08lx\n", regs->eip);
-+ dump_fault_path(address);
-+ }
-+ tsk->thread.cr2 = address;
-+ tsk->thread.trap_no = 14;
-+ tsk->thread.error_code = error_code;
-+ die("Oops", regs, error_code);
-+ bust_spinlocks(0);
-+ do_exit(SIGKILL);
-+
-+/*
-+ * We ran out of memory, or some other thing happened to us that made
-+ * us unable to handle the page fault gracefully.
-+ */
-+out_of_memory:
-+ up_read(&mm->mmap_sem);
-+ if (is_init(tsk)) {
-+ yield();
-+ down_read(&mm->mmap_sem);
-+ goto survive;
-+ }
-+ printk("VM: killing process %s\n", tsk->comm);
-+ if (error_code & 4)
-+ do_exit(SIGKILL);
-+ goto no_context;
-+
-+do_sigbus:
-+ up_read(&mm->mmap_sem);
-+
-+ /* Kernel mode? Handle exceptions or die */
-+ if (!(error_code & 4))
-+ goto no_context;
-+
-+ /* User space => ok to do another page fault */
-+ if (is_prefetch(regs, address, error_code))
-+ return;
-+
-+ tsk->thread.cr2 = address;
-+ tsk->thread.error_code = error_code;
-+ tsk->thread.trap_no = 14;
-+ force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
-+}
-+
-+void vmalloc_sync_all(void)
-+{
-+ /*
-+ * Note that races in the updates of insync and start aren't
-+ * problematic: insync can only get set bits added, and updates to
-+ * start are only improving performance (without affecting correctness
-+ * if undone).
-+ */
-+ static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-+ static unsigned long start = TASK_SIZE;
-+ unsigned long address;
-+
-+ if (SHARED_KERNEL_PMD)
-+ return;
-+
-+ BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
-+ for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
-+ if (!test_bit(pgd_index(address), insync)) {
-+ unsigned long flags;
-+ struct page *page;
-+
-+ spin_lock_irqsave(&pgd_lock, flags);
-+ for (page = pgd_list; page; page =
-+ (struct page *)page->index)
-+ if (!vmalloc_sync_one(page_address(page),
-+ address)) {
-+ BUG_ON(page != pgd_list);
-+ break;
-+ }
-+ spin_unlock_irqrestore(&pgd_lock, flags);
-+ if (!page)
-+ set_bit(pgd_index(address), insync);
-+ }
-+ if (address == start && test_bit(pgd_index(address), insync))
-+ start = address + PGDIR_SIZE;
-+ }
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/mm/highmem-xen.c ubuntu-gutsy-xen/arch/i386/mm/highmem-xen.c
---- ubuntu-gutsy/arch/i386/mm/highmem-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/mm/highmem-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,114 @@
-+#include <linux/highmem.h>
-+#include <linux/module.h>
-+
-+void *kmap(struct page *page)
-+{
-+ might_sleep();
-+ if (!PageHighMem(page))
-+ return page_address(page);
-+ return kmap_high(page);
-+}
-+
-+void kunmap(struct page *page)
-+{
-+ if (in_interrupt())
-+ BUG();
-+ if (!PageHighMem(page))
-+ return;
-+ kunmap_high(page);
-+}
-+
-+/*
-+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
-+ * no global lock is needed and because the kmap code must perform a global TLB
-+ * invalidation when the kmap pool wraps.
-+ *
-+ * However when holding an atomic kmap it is not legal to sleep, so atomic
-+ * kmaps are appropriate for short, tight code paths only.
-+ */
-+void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
-+{
-+ enum fixed_addresses idx;
-+ unsigned long vaddr;
-+
-+ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+ pagefault_disable();
-+
-+ idx = type + KM_TYPE_NR*smp_processor_id();
-+ BUG_ON(!pte_none(*(kmap_pte-idx)));
-+
-+ if (!PageHighMem(page))
-+ return page_address(page);
-+
-+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-+ set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
-+ arch_flush_lazy_mmu_mode();
-+
-+ return (void*) vaddr;
-+}
-+
-+void *kmap_atomic(struct page *page, enum km_type type)
-+{
-+ return kmap_atomic_prot(page, type, kmap_prot);
-+}
-+
-+void kunmap_atomic(void *kvaddr, enum km_type type)
-+{
-+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-+ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-+
-+ /*
-+ * Force other mappings to Oops if they'll try to access this pte
-+ * without first remapping it. Keeping stale mappings around is a bad idea
-+ * also, in case the page changes cacheability attributes or becomes
-+ * a protected page in a hypervisor.
-+ */
-+ if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
-+ kpte_clear_flush(kmap_pte-idx, vaddr);
-+ else {
-+#ifdef CONFIG_DEBUG_HIGHMEM
-+ BUG_ON(vaddr < PAGE_OFFSET);
-+ BUG_ON(vaddr >= (unsigned long)high_memory);
-+#endif
-+ }
-+
-+ arch_flush_lazy_mmu_mode();
-+ pagefault_enable();
-+}
-+
-+/* This is the same as kmap_atomic() but can map memory that doesn't
-+ * have a struct page associated with it.
-+ */
-+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
-+{
-+ enum fixed_addresses idx;
-+ unsigned long vaddr;
-+
-+ pagefault_disable();
-+
-+ idx = type + KM_TYPE_NR*smp_processor_id();
-+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-+ set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
-+ arch_flush_lazy_mmu_mode();
-+
-+ return (void*) vaddr;
-+}
-+
-+struct page *kmap_atomic_to_page(void *ptr)
-+{
-+ unsigned long idx, vaddr = (unsigned long)ptr;
-+ pte_t *pte;
-+
-+ if (vaddr < FIXADDR_START)
-+ return virt_to_page(ptr);
-+
-+ idx = virt_to_fix(vaddr);
-+ pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
-+ return pte_page(*pte);
-+}
-+
-+EXPORT_SYMBOL(kmap);
-+EXPORT_SYMBOL(kunmap);
-+EXPORT_SYMBOL(kmap_atomic);
-+EXPORT_SYMBOL(kunmap_atomic);
-+EXPORT_SYMBOL(kmap_atomic_to_page);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/mm/hypervisor.c ubuntu-gutsy-xen/arch/i386/mm/hypervisor.c
---- ubuntu-gutsy/arch/i386/mm/hypervisor.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/mm/hypervisor.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,510 @@
-+/******************************************************************************
-+ * mm/hypervisor.c
-+ *
-+ * Update page tables via the hypervisor.
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/vmalloc.h>
-+#include <asm/page.h>
-+#include <asm/pgtable.h>
-+#include <asm/hypervisor.h>
-+#include <xen/balloon.h>
-+#include <xen/features.h>
-+#include <xen/interface/memory.h>
-+#include <linux/module.h>
-+#include <linux/percpu.h>
-+#include <linux/highmem.h>
-+#include <asm/tlbflush.h>
-+
-+void xen_l1_entry_update(pte_t *ptr, pte_t val)
-+{
-+ mmu_update_t u;
-+ u.ptr = virt_to_machine(ptr);
-+ u.val = __pte_val(val);
-+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
-+{
-+ mmu_update_t u;
-+ u.ptr = virt_to_machine(ptr);
-+ u.val = __pmd_val(val);
-+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
-+void xen_l3_entry_update(pud_t *ptr, pud_t val)
-+{
-+ mmu_update_t u;
-+ u.ptr = virt_to_machine(ptr);
-+ u.val = __pud_val(val);
-+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+#endif
-+
-+#ifdef CONFIG_X86_64
-+void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
-+{
-+ mmu_update_t u;
-+ u.ptr = virt_to_machine(ptr);
-+ u.val = __pgd_val(val);
-+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+#endif /* CONFIG_X86_64 */
-+
-+void xen_pt_switch(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_NEW_BASEPTR;
-+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_new_user_pt(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_NEW_USER_BASEPTR;
-+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_tlb_flush(void)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+EXPORT_SYMBOL(xen_tlb_flush);
-+
-+void xen_invlpg(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_INVLPG_LOCAL;
-+ op.arg1.linear_addr = ptr & PAGE_MASK;
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+EXPORT_SYMBOL(xen_invlpg);
-+
-+#ifdef CONFIG_SMP
-+
-+void xen_tlb_flush_all(void)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_TLB_FLUSH_ALL;
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_tlb_flush_mask(cpumask_t *mask)
-+{
-+ struct mmuext_op op;
-+ if ( cpus_empty(*mask) )
-+ return;
-+ op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-+ set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_invlpg_all(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_INVLPG_ALL;
-+ op.arg1.linear_addr = ptr & PAGE_MASK;
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ if ( cpus_empty(*mask) )
-+ return;
-+ op.cmd = MMUEXT_INVLPG_MULTI;
-+ op.arg1.linear_addr = ptr & PAGE_MASK;
-+ set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+#endif /* CONFIG_SMP */
-+
-+void xen_pgd_pin(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+#ifdef CONFIG_X86_64
-+ op.cmd = MMUEXT_PIN_L4_TABLE;
-+#elif defined(CONFIG_X86_PAE)
-+ op.cmd = MMUEXT_PIN_L3_TABLE;
-+#else
-+ op.cmd = MMUEXT_PIN_L2_TABLE;
-+#endif
-+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_pgd_unpin(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_UNPIN_TABLE;
-+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_set_ldt(unsigned long ptr, unsigned long len)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_SET_LDT;
-+ op.arg1.linear_addr = ptr;
-+ op.arg2.nr_ents = len;
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+/*
-+ * Bitmap is indexed by page number. If bit is set, the page is part of a
-+ * xen_create_contiguous_region() area of memory.
-+ */
-+unsigned long *contiguous_bitmap;
-+
-+static void contiguous_bitmap_set(
-+ unsigned long first_page, unsigned long nr_pages)
-+{
-+ unsigned long start_off, end_off, curr_idx, end_idx;
-+
-+ curr_idx = first_page / BITS_PER_LONG;
-+ start_off = first_page & (BITS_PER_LONG-1);
-+ end_idx = (first_page + nr_pages) / BITS_PER_LONG;
-+ end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
-+
-+ if (curr_idx == end_idx) {
-+ contiguous_bitmap[curr_idx] |=
-+ ((1UL<<end_off)-1) & -(1UL<<start_off);
-+ } else {
-+ contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
-+ while ( ++curr_idx < end_idx )
-+ contiguous_bitmap[curr_idx] = ~0UL;
-+ contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
-+ }
-+}
-+
-+static void contiguous_bitmap_clear(
-+ unsigned long first_page, unsigned long nr_pages)
-+{
-+ unsigned long start_off, end_off, curr_idx, end_idx;
-+
-+ curr_idx = first_page / BITS_PER_LONG;
-+ start_off = first_page & (BITS_PER_LONG-1);
-+ end_idx = (first_page + nr_pages) / BITS_PER_LONG;
-+ end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
-+
-+ if (curr_idx == end_idx) {
-+ contiguous_bitmap[curr_idx] &=
-+ -(1UL<<end_off) | ((1UL<<start_off)-1);
-+ } else {
-+ contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
-+ while ( ++curr_idx != end_idx )
-+ contiguous_bitmap[curr_idx] = 0;
-+ contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
-+ }
-+}
-+
-+/* Protected by balloon_lock. */
-+#define MAX_CONTIG_ORDER 9 /* 2MB */
-+static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
-+static multicall_entry_t cr_mcl[1<<MAX_CONTIG_ORDER];
-+
-+/* Ensure multi-page extents are contiguous in machine memory. */
-+int xen_create_contiguous_region(
-+ unsigned long vstart, unsigned int order, unsigned int address_bits)
-+{
-+ unsigned long *in_frames = discontig_frames, out_frame;
-+ unsigned long frame, i, flags;
-+ long rc;
-+ int success;
-+ struct xen_memory_exchange exchange = {
-+ .in = {
-+ .nr_extents = 1UL << order,
-+ .extent_order = 0,
-+ .domid = DOMID_SELF
-+ },
-+ .out = {
-+ .nr_extents = 1,
-+ .extent_order = order,
-+ .address_bits = address_bits,
-+ .domid = DOMID_SELF
-+ }
-+ };
-+
-+ /*
-+ * Currently an auto-translated guest will not perform I/O, nor will
-+ * it require PAE page directories below 4GB. Therefore any calls to
-+ * this function are redundant and can be ignored.
-+ */
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return 0;
-+
-+ if (unlikely(order > MAX_CONTIG_ORDER))
-+ return -ENOMEM;
-+
-+ set_xen_guest_handle(exchange.in.extent_start, in_frames);
-+ set_xen_guest_handle(exchange.out.extent_start, &out_frame);
-+
-+ scrub_pages(vstart, 1 << order);
-+
-+ balloon_lock(flags);
-+
-+ /* 1. Zap current PTEs, remembering MFNs. */
-+ for (i = 0; i < (1UL<<order); i++) {
-+ in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i);
-+ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
-+ __pte_ma(0), 0);
-+ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
-+ INVALID_P2M_ENTRY);
-+ }
-+ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
-+ BUG();
-+
-+ /* 2. Get a new contiguous memory extent. */
-+ out_frame = __pa(vstart) >> PAGE_SHIFT;
-+ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
-+ success = (exchange.nr_exchanged == (1UL << order));
-+ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
-+ BUG_ON(success && (rc != 0));
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (unlikely(rc == -ENOSYS)) {
-+ /* Compatibility when XENMEM_exchange is unsupported. */
-+ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+ &exchange.in) != (1UL << order))
-+ BUG();
-+ success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
-+ &exchange.out) == 1);
-+ if (!success) {
-+ /* Couldn't get special memory: fall back to normal. */
-+ for (i = 0; i < (1UL<<order); i++)
-+ in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i;
-+ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
-+ &exchange.in) != (1UL<<order))
-+ BUG();
-+ }
-+ }
-+#endif
-+
-+ /* 3. Map the new extent in place of old pages. */
-+ for (i = 0; i < (1UL<<order); i++) {
-+ frame = success ? (out_frame + i) : in_frames[i];
-+ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
-+ pfn_pte_ma(frame, PAGE_KERNEL), 0);
-+ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
-+ }
-+
-+ cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
-+ ? UVMF_TLB_FLUSH|UVMF_ALL
-+ : UVMF_INVLPG|UVMF_ALL;
-+ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
-+ BUG();
-+
-+ if (success)
-+ contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT,
-+ 1UL << order);
-+
-+ balloon_unlock(flags);
-+
-+ return success ? 0 : -ENOMEM;
-+}
-+EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
-+
-+void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
-+{
-+ unsigned long *out_frames = discontig_frames, in_frame;
-+ unsigned long frame, i, flags;
-+ long rc;
-+ int success;
-+ struct xen_memory_exchange exchange = {
-+ .in = {
-+ .nr_extents = 1,
-+ .extent_order = order,
-+ .domid = DOMID_SELF
-+ },
-+ .out = {
-+ .nr_extents = 1UL << order,
-+ .extent_order = 0,
-+ .domid = DOMID_SELF
-+ }
-+ };
-+
-+ if (xen_feature(XENFEAT_auto_translated_physmap) ||
-+ !test_bit(__pa(vstart) >> PAGE_SHIFT, contiguous_bitmap))
-+ return;
-+
-+ if (unlikely(order > MAX_CONTIG_ORDER))
-+ return;
-+
-+ set_xen_guest_handle(exchange.in.extent_start, &in_frame);
-+ set_xen_guest_handle(exchange.out.extent_start, out_frames);
-+
-+ scrub_pages(vstart, 1 << order);
-+
-+ balloon_lock(flags);
-+
-+ contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
-+
-+ /* 1. Find start MFN of contiguous extent. */
-+ in_frame = pfn_to_mfn(__pa(vstart) >> PAGE_SHIFT);
-+
-+ /* 2. Zap current PTEs. */
-+ for (i = 0; i < (1UL<<order); i++) {
-+ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
-+ __pte_ma(0), 0);
-+ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
-+ INVALID_P2M_ENTRY);
-+ out_frames[i] = (__pa(vstart) >> PAGE_SHIFT) + i;
-+ }
-+ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
-+ BUG();
-+
-+ /* 3. Do the exchange for non-contiguous MFNs. */
-+ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
-+ success = (exchange.nr_exchanged == 1);
-+ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
-+ BUG_ON(success && (rc != 0));
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (unlikely(rc == -ENOSYS)) {
-+ /* Compatibility when XENMEM_exchange is unsupported. */
-+ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+ &exchange.in) != 1)
-+ BUG();
-+ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
-+ &exchange.out) != (1UL << order))
-+ BUG();
-+ success = 1;
-+ }
-+#endif
-+
-+ /* 4. Map new pages in place of old pages. */
-+ for (i = 0; i < (1UL<<order); i++) {
-+ frame = success ? out_frames[i] : (in_frame + i);
-+ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
-+ pfn_pte_ma(frame, PAGE_KERNEL), 0);
-+ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
-+ }
-+
-+ cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
-+ ? UVMF_TLB_FLUSH|UVMF_ALL
-+ : UVMF_INVLPG|UVMF_ALL;
-+ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
-+ BUG();
-+
-+ balloon_unlock(flags);
-+
-+ if (unlikely(!success)) {
-+ /* Try hard to get the special memory back to Xen. */
-+ exchange.in.extent_order = 0;
-+ set_xen_guest_handle(exchange.in.extent_start, &in_frame);
-+
-+ for (i = 0; i < (1UL<<order); i++) {
-+ struct page *page = alloc_page(__GFP_HIGHMEM);
-+ unsigned long pfn;
-+ mmu_update_t mmu;
-+ unsigned int j = 0;
-+
-+ if (!page) {
-+ printk(KERN_WARNING "Xen and kernel out of memory "
-+ "while trying to release an order %u "
-+ "contiguous region\n", order);
-+ break;
-+ }
-+ pfn = page_to_pfn(page);
-+
-+ balloon_lock(flags);
-+
-+ if (!PageHighMem(page)) {
-+ void *v = __va(pfn << PAGE_SHIFT);
-+
-+ scrub_pages(v, 1);
-+ MULTI_update_va_mapping(cr_mcl + j, (unsigned long)v,
-+ __pte_ma(0), UVMF_INVLPG|UVMF_ALL);
-+ ++j;
-+ }
-+#ifdef CONFIG_XEN_SCRUB_PAGES
-+ else {
-+ scrub_pages(kmap(page), 1);
-+ kunmap(page);
-+ kmap_flush_unused();
-+ }
-+#endif
-+
-+ frame = pfn_to_mfn(pfn);
-+ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
-+
-+ MULTI_update_va_mapping(cr_mcl + j, vstart,
-+ pfn_pte_ma(frame, PAGE_KERNEL),
-+ UVMF_INVLPG|UVMF_ALL);
-+ ++j;
-+
-+ pfn = __pa(vstart) >> PAGE_SHIFT;
-+ set_phys_to_machine(pfn, frame);
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ mmu.ptr = ((uint64_t)frame << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-+ mmu.val = pfn;
-+ cr_mcl[j].op = __HYPERVISOR_mmu_update;
-+ cr_mcl[j].args[0] = (unsigned long)&mmu;
-+ cr_mcl[j].args[1] = 1;
-+ cr_mcl[j].args[2] = 0;
-+ cr_mcl[j].args[3] = DOMID_SELF;
-+ ++j;
-+ }
-+
-+ cr_mcl[j].op = __HYPERVISOR_memory_op;
-+ cr_mcl[j].args[0] = XENMEM_decrease_reservation;
-+ cr_mcl[j].args[1] = (unsigned long)&exchange.in;
-+
-+ if (HYPERVISOR_multicall(cr_mcl, j + 1))
-+ BUG();
-+ BUG_ON(cr_mcl[j].result != 1);
-+ while (j--)
-+ BUG_ON(cr_mcl[j].result != 0);
-+
-+ balloon_unlock(flags);
-+
-+ free_empty_pages(&page, 1);
-+
-+ in_frame++;
-+ vstart += PAGE_SIZE;
-+ }
-+ }
-+}
-+EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
-+
-+#ifdef __i386__
-+int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
-+{
-+ __u32 *lp = (__u32 *)((char *)ldt + entry * 8);
-+ maddr_t mach_lp = arbitrary_virt_to_machine(lp);
-+ return HYPERVISOR_update_descriptor(
-+ mach_lp, (u64)entry_a | ((u64)entry_b<<32));
-+}
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/mm/init-xen.c ubuntu-gutsy-xen/arch/i386/mm/init-xen.c
---- ubuntu-gutsy/arch/i386/mm/init-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/mm/init-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,915 @@
-+/*
-+ * linux/arch/i386/mm/init.c
-+ *
-+ * Copyright (C) 1995 Linus Torvalds
-+ *
-+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/hugetlb.h>
-+#include <linux/swap.h>
-+#include <linux/smp.h>
-+#include <linux/init.h>
-+#include <linux/highmem.h>
-+#include <linux/pagemap.h>
-+#include <linux/pfn.h>
-+#include <linux/poison.h>
-+#include <linux/bootmem.h>
-+#include <linux/slab.h>
-+#include <linux/proc_fs.h>
-+#include <linux/efi.h>
-+#include <linux/memory_hotplug.h>
-+#include <linux/initrd.h>
-+#include <linux/cpumask.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/scatterlist.h>
-+
-+#include <asm/processor.h>
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/dma.h>
-+#include <asm/fixmap.h>
-+#include <asm/e820.h>
-+#include <asm/apic.h>
-+#include <asm/tlb.h>
-+#include <asm/tlbflush.h>
-+#include <asm/sections.h>
-+#include <asm/hypervisor.h>
-+#include <asm/swiotlb.h>
-+
-+extern unsigned long *contiguous_bitmap;
-+
-+unsigned int __VMALLOC_RESERVE = 128 << 20;
-+
-+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-+unsigned long highstart_pfn, highend_pfn;
-+
-+static int noinline do_test_wp_bit(void);
-+
-+/*
-+ * Creates a middle page table and puts a pointer to it in the
-+ * given global directory entry. This only returns the gd entry
-+ * in non-PAE compilation mode, since the middle layer is folded.
-+ */
-+static pmd_t * __init one_md_table_init(pgd_t *pgd)
-+{
-+ pud_t *pud;
-+ pmd_t *pmd_table;
-+
-+#ifdef CONFIG_X86_PAE
-+ if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
-+ pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-+
-+ paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
-+ make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
-+ set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
-+ pud = pud_offset(pgd, 0);
-+ if (pmd_table != pmd_offset(pud, 0))
-+ BUG();
-+ }
-+#endif
-+ pud = pud_offset(pgd, 0);
-+ pmd_table = pmd_offset(pud, 0);
-+
-+ return pmd_table;
-+}
-+
-+/*
-+ * Create a page table and place a pointer to it in a middle page
-+ * directory entry.
-+ */
-+static pte_t * __init one_page_table_init(pmd_t *pmd)
-+{
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (pmd_none(*pmd)) {
-+#else
-+ if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
-+#endif
-+ pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-+
-+ paravirt_alloc_pt(__pa(page_table) >> PAGE_SHIFT);
-+ make_lowmem_page_readonly(page_table,
-+ XENFEAT_writable_page_tables);
-+ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
-+ BUG_ON(page_table != pte_offset_kernel(pmd, 0));
-+ }
-+
-+ return pte_offset_kernel(pmd, 0);
-+}
-+
-+/*
-+ * This function initializes a certain range of kernel virtual memory
-+ * with new bootmem page tables, everywhere page tables are missing in
-+ * the given range.
-+ */
-+
-+/*
-+ * NOTE: The pagetables are allocated contiguously in physical space,
-+ * so we can cache the place of the first one and move around without
-+ * checking the pgd every time.
-+ */
-+static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
-+{
-+ pgd_t *pgd;
-+ pmd_t *pmd;
-+ int pgd_idx, pmd_idx;
-+ unsigned long vaddr;
-+
-+ vaddr = start;
-+ pgd_idx = pgd_index(vaddr);
-+ pmd_idx = pmd_index(vaddr);
-+ pgd = pgd_base + pgd_idx;
-+
-+ for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
-+ pmd = one_md_table_init(pgd);
-+ pmd = pmd + pmd_index(vaddr);
-+ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
-+ if (vaddr < hypervisor_virt_start)
-+ one_page_table_init(pmd);
-+
-+ vaddr += PMD_SIZE;
-+ }
-+ pmd_idx = 0;
-+ }
-+}
-+
-+static inline int is_kernel_text(unsigned long addr)
-+{
-+ if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
-+ return 1;
-+ return 0;
-+}
-+
-+/*
-+ * This maps the physical memory to kernel virtual address space, a total
-+ * of max_low_pfn pages, by creating page tables starting from address
-+ * PAGE_OFFSET.
-+ */
-+static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
-+{
-+ unsigned long pfn;
-+ pgd_t *pgd;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+ int pgd_idx, pmd_idx, pte_ofs;
-+
-+ unsigned long max_ram_pfn = xen_start_info->nr_pages;
-+ if (max_ram_pfn > max_low_pfn)
-+ max_ram_pfn = max_low_pfn;
-+
-+ pgd_idx = pgd_index(PAGE_OFFSET);
-+ pgd = pgd_base + pgd_idx;
-+ pfn = 0;
-+ pmd_idx = pmd_index(PAGE_OFFSET);
-+ pte_ofs = pte_index(PAGE_OFFSET);
-+
-+ for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
-+#ifdef CONFIG_XEN
-+ /*
-+ * Native Linux does not have PAE paging enabled yet at
-+ * this point. When running as a Xen domain we are already
-+ * in PAE mode, so we can't simply hook in an empty
-+ * pmd. That would kill the mappings we are currently
-+ * using ...
-+ */
-+ pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
-+#else
-+ pmd = one_md_table_init(pgd);
-+#endif
-+ if (pfn >= max_low_pfn)
-+ continue;
-+ pmd += pmd_idx;
-+ for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
-+ unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
-+ if (address >= hypervisor_virt_start)
-+ continue;
-+
-+ /* Map with big pages if possible, otherwise create normal page tables. */
-+ if (cpu_has_pse) {
-+ unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
-+ if (is_kernel_text(address) || is_kernel_text(address2))
-+ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
-+ else
-+ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
-+
-+ pfn += PTRS_PER_PTE;
-+ } else {
-+ pte = one_page_table_init(pmd);
-+
-+ for (pte += pte_ofs;
-+ pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
-+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
-+ /* XEN: Only map initial RAM allocation. */
-+ if ((pfn >= max_ram_pfn) || pte_present(*pte))
-+ continue;
-+ if (is_kernel_text(address))
-+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
-+ else
-+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
-+ }
-+ pte_ofs = 0;
-+ }
-+ }
-+ pmd_idx = 0;
-+ }
-+}
-+
-+#ifndef CONFIG_XEN
-+
-+static inline int page_kills_ppro(unsigned long pagenr)
-+{
-+ if (pagenr >= 0x70000 && pagenr <= 0x7003F)
-+ return 1;
-+ return 0;
-+}
-+
-+#else
-+
-+#define page_kills_ppro(p) 0
-+
-+#endif
-+
-+int page_is_ram(unsigned long pagenr)
-+{
-+ int i;
-+ unsigned long addr, end;
-+
-+ if (efi_enabled) {
-+ efi_memory_desc_t *md;
-+ void *p;
-+
-+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-+ md = p;
-+ if (!is_available_memory(md))
-+ continue;
-+ addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
-+ end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
-+
-+ if ((pagenr >= addr) && (pagenr < end))
-+ return 1;
-+ }
-+ return 0;
-+ }
-+
-+ for (i = 0; i < e820.nr_map; i++) {
-+
-+ if (e820.map[i].type != E820_RAM) /* not usable memory */
-+ continue;
-+ /*
-+ * !!!FIXME!!! Some BIOSen report areas as RAM that
-+ * are not. Notably the 640->1Mb area. We need a sanity
-+ * check here.
-+ */
-+ addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
-+ end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
-+ if ((pagenr >= addr) && (pagenr < end))
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+#ifdef CONFIG_HIGHMEM
-+pte_t *kmap_pte;
-+pgprot_t kmap_prot;
-+
-+#define kmap_get_fixmap_pte(vaddr) \
-+ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
-+
-+static void __init kmap_init(void)
-+{
-+ unsigned long kmap_vstart;
-+
-+ /* cache the first kmap pte */
-+ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
-+ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-+
-+ kmap_prot = PAGE_KERNEL;
-+}
-+
-+static void __init permanent_kmaps_init(pgd_t *pgd_base)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+ unsigned long vaddr;
-+
-+ vaddr = PKMAP_BASE;
-+ page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
-+
-+ pgd = swapper_pg_dir + pgd_index(vaddr);
-+ pud = pud_offset(pgd, vaddr);
-+ pmd = pmd_offset(pud, vaddr);
-+ pte = pte_offset_kernel(pmd, vaddr);
-+ pkmap_page_table = pte;
-+}
-+
-+static void __meminit free_new_highpage(struct page *page, int pfn)
-+{
-+ init_page_count(page);
-+ if (pfn < xen_start_info->nr_pages)
-+ __free_page(page);
-+ totalhigh_pages++;
-+}
-+
-+void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
-+{
-+ if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
-+ ClearPageReserved(page);
-+ free_new_highpage(page, pfn);
-+ } else
-+ SetPageReserved(page);
-+}
-+
-+static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
-+{
-+ free_new_highpage(page, pfn);
-+ totalram_pages++;
-+#ifdef CONFIG_FLATMEM
-+ max_mapnr = max(pfn, max_mapnr);
-+#endif
-+ num_physpages++;
-+ return 0;
-+}
-+
-+/*
-+ * Not currently handling the NUMA case.
-+ * Assuming a single node, and that all memory
-+ * added dynamically and onlined here is
-+ * in HIGHMEM
-+ */
-+void __meminit online_page(struct page *page)
-+{
-+ ClearPageReserved(page);
-+ add_one_highpage_hotplug(page, page_to_pfn(page));
-+}
-+
-+
-+#ifdef CONFIG_NUMA
-+extern void set_highmem_pages_init(int);
-+#else
-+static void __init set_highmem_pages_init(int bad_ppro)
-+{
-+ int pfn;
-+ for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
-+ add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
-+ totalram_pages += totalhigh_pages;
-+}
-+#endif /* CONFIG_NUMA */
-+
-+#else
-+#define kmap_init() do { } while (0)
-+#define permanent_kmaps_init(pgd_base) do { } while (0)
-+#define set_highmem_pages_init(bad_ppro) do { } while (0)
-+#endif /* CONFIG_HIGHMEM */
-+
-+unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
-+EXPORT_SYMBOL(__PAGE_KERNEL);
-+unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
-+
-+#ifdef CONFIG_NUMA
-+extern void __init remap_numa_kva(void);
-+#else
-+#define remap_numa_kva() do {} while (0)
-+#endif
-+
-+pgd_t *swapper_pg_dir;
-+
-+static void __init xen_pagetable_setup_start(pgd_t *base)
-+{
-+ swapper_pg_dir = base;
-+ init_mm.pgd = base;
-+}
-+
-+static void __init xen_pagetable_setup_done(pgd_t *base)
-+{
-+}
-+
-+/*
-+ * Build a proper pagetable for the kernel mappings. Up until this
-+ * point, we've been running on some set of pagetables constructed by
-+ * the boot process.
-+ *
-+ * If we're booting on native hardware, this will be a pagetable
-+ * constructed in arch/i386/kernel/head.S, and not running in PAE mode
-+ * (even if we'll end up running in PAE). The root of the pagetable
-+ * will be swapper_pg_dir.
-+ *
-+ * If we're booting paravirtualized under a hypervisor, then there are
-+ * more options: we may already be running PAE, and the pagetable may
-+ * or may not be based in swapper_pg_dir. In any case,
-+ * paravirt_pagetable_setup_start() will set up swapper_pg_dir
-+ * appropriately for the rest of the initialization to work.
-+ *
-+ * In general, pagetable_init() assumes that the pagetable may already
-+ * be partially populated, and so it avoids stomping on any existing
-+ * mappings.
-+ */
-+static void __init pagetable_init (void)
-+{
-+ unsigned long vaddr, end;
-+ pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;
-+
-+ xen_pagetable_setup_start(pgd_base);
-+
-+ /* Enable PSE if available */
-+ if (cpu_has_pse)
-+ set_in_cr4(X86_CR4_PSE);
-+
-+ /* Enable PGE if available */
-+ if (cpu_has_pge) {
-+ set_in_cr4(X86_CR4_PGE);
-+ __PAGE_KERNEL |= _PAGE_GLOBAL;
-+ __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
-+ }
-+
-+ kernel_physical_mapping_init(pgd_base);
-+ remap_numa_kva();
-+
-+ /*
-+ * Fixed mappings, only the page table structure has to be
-+ * created - mappings will be set by set_fixmap():
-+ */
-+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-+ end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
-+ page_table_range_init(vaddr, end, pgd_base);
-+
-+ permanent_kmaps_init(pgd_base);
-+
-+ xen_pagetable_setup_done(pgd_base);
-+}
-+
-+#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
-+/*
-+ * Swap suspend & friends need this for resume because things like the intel-agp
-+ * driver might have split up a kernel 4MB mapping.
-+ */
-+char __nosavedata swsusp_pg_dir[PAGE_SIZE]
-+ __attribute__ ((aligned (PAGE_SIZE)));
-+
-+static inline void save_pg_dir(void)
-+{
-+ memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
-+}
-+#else
-+static inline void save_pg_dir(void)
-+{
-+}
-+#endif
-+
-+void zap_low_mappings (void)
-+{
-+ int i;
-+
-+ save_pg_dir();
-+
-+ /*
-+ * Zap initial low-memory mappings.
-+ *
-+ * Note that "pgd_clear()" doesn't do it for
-+ * us, because pgd_clear() is a no-op on i386.
-+ */
-+ for (i = 0; i < USER_PTRS_PER_PGD; i++)
-+#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
-+ set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
-+#else
-+ set_pgd(swapper_pg_dir+i, __pgd(0));
-+#endif
-+ flush_tlb_all();
-+}
-+
-+static int disable_nx __initdata = 0;
-+u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
-+EXPORT_SYMBOL(__supported_pte_mask);
-+
-+/*
-+ * noexec = on|off
-+ *
-+ * Control non executable mappings.
-+ *
-+ * on Enable
-+ * off Disable
-+ */
-+static int __init noexec_setup(char *str)
-+{
-+ if (!str || !strcmp(str, "on")) {
-+ if (cpu_has_nx) {
-+ __supported_pte_mask |= _PAGE_NX;
-+ disable_nx = 0;
-+ }
-+ } else if (!strcmp(str,"off")) {
-+ disable_nx = 1;
-+ __supported_pte_mask &= ~_PAGE_NX;
-+ } else
-+ return -EINVAL;
-+
-+ return 0;
-+}
-+early_param("noexec", noexec_setup);
-+
-+int nx_enabled = 0;
-+#ifdef CONFIG_X86_PAE
-+
-+static void __init set_nx(void)
-+{
-+ unsigned int v[4], l, h;
-+
-+ if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
-+ cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
-+ if ((v[3] & (1 << 20)) && !disable_nx) {
-+ rdmsr(MSR_EFER, l, h);
-+ l |= EFER_NX;
-+ wrmsr(MSR_EFER, l, h);
-+ nx_enabled = 1;
-+ __supported_pte_mask |= _PAGE_NX;
-+ }
-+ }
-+}
-+
-+/*
-+ * Enables/disables executability of a given kernel page and
-+ * returns the previous setting.
-+ */
-+int __init set_kernel_exec(unsigned long vaddr, int enable)
-+{
-+ pte_t *pte;
-+ int ret = 1;
-+
-+ if (!nx_enabled)
-+ goto out;
-+
-+ pte = lookup_address(vaddr);
-+ BUG_ON(!pte);
-+
-+ if (!pte_exec_kernel(*pte))
-+ ret = 0;
-+
-+ if (enable)
-+ pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
-+ else
-+ pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
-+ pte_update_defer(&init_mm, vaddr, pte);
-+ __flush_tlb_all();
-+out:
-+ return ret;
-+}
-+
-+#endif
-+
-+/*
-+ * paging_init() sets up the page tables - note that the first 8MB are
-+ * already mapped by head.S.
-+ *
-+ * This routine also unmaps the page at virtual kernel address 0, so
-+ * that we can trap those pesky NULL-reference errors in the kernel.
-+ */
-+void __init paging_init(void)
-+{
-+ int i;
-+
-+#ifdef CONFIG_X86_PAE
-+ set_nx();
-+ if (nx_enabled)
-+ printk("NX (Execute Disable) protection: active\n");
-+#endif
-+
-+ pagetable_init();
-+
-+#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
-+ /*
-+ * We will bail out later - printk doesn't work right now so
-+ * the user would just see a hanging kernel.
-+ * When running as a Xen domain we are already in PAE mode at
-+ * this point.
-+ */
-+ if (cpu_has_pae)
-+ set_in_cr4(X86_CR4_PAE);
-+#endif
-+ __flush_tlb_all();
-+
-+ kmap_init();
-+
-+ /* Switch to the real shared_info page, and clear the
-+ * dummy page. */
-+ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
-+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-+ memset(empty_zero_page, 0, sizeof(empty_zero_page));
-+
-+ /* Setup mapping of lower 1st MB */
-+ for (i = 0; i < NR_FIX_ISAMAPS; i++)
-+ if (is_initial_xendomain())
-+ set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
-+ else
-+ __set_fixmap(FIX_ISAMAP_BEGIN - i,
-+ virt_to_machine(empty_zero_page),
-+ PAGE_KERNEL_RO);
-+}
-+
-+/*
-+ * Test if the WP bit works in supervisor mode. It isn't supported on 386's
-+ * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
-+ * used to involve black magic jumps to work around some nasty CPU bugs,
-+ * but fortunately the switch to using exceptions got rid of all that.
-+ */
-+
-+static void __init test_wp_bit(void)
-+{
-+ printk("Checking if this processor honours the WP bit even in supervisor mode... ");
-+
-+ /* Any page-aligned address will do, the test is non-destructive */
-+ __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
-+ boot_cpu_data.wp_works_ok = do_test_wp_bit();
-+ clear_fixmap(FIX_WP_TEST);
-+
-+ if (!boot_cpu_data.wp_works_ok) {
-+ printk("No.\n");
-+#ifdef CONFIG_X86_WP_WORKS_OK
-+ panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
-+#endif
-+ } else {
-+ printk("Ok.\n");
-+ }
-+}
-+
-+static struct kcore_list kcore_mem, kcore_vmalloc;
-+
-+void __init mem_init(void)
-+{
-+ extern int ppro_with_ram_bug(void);
-+ int codesize, reservedpages, datasize, initsize;
-+ int tmp;
-+ int bad_ppro;
-+ unsigned long pfn;
-+
-+ contiguous_bitmap = alloc_bootmem_low_pages(
-+ (max_low_pfn + 2*BITS_PER_LONG) >> 3);
-+ BUG_ON(!contiguous_bitmap);
-+ memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
-+
-+#if defined(CONFIG_SWIOTLB)
-+ swiotlb_init();
-+#endif
-+
-+#ifdef CONFIG_FLATMEM
-+ BUG_ON(!mem_map);
-+#endif
-+
-+ bad_ppro = ppro_with_ram_bug();
-+
-+#ifdef CONFIG_HIGHMEM
-+ /* check that fixmap and pkmap do not overlap */
-+ if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
-+ printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
-+ printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
-+ PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
-+ BUG();
-+ }
-+#endif
-+
-+ printk("vmalloc area: %lx-%lx, maxmem %lx\n",
-+ VMALLOC_START,VMALLOC_END,MAXMEM);
-+ BUG_ON(VMALLOC_START > VMALLOC_END);
-+
-+ /* this will put all low memory onto the freelists */
-+ totalram_pages += free_all_bootmem();
-+ /* XEN: init and count low-mem pages outside initial allocation. */
-+ for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
-+ ClearPageReserved(pfn_to_page(pfn));
-+ init_page_count(pfn_to_page(pfn));
-+ totalram_pages++;
-+ }
-+
-+ reservedpages = 0;
-+ for (tmp = 0; tmp < max_low_pfn; tmp++)
-+ /*
-+ * Only count reserved RAM pages
-+ */
-+ if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
-+ reservedpages++;
-+
-+ set_highmem_pages_init(bad_ppro);
-+
-+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
-+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
-+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
-+
-+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
-+ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
-+ VMALLOC_END-VMALLOC_START);
-+
-+ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
-+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-+ num_physpages << (PAGE_SHIFT-10),
-+ codesize >> 10,
-+ reservedpages << (PAGE_SHIFT-10),
-+ datasize >> 10,
-+ initsize >> 10,
-+ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
-+ );
-+
-+#if 1 /* double-sanity-check paranoia */
-+ printk("virtual kernel memory layout:\n"
-+ " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
-+#ifdef CONFIG_HIGHMEM
-+ " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
-+#endif
-+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
-+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
-+ " .init : 0x%08lx - 0x%08lx (%4ld kB)\n"
-+ " .data : 0x%08lx - 0x%08lx (%4ld kB)\n"
-+ " .text : 0x%08lx - 0x%08lx (%4ld kB)\n",
-+ FIXADDR_START, FIXADDR_TOP,
-+ (FIXADDR_TOP - FIXADDR_START) >> 10,
-+
-+#ifdef CONFIG_HIGHMEM
-+ PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
-+ (LAST_PKMAP*PAGE_SIZE) >> 10,
-+#endif
-+
-+ VMALLOC_START, VMALLOC_END,
-+ (VMALLOC_END - VMALLOC_START) >> 20,
-+
-+ (unsigned long)__va(0), (unsigned long)high_memory,
-+ ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
-+
-+ (unsigned long)&__init_begin, (unsigned long)&__init_end,
-+ ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,
-+
-+ (unsigned long)&_etext, (unsigned long)&_edata,
-+ ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
-+
-+ (unsigned long)&_text, (unsigned long)&_etext,
-+ ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
-+
-+#ifdef CONFIG_HIGHMEM
-+ BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
-+ BUG_ON(VMALLOC_END > PKMAP_BASE);
-+#endif
-+ BUG_ON(VMALLOC_START > VMALLOC_END);
-+ BUG_ON((unsigned long)high_memory > VMALLOC_START);
-+#endif /* double-sanity-check paranoia */
-+
-+#ifdef CONFIG_X86_PAE
-+ if (!cpu_has_pae)
-+ panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
-+#endif
-+ if (boot_cpu_data.wp_works_ok < 0)
-+ test_wp_bit();
-+
-+ /*
-+ * Subtle. SMP is doing its boot stuff late (because it has to
-+ * fork idle threads) - but it also needs low mappings for the
-+ * protected-mode entry to work. We zap these entries only after
-+ * the WP-bit has been tested.
-+ */
-+#ifndef CONFIG_SMP
-+ zap_low_mappings();
-+#endif
-+
-+ set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
-+}
-+
-+#ifdef CONFIG_MEMORY_HOTPLUG
-+int arch_add_memory(int nid, u64 start, u64 size)
-+{
-+ struct pglist_data *pgdata = NODE_DATA(nid);
-+ struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
-+ unsigned long start_pfn = start >> PAGE_SHIFT;
-+ unsigned long nr_pages = size >> PAGE_SHIFT;
-+
-+ return __add_pages(zone, start_pfn, nr_pages);
-+}
-+
-+int remove_memory(u64 start, u64 size)
-+{
-+ return -EINVAL;
-+}
-+EXPORT_SYMBOL_GPL(remove_memory);
-+#endif
-+
-+struct kmem_cache *pmd_cache;
-+
-+void __init pgtable_cache_init(void)
-+{
-+ size_t pgd_size = PTRS_PER_PGD*sizeof(pgd_t);
-+
-+ if (PTRS_PER_PMD > 1) {
-+ pmd_cache = kmem_cache_create("pmd",
-+ PTRS_PER_PMD*sizeof(pmd_t),
-+ PTRS_PER_PMD*sizeof(pmd_t),
-+ SLAB_PANIC,
-+ pmd_ctor,
-+ NULL);
-+ if (!SHARED_KERNEL_PMD) {
-+ /* If we're in PAE mode and have a non-shared
-+ kernel pmd, then the pgd size must be a
-+ page size. This is because the pgd_list
-+ links through the page structure, so there
-+ can only be one pgd per page for this to
-+ work. */
-+ pgd_size = PAGE_SIZE;
-+ }
-+ }
-+}
-+
-+/*
-+ * This function cannot be __init, since exceptions don't work in that
-+ * section. Put this after the callers, so that it cannot be inlined.
-+ */
-+static int noinline do_test_wp_bit(void)
-+{
-+ char tmp_reg;
-+ int flag;
-+
-+ __asm__ __volatile__(
-+ " movb %0,%1 \n"
-+ "1: movb %1,%0 \n"
-+ " xorl %2,%2 \n"
-+ "2: \n"
-+ ".section __ex_table,\"a\"\n"
-+ " .align 4 \n"
-+ " .long 1b,2b \n"
-+ ".previous \n"
-+ :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
-+ "=q" (tmp_reg),
-+ "=r" (flag)
-+ :"2" (1)
-+ :"memory");
-+
-+ return flag;
-+}
-+
-+#ifdef CONFIG_DEBUG_RODATA
-+
-+void mark_rodata_ro(void)
-+{
-+ unsigned long start = PFN_ALIGN(_text);
-+ unsigned long size = PFN_ALIGN(_etext) - start;
-+
-+#ifndef CONFIG_KPROBES
-+#ifdef CONFIG_HOTPLUG_CPU
-+ /* It must still be possible to apply SMP alternatives. */
-+ if (num_possible_cpus() <= 1)
-+#endif
-+ {
-+ change_page_attr(virt_to_page(start),
-+ size >> PAGE_SHIFT, PAGE_KERNEL_RX);
-+ printk("Write protecting the kernel text: %luk\n", size >> 10);
-+ }
-+#endif
-+ start += size;
-+ size = (unsigned long)__end_rodata - start;
-+ change_page_attr(virt_to_page(start),
-+ size >> PAGE_SHIFT, PAGE_KERNEL_RO);
-+ printk("Write protecting the kernel read-only data: %luk\n",
-+ size >> 10);
-+
-+ /*
-+ * change_page_attr() requires a global_flush_tlb() call after it.
-+ * We do this after the printk so that if something went wrong in the
-+ * change, the printk gets out at least to give a better debug hint
-+ * of who is the culprit.
-+ */
-+ global_flush_tlb();
-+}
-+#endif
-+
-+void free_init_pages(char *what, unsigned long begin, unsigned long end)
-+{
-+ unsigned long addr;
-+
-+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
-+ ClearPageReserved(virt_to_page(addr));
-+ init_page_count(virt_to_page(addr));
-+ memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
-+ free_page(addr);
-+ totalram_pages++;
-+ }
-+ printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-+}
-+
-+void free_initmem(void)
-+{
-+ free_init_pages("unused kernel memory",
-+ (unsigned long)(&__init_begin),
-+ (unsigned long)(&__init_end));
-+}
-+
-+#ifdef CONFIG_BLK_DEV_INITRD
-+void free_initrd_mem(unsigned long start, unsigned long end)
-+{
-+ free_init_pages("initrd memory", start, end);
-+}
-+#endif
-+
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/mm/ioremap-xen.c ubuntu-gutsy-xen/arch/i386/mm/ioremap-xen.c
---- ubuntu-gutsy/arch/i386/mm/ioremap-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/mm/ioremap-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,446 @@
-+/*
-+ * arch/i386/mm/ioremap.c
-+ *
-+ * Re-map IO memory to kernel address space so that we can access it.
-+ * This is needed for high PCI addresses that aren't mapped in the
-+ * 640k-1MB IO memory area on PC's
-+ *
-+ * (C) Copyright 1995 1996 Linus Torvalds
-+ */
-+
-+#include <linux/vmalloc.h>
-+#include <linux/init.h>
-+#include <linux/slab.h>
-+#include <linux/module.h>
-+#include <linux/io.h>
-+#include <linux/sched.h>
-+#include <asm/fixmap.h>
-+#include <asm/cacheflush.h>
-+#include <asm/tlbflush.h>
-+#include <asm/pgtable.h>
-+#include <asm/pgalloc.h>
-+
-+#define ISA_START_ADDRESS 0x0
-+#define ISA_END_ADDRESS 0x100000
-+
-+static int direct_remap_area_pte_fn(pte_t *pte,
-+ struct page *pmd_page,
-+ unsigned long address,
-+ void *data)
-+{
-+ mmu_update_t **v = (mmu_update_t **)data;
-+
-+ BUG_ON(!pte_none(*pte));
-+
-+ (*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pmd_page)) <<
-+ PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
-+ (*v)++;
-+
-+ return 0;
-+}
-+
-+static int __direct_remap_pfn_range(struct mm_struct *mm,
-+ unsigned long address,
-+ unsigned long mfn,
-+ unsigned long size,
-+ pgprot_t prot,
-+ domid_t domid)
-+{
-+ int rc;
-+ unsigned long i, start_address;
-+ mmu_update_t *u, *v, *w;
-+
-+ u = v = w = (mmu_update_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-+ if (u == NULL)
-+ return -ENOMEM;
-+
-+ start_address = address;
-+
-+ flush_cache_all();
-+
-+ for (i = 0; i < size; i += PAGE_SIZE) {
-+ if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) {
-+ /* Flush a full batch after filling in the PTE ptrs. */
-+ rc = apply_to_page_range(mm, start_address,
-+ address - start_address,
-+ direct_remap_area_pte_fn, &w);
-+ if (rc)
-+ goto out;
-+ rc = -EFAULT;
-+ if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
-+ goto out;
-+ v = w = u;
-+ start_address = address;
-+ }
-+
-+ /*
-+ * Fill in the machine address: PTE ptr is done later by
-+ * __direct_remap_area_pages().
-+ */
-+ v->val = __pte_val(pfn_pte_ma(mfn, prot));
-+
-+ mfn++;
-+ address += PAGE_SIZE;
-+ v++;
-+ }
-+
-+ if (v != u) {
-+ /* Final batch. */
-+ rc = apply_to_page_range(mm, start_address,
-+ address - start_address,
-+ direct_remap_area_pte_fn, &w);
-+ if (rc)
-+ goto out;
-+ rc = -EFAULT;
-+ if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
-+ goto out;
-+ }
-+
-+ rc = 0;
-+
-+ out:
-+ flush_tlb_all();
-+
-+ free_page((unsigned long)u);
-+
-+ return rc;
-+}
-+
-+int direct_remap_pfn_range(struct vm_area_struct *vma,
-+ unsigned long address,
-+ unsigned long mfn,
-+ unsigned long size,
-+ pgprot_t prot,
-+ domid_t domid)
-+{
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return remap_pfn_range(vma, address, mfn, size, prot);
-+
-+ if (domid == DOMID_SELF)
-+ return -EINVAL;
-+
-+ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
-+
-+ vma->vm_mm->context.has_foreign_mappings = 1;
-+
-+ return __direct_remap_pfn_range(
-+ vma->vm_mm, address, mfn, size, prot, domid);
-+}
-+EXPORT_SYMBOL(direct_remap_pfn_range);
-+
-+int direct_kernel_remap_pfn_range(unsigned long address,
-+ unsigned long mfn,
-+ unsigned long size,
-+ pgprot_t prot,
-+ domid_t domid)
-+{
-+ return __direct_remap_pfn_range(
-+ &init_mm, address, mfn, size, prot, domid);
-+}
-+EXPORT_SYMBOL(direct_kernel_remap_pfn_range);
-+
-+static int lookup_pte_fn(
-+ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
-+{
-+ uint64_t *ptep = (uint64_t *)data;
-+ if (ptep)
-+ *ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pmd_page)) <<
-+ PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
-+ return 0;
-+}
-+
-+int create_lookup_pte_addr(struct mm_struct *mm,
-+ unsigned long address,
-+ uint64_t *ptep)
-+{
-+ return apply_to_page_range(mm, address, PAGE_SIZE,
-+ lookup_pte_fn, ptep);
-+}
-+
-+EXPORT_SYMBOL(create_lookup_pte_addr);
-+
-+static int noop_fn(
-+ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
-+{
-+ return 0;
-+}
-+
-+int touch_pte_range(struct mm_struct *mm,
-+ unsigned long address,
-+ unsigned long size)
-+{
-+ return apply_to_page_range(mm, address, size, noop_fn, NULL);
-+}
-+
-+EXPORT_SYMBOL(touch_pte_range);
-+
-+/*
-+ * Does @address reside within a non-highmem page that is local to this virtual
-+ * machine (i.e., not an I/O page, nor a memory page belonging to another VM).
-+ * See the comment that accompanies mfn_to_local_pfn() in page.h to understand
-+ * why this works.
-+ */
-+static inline int is_local_lowmem(unsigned long address)
-+{
-+ extern unsigned long max_low_pfn;
-+ return (mfn_to_local_pfn(address >> PAGE_SHIFT) < max_low_pfn);
-+}
-+
-+/*
-+ * Generic mapping function (not visible outside):
-+ */
-+
-+/*
-+ * Remap an arbitrary physical address space into the kernel virtual
-+ * address space. Needed when the kernel wants to access high addresses
-+ * directly.
-+ *
-+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
-+ * have to convert them into an offset in a page-aligned mapping, but the
-+ * caller shouldn't need to know that small detail.
-+ */
-+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
-+{
-+ void __iomem * addr;
-+ struct vm_struct * area;
-+ unsigned long offset, last_addr;
-+ pgprot_t prot;
-+ domid_t domid = DOMID_IO;
-+
-+ /* Don't allow wraparound or zero size */
-+ last_addr = phys_addr + size - 1;
-+ if (!size || last_addr < phys_addr)
-+ return NULL;
-+
-+ /*
-+ * Don't remap the low PCI/ISA area, it's always mapped..
-+ */
-+ if (is_initial_xendomain() &&
-+ phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
-+ return (void __iomem *) isa_bus_to_virt(phys_addr);
-+
-+ /*
-+ * Don't allow anybody to remap normal RAM that we're using..
-+ */
-+ if (is_local_lowmem(phys_addr)) {
-+ char *t_addr, *t_end;
-+ struct page *page;
-+
-+ t_addr = bus_to_virt(phys_addr);
-+ t_end = t_addr + (size - 1);
-+
-+ for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-+ if(!PageReserved(page))
-+ return NULL;
-+
-+ domid = DOMID_SELF;
-+ }
-+
-+ prot = __pgprot(_KERNPG_TABLE | flags);
-+
-+ /*
-+ * Mappings have to be page-aligned
-+ */
-+ offset = phys_addr & ~PAGE_MASK;
-+ phys_addr &= PAGE_MASK;
-+ size = PAGE_ALIGN(last_addr+1) - phys_addr;
-+
-+ /*
-+ * Ok, go for it..
-+ */
-+ area = get_vm_area(size, VM_IOREMAP | (flags << 20));
-+ if (!area)
-+ return NULL;
-+ area->phys_addr = phys_addr;
-+ addr = (void __iomem *) area->addr;
-+ if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
-+ phys_addr>>PAGE_SHIFT,
-+ size, prot, domid)) {
-+ vunmap((void __force *) addr);
-+ return NULL;
-+ }
-+ return (void __iomem *) (offset + (char __iomem *)addr);
-+}
-+EXPORT_SYMBOL(__ioremap);
-+
-+/**
-+ * ioremap_nocache - map bus memory into CPU space
-+ * @offset: bus address of the memory
-+ * @size: size of the resource to map
-+ *
-+ * ioremap_nocache performs a platform specific sequence of operations to
-+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
-+ * writew/writel functions and the other mmio helpers. The returned
-+ * address is not guaranteed to be usable directly as a virtual
-+ * address.
-+ *
-+ * This version of ioremap ensures that the memory is marked uncachable
-+ * on the CPU as well as honouring existing caching rules from things like
-+ * the PCI bus. Note that there are other caches and buffers on many
-+ * busses. In particular driver authors should read up on PCI writes
-+ *
-+ * It's useful if some control registers are in such an area and
-+ * write combining or read caching is not desirable:
-+ *
-+ * Must be freed with iounmap.
-+ */
-+
-+void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
-+{
-+ unsigned long last_addr;
-+ void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
-+ if (!p)
-+ return p;
-+
-+ /* Guaranteed to be > phys_addr, as per __ioremap() */
-+ last_addr = phys_addr + size - 1;
-+
-+ if (is_local_lowmem(last_addr)) {
-+ struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
-+ unsigned long npages;
-+
-+ phys_addr &= PAGE_MASK;
-+
-+ /* This might overflow and become zero.. */
-+ last_addr = PAGE_ALIGN(last_addr);
-+
-+ /* .. but that's ok, because modulo-2**n arithmetic will make
-+ * the page-aligned "last - first" come out right.
-+ */
-+ npages = (last_addr - phys_addr) >> PAGE_SHIFT;
-+
-+ if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
-+ iounmap(p);
-+ p = NULL;
-+ }
-+ global_flush_tlb();
-+ }
-+
-+ return p;
-+}
-+EXPORT_SYMBOL(ioremap_nocache);
-+
-+/**
-+ * iounmap - Free an IO remapping
-+ * @addr: virtual address from ioremap_*
-+ *
-+ * Caller must ensure there is only one unmapping for the same pointer.
-+ */
-+void iounmap(volatile void __iomem *addr)
-+{
-+ struct vm_struct *p, *o;
-+
-+ if ((void __force *)addr <= high_memory)
-+ return;
-+
-+ /*
-+ * __ioremap special-cases the PCI/ISA range by not instantiating a
-+ * vm_area and by simply returning an address into the kernel mapping
-+ * of ISA space. So handle that here.
-+ */
-+ if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
-+ return;
-+
-+ addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
-+
-+ /* Use the vm area unlocked, assuming the caller
-+ ensures there isn't another iounmap for the same address
-+ in parallel. Reuse of the virtual address is prevented by
-+ leaving it in the global lists until we're done with it.
-+ cpa takes care of the direct mappings. */
-+ read_lock(&vmlist_lock);
-+ for (p = vmlist; p; p = p->next) {
-+ if (p->addr == addr)
-+ break;
-+ }
-+ read_unlock(&vmlist_lock);
-+
-+ if (!p) {
-+ printk("iounmap: bad address %p\n", addr);
-+ dump_stack();
-+ return;
-+ }
-+
-+ /* Reset the direct mapping. Can block */
-+ if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
-+ /* p->size includes the guard page, but cpa doesn't like that */
-+ change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
-+ (p->size - PAGE_SIZE) >> PAGE_SHIFT,
-+ PAGE_KERNEL);
-+ global_flush_tlb();
-+ }
-+
-+ /* Finally remove it */
-+ o = remove_vm_area((void *)addr);
-+ BUG_ON(p != o || o == NULL);
-+ kfree(p);
-+}
-+EXPORT_SYMBOL(iounmap);
-+
-+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
-+{
-+ unsigned long offset, last_addr;
-+ unsigned int nrpages;
-+ enum fixed_addresses idx;
-+
-+ /* Don't allow wraparound or zero size */
-+ last_addr = phys_addr + size - 1;
-+ if (!size || last_addr < phys_addr)
-+ return NULL;
-+
-+ /*
-+ * Don't remap the low PCI/ISA area, it's always mapped..
-+ */
-+ if (is_initial_xendomain() &&
-+ phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
-+ return isa_bus_to_virt(phys_addr);
-+
-+ /*
-+ * Mappings have to be page-aligned
-+ */
-+ offset = phys_addr & ~PAGE_MASK;
-+ phys_addr &= PAGE_MASK;
-+ size = PAGE_ALIGN(last_addr) - phys_addr;
-+
-+ /*
-+ * Mappings have to fit in the FIX_BTMAP area.
-+ */
-+ nrpages = size >> PAGE_SHIFT;
-+ if (nrpages > NR_FIX_BTMAPS)
-+ return NULL;
-+
-+ /*
-+ * Ok, go for it..
-+ */
-+ idx = FIX_BTMAP_BEGIN;
-+ while (nrpages > 0) {
-+ set_fixmap(idx, phys_addr);
-+ phys_addr += PAGE_SIZE;
-+ --idx;
-+ --nrpages;
-+ }
-+ return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
-+}
-+
-+void __init bt_iounmap(void *addr, unsigned long size)
-+{
-+ unsigned long virt_addr;
-+ unsigned long offset;
-+ unsigned int nrpages;
-+ enum fixed_addresses idx;
-+
-+ virt_addr = (unsigned long)addr;
-+ if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
-+ return;
-+ if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
-+ return;
-+ offset = virt_addr & ~PAGE_MASK;
-+ nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
-+
-+ idx = FIX_BTMAP_BEGIN;
-+ while (nrpages > 0) {
-+ clear_fixmap(idx);
-+ --idx;
-+ --nrpages;
-+ }
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/mm/Makefile ubuntu-gutsy-xen/arch/i386/mm/Makefile
---- ubuntu-gutsy/arch/i386/mm/Makefile 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/mm/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -8,3 +8,11 @@
- obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
- obj-$(CONFIG_HIGHMEM) += highmem.o
- obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
-+
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+
-+obj-y += hypervisor.o
-+
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/mm/pgtable-xen.c ubuntu-gutsy-xen/arch/i386/mm/pgtable-xen.c
---- ubuntu-gutsy/arch/i386/mm/pgtable-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/mm/pgtable-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,869 @@
-+/*
-+ * linux/arch/i386/mm/pgtable.c
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/swap.h>
-+#include <linux/smp.h>
-+#include <linux/highmem.h>
-+#include <linux/slab.h>
-+#include <linux/pagemap.h>
-+#include <linux/spinlock.h>
-+#include <linux/module.h>
-+#include <linux/quicklist.h>
-+
-+#include <asm/system.h>
-+#include <asm/pgtable.h>
-+#include <asm/pgalloc.h>
-+#include <asm/fixmap.h>
-+#include <asm/e820.h>
-+#include <asm/tlb.h>
-+#include <asm/tlbflush.h>
-+#include <asm/io.h>
-+#include <asm/mmu_context.h>
-+
-+#include <xen/features.h>
-+#include <asm/hypervisor.h>
-+
-+static void pgd_test_and_unpin(pgd_t *pgd);
-+
-+void show_mem(void)
-+{
-+ int total = 0, reserved = 0;
-+ int shared = 0, cached = 0;
-+ int highmem = 0;
-+ struct page *page;
-+ pg_data_t *pgdat;
-+ unsigned long i;
-+ unsigned long flags;
-+
-+ printk(KERN_INFO "Mem-info:\n");
-+ show_free_areas();
-+ printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-+ for_each_online_pgdat(pgdat) {
-+ pgdat_resize_lock(pgdat, &flags);
-+ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-+ page = pgdat_page_nr(pgdat, i);
-+ total++;
-+ if (PageHighMem(page))
-+ highmem++;
-+ if (PageReserved(page))
-+ reserved++;
-+ else if (PageSwapCache(page))
-+ cached++;
-+ else if (page_count(page))
-+ shared += page_count(page) - 1;
-+ }
-+ pgdat_resize_unlock(pgdat, &flags);
-+ }
-+ printk(KERN_INFO "%d pages of RAM\n", total);
-+ printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
-+ printk(KERN_INFO "%d reserved pages\n", reserved);
-+ printk(KERN_INFO "%d pages shared\n", shared);
-+ printk(KERN_INFO "%d pages swap cached\n", cached);
-+
-+ printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
-+ printk(KERN_INFO "%lu pages writeback\n",
-+ global_page_state(NR_WRITEBACK));
-+ printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
-+ printk(KERN_INFO "%lu pages slab\n",
-+ global_page_state(NR_SLAB_RECLAIMABLE) +
-+ global_page_state(NR_SLAB_UNRECLAIMABLE));
-+ printk(KERN_INFO "%lu pages pagetables\n",
-+ global_page_state(NR_PAGETABLE));
-+}
-+
-+/*
-+ * Associate a virtual page frame with a given physical page frame
-+ * and protection flags for that frame.
-+ */
-+static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+
-+ pgd = swapper_pg_dir + pgd_index(vaddr);
-+ if (pgd_none(*pgd)) {
-+ BUG();
-+ return;
-+ }
-+ pud = pud_offset(pgd, vaddr);
-+ if (pud_none(*pud)) {
-+ BUG();
-+ return;
-+ }
-+ pmd = pmd_offset(pud, vaddr);
-+ if (pmd_none(*pmd)) {
-+ BUG();
-+ return;
-+ }
-+ pte = pte_offset_kernel(pmd, vaddr);
-+ if (pgprot_val(flags))
-+ /* <pfn,flags> stored as-is, to permit clearing entries */
-+ set_pte(pte, pfn_pte(pfn, flags));
-+ else
-+ pte_clear(&init_mm, vaddr, pte);
-+
-+ /*
-+ * It's enough to flush this one mapping.
-+ * (PGE mappings get flushed as well)
-+ */
-+ __flush_tlb_one(vaddr);
-+}
-+
-+/*
-+ * Associate a virtual page frame with a given physical page frame
-+ * and protection flags for that frame.
-+ */
-+static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
-+ pgprot_t flags)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+
-+ pgd = swapper_pg_dir + pgd_index(vaddr);
-+ if (pgd_none(*pgd)) {
-+ BUG();
-+ return;
-+ }
-+ pud = pud_offset(pgd, vaddr);
-+ if (pud_none(*pud)) {
-+ BUG();
-+ return;
-+ }
-+ pmd = pmd_offset(pud, vaddr);
-+ if (pmd_none(*pmd)) {
-+ BUG();
-+ return;
-+ }
-+ pte = pte_offset_kernel(pmd, vaddr);
-+ if (pgprot_val(flags))
-+ /* <pfn,flags> stored as-is, to permit clearing entries */
-+ set_pte(pte, pfn_pte_ma(pfn, flags));
-+ else
-+ pte_clear(&init_mm, vaddr, pte);
-+
-+ /*
-+ * It's enough to flush this one mapping.
-+ * (PGE mappings get flushed as well)
-+ */
-+ __flush_tlb_one(vaddr);
-+}
-+
-+/*
-+ * Associate a large virtual page frame with a given physical page frame
-+ * and protection flags for that frame. pfn is for the base of the page,
-+ * vaddr is what the page gets mapped to - both must be properly aligned.
-+ * The pmd must already be instantiated. Assumes PAE mode.
-+ */
-+void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+
-+ if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
-+ printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
-+ return; /* BUG(); */
-+ }
-+ if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
-+ printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
-+ return; /* BUG(); */
-+ }
-+ pgd = swapper_pg_dir + pgd_index(vaddr);
-+ if (pgd_none(*pgd)) {
-+ printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
-+ return; /* BUG(); */
-+ }
-+ pud = pud_offset(pgd, vaddr);
-+ pmd = pmd_offset(pud, vaddr);
-+ set_pmd(pmd, pfn_pmd(pfn, flags));
-+ /*
-+ * It's enough to flush this one mapping.
-+ * (PGE mappings get flushed as well)
-+ */
-+ __flush_tlb_one(vaddr);
-+}
-+
-+static int fixmaps = 0;
-+unsigned long hypervisor_virt_start = HYPERVISOR_VIRT_START;
-+unsigned long __FIXADDR_TOP = (HYPERVISOR_VIRT_START - PAGE_SIZE);
-+EXPORT_SYMBOL(__FIXADDR_TOP);
-+
-+void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
-+{
-+ unsigned long address = __fix_to_virt(idx);
-+
-+ if (idx >= __end_of_fixed_addresses) {
-+ BUG();
-+ return;
-+ }
-+ switch (idx) {
-+ case FIX_WP_TEST:
-+#ifdef CONFIG_X86_F00F_BUG
-+ case FIX_F00F_IDT:
-+#endif
-+ case FIX_VDSO:
-+ set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
-+ break;
-+ default:
-+ set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
-+ break;
-+ }
-+ fixmaps++;
-+}
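-+
-+/*
-+ * The WP-test, F00F-IDT and vDSO fixmaps above are backed by ordinary
-+ * guest RAM, so their PTEs are built from a pseudo-physical frame
-+ * (set_pte_pfn/pfn_pte). The remaining fixmap slots are assumed to be
-+ * given machine addresses already (e.g. frames provided by the
-+ * hypervisor), hence set_pte_pfn_ma/pfn_pte_ma.
-+ */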
-+
-+/**
-+ * reserve_top_address - reserves a hole in the top of kernel address space
-+ * @reserve - size of hole to reserve
-+ *
-+ * Can be used to relocate the fixmap area and poke a hole in the top
-+ * of kernel address space to make room for a hypervisor.
-+ */
-+void __init reserve_top_address(unsigned long reserve)
-+{
-+ BUG_ON(fixmaps > 0);
-+ printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
-+ (int)-reserve);
-+ __FIXADDR_TOP = -reserve - PAGE_SIZE;
-+ __VMALLOC_RESERVE += reserve;
-+}
-+
-+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-+{
-+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-+ if (pte)
-+ make_lowmem_page_readonly(pte, XENFEAT_writable_page_tables);
-+ return pte;
-+}
-+
-+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
-+{
-+ struct page *pte;
-+
-+#ifdef CONFIG_HIGHPTE
-+ pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
-+#else
-+ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-+#endif
-+ if (pte) {
-+ SetPageForeign(pte, pte_free);
-+ init_page_count(pte);
-+ }
-+ return pte;
-+}
-+
-+void pte_free(struct page *pte)
-+{
-+ unsigned long pfn = page_to_pfn(pte);
-+
-+ if (!PageHighMem(pte)) {
-+ unsigned long va = (unsigned long)__va(pfn << PAGE_SHIFT);
-+
-+ if (!pte_write(*virt_to_ptep(va)))
-+ if (HYPERVISOR_update_va_mapping(
-+ va, pfn_pte(pfn, PAGE_KERNEL), 0))
-+ BUG();
-+ } else
-+ clear_bit(PG_pinned, &pte->flags);
-+
-+ ClearPageForeign(pte);
-+ init_page_count(pte);
-+
-+ __free_page(pte);
-+}
-+
-+void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
-+{
-+ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-+}
-+
-+/*
-+ * List of all pgd's needed for non-PAE so it can invalidate entries
-+ * in both cached and uncached pgd's; not needed for PAE since the
-+ * kernel pmd is shared. If PAE were not to share the pmd a similar
-+ * tactic would be needed. This is essentially codepath-based locking
-+ * against pageattr.c; it is the unique case in which a valid change
-+ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
-+ * vmalloc faults work because attached pagetables are never freed.
-+ * -- wli
-+ */
-+DEFINE_SPINLOCK(pgd_lock);
-+struct page *pgd_list;
-+
-+static inline void pgd_list_add(pgd_t *pgd)
-+{
-+ struct page *page = virt_to_page(pgd);
-+ page->index = (unsigned long)pgd_list;
-+ if (pgd_list)
-+ set_page_private(pgd_list, (unsigned long)&page->index);
-+ pgd_list = page;
-+ set_page_private(page, (unsigned long)&pgd_list);
-+}
-+
-+static inline void pgd_list_del(pgd_t *pgd)
-+{
-+ struct page *next, **pprev, *page = virt_to_page(pgd);
-+ next = (struct page *)page->index;
-+ pprev = (struct page **)page_private(page);
-+ *pprev = next;
-+ if (next)
-+ set_page_private(next, (unsigned long)pprev);
-+}
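-+
-+/*
-+ * Layout sketch for this list (mm_pin_all() below walks it the same way):
-+ * pgd_list points at the most recently added page, page->index holds the
-+ * next page, and page_private() holds the address of whichever pointer
-+ * currently references this page, so unlinking needs no explicit list
-+ * head. A walk therefore looks like:
-+ *
-+ *	struct page *page;
-+ *
-+ *	for (page = pgd_list; page; page = (struct page *)page->index)
-+ *		do_something(page_address(page));
-+ *
-+ * with do_something() standing in for whatever per-pgd work is needed.
-+ */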
-+
-+
-+
-+#if (PTRS_PER_PMD == 1)
-+/* Non-PAE pgd constructor */
-+void pgd_ctor(void *pgd)
-+{
-+ unsigned long flags;
-+
-+ memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-+
-+ spin_lock_irqsave(&pgd_lock, flags);
-+
-+ /* must happen under lock */
-+ clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
-+ swapper_pg_dir + USER_PTRS_PER_PGD,
-+ KERNEL_PGD_PTRS);
-+
-+ paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
-+ __pa(swapper_pg_dir) >> PAGE_SHIFT,
-+ USER_PTRS_PER_PGD,
-+ KERNEL_PGD_PTRS);
-+ pgd_list_add(pgd);
-+ spin_unlock_irqrestore(&pgd_lock, flags);
-+}
-+#else /* PTRS_PER_PMD > 1 */
-+/* PAE pgd constructor */
-+void pgd_ctor(void *pgd)
-+{
-+ /* PAE, kernel PMD may be shared */
-+
-+ if (SHARED_KERNEL_PMD) {
-+ clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
-+ swapper_pg_dir + USER_PTRS_PER_PGD,
-+ KERNEL_PGD_PTRS);
-+#ifndef CONFIG_XEN
-+ } else {
-+ unsigned long flags;
-+
-+ memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-+ spin_lock_irqsave(&pgd_lock, flags);
-+ pgd_list_add(pgd);
-+ spin_unlock_irqrestore(&pgd_lock, flags);
-+#endif
-+ }
-+}
-+#endif /* PTRS_PER_PMD */
-+
-+void pgd_dtor(void *pgd)
-+{
-+ unsigned long flags; /* can be called from interrupt context */
-+
-+ if (SHARED_KERNEL_PMD)
-+ return;
-+
-+ paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
-+ spin_lock_irqsave(&pgd_lock, flags);
-+ pgd_list_del(pgd);
-+ spin_unlock_irqrestore(&pgd_lock, flags);
-+
-+ pgd_test_and_unpin(pgd);
-+}
-+
-+#define UNSHARED_PTRS_PER_PGD \
-+ (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
-+
-+/* If we allocate a pmd for part of the kernel address space, then
-+ make sure it's initialized with the appropriate kernel mappings.
-+ Otherwise use a cached zeroed pmd. */
-+static pmd_t *pmd_cache_alloc(int idx)
-+{
-+ pmd_t *pmd;
-+
-+ if (idx >= USER_PTRS_PER_PGD) {
-+ pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
-+
-+#ifndef CONFIG_XEN
-+ if (pmd)
-+ memcpy(pmd,
-+ (void *)pgd_page_vaddr(swapper_pg_dir[idx]),
-+ sizeof(pmd_t) * PTRS_PER_PMD);
-+#endif
-+ } else
-+ pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
-+
-+ return pmd;
-+}
-+
-+static void pmd_cache_free(pmd_t *pmd, int idx)
-+{
-+ if (idx >= USER_PTRS_PER_PGD) {
-+ make_lowmem_page_writable(pmd, XENFEAT_writable_page_tables);
-+ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-+ free_page((unsigned long)pmd);
-+ } else
-+ kmem_cache_free(pmd_cache, pmd);
-+}
-+
-+pgd_t *pgd_alloc(struct mm_struct *mm)
-+{
-+ int i;
-+ pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
-+ pmd_t **pmds = NULL;
-+ unsigned long flags;
-+
-+ pgd_test_and_unpin(pgd);
-+
-+ if (PTRS_PER_PMD == 1 || !pgd)
-+ return pgd;
-+
-+#ifdef CONFIG_XEN
-+ if (!SHARED_KERNEL_PMD) {
-+ /*
-+ * We can race save/restore (if we sleep during a GFP_KERNEL memory
-+ * allocation). We therefore store virtual addresses of pmds as they
-+ * do not change across save/restore, and poke the machine addresses
-+ * into the pgdir under the pgd_lock.
-+ */
-+ pmds = kmalloc(PTRS_PER_PGD * sizeof(pmd_t *), GFP_KERNEL);
-+ if (!pmds) {
-+ quicklist_free(0, pgd_dtor, pgd);
-+ return NULL;
-+ }
-+ }
-+#endif
-+
-+ /* Allocate pmds, remember virtual addresses. */
-+ for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
-+ pmd_t *pmd = pmd_cache_alloc(i);
-+
-+ if (!pmd)
-+ goto out_oom;
-+
-+ paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
-+ if (pmds)
-+ pmds[i] = pmd;
-+ else
-+ set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
-+ }
-+
-+#ifdef CONFIG_XEN
-+ if (SHARED_KERNEL_PMD)
-+ return pgd;
-+
-+ spin_lock_irqsave(&pgd_lock, flags);
-+
-+ /* Protect against save/restore: move below 4GB under pgd_lock. */
-+ if (!xen_feature(XENFEAT_pae_pgdir_above_4gb)) {
-+ int rc = xen_create_contiguous_region(
-+ (unsigned long)pgd, 0, 32);
-+ if (rc) {
-+ spin_unlock_irqrestore(&pgd_lock, flags);
-+ goto out_oom;
-+ }
-+ }
-+
-+ /* Copy kernel pmd contents and write-protect the new pmds. */
-+ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
-+ unsigned long v = (unsigned long)i << PGDIR_SHIFT;
-+ pgd_t *kpgd = pgd_offset_k(v);
-+ pud_t *kpud = pud_offset(kpgd, v);
-+ pmd_t *kpmd = pmd_offset(kpud, v);
-+ memcpy(pmds[i], kpmd, PAGE_SIZE);
-+ make_lowmem_page_readonly(
-+ pmds[i], XENFEAT_writable_page_tables);
-+ }
-+
-+ /* It is safe to poke machine addresses of pmds under the pmd_lock. */
-+ for (i = 0; i < PTRS_PER_PGD; i++)
-+ set_pgd(&pgd[i], __pgd(1 + __pa(pmds[i])));
-+
-+ /* Ensure this pgd gets picked up and pinned on save/restore. */
-+ pgd_list_add(pgd);
-+
-+ spin_unlock_irqrestore(&pgd_lock, flags);
-+
-+ kfree(pmds);
-+#endif
-+
-+ return pgd;
-+
-+out_oom:
-+ if (!pmds) {
-+ for (i--; i >= 0; i--) {
-+ pgd_t pgdent = pgd[i];
-+ void* pmd = (void *)__va(pgd_val(pgdent)-1);
-+ paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
-+ pmd_cache_free(pmd, i);
-+ }
-+ } else {
-+ for (i--; i >= 0; i--) {
-+ paravirt_release_pd(__pa(pmds[i]) >> PAGE_SHIFT);
-+ pmd_cache_free(pmds[i], i);
-+ }
-+ kfree(pmds);
-+ }
-+ quicklist_free(0, pgd_dtor, pgd);
-+ return NULL;
-+}
-+
-+void pgd_free(pgd_t *pgd)
-+{
-+ int i;
-+
-+ /*
-+ * After this the pgd should not be pinned for the duration of this
-+ * function's execution. We should never sleep and thus never race:
-+ * 1. User pmds will not become write-protected under our feet due
-+ * to a concurrent mm_pin_all().
-+ * 2. The machine addresses in PGD entries will not become invalid
-+ * due to a concurrent save/restore.
-+ */
-+ pgd_test_and_unpin(pgd);
-+
-+ /* in the PAE case user pgd entries are overwritten before usage */
-+ if (PTRS_PER_PMD > 1) {
-+ if (!SHARED_KERNEL_PMD) {
-+ unsigned long flags;
-+ spin_lock_irqsave(&pgd_lock, flags);
-+ pgd_list_del(pgd);
-+ spin_unlock_irqrestore(&pgd_lock, flags);
-+ }
-+
-+ for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
-+ pgd_t pgdent = pgd[i];
-+ void* pmd = (void *)__va(pgd_val(pgdent)-1);
-+ paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
-+ pmd_cache_free(pmd, i);
-+ }
-+
-+ if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
-+ xen_destroy_contiguous_region((unsigned long)pgd, 0);
-+ }
-+
-+ /* in the non-PAE case, free_pgtables() clears user pgd entries */
-+ quicklist_free(0, pgd_dtor, pgd);
-+}
-+
-+void check_pgt_cache(void)
-+{
-+ quicklist_trim(0, pgd_dtor, 25, 16);
-+}
-+
-+void make_lowmem_page_readonly(void *va, unsigned int feature)
-+{
-+ pte_t *pte;
-+ int rc;
-+
-+ if (xen_feature(feature))
-+ return;
-+
-+ pte = virt_to_ptep(va);
-+ rc = HYPERVISOR_update_va_mapping(
-+ (unsigned long)va, pte_wrprotect(*pte), 0);
-+ BUG_ON(rc);
-+}
-+
-+void make_lowmem_page_writable(void *va, unsigned int feature)
-+{
-+ pte_t *pte;
-+ int rc;
-+
-+ if (xen_feature(feature))
-+ return;
-+
-+ pte = virt_to_ptep(va);
-+ rc = HYPERVISOR_update_va_mapping(
-+ (unsigned long)va, pte_mkwrite(*pte), 0);
-+ BUG_ON(rc);
-+}
-+
-+void make_page_readonly(void *va, unsigned int feature)
-+{
-+ pte_t *pte;
-+ int rc;
-+
-+ if (xen_feature(feature))
-+ return;
-+
-+ pte = virt_to_ptep(va);
-+ rc = HYPERVISOR_update_va_mapping(
-+ (unsigned long)va, pte_wrprotect(*pte), 0);
-+ if (rc) /* fallback? */
-+ xen_l1_entry_update(pte, pte_wrprotect(*pte));
-+ if ((unsigned long)va >= (unsigned long)high_memory) {
-+ unsigned long pfn = pte_pfn(*pte);
-+#ifdef CONFIG_HIGHMEM
-+ if (pfn >= highstart_pfn)
-+ kmap_flush_unused(); /* flush stale writable kmaps */
-+ else
-+#endif
-+ make_lowmem_page_readonly(
-+ phys_to_virt(pfn << PAGE_SHIFT), feature);
-+ }
-+}
-+
-+void make_page_writable(void *va, unsigned int feature)
-+{
-+ pte_t *pte;
-+ int rc;
-+
-+ if (xen_feature(feature))
-+ return;
-+
-+ pte = virt_to_ptep(va);
-+ rc = HYPERVISOR_update_va_mapping(
-+ (unsigned long)va, pte_mkwrite(*pte), 0);
-+ if (rc) /* fallback? */
-+ xen_l1_entry_update(pte, pte_mkwrite(*pte));
-+ if ((unsigned long)va >= (unsigned long)high_memory) {
-+ unsigned long pfn = pte_pfn(*pte);
-+#ifdef CONFIG_HIGHMEM
-+ if (pfn < highstart_pfn)
-+#endif
-+ make_lowmem_page_writable(
-+ phys_to_virt(pfn << PAGE_SHIFT), feature);
-+ }
-+}
-+
-+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature)
-+{
-+ if (xen_feature(feature))
-+ return;
-+
-+ while (nr-- != 0) {
-+ make_page_readonly(va, feature);
-+ va = (void *)((unsigned long)va + PAGE_SIZE);
-+ }
-+}
-+
-+void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
-+{
-+ if (xen_feature(feature))
-+ return;
-+
-+ while (nr-- != 0) {
-+ make_page_writable(va, feature);
-+ va = (void *)((unsigned long)va + PAGE_SIZE);
-+ }
-+}
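-+
-+/*
-+ * All of the page-protection helpers above short-circuit when the
-+ * hypervisor advertises the named feature (typically
-+ * XENFEAT_writable_page_tables): in that mode the hypervisor is expected
-+ * to trap and validate page-table writes itself, so the guest never has
-+ * to flip these mappings to read-only.
-+ */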
-+
-+static void _pin_lock(struct mm_struct *mm, int lock) {
-+ if (lock)
-+ spin_lock(&mm->page_table_lock);
-+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
-+ /* While mm->page_table_lock protects us against insertions and
-+ * removals of higher level page table pages, it doesn't protect
-+ * against updates of pte-s. Such updates, however, require the
-+ * pte pages to be in consistent state (unpinned+writable or
-+ * pinned+readonly). The pinning and attribute changes, however
-+ * cannot be done atomically, which is why such updates must be
-+ * prevented from happening concurrently.
-+ * Note that no pte lock can ever elsewhere be acquired nesting
-+ * with an already acquired one in the same mm, or with the mm's
-+ * page_table_lock already acquired, as that would break in the
-+ * non-split case (where all these are actually resolving to the
-+ * one page_table_lock). Thus acquiring all of them here is not
-+ * going to result in deadlocks, and the order of acquires
-+ * doesn't matter.
-+ */
-+ {
-+ pgd_t *pgd = mm->pgd;
-+ unsigned g;
-+
-+ for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
-+ pud_t *pud;
-+ unsigned u;
-+
-+ if (pgd_none(*pgd))
-+ continue;
-+ pud = pud_offset(pgd, 0);
-+ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
-+ pmd_t *pmd;
-+ unsigned m;
-+
-+ if (pud_none(*pud))
-+ continue;
-+ pmd = pmd_offset(pud, 0);
-+ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
-+ spinlock_t *ptl;
-+
-+ if (pmd_none(*pmd))
-+ continue;
-+ ptl = pte_lockptr(0, pmd);
-+ if (lock)
-+ spin_lock(ptl);
-+ else
-+ spin_unlock(ptl);
-+ }
-+ }
-+ }
-+ }
-+#endif
-+ if (!lock)
-+ spin_unlock(&mm->page_table_lock);
-+}
-+#define pin_lock(mm) _pin_lock(mm, 1)
-+#define pin_unlock(mm) _pin_lock(mm, 0)
-+
-+static inline void pgd_walk_set_prot(struct page *page, pgprot_t flags)
-+{
-+ unsigned long pfn = page_to_pfn(page);
-+ int rc;
-+
-+ if (PageHighMem(page)) {
-+ if (pgprot_val(flags) & _PAGE_RW)
-+ clear_bit(PG_pinned, &page->flags);
-+ else
-+ set_bit(PG_pinned, &page->flags);
-+ } else {
-+ rc = HYPERVISOR_update_va_mapping(
-+ (unsigned long)__va(pfn << PAGE_SHIFT),
-+ pfn_pte(pfn, flags), 0);
-+ if (rc)
-+ BUG();
-+ }
-+}
-+
-+static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
-+{
-+ pgd_t *pgd = pgd_base;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ int g, u, m, rc;
-+
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return;
-+
-+ for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
-+ if (pgd_none(*pgd))
-+ continue;
-+ pud = pud_offset(pgd, 0);
-+ if (PTRS_PER_PUD > 1) /* not folded */
-+ pgd_walk_set_prot(virt_to_page(pud),flags);
-+ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
-+ if (pud_none(*pud))
-+ continue;
-+ pmd = pmd_offset(pud, 0);
-+ if (PTRS_PER_PMD > 1) /* not folded */
-+ pgd_walk_set_prot(virt_to_page(pmd),flags);
-+ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
-+ if (pmd_none(*pmd))
-+ continue;
-+ pgd_walk_set_prot(pmd_page(*pmd),flags);
-+ }
-+ }
-+ }
-+
-+ rc = HYPERVISOR_update_va_mapping(
-+ (unsigned long)pgd_base,
-+ pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
-+ UVMF_TLB_FLUSH);
-+ if (rc)
-+ BUG();
-+}
-+
-+static void __pgd_pin(pgd_t *pgd)
-+{
-+ pgd_walk(pgd, PAGE_KERNEL_RO);
-+ kmap_flush_unused();
-+ xen_pgd_pin(__pa(pgd));
-+ set_bit(PG_pinned, &virt_to_page(pgd)->flags);
-+}
-+
-+static void __pgd_unpin(pgd_t *pgd)
-+{
-+ xen_pgd_unpin(__pa(pgd));
-+ pgd_walk(pgd, PAGE_KERNEL);
-+ clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
-+}
-+
-+static void pgd_test_and_unpin(pgd_t *pgd)
-+{
-+ if (test_bit(PG_pinned, &virt_to_page(pgd)->flags))
-+ __pgd_unpin(pgd);
-+}
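-+
-+/*
-+ * Pinning, as implemented above: pgd_walk() first remaps every reachable
-+ * page-table page read-only (PAGE_KERNEL_RO), stale writable kmaps are
-+ * flushed, and only then is the hypervisor asked to validate and pin the
-+ * top-level page via xen_pgd_pin(). Unpinning reverses the order so the
-+ * tables are never left writable while still pinned.
-+ */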
-+
-+void mm_pin(struct mm_struct *mm)
-+{
-+ if (xen_feature(XENFEAT_writable_page_tables))
-+ return;
-+ pin_lock(mm);
-+ __pgd_pin(mm->pgd);
-+ pin_unlock(mm);
-+}
-+
-+void mm_unpin(struct mm_struct *mm)
-+{
-+ if (xen_feature(XENFEAT_writable_page_tables))
-+ return;
-+ pin_lock(mm);
-+ __pgd_unpin(mm->pgd);
-+ pin_unlock(mm);
-+}
-+
-+void mm_pin_all(void)
-+{
-+ struct page *page;
-+ unsigned long flags;
-+
-+ if (xen_feature(XENFEAT_writable_page_tables))
-+ return;
-+
-+ /*
-+ * Allow uninterrupted access to the pgd_list. Also protects
-+ * __pgd_pin() by disabling preemption.
-+ * All other CPUs must be at a safe point (e.g., in stop_machine
-+ * or offlined entirely).
-+ */
-+ spin_lock_irqsave(&pgd_lock, flags);
-+ for (page = pgd_list; page; page = (struct page *)page->index) {
-+ if (!test_bit(PG_pinned, &page->flags))
-+ __pgd_pin((pgd_t *)page_address(page));
-+ }
-+ spin_unlock_irqrestore(&pgd_lock, flags);
-+}
-+
-+void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
-+{
-+ if (!test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags))
-+ mm_pin(mm);
-+}
-+
-+void arch_exit_mmap(struct mm_struct *mm)
-+{
-+ struct task_struct *tsk = current;
-+
-+ task_lock(tsk);
-+
-+ /*
-+ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
-+ * *much* faster this way, as no tlb flushes means bigger wrpt batches.
-+ */
-+ if (tsk->active_mm == mm) {
-+ tsk->active_mm = &init_mm;
-+ atomic_inc(&init_mm.mm_count);
-+
-+ switch_mm(mm, &init_mm, tsk);
-+
-+ atomic_dec(&mm->mm_count);
-+ BUG_ON(atomic_read(&mm->mm_count) == 0);
-+ }
-+
-+ task_unlock(tsk);
-+
-+ if (test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags) &&
-+ (atomic_read(&mm->mm_count) == 1) &&
-+ !mm->context.has_foreign_mappings)
-+ mm_unpin(mm);
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/oprofile/Makefile ubuntu-gutsy-xen/arch/i386/oprofile/Makefile
---- ubuntu-gutsy/arch/i386/oprofile/Makefile 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/oprofile/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -6,7 +6,14 @@
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-+ifdef CONFIG_XEN
-+XENOPROF_COMMON_OBJS = $(addprefix ../../../drivers/xen/xenoprof/, \
-+ xenoprofile.o)
-+oprofile-y := $(DRIVER_OBJS) \
-+ $(XENOPROF_COMMON_OBJS) xenoprof.o
-+else
- oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
- oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_athlon.o \
- op_model_ppro.o op_model_p4.o
- oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o
-+endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/oprofile/xenoprof.c ubuntu-gutsy-xen/arch/i386/oprofile/xenoprof.c
---- ubuntu-gutsy/arch/i386/oprofile/xenoprof.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/oprofile/xenoprof.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,179 @@
-+/**
-+ * @file xenoprof.c
-+ *
-+ * @remark Copyright 2002 OProfile authors
-+ * @remark Read the file COPYING
-+ *
-+ * @author John Levon <levon@movementarian.org>
-+ *
-+ * Modified by Aravind Menon and Jose Renato Santos for Xen
-+ * These modifications are:
-+ * Copyright (C) 2005 Hewlett-Packard Co.
-+ *
-+ * x86-specific part
-+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
-+ * VA Linux Systems Japan K.K.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/oprofile.h>
-+#include <linux/sched.h>
-+#include <asm/pgtable.h>
-+
-+#include <xen/driver_util.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/xenoprof.h>
-+#include <xen/xenoprof.h>
-+#include "op_counter.h"
-+
-+static unsigned int num_events = 0;
-+
-+void __init xenoprof_arch_init_counter(struct xenoprof_init *init)
-+{
-+ num_events = init->num_events;
-+ /* just in case - make sure we do not overflow event list
-+ (i.e. counter_config list) */
-+ if (num_events > OP_MAX_COUNTER) {
-+ num_events = OP_MAX_COUNTER;
-+ init->num_events = num_events;
-+ }
-+}
-+
-+void xenoprof_arch_counter(void)
-+{
-+ int i;
-+ struct xenoprof_counter counter;
-+
-+ for (i=0; i<num_events; i++) {
-+ counter.ind = i;
-+ counter.count = (uint64_t)counter_config[i].count;
-+ counter.enabled = (uint32_t)counter_config[i].enabled;
-+ counter.event = (uint32_t)counter_config[i].event;
-+ counter.kernel = (uint32_t)counter_config[i].kernel;
-+ counter.user = (uint32_t)counter_config[i].user;
-+ counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
-+ HYPERVISOR_xenoprof_op(XENOPROF_counter,
-+ &counter);
-+ }
-+}
-+
-+void xenoprof_arch_start(void)
-+{
-+ /* nothing */
-+}
-+
-+void xenoprof_arch_stop(void)
-+{
-+ /* nothing */
-+}
-+
-+void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer * sbuf)
-+{
-+ if (sbuf->buffer) {
-+ vunmap(sbuf->buffer);
-+ sbuf->buffer = NULL;
-+ }
-+}
-+
-+int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer * get_buffer,
-+ struct xenoprof_shared_buffer * sbuf)
-+{
-+ int npages, ret;
-+ struct vm_struct *area;
-+
-+ sbuf->buffer = NULL;
-+ if ( (ret = HYPERVISOR_xenoprof_op(XENOPROF_get_buffer, get_buffer)) )
-+ return ret;
-+
-+ npages = (get_buffer->bufsize * get_buffer->nbuf - 1) / PAGE_SIZE + 1;
-+
-+ area = alloc_vm_area(npages * PAGE_SIZE);
-+ if (area == NULL)
-+ return -ENOMEM;
-+
-+ if ( (ret = direct_kernel_remap_pfn_range(
-+ (unsigned long)area->addr,
-+ get_buffer->buf_gmaddr >> PAGE_SHIFT,
-+ npages * PAGE_SIZE, __pgprot(_KERNPG_TABLE),
-+ DOMID_SELF)) ) {
-+ vunmap(area->addr);
-+ return ret;
-+ }
-+
-+ sbuf->buffer = area->addr;
-+ return ret;
-+}
-+
-+int xenoprof_arch_set_passive(struct xenoprof_passive * pdomain,
-+ struct xenoprof_shared_buffer * sbuf)
-+{
-+ int ret;
-+ int npages;
-+ struct vm_struct *area;
-+ pgprot_t prot = __pgprot(_KERNPG_TABLE);
-+
-+ sbuf->buffer = NULL;
-+ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive, pdomain);
-+ if (ret)
-+ goto out;
-+
-+ npages = (pdomain->bufsize * pdomain->nbuf - 1) / PAGE_SIZE + 1;
-+
-+ area = alloc_vm_area(npages * PAGE_SIZE);
-+ if (area == NULL) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ ret = direct_kernel_remap_pfn_range(
-+ (unsigned long)area->addr,
-+ pdomain->buf_gmaddr >> PAGE_SHIFT,
-+ npages * PAGE_SIZE, prot, DOMID_SELF);
-+ if (ret) {
-+ vunmap(area->addr);
-+ goto out;
-+ }
-+ sbuf->buffer = area->addr;
-+
-+out:
-+ return ret;
-+}
-+
-+struct op_counter_config counter_config[OP_MAX_COUNTER];
-+
-+int xenoprof_create_files(struct super_block * sb, struct dentry * root)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; i < num_events; ++i) {
-+ struct dentry * dir;
-+ char buf[2];
-+
-+ snprintf(buf, 2, "%d", i);
-+ dir = oprofilefs_mkdir(sb, root, buf);
-+ oprofilefs_create_ulong(sb, dir, "enabled",
-+ &counter_config[i].enabled);
-+ oprofilefs_create_ulong(sb, dir, "event",
-+ &counter_config[i].event);
-+ oprofilefs_create_ulong(sb, dir, "count",
-+ &counter_config[i].count);
-+ oprofilefs_create_ulong(sb, dir, "unit_mask",
-+ &counter_config[i].unit_mask);
-+ oprofilefs_create_ulong(sb, dir, "kernel",
-+ &counter_config[i].kernel);
-+ oprofilefs_create_ulong(sb, dir, "user",
-+ &counter_config[i].user);
-+ }
-+
-+ return 0;
-+}
-+
-+int __init oprofile_arch_init(struct oprofile_operations * ops)
-+{
-+ return xenoprofile_init(ops);
-+}
-+
-+void oprofile_arch_exit(void)
-+{
-+ xenoprofile_exit();
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/pci/irq-xen.c ubuntu-gutsy-xen/arch/i386/pci/irq-xen.c
---- ubuntu-gutsy/arch/i386/pci/irq-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/pci/irq-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,1175 @@
-+/*
-+ * Low-Level PCI Support for PC -- Routing of Interrupts
-+ *
-+ * (c) 1999--2000 Martin Mares <mj@ucw.cz>
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include <linux/init.h>
-+#include <linux/slab.h>
-+#include <linux/interrupt.h>
-+#include <linux/dmi.h>
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/io_apic.h>
-+#include <linux/irq.h>
-+#include <linux/acpi.h>
-+
-+#include "pci.h"
-+
-+#define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
-+#define PIRQ_VERSION 0x0100
-+
-+static int broken_hp_bios_irq9;
-+static int acer_tm360_irqrouting;
-+
-+static struct irq_routing_table *pirq_table;
-+
-+static int pirq_enable_irq(struct pci_dev *dev);
-+
-+/*
-+ * Never use: 0, 1, 2 (timer, keyboard, and cascade)
-+ * Avoid using: 13, 14 and 15 (FP error and IDE).
-+ * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
-+ */
-+unsigned int pcibios_irq_mask = 0xfff8;
-+
-+static int pirq_penalty[16] = {
-+ 1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
-+ 0, 0, 0, 0, 1000, 100000, 100000, 100000
-+};
-+
-+struct irq_router {
-+ char *name;
-+ u16 vendor, device;
-+ int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
-+ int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new);
-+};
-+
-+struct irq_router_handler {
-+ u16 vendor;
-+ int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
-+};
-+
-+int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
-+void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
-+
-+/*
-+ * Check passed address for the PCI IRQ Routing Table signature
-+ * and perform checksum verification.
-+ */
-+
-+static inline struct irq_routing_table * pirq_check_routing_table(u8 *addr)
-+{
-+ struct irq_routing_table *rt;
-+ int i;
-+ u8 sum;
-+
-+ rt = (struct irq_routing_table *) addr;
-+ if (rt->signature != PIRQ_SIGNATURE ||
-+ rt->version != PIRQ_VERSION ||
-+ rt->size % 16 ||
-+ rt->size < sizeof(struct irq_routing_table))
-+ return NULL;
-+ sum = 0;
-+ for (i=0; i < rt->size; i++)
-+ sum += addr[i];
-+ if (!sum) {
-+ DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%p\n", rt);
-+ return rt;
-+ }
-+ return NULL;
-+}
-+
-+
-+
-+/*
-+ * Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table.
-+ */
-+
-+static struct irq_routing_table * __init pirq_find_routing_table(void)
-+{
-+ u8 *addr;
-+ struct irq_routing_table *rt;
-+
-+#ifdef CONFIG_XEN
-+ if (!is_initial_xendomain())
-+ return NULL;
-+#endif
-+ if (pirq_table_addr) {
-+ rt = pirq_check_routing_table((u8 *) isa_bus_to_virt(pirq_table_addr));
-+ if (rt)
-+ return rt;
-+ printk(KERN_WARNING "PCI: PIRQ table NOT found at pirqaddr\n");
-+ }
-+ for(addr = (u8 *) isa_bus_to_virt(0xf0000); addr < (u8 *) isa_bus_to_virt(0x100000); addr += 16) {
-+ rt = pirq_check_routing_table(addr);
-+ if (rt)
-+ return rt;
-+ }
-+ return NULL;
-+}
-+
-+/*
-+ * If we have a IRQ routing table, use it to search for peer host
-+ * bridges. It's a gross hack, but since there are no other known
-+ * ways how to get a list of buses, we have to go this way.
-+ */
-+
-+static void __init pirq_peer_trick(void)
-+{
-+ struct irq_routing_table *rt = pirq_table;
-+ u8 busmap[256];
-+ int i;
-+ struct irq_info *e;
-+
-+ memset(busmap, 0, sizeof(busmap));
-+ for(i=0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) {
-+ e = &rt->slots[i];
-+#ifdef DEBUG
-+ {
-+ int j;
-+ DBG(KERN_DEBUG "%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot);
-+ for(j=0; j<4; j++)
-+ DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap);
-+ DBG("\n");
-+ }
-+#endif
-+ busmap[e->bus] = 1;
-+ }
-+ for(i = 1; i < 256; i++) {
-+ if (!busmap[i] || pci_find_bus(0, i))
-+ continue;
-+ if (pci_scan_bus(i, &pci_root_ops, NULL))
-+ printk(KERN_INFO "PCI: Discovered primary peer bus %02x [IRQ]\n", i);
-+ }
-+ pcibios_last_bus = -1;
-+}
-+
-+/*
-+ * Code for querying and setting of IRQ routes on various interrupt routers.
-+ */
-+
-+void eisa_set_level_irq(unsigned int irq)
-+{
-+ unsigned char mask = 1 << (irq & 7);
-+ unsigned int port = 0x4d0 + (irq >> 3);
-+ unsigned char val;
-+ static u16 eisa_irq_mask;
-+
-+ if (irq >= 16 || (1 << irq) & eisa_irq_mask)
-+ return;
-+
-+ eisa_irq_mask |= (1 << irq);
-+ printk(KERN_DEBUG "PCI: setting IRQ %u as level-triggered\n", irq);
-+ val = inb(port);
-+ if (!(val & mask)) {
-+ DBG(KERN_DEBUG " -> edge");
-+ outb(val | mask, port);
-+ }
-+}
-+
-+/*
-+ * Common IRQ routing practice: nybbles in config space,
-+ * offset by some magic constant.
-+ */
-+static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
-+{
-+ u8 x;
-+ unsigned reg = offset + (nr >> 1);
-+
-+ pci_read_config_byte(router, reg, &x);
-+ return (nr & 1) ? (x >> 4) : (x & 0xf);
-+}
-+
-+static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val)
-+{
-+ u8 x;
-+ unsigned reg = offset + (nr >> 1);
-+
-+ pci_read_config_byte(router, reg, &x);
-+ x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val);
-+ pci_write_config_byte(router, reg, x);
-+}
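-+
-+/*
-+ * Packing sketch: two links share one config byte, so for a router whose
-+ * nybbles start at offset 0x55 (as the VIA code below uses) this works
-+ * out to
-+ *
-+ *	read_config_nybble(router, 0x55, 0)  ->  reg 0x55, bits 3:0
-+ *	read_config_nybble(router, 0x55, 1)  ->  reg 0x55, bits 7:4
-+ *	read_config_nybble(router, 0x55, 2)  ->  reg 0x56, bits 3:0
-+ */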
-+
-+/*
-+ * ALI pirq entries are damn ugly, and completely undocumented.
-+ * This has been figured out from pirq tables, and it's not a pretty
-+ * picture.
-+ */
-+static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
-+
-+ return irqmap[read_config_nybble(router, 0x48, pirq-1)];
-+}
-+
-+static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
-+ unsigned int val = irqmap[irq];
-+
-+ if (val) {
-+ write_config_nybble(router, 0x48, pirq-1, val);
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * The Intel PIIX4 pirq rules are fairly simple: "pirq" is
-+ * just a pointer to the config space.
-+ */
-+static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ u8 x;
-+
-+ pci_read_config_byte(router, pirq, &x);
-+ return (x < 16) ? x : 0;
-+}
-+
-+static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ pci_write_config_byte(router, pirq, irq);
-+ return 1;
-+}
-+
-+/*
-+ * The VIA pirq rules are nibble-based, like ALI,
-+ * but without the ugly irq number munging.
-+ * However, PIRQD is in the upper instead of lower 4 bits.
-+ */
-+static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ return read_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq);
-+}
-+
-+static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ write_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq, irq);
-+ return 1;
-+}
-+
-+/*
-+ * The VIA pirq rules are nibble-based, like ALI,
-+ * but without the ugly irq number munging.
-+ * However, for the 82C586, the nibble map is different.
-+ */
-+static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ static const unsigned int pirqmap[4] = { 3, 2, 5, 1 };
-+ return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
-+}
-+
-+static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ static const unsigned int pirqmap[4] = { 3, 2, 5, 1 };
-+ write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
-+ return 1;
-+}
-+
-+/*
-+ * ITE 8330G pirq rules are nibble-based
-+ * FIXME: pirqmap may be { 1, 0, 3, 2 },
-+ * 2+3 are both mapped to irq 9 on my system
-+ */
-+static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
-+ return read_config_nybble(router,0x43, pirqmap[pirq-1]);
-+}
-+
-+static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
-+ write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
-+ return 1;
-+}
-+
-+/*
-+ * OPTI: high four bits are nibble pointer..
-+ * I wonder what the low bits do?
-+ */
-+static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ return read_config_nybble(router, 0xb8, pirq >> 4);
-+}
-+
-+static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ write_config_nybble(router, 0xb8, pirq >> 4, irq);
-+ return 1;
-+}
-+
-+/*
-+ * Cyrix: nibble offset 0x5C
-+ * 0x5C bits 7:4 is INTB bits 3:0 is INTA
-+ * 0x5D bits 7:4 is INTD bits 3:0 is INTC
-+ */
-+static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ return read_config_nybble(router, 0x5C, (pirq-1)^1);
-+}
-+
-+static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ write_config_nybble(router, 0x5C, (pirq-1)^1, irq);
-+ return 1;
-+}
-+
-+/*
-+ * PIRQ routing for SiS 85C503 router used in several SiS chipsets.
-+ * We have to deal with the following issues here:
-+ * - vendors have different ideas about the meaning of link values
-+ * - some onboard devices (integrated in the chipset) have special
-+ * links and are thus routed differently (i.e. not via PCI INTA-INTD)
-+ * - different revision of the router have a different layout for
-+ * the routing registers, particularly for the onchip devices
-+ *
-+ * For all routing registers the common thing is we have one byte
-+ * per routeable link which is defined as:
-+ * bit 7 IRQ mapping enabled (0) or disabled (1)
-+ * bits [6:4] reserved (sometimes used for onchip devices)
-+ * bits [3:0] IRQ to map to
-+ * allowed: 3-7, 9-12, 14-15
-+ * reserved: 0, 1, 2, 8, 13
-+ *
-+ * The config-space registers located at 0x41/0x42/0x43/0x44 are
-+ * always used to route the normal PCI INT A/B/C/D respectively.
-+ * Apparently there are systems implementing PCI routing table using
-+ * link values 0x01-0x04 and others using 0x41-0x44 for PCI INTA..D.
-+ * We try our best to handle both link mappings.
-+ *
-+ * Currently (2003-05-21) it appears most SiS chipsets follow the
-+ * definition of routing registers from the SiS-5595 southbridge.
-+ * According to the SiS 5595 datasheets the revision id's of the
-+ * router (ISA-bridge) should be 0x01 or 0xb0.
-+ *
-+ * Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1.
-+ * Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets.
-+ * They seem to work with the current routing code. However there is
-+ * some concern because of the two USB-OHCI HCs (original SiS 5595
-+ * had only one). YMMV.
-+ *
-+ * Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1:
-+ *
-+ * 0x61: IDEIRQ:
-+ * bits [6:5] must be written 01
-+ * bit 4 channel-select primary (0), secondary (1)
-+ *
-+ * 0x62: USBIRQ:
-+ * bit 6 OHCI function disabled (0), enabled (1)
-+ *
-+ * 0x6a: ACPI/SCI IRQ: bits 4-6 reserved
-+ *
-+ * 0x7e: Data Acq. Module IRQ - bits 4-6 reserved
-+ *
-+ * We support USBIRQ (in addition to INTA-INTD) and keep the
-+ * IDE, ACPI and DAQ routing untouched as set by the BIOS.
-+ *
-+ * Currently the only reported exception is the new SiS 65x chipset
-+ * which includes the SiS 69x southbridge. Here we have the 85C503
-+ * router revision 0x04 and there are changes in the register layout
-+ * mostly related to the different USB HCs with USB 2.0 support.
-+ *
-+ * Onchip routing for router rev-id 0x04 (try-and-error observation)
-+ *
-+ * 0x60/0x61/0x62/0x63: 1xEHCI and 3xOHCI (companion) USB-HCs
-+ * bit 6-4 are probably unused, not like 5595
-+ */
-+
-+#define PIRQ_SIS_IRQ_MASK 0x0f
-+#define PIRQ_SIS_IRQ_DISABLE 0x80
-+#define PIRQ_SIS_USB_ENABLE 0x40
-+
-+static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ u8 x;
-+ int reg;
-+
-+ reg = pirq;
-+ if (reg >= 0x01 && reg <= 0x04)
-+ reg += 0x40;
-+ pci_read_config_byte(router, reg, &x);
-+ return (x & PIRQ_SIS_IRQ_DISABLE) ? 0 : (x & PIRQ_SIS_IRQ_MASK);
-+}
-+
-+static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ u8 x;
-+ int reg;
-+
-+ reg = pirq;
-+ if (reg >= 0x01 && reg <= 0x04)
-+ reg += 0x40;
-+ pci_read_config_byte(router, reg, &x);
-+ x &= ~(PIRQ_SIS_IRQ_MASK | PIRQ_SIS_IRQ_DISABLE);
-+ x |= irq ? irq: PIRQ_SIS_IRQ_DISABLE;
-+ pci_write_config_byte(router, reg, x);
-+ return 1;
-+}
-+
-+
-+/*
-+ * VLSI: nibble offset 0x74 - educated guess due to routing table and
-+ * config space of VLSI 82C534 PCI-bridge/router (1004:0102)
-+ * Tested on HP OmniBook 800 covering PIRQ 1, 2, 4, 8 for onboard
-+ * devices, PIRQ 3 for non-pci(!) soundchip and (untested) PIRQ 6
-+ * for the busbridge to the docking station.
-+ */
-+
-+static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ if (pirq > 8) {
-+ printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
-+ return 0;
-+ }
-+ return read_config_nybble(router, 0x74, pirq-1);
-+}
-+
-+static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ if (pirq > 8) {
-+ printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
-+ return 0;
-+ }
-+ write_config_nybble(router, 0x74, pirq-1, irq);
-+ return 1;
-+}
-+
-+/*
-+ * ServerWorks: PCI interrupts mapped to system IRQ lines through Index
-+ * and Redirect I/O registers (0x0c00 and 0x0c01). The Index register
-+ * format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a. The Redirect
-+ * register is a straight binary coding of desired PIC IRQ (low nibble).
-+ *
-+ * The 'link' value in the PIRQ table is already in the correct format
-+ * for the Index register. There are some special index values:
-+ * 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1,
-+ * and 0x03 for SMBus.
-+ */
-+static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ outb_p(pirq, 0xc00);
-+ return inb(0xc01) & 0xf;
-+}
-+
-+static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ outb_p(pirq, 0xc00);
-+ outb_p(irq, 0xc01);
-+ return 1;
-+}
-+
-+/* Support for AMD756 PCI IRQ Routing
-+ * Jhon H. Caicedo <jhcaiced@osso.org.co>
-+ * Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... (jhcaiced)
-+ * Jun/19/2001 Alpha Release 0.1.0 (jhcaiced)
-+ * The AMD756 pirq rules are nibble-based
-+ * offset 0x56 0-3 PIRQA 4-7 PIRQB
-+ * offset 0x57 0-3 PIRQC 4-7 PIRQD
-+ */
-+static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ u8 irq;
-+ irq = 0;
-+ if (pirq <= 4)
-+ {
-+ irq = read_config_nybble(router, 0x56, pirq - 1);
-+ }
-+ printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n",
-+ dev->vendor, dev->device, pirq, irq);
-+ return irq;
-+}
-+
-+static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n",
-+ dev->vendor, dev->device, pirq, irq);
-+ if (pirq <= 4)
-+ {
-+ write_config_nybble(router, 0x56, pirq - 1, irq);
-+ }
-+ return 1;
-+}
-+
-+#ifdef CONFIG_PCI_BIOS
-+
-+static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ struct pci_dev *bridge;
-+ int pin = pci_get_interrupt_pin(dev, &bridge);
-+ return pcibios_set_irq_routing(bridge, pin, irq);
-+}
-+
-+#endif
-+
-+static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ static struct pci_device_id __initdata pirq_440gx[] = {
-+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
-+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
-+ { },
-+ };
-+
-+ /* 440GX has a proprietary PIRQ router -- don't use it */
-+ if (pci_dev_present(pirq_440gx))
-+ return 0;
-+
-+ switch(device)
-+ {
-+ case PCI_DEVICE_ID_INTEL_82371FB_0:
-+ case PCI_DEVICE_ID_INTEL_82371SB_0:
-+ case PCI_DEVICE_ID_INTEL_82371AB_0:
-+ case PCI_DEVICE_ID_INTEL_82371MX:
-+ case PCI_DEVICE_ID_INTEL_82443MX_0:
-+ case PCI_DEVICE_ID_INTEL_82801AA_0:
-+ case PCI_DEVICE_ID_INTEL_82801AB_0:
-+ case PCI_DEVICE_ID_INTEL_82801BA_0:
-+ case PCI_DEVICE_ID_INTEL_82801BA_10:
-+ case PCI_DEVICE_ID_INTEL_82801CA_0:
-+ case PCI_DEVICE_ID_INTEL_82801CA_12:
-+ case PCI_DEVICE_ID_INTEL_82801DB_0:
-+ case PCI_DEVICE_ID_INTEL_82801E_0:
-+ case PCI_DEVICE_ID_INTEL_82801EB_0:
-+ case PCI_DEVICE_ID_INTEL_ESB_1:
-+ case PCI_DEVICE_ID_INTEL_ICH6_0:
-+ case PCI_DEVICE_ID_INTEL_ICH6_1:
-+ case PCI_DEVICE_ID_INTEL_ICH7_0:
-+ case PCI_DEVICE_ID_INTEL_ICH7_1:
-+ case PCI_DEVICE_ID_INTEL_ICH7_30:
-+ case PCI_DEVICE_ID_INTEL_ICH7_31:
-+ case PCI_DEVICE_ID_INTEL_ESB2_0:
-+ case PCI_DEVICE_ID_INTEL_ICH8_0:
-+ case PCI_DEVICE_ID_INTEL_ICH8_1:
-+ case PCI_DEVICE_ID_INTEL_ICH8_2:
-+ case PCI_DEVICE_ID_INTEL_ICH8_3:
-+ case PCI_DEVICE_ID_INTEL_ICH8_4:
-+ case PCI_DEVICE_ID_INTEL_ICH9_0:
-+ case PCI_DEVICE_ID_INTEL_ICH9_1:
-+ case PCI_DEVICE_ID_INTEL_ICH9_2:
-+ case PCI_DEVICE_ID_INTEL_ICH9_3:
-+ case PCI_DEVICE_ID_INTEL_ICH9_4:
-+ case PCI_DEVICE_ID_INTEL_ICH9_5:
-+ r->name = "PIIX/ICH";
-+ r->get = pirq_piix_get;
-+ r->set = pirq_piix_set;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static __init int via_router_probe(struct irq_router *r,
-+ struct pci_dev *router, u16 device)
-+{
-+ /* FIXME: We should move some of the quirk fixup stuff here */
-+
-+ /*
-+ * work arounds for some buggy BIOSes
-+ */
-+ if (device == PCI_DEVICE_ID_VIA_82C586_0) {
-+ switch(router->device) {
-+ case PCI_DEVICE_ID_VIA_82C686:
-+ /*
-+ * Asus k7m bios wrongly reports 82C686A
-+ * as 586-compatible
-+ */
-+ device = PCI_DEVICE_ID_VIA_82C686;
-+ break;
-+ case PCI_DEVICE_ID_VIA_8235:
-+ /**
-+ * Asus a7v-x bios wrongly reports 8235
-+ * as 586-compatible
-+ */
-+ device = PCI_DEVICE_ID_VIA_8235;
-+ break;
-+ }
-+ }
-+
-+ switch(device) {
-+ case PCI_DEVICE_ID_VIA_82C586_0:
-+ r->name = "VIA";
-+ r->get = pirq_via586_get;
-+ r->set = pirq_via586_set;
-+ return 1;
-+ case PCI_DEVICE_ID_VIA_82C596:
-+ case PCI_DEVICE_ID_VIA_82C686:
-+ case PCI_DEVICE_ID_VIA_8231:
-+ case PCI_DEVICE_ID_VIA_8233A:
-+ case PCI_DEVICE_ID_VIA_8235:
-+ case PCI_DEVICE_ID_VIA_8237:
-+ /* FIXME: add new ones for 8233/5 */
-+ r->name = "VIA";
-+ r->get = pirq_via_get;
-+ r->set = pirq_via_set;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ switch(device)
-+ {
-+ case PCI_DEVICE_ID_VLSI_82C534:
-+ r->name = "VLSI 82C534";
-+ r->get = pirq_vlsi_get;
-+ r->set = pirq_vlsi_set;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+
-+static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ switch(device)
-+ {
-+ case PCI_DEVICE_ID_SERVERWORKS_OSB4:
-+ case PCI_DEVICE_ID_SERVERWORKS_CSB5:
-+ r->name = "ServerWorks";
-+ r->get = pirq_serverworks_get;
-+ r->set = pirq_serverworks_set;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static __init int sis_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ if (device != PCI_DEVICE_ID_SI_503)
-+ return 0;
-+
-+ r->name = "SIS";
-+ r->get = pirq_sis_get;
-+ r->set = pirq_sis_set;
-+ return 1;
-+}
-+
-+static __init int cyrix_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ switch(device)
-+ {
-+ case PCI_DEVICE_ID_CYRIX_5520:
-+ r->name = "NatSemi";
-+ r->get = pirq_cyrix_get;
-+ r->set = pirq_cyrix_set;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static __init int opti_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ switch(device)
-+ {
-+ case PCI_DEVICE_ID_OPTI_82C700:
-+ r->name = "OPTI";
-+ r->get = pirq_opti_get;
-+ r->set = pirq_opti_set;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ switch(device)
-+ {
-+ case PCI_DEVICE_ID_ITE_IT8330G_0:
-+ r->name = "ITE";
-+ r->get = pirq_ite_get;
-+ r->set = pirq_ite_set;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ switch(device)
-+ {
-+ case PCI_DEVICE_ID_AL_M1533:
-+ case PCI_DEVICE_ID_AL_M1563:
-+ printk(KERN_DEBUG "PCI: Using ALI IRQ Router\n");
-+ r->name = "ALI";
-+ r->get = pirq_ali_get;
-+ r->set = pirq_ali_set;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static __init int amd_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ switch(device)
-+ {
-+ case PCI_DEVICE_ID_AMD_VIPER_740B:
-+ r->name = "AMD756";
-+ break;
-+ case PCI_DEVICE_ID_AMD_VIPER_7413:
-+ r->name = "AMD766";
-+ break;
-+ case PCI_DEVICE_ID_AMD_VIPER_7443:
-+ r->name = "AMD768";
-+ break;
-+ default:
-+ return 0;
-+ }
-+ r->get = pirq_amd756_get;
-+ r->set = pirq_amd756_set;
-+ return 1;
-+}
-+
-+static __initdata struct irq_router_handler pirq_routers[] = {
-+ { PCI_VENDOR_ID_INTEL, intel_router_probe },
-+ { PCI_VENDOR_ID_AL, ali_router_probe },
-+ { PCI_VENDOR_ID_ITE, ite_router_probe },
-+ { PCI_VENDOR_ID_VIA, via_router_probe },
-+ { PCI_VENDOR_ID_OPTI, opti_router_probe },
-+ { PCI_VENDOR_ID_SI, sis_router_probe },
-+ { PCI_VENDOR_ID_CYRIX, cyrix_router_probe },
-+ { PCI_VENDOR_ID_VLSI, vlsi_router_probe },
-+ { PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe },
-+ { PCI_VENDOR_ID_AMD, amd_router_probe },
-+ /* Someone with docs needs to add the ATI Radeon IGP */
-+ { 0, NULL }
-+};
-+static struct irq_router pirq_router;
-+static struct pci_dev *pirq_router_dev;
-+
-+
-+/*
-+ * FIXME: should we have an option to say "generic for
-+ * chipset" ?
-+ */
-+
-+static void __init pirq_find_router(struct irq_router *r)
-+{
-+ struct irq_routing_table *rt = pirq_table;
-+ struct irq_router_handler *h;
-+
-+#ifdef CONFIG_PCI_BIOS
-+ if (!rt->signature) {
-+ printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n");
-+ r->set = pirq_bios_set;
-+ r->name = "BIOS";
-+ return;
-+ }
-+#endif
-+
-+ /* Default unless a driver reloads it */
-+ r->name = "default";
-+ r->get = NULL;
-+ r->set = NULL;
-+
-+ DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for %04x:%04x\n",
-+ rt->rtr_vendor, rt->rtr_device);
-+
-+ pirq_router_dev = pci_get_bus_and_slot(rt->rtr_bus, rt->rtr_devfn);
-+ if (!pirq_router_dev) {
-+ DBG(KERN_DEBUG "PCI: Interrupt router not found at "
-+ "%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
-+ return;
-+ }
-+
-+ for( h = pirq_routers; h->vendor; h++) {
-+ /* First look for a router match */
-+ if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device))
-+ break;
-+ /* Fall back to a device match */
-+ if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device))
-+ break;
-+ }
-+ printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n",
-+ pirq_router.name,
-+ pirq_router_dev->vendor,
-+ pirq_router_dev->device,
-+ pci_name(pirq_router_dev));
-+
-+ /* The device remains referenced for the kernel lifetime */
-+}
-+
-+static struct irq_info *pirq_get_info(struct pci_dev *dev)
-+{
-+ struct irq_routing_table *rt = pirq_table;
-+ int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
-+ struct irq_info *info;
-+
-+ for (info = rt->slots; entries--; info++)
-+ if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn))
-+ return info;
-+ return NULL;
-+}
-+
-+static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
-+{
-+ u8 pin;
-+ struct irq_info *info;
-+ int i, pirq, newirq;
-+ int irq = 0;
-+ u32 mask;
-+ struct irq_router *r = &pirq_router;
-+ struct pci_dev *dev2 = NULL;
-+ char *msg = NULL;
-+
-+ /* Find IRQ pin */
-+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
-+ if (!pin) {
-+ DBG(KERN_DEBUG " -> no interrupt pin\n");
-+ return 0;
-+ }
-+ pin = pin - 1;
-+
-+ /* Find IRQ routing entry */
-+
-+ if (!pirq_table)
-+ return 0;
-+
-+ DBG(KERN_DEBUG "IRQ for %s[%c]", pci_name(dev), 'A' + pin);
-+ info = pirq_get_info(dev);
-+ if (!info) {
-+ DBG(" -> not found in routing table\n" KERN_DEBUG);
-+ return 0;
-+ }
-+ pirq = info->irq[pin].link;
-+ mask = info->irq[pin].bitmap;
-+ if (!pirq) {
-+ DBG(" -> not routed\n" KERN_DEBUG);
-+ return 0;
-+ }
-+ DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs);
-+ mask &= pcibios_irq_mask;
-+
-+ /* Work around broken HP Pavilion Notebooks which assign USB to
-+ IRQ 9 even though it is actually wired to IRQ 11 */
-+
-+ if (broken_hp_bios_irq9 && pirq == 0x59 && dev->irq == 9) {
-+ dev->irq = 11;
-+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
-+ r->set(pirq_router_dev, dev, pirq, 11);
-+ }
-+
-+ /* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */
-+ if (acer_tm360_irqrouting && dev->irq == 11 && dev->vendor == PCI_VENDOR_ID_O2) {
-+ pirq = 0x68;
-+ mask = 0x400;
-+ dev->irq = r->get(pirq_router_dev, dev, pirq);
-+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
-+ }
-+
-+ /*
-+ * Find the best IRQ to assign: use the one
-+ * reported by the device if possible.
-+ */
-+ newirq = dev->irq;
-+ if (newirq && !((1 << newirq) & mask)) {
-+ if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0;
-+ else printk("\n" KERN_WARNING
-+ "PCI: IRQ %i for device %s doesn't match PIRQ mask "
-+ "- try pci=usepirqmask\n" KERN_DEBUG, newirq,
-+ pci_name(dev));
-+ }
-+ if (!newirq && assign) {
-+ for (i = 0; i < 16; i++) {
-+ if (!(mask & (1 << i)))
-+ continue;
-+ if (pirq_penalty[i] < pirq_penalty[newirq] && can_request_irq(i, IRQF_SHARED))
-+ newirq = i;
-+ }
-+ }
-+ DBG(" -> newirq=%d", newirq);
-+
-+ /* Check if it is hardcoded */
-+ if ((pirq & 0xf0) == 0xf0) {
-+ irq = pirq & 0xf;
-+ DBG(" -> hardcoded IRQ %d\n", irq);
-+ msg = "Hardcoded";
-+ } else if ( r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \
-+ ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask)) ) {
-+ DBG(" -> got IRQ %d\n", irq);
-+ msg = "Found";
-+ eisa_set_level_irq(irq);
-+ } else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
-+ DBG(" -> assigning IRQ %d", newirq);
-+ if (r->set(pirq_router_dev, dev, pirq, newirq)) {
-+ eisa_set_level_irq(newirq);
-+ DBG(" ... OK\n");
-+ msg = "Assigned";
-+ irq = newirq;
-+ }
-+ }
-+
-+ if (!irq) {
-+ DBG(" ... failed\n");
-+ if (newirq && mask == (1 << newirq)) {
-+ msg = "Guessed";
-+ irq = newirq;
-+ } else
-+ return 0;
-+ }
-+ printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, pci_name(dev));
-+
-+ /* Update IRQ for all devices with the same pirq value */
-+ while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) {
-+ pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin);
-+ if (!pin)
-+ continue;
-+ pin--;
-+ info = pirq_get_info(dev2);
-+ if (!info)
-+ continue;
-+ if (info->irq[pin].link == pirq) {
-+ /* We refuse to override the dev->irq information. Give a warning! */
-+ if ( dev2->irq && dev2->irq != irq && \
-+ (!(pci_probe & PCI_USE_PIRQ_MASK) || \
-+ ((1 << dev2->irq) & mask)) ) {
-+#ifndef CONFIG_PCI_MSI
-+ printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n",
-+ pci_name(dev2), dev2->irq, irq);
-+#endif
-+ continue;
-+ }
-+ dev2->irq = irq;
-+ pirq_penalty[irq]++;
-+ if (dev != dev2)
-+ printk(KERN_INFO "PCI: Sharing IRQ %d with %s\n", irq, pci_name(dev2));
-+ }
-+ }
-+ return 1;
-+}
-+
-+static void __init pcibios_fixup_irqs(void)
-+{
-+ struct pci_dev *dev = NULL;
-+ u8 pin;
-+
-+ DBG(KERN_DEBUG "PCI: IRQ fixup\n");
-+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-+ /*
-+ * If the BIOS has set an out of range IRQ number, just ignore it.
-+ * Also keep track of which IRQ's are already in use.
-+ */
-+ if (dev->irq >= 16) {
-+ DBG(KERN_DEBUG "%s: ignoring bogus IRQ %d\n", pci_name(dev), dev->irq);
-+ dev->irq = 0;
-+ }
-+ /* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */
-+ if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000)
-+ pirq_penalty[dev->irq] = 0;
-+ pirq_penalty[dev->irq]++;
-+ }
-+
-+ dev = NULL;
-+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
-+#ifdef CONFIG_X86_IO_APIC
-+ /*
-+ * Recalculate IRQ numbers if we use the I/O APIC.
-+ */
-+ if (io_apic_assign_pci_irqs)
-+ {
-+ int irq;
-+
-+ if (pin) {
-+ pin--; /* interrupt pins are numbered starting from 1 */
-+ irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
-+ /*
-+ * Busses behind bridges are typically not listed in the MP-table.
-+ * In this case we have to look up the IRQ based on the parent bus,
-+ * parent slot, and pin number. The SMP code detects such bridged
-+ * busses itself so we should get into this branch reliably.
-+ */
-+ if (irq < 0 && dev->bus->parent) { /* go back to the bridge */
-+ struct pci_dev * bridge = dev->bus->self;
-+
-+ pin = (pin + PCI_SLOT(dev->devfn)) % 4;
-+ irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
-+ PCI_SLOT(bridge->devfn), pin);
-+ if (irq >= 0)
-+ printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
-+ pci_name(bridge), 'A' + pin, irq);
-+ }
-+ if (irq >= 0) {
-+ printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
-+ pci_name(dev), 'A' + pin, irq);
-+ dev->irq = irq;
-+ }
-+ }
-+ }
-+#endif
-+ /*
-+ * Still no IRQ? Try to lookup one...
-+ */
-+ if (pin && !dev->irq)
-+ pcibios_lookup_irq(dev, 0);
-+ }
-+}
-+
-+/*
-+ * Work around broken HP Pavilion Notebooks which assign USB to
-+ * IRQ 9 even though it is actually wired to IRQ 11
-+ */
-+static int __init fix_broken_hp_bios_irq9(struct dmi_system_id *d)
-+{
-+ if (!broken_hp_bios_irq9) {
-+ broken_hp_bios_irq9 = 1;
-+ printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Work around broken Acer TravelMate 360 Notebooks which assign
-+ * Cardbus to IRQ 11 even though it is actually wired to IRQ 10
-+ */
-+static int __init fix_acer_tm360_irqrouting(struct dmi_system_id *d)
-+{
-+ if (!acer_tm360_irqrouting) {
-+ acer_tm360_irqrouting = 1;
-+ printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
-+ }
-+ return 0;
-+}
-+
-+static struct dmi_system_id __initdata pciirq_dmi_table[] = {
-+ {
-+ .callback = fix_broken_hp_bios_irq9,
-+ .ident = "HP Pavilion N5400 Series Laptop",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-+ DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"),
-+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook Model GE"),
-+ DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
-+ },
-+ },
-+ {
-+ .callback = fix_acer_tm360_irqrouting,
-+ .ident = "Acer TravelMate 36x Laptop",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
-+ },
-+ },
-+ { }
-+};
-+
-+static int __init pcibios_irq_init(void)
-+{
-+ DBG(KERN_DEBUG "PCI: IRQ init\n");
-+
-+ if (pcibios_enable_irq || raw_pci_ops == NULL)
-+ return 0;
-+
-+ dmi_check_system(pciirq_dmi_table);
-+
-+ pirq_table = pirq_find_routing_table();
-+
-+#ifdef CONFIG_PCI_BIOS
-+ if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
-+ pirq_table = pcibios_get_irq_routing_table();
-+#endif
-+ if (pirq_table) {
-+ pirq_peer_trick();
-+ pirq_find_router(&pirq_router);
-+ if (pirq_table->exclusive_irqs) {
-+ int i;
-+ for (i=0; i<16; i++)
-+ if (!(pirq_table->exclusive_irqs & (1 << i)))
-+ pirq_penalty[i] += 100;
-+ }
-+ /* If we're using the I/O APIC, avoid using the PCI IRQ routing table */
-+ if (io_apic_assign_pci_irqs)
-+ pirq_table = NULL;
-+ }
-+
-+ pcibios_enable_irq = pirq_enable_irq;
-+
-+ pcibios_fixup_irqs();
-+ return 0;
-+}
-+
-+subsys_initcall(pcibios_irq_init);
-+
-+
-+static void pirq_penalize_isa_irq(int irq, int active)
-+{
-+ /*
-+ * If any ISAPnP device reports an IRQ in its list of possible
-+ * IRQ's, we try to avoid assigning it to PCI devices.
-+ */
-+ if (irq < 16) {
-+ if (active)
-+ pirq_penalty[irq] += 1000;
-+ else
-+ pirq_penalty[irq] += 100;
-+ }
-+}
-+
-+void pcibios_penalize_isa_irq(int irq, int active)
-+{
-+#ifdef CONFIG_ACPI
-+ if (!acpi_noirq)
-+ acpi_penalize_isa_irq(irq, active);
-+ else
-+#endif
-+ pirq_penalize_isa_irq(irq, active);
-+}
-+
-+static int pirq_enable_irq(struct pci_dev *dev)
-+{
-+ u8 pin;
-+ struct pci_dev *temp_dev;
-+
-+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
-+ if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
-+ char *msg = "";
-+
-+ pin--; /* interrupt pins are numbered starting from 1 */
-+
-+ if (io_apic_assign_pci_irqs) {
-+ int irq;
-+
-+ irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
-+ /*
-+ * Busses behind bridges are typically not listed in the MP-table.
-+ * In this case we have to look up the IRQ based on the parent bus,
-+ * parent slot, and pin number. The SMP code detects such bridged
-+ * busses itself so we should get into this branch reliably.
-+ */
-+ temp_dev = dev;
-+ while (irq < 0 && dev->bus->parent) { /* go back to the bridge */
-+ struct pci_dev * bridge = dev->bus->self;
-+
-+ pin = (pin + PCI_SLOT(dev->devfn)) % 4;
-+ irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
-+ PCI_SLOT(bridge->devfn), pin);
-+ if (irq >= 0)
-+ printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
-+ pci_name(bridge), 'A' + pin, irq);
-+ dev = bridge;
-+ }
-+ dev = temp_dev;
-+ if (irq >= 0) {
-+ printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
-+ pci_name(dev), 'A' + pin, irq);
-+ dev->irq = irq;
-+ return 0;
-+ } else
-+ msg = " Probably buggy MP table.";
-+ } else if (pci_probe & PCI_BIOS_IRQ_SCAN)
-+ msg = "";
-+ else
-+ msg = " Please try using pci=biosirq.";
-+
-+ /* With IDE legacy devices the IRQ lookup failure is not a problem.. */
-+ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5))
-+ return 0;
-+
-+ printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
-+ 'A' + pin, pci_name(dev), msg);
-+ }
-+ return 0;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/pci/Makefile ubuntu-gutsy-xen/arch/i386/pci/Makefile
---- ubuntu-gutsy/arch/i386/pci/Makefile 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/pci/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -4,6 +4,10 @@
- obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o direct.o mmconfig-shared.o
- obj-$(CONFIG_PCI_DIRECT) += direct.o
-
-+# pcifront should be after pcbios.o, mmconfig.o, and direct.o as it should only
-+# take over if direct access to the PCI bus is unavailable
-+obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront.o
-+
- pci-y := fixup.o
- pci-$(CONFIG_ACPI) += acpi.o
- pci-y += legacy.o irq.o
-@@ -12,3 +16,8 @@
- pci-$(CONFIG_X86_NUMAQ) := numa.o irq.o
-
- obj-y += $(pci-y) common.o early.o
-+
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/pci/pcifront.c ubuntu-gutsy-xen/arch/i386/pci/pcifront.c
---- ubuntu-gutsy/arch/i386/pci/pcifront.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/i386/pci/pcifront.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,55 @@
-+/*
-+ * PCI Frontend Stub - puts some "dummy" functions in to the Linux x86 PCI core
-+ * to support the Xen PCI Frontend's operation
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/pci.h>
-+#include <asm/acpi.h>
-+#include "pci.h"
-+
-+static int pcifront_enable_irq(struct pci_dev *dev)
-+{
-+ u8 irq;
-+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
-+ dev->irq = irq;
-+
-+ return 0;
-+}
-+
-+extern u8 pci_cache_line_size;
-+
-+static int __init pcifront_x86_stub_init(void)
-+{
-+ struct cpuinfo_x86 *c = &boot_cpu_data;
-+
-+ /* Only install our method if we haven't found real hardware already */
-+ if (raw_pci_ops)
-+ return 0;
-+
-+ printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");
-+
-+ /* Copied from arch/i386/pci/common.c */
-+ pci_cache_line_size = 32 >> 2;
-+ if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
-+ pci_cache_line_size = 64 >> 2; /* K7 & K8 */
-+ else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
-+ pci_cache_line_size = 128 >> 2; /* P4 */
-+
-+ /* On x86, we need to disable the normal IRQ routing table and
-+ * just ask the backend
-+ */
-+ pcibios_enable_irq = pcifront_enable_irq;
-+ pcibios_disable_irq = NULL;
-+
-+#ifdef CONFIG_ACPI
-+ /* Keep ACPI out of the picture */
-+ acpi_noirq = 1;
-+#endif
-+
-+ return 0;
-+}
-+
-+arch_initcall(pcifront_x86_stub_init);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/i386/power/Makefile ubuntu-gutsy-xen/arch/i386/power/Makefile
---- ubuntu-gutsy/arch/i386/power/Makefile 2007-08-18 09:40:28.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/i386/power/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -1,2 +1,4 @@
--obj-$(CONFIG_PM) += cpu.o
-+obj-$(CONFIG_PM_LEGACY) += cpu.o
-+obj-$(CONFIG_SOFTWARE_SUSPEND) += cpu.o
-+obj-$(CONFIG_ACPI_SLEEP) += cpu.o
- obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o suspend.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/ia32/ia32entry-xen.S ubuntu-gutsy-xen/arch/x86_64/ia32/ia32entry-xen.S
---- ubuntu-gutsy/arch/x86_64/ia32/ia32entry-xen.S 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/ia32/ia32entry-xen.S 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,749 @@
-+/*
-+ * Compatibility mode system call entry point for x86-64.
-+ *
-+ * Copyright 2000-2002 Andi Kleen, SuSE Labs.
-+ */
-+
-+#include <asm/dwarf2.h>
-+#include <asm/calling.h>
-+#include <asm/asm-offsets.h>
-+#include <asm/current.h>
-+#include <asm/errno.h>
-+#include <asm/ia32_unistd.h>
-+#include <asm/thread_info.h>
-+#include <asm/segment.h>
-+#include <asm/vsyscall32.h>
-+#include <asm/irqflags.h>
-+#include <linux/linkage.h>
-+
-+#define __XEN_X86_64 1
-+
-+#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
-+
-+ .macro IA32_ARG_FIXUP noebp=0
-+ movl %edi,%r8d
-+ .if \noebp
-+ .else
-+ movl %ebp,%r9d
-+ .endif
-+ xchg %ecx,%esi
-+ movl %ebx,%edi
-+ movl %edx,%edx /* zero extension */
-+ .endm
-+
-+ /* clobbers %eax */
-+ .macro CLEAR_RREGS
-+ xorl %eax,%eax
-+ movq %rax,R11(%rsp)
-+ movq %rax,R10(%rsp)
-+ movq %rax,R9(%rsp)
-+ movq %rax,R8(%rsp)
-+ .endm
-+
-+#if defined (__XEN_X86_64)
-+#include "../kernel/xen_entry.S"
-+
-+#define __swapgs
-+#define __cli
-+#define __sti
-+#else
-+/*
-+ * Use the native instructions
-+ */
-+#define __swapgs swapgs
-+#define __cli cli
-+#define __sti sti
-+#endif
-+
-+ .macro CFI_STARTPROC32 simple
-+ CFI_STARTPROC \simple
-+ CFI_UNDEFINED r8
-+ CFI_UNDEFINED r9
-+ CFI_UNDEFINED r10
-+ CFI_UNDEFINED r11
-+ CFI_UNDEFINED r12
-+ CFI_UNDEFINED r13
-+ CFI_UNDEFINED r14
-+ CFI_UNDEFINED r15
-+ .endm
-+
-+/*
-+ * 32bit SYSENTER instruction entry.
-+ *
-+ * Arguments:
-+ * %eax System call number.
-+ * %ebx Arg1
-+ * %ecx Arg2
-+ * %edx Arg3
-+ * %esi Arg4
-+ * %edi Arg5
-+ * %ebp user stack
-+ * 0(%ebp) Arg6
-+ *
-+ * Interrupts off.
-+ *
-+ * This is purely a fast path. For anything complicated we use the int 0x80
-+ * path below. Set up a complete hardware stack frame to share code
-+ * with the int 0x80 path.
-+ */
-+ENTRY(ia32_sysenter_target)
-+ CFI_STARTPROC32 simple
-+ CFI_SIGNAL_FRAME
-+ CFI_DEF_CFA rsp,0
-+ CFI_REGISTER rsp,rbp
-+ __swapgs
-+ movq %gs:pda_kernelstack, %rsp
-+ addq $(PDA_STACKOFFSET),%rsp
-+ /*
-+ * No need to follow this irqs on/off section: the syscall
-+ * disabled irqs, here we enable it straight after entry:
-+ */
-+ XEN_UNBLOCK_EVENTS(%r11)
-+ __sti
-+ movl %ebp,%ebp /* zero extension */
-+ pushq $__USER32_DS
-+ CFI_ADJUST_CFA_OFFSET 8
-+ /*CFI_REL_OFFSET ss,0*/
-+ pushq %rbp
-+ CFI_ADJUST_CFA_OFFSET 8
-+ CFI_REL_OFFSET rsp,0
-+ pushfq
-+ CFI_ADJUST_CFA_OFFSET 8
-+ /*CFI_REL_OFFSET rflags,0*/
-+ movl $VSYSCALL32_SYSEXIT, %r10d
-+ CFI_REGISTER rip,r10
-+ pushq $__USER32_CS
-+ CFI_ADJUST_CFA_OFFSET 8
-+ /*CFI_REL_OFFSET cs,0*/
-+ movl %eax, %eax
-+ pushq %r10
-+ CFI_ADJUST_CFA_OFFSET 8
-+ CFI_REL_OFFSET rip,0
-+ pushq %rax
-+ CFI_ADJUST_CFA_OFFSET 8
-+ cld
-+ SAVE_ARGS 0,0,0
-+ /* no need to do an access_ok check here because rbp has been
-+ 32bit zero extended */
-+1: movl (%rbp),%r9d
-+ .section __ex_table,"a"
-+ .quad 1b,ia32_badarg
-+ .previous
-+ GET_THREAD_INFO(%r10)
-+ orl $TS_COMPAT,threadinfo_status(%r10)
-+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
-+ CFI_REMEMBER_STATE
-+ jnz sysenter_tracesys
-+sysenter_do_call:
-+ cmpl $(IA32_NR_syscalls-1),%eax
-+ ja ia32_badsys
-+ IA32_ARG_FIXUP 1
-+ call *ia32_sys_call_table(,%rax,8)
-+ movq %rax,RAX-ARGOFFSET(%rsp)
-+ GET_THREAD_INFO(%r10)
-+ XEN_BLOCK_EVENTS(%r11)
-+ __cli
-+ TRACE_IRQS_OFF
-+ testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
-+ jnz int_ret_from_sys_call
-+ andl $~TS_COMPAT,threadinfo_status(%r10)
-+ /* clear IF, that popfq doesn't enable interrupts early */
-+ andl $~0x200,EFLAGS-R11(%rsp)
-+ RESTORE_ARGS 1,24,1,1,1,1
-+ popfq
-+ CFI_ADJUST_CFA_OFFSET -8
-+ /*CFI_RESTORE rflags*/
-+ popq %rcx /* User %esp */
-+ CFI_ADJUST_CFA_OFFSET -8
-+ CFI_REGISTER rsp,rcx
-+ movl $VSYSCALL32_SYSEXIT,%edx /* User %eip */
-+ CFI_REGISTER rip,rdx
-+ TRACE_IRQS_ON
-+ __swapgs
-+ XEN_UNBLOCK_EVENTS(%r11)
-+ __sti /* sti only takes effect after the next instruction */
-+ /* sysexit */
-+ .byte 0xf, 0x35 /* TBD */
-+
-+sysenter_tracesys:
-+ CFI_RESTORE_STATE
-+ SAVE_REST
-+ CLEAR_RREGS
-+ movq $-ENOSYS,RAX(%rsp) /* really needed? */
-+ movq %rsp,%rdi /* &pt_regs -> arg1 */
-+ call syscall_trace_enter
-+ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
-+ RESTORE_REST
-+ movl %ebp, %ebp
-+ /* no need to do an access_ok check here because rbp has been
-+ 32bit zero extended */
-+1: movl (%rbp),%r9d
-+ .section __ex_table,"a"
-+ .quad 1b,ia32_badarg
-+ .previous
-+ jmp sysenter_do_call
-+ CFI_ENDPROC
-+ENDPROC(ia32_sysenter_target)
-+
-+/*
-+ * 32bit SYSCALL instruction entry.
-+ *
-+ * Arguments:
-+ * %eax System call number.
-+ * %ebx Arg1
-+ * %ecx return EIP
-+ * %edx Arg3
-+ * %esi Arg4
-+ * %edi Arg5
-+ * %ebp Arg2 [note: not saved in the stack frame, should not be touched]
-+ * %esp user stack
-+ * 0(%esp) Arg6
-+ *
-+ * Interrupts off.
-+ *
-+ * This is purely a fast path. For anything complicated we use the int 0x80
-+ * path below. Set up a complete hardware stack frame to share code
-+ * with the int 0x80 path.
-+ */
-+ENTRY(ia32_cstar_target)
-+ CFI_STARTPROC32 simple
-+ CFI_SIGNAL_FRAME
-+ CFI_DEF_CFA rsp,PDA_STACKOFFSET
-+ CFI_REGISTER rip,rcx
-+ /*CFI_REGISTER rflags,r11*/
-+ __swapgs
-+ movl %esp,%r8d
-+ CFI_REGISTER rsp,r8
-+ movq %gs:pda_kernelstack,%rsp
-+ /*
-+ * No need to follow this irqs on/off section: the syscall
-+ * disabled irqs and here we enable it straight after entry:
-+ */
-+ XEN_UNBLOCK_EVENTS(%r11)
-+ __sti
-+ SAVE_ARGS 8,1,1
-+ movl %eax,%eax /* zero extension */
-+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
-+ movq %rcx,RIP-ARGOFFSET(%rsp)
-+ CFI_REL_OFFSET rip,RIP-ARGOFFSET
-+ movq %rbp,RCX-ARGOFFSET(%rsp) /* this lies slightly to ptrace */
-+ movl %ebp,%ecx
-+ movq $__USER32_CS,CS-ARGOFFSET(%rsp)
-+ movq $__USER32_DS,SS-ARGOFFSET(%rsp)
-+ movq %r11,EFLAGS-ARGOFFSET(%rsp)
-+ /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
-+ movq %r8,RSP-ARGOFFSET(%rsp)
-+ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
-+ /* no need to do an access_ok check here because r8 has been
-+ 32bit zero extended */
-+ /* hardware stack frame is complete now */
-+1: movl (%r8),%r9d
-+ .section __ex_table,"a"
-+ .quad 1b,ia32_badarg
-+ .previous
-+ GET_THREAD_INFO(%r10)
-+ orl $TS_COMPAT,threadinfo_status(%r10)
-+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
-+ CFI_REMEMBER_STATE
-+ jnz cstar_tracesys
-+cstar_do_call:
-+ cmpl $IA32_NR_syscalls-1,%eax
-+ ja ia32_badsys
-+ IA32_ARG_FIXUP 1
-+ call *ia32_sys_call_table(,%rax,8)
-+ movq %rax,RAX-ARGOFFSET(%rsp)
-+ GET_THREAD_INFO(%r10)
-+ XEN_BLOCK_EVENTS(%r11)
-+ __cli
-+ TRACE_IRQS_OFF
-+ testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
-+ jnz int_ret_from_sys_call
-+ andl $~TS_COMPAT,threadinfo_status(%r10)
-+ RESTORE_ARGS 1,-ARG_SKIP,1,1,1
-+ movl RIP-ARGOFFSET(%rsp),%ecx
-+ CFI_REGISTER rip,rcx
-+ movl EFLAGS-ARGOFFSET(%rsp),%r11d
-+ /*CFI_REGISTER rflags,r11*/
-+ TRACE_IRQS_ON
-+ movl RSP-ARGOFFSET(%rsp),%esp
-+ CFI_RESTORE rsp
-+ __swapgs
-+ sysretl /* TBD */
-+
-+cstar_tracesys:
-+ CFI_RESTORE_STATE
-+ SAVE_REST
-+ CLEAR_RREGS
-+ movq $-ENOSYS,RAX(%rsp) /* really needed? */
-+ movq %rsp,%rdi /* &pt_regs -> arg1 */
-+ call syscall_trace_enter
-+ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
-+ RESTORE_REST
-+ movl RSP-ARGOFFSET(%rsp), %r8d
-+ /* no need to do an access_ok check here because r8 has been
-+ 32bit zero extended */
-+1: movl (%r8),%r9d
-+ .section __ex_table,"a"
-+ .quad 1b,ia32_badarg
-+ .previous
-+ jmp cstar_do_call
-+END(ia32_cstar_target)
-+
-+ia32_badarg:
-+ movq $-EFAULT,%rax
-+ jmp ia32_sysret
-+ CFI_ENDPROC
-+
-+/*
-+ * Emulated IA32 system calls via int 0x80.
-+ *
-+ * Arguments:
-+ * %eax System call number.
-+ * %ebx Arg1
-+ * %ecx Arg2
-+ * %edx Arg3
-+ * %esi Arg4
-+ * %edi Arg5
-+ * %ebp Arg6 [note: not saved in the stack frame, should not be touched]
-+ *
-+ * Notes:
-+ * Uses the same stack frame as the x86-64 version.
-+ * All registers except %eax must be saved (but ptrace may violate that)
-+ * Arguments are zero extended. For system calls that want sign extension and
-+ * take long arguments a wrapper is needed. Most calls can just be called
-+ * directly.
-+ * Assumes it is only called from user space and entered with interrupts off.
-+ */
-+
-+ENTRY(ia32_syscall)
-+ CFI_STARTPROC simple
-+ CFI_SIGNAL_FRAME
-+ CFI_DEF_CFA rsp,SS+8-RIP
-+ /*CFI_REL_OFFSET ss,SS-RIP*/
-+ CFI_REL_OFFSET rsp,RSP-RIP
-+ /*CFI_REL_OFFSET rflags,EFLAGS-RIP*/
-+ /*CFI_REL_OFFSET cs,CS-RIP*/
-+ CFI_REL_OFFSET rip,RIP-RIP
-+ __swapgs
-+ /*
-+ * No need to follow this irqs on/off section: the syscall
-+ * disabled irqs and here we enable it straight after entry:
-+ */
-+ XEN_UNBLOCK_EVENTS(%r11)
-+ __sti
-+ movq (%rsp),%rcx
-+ movq 8(%rsp),%r11
-+ addq $0x10,%rsp /* skip rcx and r11 */
-+ movl %eax,%eax
-+ pushq %rax
-+ CFI_ADJUST_CFA_OFFSET 8
-+ cld
-+/* 1: jmp 1b */
-+ /* note the registers are not zero extended to the sf.
-+ this could be a problem. */
-+ SAVE_ARGS 0,0,1
-+ GET_THREAD_INFO(%r10)
-+ orl $TS_COMPAT,threadinfo_status(%r10)
-+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
-+ jnz ia32_tracesys
-+ia32_do_syscall:
-+ cmpl $(IA32_NR_syscalls-1),%eax
-+ ja ia32_badsys
-+ IA32_ARG_FIXUP
-+ call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
-+ia32_sysret:
-+ movq %rax,RAX-ARGOFFSET(%rsp)
-+ jmp int_ret_from_sys_call
-+
-+ia32_tracesys:
-+ SAVE_REST
-+ movq $-ENOSYS,RAX(%rsp) /* really needed? */
-+ movq %rsp,%rdi /* &pt_regs -> arg1 */
-+ call syscall_trace_enter
-+ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
-+ RESTORE_REST
-+ jmp ia32_do_syscall
-+END(ia32_syscall)
-+
-+ia32_badsys:
-+ movq $0,ORIG_RAX-ARGOFFSET(%rsp)
-+ movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
-+ jmp int_ret_from_sys_call
-+
-+quiet_ni_syscall:
-+ movq $-ENOSYS,%rax
-+ ret
-+ CFI_ENDPROC
-+
-+ .macro PTREGSCALL label, func, arg
-+ .globl \label
-+\label:
-+ leaq \func(%rip),%rax
-+ leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
-+ jmp ia32_ptregs_common
-+ .endm
-+
-+ CFI_STARTPROC32
-+
-+ PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi
-+ PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi
-+ PTREGSCALL stub32_sigaltstack, sys32_sigaltstack, %rdx
-+ PTREGSCALL stub32_sigsuspend, sys32_sigsuspend, %rcx
-+ PTREGSCALL stub32_execve, sys32_execve, %rcx
-+ PTREGSCALL stub32_fork, sys_fork, %rdi
-+ PTREGSCALL stub32_clone, sys32_clone, %rdx
-+ PTREGSCALL stub32_vfork, sys_vfork, %rdi
-+ PTREGSCALL stub32_iopl, sys_iopl, %rsi
-+ PTREGSCALL stub32_rt_sigsuspend, sys_rt_sigsuspend, %rdx
-+
-+ENTRY(ia32_ptregs_common)
-+ popq %r11
-+ CFI_ENDPROC
-+ CFI_STARTPROC32 simple
-+ CFI_SIGNAL_FRAME
-+ CFI_DEF_CFA rsp,SS+8-ARGOFFSET
-+ CFI_REL_OFFSET rax,RAX-ARGOFFSET
-+ CFI_REL_OFFSET rcx,RCX-ARGOFFSET
-+ CFI_REL_OFFSET rdx,RDX-ARGOFFSET
-+ CFI_REL_OFFSET rsi,RSI-ARGOFFSET
-+ CFI_REL_OFFSET rdi,RDI-ARGOFFSET
-+ CFI_REL_OFFSET rip,RIP-ARGOFFSET
-+/* CFI_REL_OFFSET cs,CS-ARGOFFSET*/
-+/* CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
-+ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
-+/* CFI_REL_OFFSET ss,SS-ARGOFFSET*/
-+ SAVE_REST
-+ call *%rax
-+ RESTORE_REST
-+ jmp ia32_sysret /* misbalances the return cache */
-+ CFI_ENDPROC
-+END(ia32_ptregs_common)
-+
-+ .section .rodata,"a"
-+ .align 8
-+ia32_sys_call_table:
-+ .quad sys_restart_syscall
-+ .quad sys_exit
-+ .quad stub32_fork
-+ .quad sys_read
-+ .quad sys_write
-+ .quad compat_sys_open /* 5 */
-+ .quad sys_close
-+ .quad sys32_waitpid
-+ .quad sys_creat
-+ .quad sys_link
-+ .quad sys_unlink /* 10 */
-+ .quad stub32_execve
-+ .quad sys_chdir
-+ .quad compat_sys_time
-+ .quad sys_mknod
-+ .quad sys_chmod /* 15 */
-+ .quad sys_lchown16
-+ .quad quiet_ni_syscall /* old break syscall holder */
-+ .quad sys_stat
-+ .quad sys32_lseek
-+ .quad sys_getpid /* 20 */
-+ .quad compat_sys_mount /* mount */
-+ .quad sys_oldumount /* old_umount */
-+ .quad sys_setuid16
-+ .quad sys_getuid16
-+ .quad compat_sys_stime /* stime */ /* 25 */
-+ .quad sys32_ptrace /* ptrace */
-+ .quad sys_alarm
-+ .quad sys_fstat /* (old)fstat */
-+ .quad sys_pause
-+ .quad compat_sys_utime /* 30 */
-+ .quad quiet_ni_syscall /* old stty syscall holder */
-+ .quad quiet_ni_syscall /* old gtty syscall holder */
-+ .quad sys_access
-+ .quad sys_nice
-+ .quad quiet_ni_syscall /* 35 */ /* old ftime syscall holder */
-+ .quad sys_sync
-+ .quad sys32_kill
-+ .quad sys_rename
-+ .quad sys_mkdir
-+ .quad sys_rmdir /* 40 */
-+ .quad sys_dup
-+ .quad sys32_pipe
-+ .quad compat_sys_times
-+ .quad quiet_ni_syscall /* old prof syscall holder */
-+ .quad sys_brk /* 45 */
-+ .quad sys_setgid16
-+ .quad sys_getgid16
-+ .quad sys_signal
-+ .quad sys_geteuid16
-+ .quad sys_getegid16 /* 50 */
-+ .quad sys_acct
-+ .quad sys_umount /* new_umount */
-+ .quad quiet_ni_syscall /* old lock syscall holder */
-+ .quad compat_sys_ioctl
-+ .quad compat_sys_fcntl64 /* 55 */
-+ .quad quiet_ni_syscall /* old mpx syscall holder */
-+ .quad sys_setpgid
-+ .quad quiet_ni_syscall /* old ulimit syscall holder */
-+ .quad sys32_olduname
-+ .quad sys_umask /* 60 */
-+ .quad sys_chroot
-+ .quad sys32_ustat
-+ .quad sys_dup2
-+ .quad sys_getppid
-+ .quad sys_getpgrp /* 65 */
-+ .quad sys_setsid
-+ .quad sys32_sigaction
-+ .quad sys_sgetmask
-+ .quad sys_ssetmask
-+ .quad sys_setreuid16 /* 70 */
-+ .quad sys_setregid16
-+ .quad stub32_sigsuspend
-+ .quad compat_sys_sigpending
-+ .quad sys_sethostname
-+ .quad compat_sys_setrlimit /* 75 */
-+ .quad compat_sys_old_getrlimit /* old_getrlimit */
-+ .quad compat_sys_getrusage
-+ .quad sys32_gettimeofday
-+ .quad sys32_settimeofday
-+ .quad sys_getgroups16 /* 80 */
-+ .quad sys_setgroups16
-+ .quad sys32_old_select
-+ .quad sys_symlink
-+ .quad sys_lstat
-+ .quad sys_readlink /* 85 */
-+ .quad sys_uselib
-+ .quad sys_swapon
-+ .quad sys_reboot
-+ .quad compat_sys_old_readdir
-+ .quad sys32_mmap /* 90 */
-+ .quad sys_munmap
-+ .quad sys_truncate
-+ .quad sys_ftruncate
-+ .quad sys_fchmod
-+ .quad sys_fchown16 /* 95 */
-+ .quad sys_getpriority
-+ .quad sys_setpriority
-+ .quad quiet_ni_syscall /* old profil syscall holder */
-+ .quad compat_sys_statfs
-+ .quad compat_sys_fstatfs /* 100 */
-+ .quad sys_ioperm
-+ .quad compat_sys_socketcall
-+ .quad sys_syslog
-+ .quad compat_sys_setitimer
-+ .quad compat_sys_getitimer /* 105 */
-+ .quad compat_sys_newstat
-+ .quad compat_sys_newlstat
-+ .quad compat_sys_newfstat
-+ .quad sys32_uname
-+ .quad stub32_iopl /* 110 */
-+ .quad sys_vhangup
-+ .quad quiet_ni_syscall /* old "idle" system call */
-+ .quad sys32_vm86_warning /* vm86old */
-+ .quad compat_sys_wait4
-+ .quad sys_swapoff /* 115 */
-+ .quad compat_sys_sysinfo
-+ .quad sys32_ipc
-+ .quad sys_fsync
-+ .quad stub32_sigreturn
-+ .quad stub32_clone /* 120 */
-+ .quad sys_setdomainname
-+ .quad sys_uname
-+ .quad sys_modify_ldt
-+ .quad compat_sys_adjtimex
-+ .quad sys32_mprotect /* 125 */
-+ .quad compat_sys_sigprocmask
-+ .quad quiet_ni_syscall /* create_module */
-+ .quad sys_init_module
-+ .quad sys_delete_module
-+ .quad quiet_ni_syscall /* 130 get_kernel_syms */
-+ .quad sys_quotactl
-+ .quad sys_getpgid
-+ .quad sys_fchdir
-+ .quad quiet_ni_syscall /* bdflush */
-+ .quad sys_sysfs /* 135 */
-+ .quad sys_personality
-+ .quad quiet_ni_syscall /* for afs_syscall */
-+ .quad sys_setfsuid16
-+ .quad sys_setfsgid16
-+ .quad sys_llseek /* 140 */
-+ .quad compat_sys_getdents
-+ .quad compat_sys_select
-+ .quad sys_flock
-+ .quad sys_msync
-+ .quad compat_sys_readv /* 145 */
-+ .quad compat_sys_writev
-+ .quad sys_getsid
-+ .quad sys_fdatasync
-+ .quad sys32_sysctl /* sysctl */
-+ .quad sys_mlock /* 150 */
-+ .quad sys_munlock
-+ .quad sys_mlockall
-+ .quad sys_munlockall
-+ .quad sys_sched_setparam
-+ .quad sys_sched_getparam /* 155 */
-+ .quad sys_sched_setscheduler
-+ .quad sys_sched_getscheduler
-+ .quad sys_sched_yield
-+ .quad sys_sched_get_priority_max
-+ .quad sys_sched_get_priority_min /* 160 */
-+ .quad sys32_sched_rr_get_interval
-+ .quad compat_sys_nanosleep
-+ .quad sys_mremap
-+ .quad sys_setresuid16
-+ .quad sys_getresuid16 /* 165 */
-+ .quad sys32_vm86_warning /* vm86 */
-+ .quad quiet_ni_syscall /* query_module */
-+ .quad sys_poll
-+ .quad compat_sys_nfsservctl
-+ .quad sys_setresgid16 /* 170 */
-+ .quad sys_getresgid16
-+ .quad sys_prctl
-+ .quad stub32_rt_sigreturn
-+ .quad sys32_rt_sigaction
-+ .quad sys32_rt_sigprocmask /* 175 */
-+ .quad sys32_rt_sigpending
-+ .quad compat_sys_rt_sigtimedwait
-+ .quad sys32_rt_sigqueueinfo
-+ .quad stub32_rt_sigsuspend
-+ .quad sys32_pread /* 180 */
-+ .quad sys32_pwrite
-+ .quad sys_chown16
-+ .quad sys_getcwd
-+ .quad sys_capget
-+ .quad sys_capset
-+ .quad stub32_sigaltstack
-+ .quad sys32_sendfile
-+ .quad quiet_ni_syscall /* streams1 */
-+ .quad quiet_ni_syscall /* streams2 */
-+ .quad stub32_vfork /* 190 */
-+ .quad compat_sys_getrlimit
-+ .quad sys32_mmap2
-+ .quad sys32_truncate64
-+ .quad sys32_ftruncate64
-+ .quad sys32_stat64 /* 195 */
-+ .quad sys32_lstat64
-+ .quad sys32_fstat64
-+ .quad sys_lchown
-+ .quad sys_getuid
-+ .quad sys_getgid /* 200 */
-+ .quad sys_geteuid
-+ .quad sys_getegid
-+ .quad sys_setreuid
-+ .quad sys_setregid
-+ .quad sys_getgroups /* 205 */
-+ .quad sys_setgroups
-+ .quad sys_fchown
-+ .quad sys_setresuid
-+ .quad sys_getresuid
-+ .quad sys_setresgid /* 210 */
-+ .quad sys_getresgid
-+ .quad sys_chown
-+ .quad sys_setuid
-+ .quad sys_setgid
-+ .quad sys_setfsuid /* 215 */
-+ .quad sys_setfsgid
-+ .quad sys_pivot_root
-+ .quad sys_mincore
-+ .quad sys_madvise
-+ .quad compat_sys_getdents64 /* 220 getdents64 */
-+ .quad compat_sys_fcntl64
-+ .quad quiet_ni_syscall /* tux */
-+ .quad quiet_ni_syscall /* security */
-+ .quad sys_gettid
-+ .quad sys32_readahead /* 225 */
-+ .quad sys_setxattr
-+ .quad sys_lsetxattr
-+ .quad sys_fsetxattr
-+ .quad sys_getxattr
-+ .quad sys_lgetxattr /* 230 */
-+ .quad sys_fgetxattr
-+ .quad sys_listxattr
-+ .quad sys_llistxattr
-+ .quad sys_flistxattr
-+ .quad sys_removexattr /* 235 */
-+ .quad sys_lremovexattr
-+ .quad sys_fremovexattr
-+ .quad sys_tkill
-+ .quad sys_sendfile64
-+ .quad compat_sys_futex /* 240 */
-+ .quad compat_sys_sched_setaffinity
-+ .quad compat_sys_sched_getaffinity
-+ .quad sys32_set_thread_area
-+ .quad sys32_get_thread_area
-+ .quad compat_sys_io_setup /* 245 */
-+ .quad sys_io_destroy
-+ .quad compat_sys_io_getevents
-+ .quad compat_sys_io_submit
-+ .quad sys_io_cancel
-+ .quad sys32_fadvise64 /* 250 */
-+ .quad quiet_ni_syscall /* free_huge_pages */
-+ .quad sys_exit_group
-+ .quad sys32_lookup_dcookie
-+ .quad sys_epoll_create
-+ .quad sys_epoll_ctl /* 255 */
-+ .quad sys_epoll_wait
-+ .quad sys_remap_file_pages
-+ .quad sys_set_tid_address
-+ .quad compat_sys_timer_create
-+ .quad compat_sys_timer_settime /* 260 */
-+ .quad compat_sys_timer_gettime
-+ .quad sys_timer_getoverrun
-+ .quad sys_timer_delete
-+ .quad compat_sys_clock_settime
-+ .quad compat_sys_clock_gettime /* 265 */
-+ .quad compat_sys_clock_getres
-+ .quad compat_sys_clock_nanosleep
-+ .quad compat_sys_statfs64
-+ .quad compat_sys_fstatfs64
-+ .quad sys_tgkill /* 270 */
-+ .quad compat_sys_utimes
-+ .quad sys32_fadvise64_64
-+ .quad quiet_ni_syscall /* sys_vserver */
-+ .quad sys_mbind
-+ .quad compat_sys_get_mempolicy /* 275 */
-+ .quad sys_set_mempolicy
-+ .quad compat_sys_mq_open
-+ .quad sys_mq_unlink
-+ .quad compat_sys_mq_timedsend
-+ .quad compat_sys_mq_timedreceive /* 280 */
-+ .quad compat_sys_mq_notify
-+ .quad compat_sys_mq_getsetattr
-+ .quad compat_sys_kexec_load /* reserved for kexec */
-+ .quad compat_sys_waitid
-+ .quad quiet_ni_syscall /* 285: sys_altroot */
-+ .quad sys_add_key
-+ .quad sys_request_key
-+ .quad sys_keyctl
-+ .quad sys_ioprio_set
-+ .quad sys_ioprio_get /* 290 */
-+ .quad sys_inotify_init
-+ .quad sys_inotify_add_watch
-+ .quad sys_inotify_rm_watch
-+ .quad sys_migrate_pages
-+ .quad compat_sys_openat /* 295 */
-+ .quad sys_mkdirat
-+ .quad sys_mknodat
-+ .quad sys_fchownat
-+ .quad compat_sys_futimesat
-+ .quad sys32_fstatat /* 300 */
-+ .quad sys_unlinkat
-+ .quad sys_renameat
-+ .quad sys_linkat
-+ .quad sys_symlinkat
-+ .quad sys_readlinkat /* 305 */
-+ .quad sys_fchmodat
-+ .quad sys_faccessat
-+ .quad compat_sys_pselect6
-+ .quad compat_sys_ppoll
-+ .quad sys_unshare /* 310 */
-+ .quad compat_sys_set_robust_list
-+ .quad compat_sys_get_robust_list
-+ .quad sys_splice
-+ .quad sys32_sync_file_range
-+ .quad sys_tee /* 315 */
-+ .quad compat_sys_vmsplice
-+ .quad compat_sys_move_pages
-+ .quad sys_getcpu
-+ .quad sys_epoll_pwait
-+ .quad compat_sys_utimensat /* 320 */
-+ .quad compat_sys_signalfd
-+ .quad compat_sys_timerfd
-+ .quad sys_eventfd
-+ia32_syscall_end:
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/ia32/Makefile ubuntu-gutsy-xen/arch/x86_64/ia32/Makefile
---- ubuntu-gutsy/arch/x86_64/ia32/Makefile 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/x86_64/ia32/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -27,9 +27,25 @@
- -Wl,-soname=linux-gate.so.1 -o $@ \
- -Wl,-T,$(filter-out FORCE,$^)
-
-+$(obj)/vsyscall-int80.so \
- $(obj)/vsyscall-sysenter.so $(obj)/vsyscall-syscall.so: \
- $(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
- $(call if_changed,syscall)
-
--AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32
--AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32
-+AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32 -Iarch/i386/kernel
-+AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32 -Iarch/i386/kernel
-+
-+ifdef CONFIG_XEN
-+AFLAGS_vsyscall-int80.o = -m32 -Wa,-32 -Iarch/i386/kernel
-+CFLAGS_syscall32-xen.o += -DUSE_INT80
-+AFLAGS_syscall32_syscall-xen.o += -DUSE_INT80
-+
-+$(obj)/syscall32_syscall-xen.o: \
-+ $(foreach F,int80 sysenter syscall,$(obj)/vsyscall-$F.so)
-+
-+targets := $(foreach F,int80 sysenter syscall,vsyscall-$F.o vsyscall-$F.so)
-+
-+include $(srctree)/scripts/Makefile.xen
-+
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/ia32/syscall32_syscall-xen.S ubuntu-gutsy-xen/arch/x86_64/ia32/syscall32_syscall-xen.S
---- ubuntu-gutsy/arch/x86_64/ia32/syscall32_syscall-xen.S 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/ia32/syscall32_syscall-xen.S 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,28 @@
-+/* 32bit VDSOs mapped into user space. */
-+
-+ .section ".init.data","aw"
-+
-+#ifdef USE_INT80
-+
-+ .globl syscall32_int80
-+ .globl syscall32_int80_end
-+
-+syscall32_int80:
-+ .incbin "arch/x86_64/ia32/vsyscall-int80.so"
-+syscall32_int80_end:
-+
-+#endif
-+
-+ .globl syscall32_syscall
-+ .globl syscall32_syscall_end
-+
-+syscall32_syscall:
-+ .incbin "arch/x86_64/ia32/vsyscall-syscall.so"
-+syscall32_syscall_end:
-+
-+ .globl syscall32_sysenter
-+ .globl syscall32_sysenter_end
-+
-+syscall32_sysenter:
-+ .incbin "arch/x86_64/ia32/vsyscall-sysenter.so"
-+syscall32_sysenter_end:
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/ia32/syscall32-xen.c ubuntu-gutsy-xen/arch/x86_64/ia32/syscall32-xen.c
---- ubuntu-gutsy/arch/x86_64/ia32/syscall32-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/ia32/syscall32-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,111 @@
-+/* Copyright 2002,2003 Andi Kleen, SuSE Labs */
-+
-+/* vsyscall handling for 32bit processes. Map a stub page into it
-+ on demand because 32bit cannot reach the kernel's fixmaps */
-+
-+#include <linux/mm.h>
-+#include <linux/string.h>
-+#include <linux/kernel.h>
-+#include <linux/gfp.h>
-+#include <linux/init.h>
-+#include <linux/stringify.h>
-+#include <linux/security.h>
-+#include <asm/proto.h>
-+#include <asm/tlbflush.h>
-+#include <asm/ia32_unistd.h>
-+#include <asm/vsyscall32.h>
-+
-+#ifdef USE_INT80
-+extern unsigned char syscall32_int80[], syscall32_int80_end[];
-+#endif
-+extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
-+extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
-+extern int sysctl_vsyscall32;
-+
-+static struct page *syscall32_pages[1];
-+#ifndef USE_INT80
-+static int use_sysenter = -1;
-+#endif
-+
-+struct linux_binprm;
-+
-+/* Setup a VMA at program startup for the vsyscall page */
-+int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
-+{
-+ struct mm_struct *mm = current->mm;
-+ int ret;
-+
-+ down_write(&mm->mmap_sem);
-+ /*
-+ * MAYWRITE to allow gdb to COW and set breakpoints
-+ *
-+ * Make sure the vDSO gets into every core dump.
-+ * Dumping its contents makes post-mortem fully interpretable later
-+ * without matching up the same kernel and hardware config to see
-+ * what PC values meant.
-+ */
-+ /* Could randomize here */
-+ ret = install_special_mapping(mm, VSYSCALL32_BASE, PAGE_SIZE,
-+ VM_READ|VM_EXEC|
-+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
-+ VM_ALWAYSDUMP,
-+ syscall32_pages);
-+ up_write(&mm->mmap_sem);
-+ return ret;
-+}
-+
-+const char *arch_vma_name(struct vm_area_struct *vma)
-+{
-+ if (vma->vm_start == VSYSCALL32_BASE &&
-+ vma->vm_mm && vma->vm_mm->task_size == IA32_PAGE_OFFSET)
-+ return "[vdso]";
-+ return NULL;
-+}
-+
-+static int __init init_syscall32(void)
-+{
-+ char *syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
-+ if (!syscall32_page)
-+ panic("Cannot allocate syscall32 page");
-+
-+ syscall32_pages[0] = virt_to_page(syscall32_page);
-+#ifdef USE_INT80
-+ /*
-+ * At this point we use int 0x80.
-+ */
-+ memcpy(syscall32_page, syscall32_int80,
-+ syscall32_int80_end - syscall32_int80);
-+#else
-+ if (use_sysenter > 0) {
-+ memcpy(syscall32_page, syscall32_sysenter,
-+ syscall32_sysenter_end - syscall32_sysenter);
-+ } else {
-+ memcpy(syscall32_page, syscall32_syscall,
-+ syscall32_syscall_end - syscall32_syscall);
-+ }
-+#endif
-+ return 0;
-+}
-+
-+/*
-+ * This must be done early in case we have an initrd containing 32-bit
-+ * binaries (e.g., hotplug). This could be pushed upstream to arch/x86_64.
-+ */
-+core_initcall(init_syscall32);
-+
-+/* May not be __init: called during resume */
-+void syscall32_cpu_init(void)
-+{
-+#ifndef USE_INT80
-+ if (use_sysenter < 0)
-+ use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
-+
-+ /* Load these always in case some future AMD CPU supports
-+ SYSENTER from compat mode too. */
-+ checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-+ checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
-+ checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
-+
-+ wrmsrl(MSR_CSTAR, ia32_cstar_target);
-+#endif
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/ia32/vsyscall-int80.S ubuntu-gutsy-xen/arch/x86_64/ia32/vsyscall-int80.S
---- ubuntu-gutsy/arch/x86_64/ia32/vsyscall-int80.S 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/ia32/vsyscall-int80.S 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,58 @@
-+/*
-+ * Code for the vsyscall page. This version uses the old int $0x80 method.
-+ *
-+ * NOTE:
-+ * 1) __kernel_vsyscall _must_ be first in this page.
-+ * 2) there are alignment constraints on this stub, see vsyscall-sigreturn.S
-+ * for details.
-+ */
-+#include <asm/ia32_unistd.h>
-+#include <asm/asm-offsets.h>
-+
-+ .code32
-+ .text
-+ .section .text.vsyscall,"ax"
-+ .globl __kernel_vsyscall
-+ .type __kernel_vsyscall,@function
-+__kernel_vsyscall:
-+.LSTART_vsyscall:
-+ int $0x80
-+ ret
-+.LEND_vsyscall:
-+ .size __kernel_vsyscall,.-.LSTART_vsyscall
-+ .previous
-+
-+ .section .eh_frame,"a",@progbits
-+.LSTARTFRAME:
-+ .long .LENDCIE-.LSTARTCIE
-+.LSTARTCIE:
-+ .long 0 /* CIE ID */
-+ .byte 1 /* Version number */
-+ .string "zR" /* NUL-terminated augmentation string */
-+ .uleb128 1 /* Code alignment factor */
-+ .sleb128 -4 /* Data alignment factor */
-+ .byte 8 /* Return address register column */
-+ .uleb128 1 /* Augmentation value length */
-+ .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
-+ .byte 0x0c /* DW_CFA_def_cfa */
-+ .uleb128 4
-+ .uleb128 4
-+ .byte 0x88 /* DW_CFA_offset, column 0x8 */
-+ .uleb128 1
-+ .align 4
-+.LENDCIE:
-+
-+ .long .LENDFDE1-.LSTARTFDE1 /* Length FDE */
-+.LSTARTFDE1:
-+ .long .LSTARTFDE1-.LSTARTFRAME /* CIE pointer */
-+ .long .LSTART_vsyscall-. /* PC-relative start address */
-+ .long .LEND_vsyscall-.LSTART_vsyscall
-+ .uleb128 0 /* Augmentation length */
-+ .align 4
-+.LENDFDE1:
-+
-+/*
-+ * Get the common code for the sigreturn entry points.
-+ */
-+#define SYSCALL_ENTER_KERNEL int $0x80
-+#include "vsyscall-sigreturn.S"
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/ia32/vsyscall-int80.so ubuntu-gutsy-xen/arch/x86_64/ia32/vsyscall-int80.so
---- ubuntu-gutsy/arch/x86_64/ia32/vsyscall-int80.so 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/ia32/vsyscall-int80.so 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,3 @@
-+ELF
-+
-+
-\ No newline at end of file
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/ia32/vsyscall-sigreturn.S ubuntu-gutsy-xen/arch/x86_64/ia32/vsyscall-sigreturn.S
---- ubuntu-gutsy/arch/x86_64/ia32/vsyscall-sigreturn.S 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/x86_64/ia32/vsyscall-sigreturn.S 2007-08-18 12:38:02.000000000 -0400
-@@ -139,5 +139,5 @@
- .align 4
- .LENDFDE3:
-
--#include "../../i386/kernel/vsyscall-note.S"
-+#include <vsyscall-note.S>
-
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/Kconfig ubuntu-gutsy-xen/arch/x86_64/Kconfig
---- ubuntu-gutsy/arch/x86_64/Kconfig 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/x86_64/Kconfig 2007-08-18 12:38:02.000000000 -0400
-@@ -183,6 +183,22 @@
-
- endchoice
-
-+config X86_64_XEN
-+ bool "Enable Xen compatible kernel"
-+ select SWIOTLB
-+ help
-+ This option will compile a kernel compatible with Xen hypervisor
-+
-+config X86_NO_TSS
-+ bool
-+ depends on X86_64_XEN
-+ default y
-+
-+config X86_NO_IDT
-+ bool
-+ depends on X86_64_XEN
-+ default y
-+
- #
- # Define implied options from the CPU selection here
- #
-@@ -203,6 +219,7 @@
-
- config X86_TSC
- bool
-+ depends on !X86_64_XEN
- default y
-
- config X86_GOOD_APIC
-@@ -251,7 +268,7 @@
-
- config X86_HT
- bool
-- depends on SMP && !MK8
-+ depends on SMP && !MK8 && !X86_64_XEN
- default y
-
- config MATH_EMULATION
-@@ -265,14 +282,22 @@
-
- config X86_IO_APIC
- bool
-+ depends !XEN_UNPRIVILEGED_GUEST
- default y
-
-+config X86_XEN_GENAPIC
-+ bool
-+ depends X86_64_XEN
-+ default XEN_PRIVILEGED_GUEST || SMP
-+
- config X86_LOCAL_APIC
- bool
-+ depends !XEN_UNPRIVILEGED_GUEST
- default y
-
- config MTRR
- bool "MTRR (Memory Type Range Register) support"
-+ depends on !XEN_UNPRIVILEGED_GUEST
- ---help---
- On Intel P6 family processors (Pentium Pro, Pentium II and later)
- the Memory Type Range Registers (MTRRs) may be used to control
-@@ -313,7 +338,7 @@
-
- config SCHED_SMT
- bool "SMT (Hyperthreading) scheduler support"
-- depends on SMP
-+ depends on SMP && !X86_64_XEN
- default n
- help
- SMT scheduler support improves the CPU scheduler's decision making
-@@ -323,7 +348,7 @@
-
- config SCHED_MC
- bool "Multi-core scheduler support"
-- depends on SMP
-+ depends on SMP && !X86_64_XEN
- default y
- help
- Multi-core scheduler support improves the CPU scheduler's decision
-@@ -334,7 +359,7 @@
-
- config NUMA
- bool "Non Uniform Memory Access (NUMA) Support"
-- depends on SMP
-+ depends on SMP && !X86_64_XEN
- help
- Enable NUMA (Non Uniform Memory Access) support. The kernel
- will try to allocate memory used by a CPU on the local memory
-@@ -390,7 +415,7 @@
-
- config ARCH_SPARSEMEM_ENABLE
- def_bool y
-- depends on (NUMA || EXPERIMENTAL)
-+ depends on (NUMA || EXPERIMENTAL) && !X86_64_XEN
-
- config ARCH_MEMORY_PROBE
- def_bool y
-@@ -418,6 +443,7 @@
- int "Maximum number of CPUs (2-255)"
- range 2 255
- depends on SMP
-+ default "16" if X86_64_XEN
- default "8"
- help
- This allows you to specify the maximum number of CPUs which this
-@@ -443,6 +469,7 @@
-
- config HPET_TIMER
- bool
-+ depends on !X86_64_XEN
- default y
- help
- Use the IA-PC HPET (High Precision Event Timer) to manage
-@@ -463,7 +490,7 @@
- default y
- select SWIOTLB
- select AGP
-- depends on PCI
-+ depends on PCI && !X86_64_XEN
- help
- Support for full DMA access of devices with 32bit memory access only
- on systems with more than 3GB. This is usually needed for USB,
-@@ -478,7 +505,7 @@
- config CALGARY_IOMMU
- bool "IBM Calgary IOMMU support"
- select SWIOTLB
-- depends on PCI && EXPERIMENTAL
-+ depends on PCI && !X86_64_XEN && EXPERIMENTAL
- help
- Support for hardware IOMMUs in IBM's xSeries x366 and x460
- systems. Needed to run systems with more than 3GB of memory
-@@ -516,6 +543,7 @@
-
- config X86_MCE
- bool "Machine check support" if EMBEDDED
-+ depends on !X86_64_XEN
- default y
- help
- Include a machine check error handler to report hardware errors.
-@@ -541,6 +569,7 @@
-
- config KEXEC
- bool "kexec system call"
-+ depends on !XEN_UNPRIVILEGED_GUEST
- help
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
-@@ -570,7 +599,7 @@
-
- config RELOCATABLE
- bool "Build a relocatable kernel(EXPERIMENTAL)"
-- depends on EXPERIMENTAL
-+ depends on EXPERIMENTAL && !X86_64_XEN
- help
- Builds a relocatable kernel. This enables loading and running
- a kernel binary from a different physical address than it has
-@@ -691,12 +720,17 @@
- default y
-
- menu "Power management options"
-+ depends on !XEN_UNPRIVILEGED_GUEST
-
-+if !X86_64_XEN
- source kernel/power/Kconfig
-+endif
-
- source "drivers/acpi/Kconfig"
-
-+if !X86_64_XEN
- source "arch/x86_64/kernel/cpufreq/Kconfig"
-+endif
-
- endmenu
-
-@@ -704,7 +738,7 @@
-
- config PCI
- bool "PCI support"
-- select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
-+ select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC && !X86_64_XEN)
-
- # x86-64 doesn't support PCI BIOS access from long mode so always go direct.
- config PCI_DIRECT
-@@ -716,6 +750,21 @@
- bool "Support mmconfig PCI config space access"
- depends on PCI && ACPI
-
-+config XEN_PCIDEV_FRONTEND
-+ bool "Xen PCI Frontend"
-+ depends on PCI && X86_64_XEN
-+ default y
-+ help
-+ The PCI device frontend driver allows the kernel to import arbitrary
-+ PCI devices from a PCI backend to support PCI driver domains.
-+
-+config XEN_PCIDEV_FE_DEBUG
-+ bool "Xen PCI Frontend Debugging"
-+ depends on XEN_PCIDEV_FRONTEND
-+ default n
-+ help
-+ Enables some debug statements within the PCI Frontend.
-+
- source "drivers/pci/pcie/Kconfig"
-
- source "drivers/pci/Kconfig"
-@@ -786,4 +835,6 @@
-
- source "crypto/Kconfig"
-
-+source "drivers/xen/Kconfig"
-+
- source "lib/Kconfig"
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/acpi/Makefile ubuntu-gutsy-xen/arch/x86_64/kernel/acpi/Makefile
---- ubuntu-gutsy/arch/x86_64/kernel/acpi/Makefile 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/acpi/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -7,3 +7,5 @@
- processor-y := ../../../i386/kernel/acpi/processor.o ../../../i386/kernel/acpi/cstate.o
- endif
-
-+boot-$(CONFIG_XEN) := ../../../i386/kernel/acpi/boot-xen.o
-+processor-$(CONFIG_XEN) := ../../../i386/kernel/acpi/processor.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/asm-offsets.c ubuntu-gutsy-xen/arch/x86_64/kernel/asm-offsets.c
---- ubuntu-gutsy/arch/x86_64/kernel/asm-offsets.c 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/asm-offsets.c 2007-08-18 12:38:02.000000000 -0400
-@@ -75,8 +75,10 @@
- DEFINE(pbe_address, offsetof(struct pbe, address));
- DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
- DEFINE(pbe_next, offsetof(struct pbe, next));
-+#ifndef CONFIG_X86_NO_TSS
- BLANK();
- DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
-+#endif
- BLANK();
- DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
- BLANK();
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/crash.c ubuntu-gutsy-xen/arch/x86_64/kernel/crash.c
---- ubuntu-gutsy/arch/x86_64/kernel/crash.c 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/crash.c 2007-08-18 12:38:02.000000000 -0400
-@@ -25,6 +25,7 @@
- #include <asm/hw_irq.h>
- #include <asm/mach_apic.h>
-
-+#ifndef CONFIG_XEN
- /* This keeps a track of which one is crashing cpu. */
- static int crashing_cpu;
-
-@@ -107,6 +108,7 @@
- /* There are no cpus to shootdown */
- }
- #endif
-+#endif /* CONFIG_XEN */
-
- void machine_crash_shutdown(struct pt_regs *regs)
- {
-@@ -122,14 +124,16 @@
- /* The kernel is broken so disable interrupts */
- local_irq_disable();
-
-+#ifndef CONFIG_XEN
- /* Make a note of crashing cpu. Will be used in NMI callback.*/
- crashing_cpu = smp_processor_id();
-+
- nmi_shootdown_cpus();
-
- if(cpu_has_apic)
- disable_local_APIC();
-
- disable_IO_APIC();
--
-+#endif /* CONFIG_XEN */
- crash_save_cpu(regs, smp_processor_id());
- }
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/e820-xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/e820-xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/e820-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/e820-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,816 @@
-+/*
-+ * Handle the memory map.
-+ * The functions here do the job until bootmem takes over.
-+ *
-+ * Getting sanitize_e820_map() in sync with i386 version by applying change:
-+ * - Provisions for empty E820 memory regions (reported by certain BIOSes).
-+ * Alex Achenbach <xela@slit.de>, December 2002.
-+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
-+ *
-+ */
-+#include <linux/kernel.h>
-+#include <linux/types.h>
-+#include <linux/init.h>
-+#include <linux/bootmem.h>
-+#include <linux/ioport.h>
-+#include <linux/string.h>
-+#include <linux/kexec.h>
-+#include <linux/module.h>
-+#include <linux/mm.h>
-+#include <linux/suspend.h>
-+#include <linux/pfn.h>
-+
-+#include <asm/pgtable.h>
-+#include <asm/page.h>
-+#include <asm/e820.h>
-+#include <asm/proto.h>
-+#include <asm/bootsetup.h>
-+#include <asm/sections.h>
-+#include <xen/interface/memory.h>
-+
-+struct e820map e820 __initdata;
-+#ifdef CONFIG_XEN
-+struct e820map machine_e820;
-+#endif
-+
-+/*
-+ * PFN of last memory page.
-+ */
-+unsigned long end_pfn;
-+EXPORT_SYMBOL(end_pfn);
-+
-+/*
-+ * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
-+ * The direct mapping extends to end_pfn_map, so that we can directly access
-+ * apertures, ACPI and other tables without having to play with fixmaps.
-+ */
-+unsigned long end_pfn_map;
-+
-+/*
-+ * Last pfn which the user wants to use.
-+ */
-+static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT;
-+
-+extern struct resource code_resource, data_resource;
-+
-+/* Check for some hardcoded bad areas that early boot is not allowed to touch */
-+static inline int bad_addr(unsigned long *addrp, unsigned long size)
-+{
-+ unsigned long addr = *addrp, last = addr + size;
-+
-+#ifndef CONFIG_XEN
-+ /* various gunk below that needed for SMP startup */
-+ if (addr < 0x8000) {
-+ *addrp = PAGE_ALIGN(0x8000);
-+ return 1;
-+ }
-+
-+ /* direct mapping tables of the kernel */
-+ if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
-+ *addrp = PAGE_ALIGN(table_end << PAGE_SHIFT);
-+ return 1;
-+ }
-+
-+ /* initrd */
-+#ifdef CONFIG_BLK_DEV_INITRD
-+ if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
-+ addr < INITRD_START+INITRD_SIZE) {
-+ *addrp = PAGE_ALIGN(INITRD_START + INITRD_SIZE);
-+ return 1;
-+ }
-+#endif
-+ /* kernel code */
-+ if (last >= __pa_symbol(&_text) && addr < __pa_symbol(&_end)) {
-+ *addrp = PAGE_ALIGN(__pa_symbol(&_end));
-+ return 1;
-+ }
-+
-+ if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
-+ *addrp = PAGE_ALIGN(ebda_addr + ebda_size);
-+ return 1;
-+ }
-+
-+#ifdef CONFIG_NUMA
-+ /* NUMA memory to node map */
-+ if (last >= nodemap_addr && addr < nodemap_addr + nodemap_size) {
-+ *addrp = nodemap_addr + nodemap_size;
-+ return 1;
-+ }
-+#endif
-+ /* XXX ramdisk image here? */
-+#else
-+ if (last < (table_end<<PAGE_SHIFT)) {
-+ *addrp = table_end << PAGE_SHIFT;
-+ return 1;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+/*
-+ * This function checks if any part of the range <start,end> is mapped
-+ * with type.
-+ */
-+int
-+e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
-+{
-+ int i;
-+#ifndef CONFIG_XEN
-+ for (i = 0; i < e820.nr_map; i++) {
-+ struct e820entry *ei = &e820.map[i];
-+#else
-+ if (!is_initial_xendomain())
-+ return 0;
-+ for (i = 0; i < machine_e820.nr_map; i++) {
-+ const struct e820entry *ei = &machine_e820.map[i];
-+#endif
-+ if (type && ei->type != type)
-+ continue;
-+ if (ei->addr >= end || ei->addr + ei->size <= start)
-+ continue;
-+ return 1;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(e820_any_mapped);
-+
-+/*
-+ * This function checks if the entire range <start,end> is mapped with type.
-+ *
-+ * Note: this function only works correct if the e820 table is sorted and
-+ * not-overlapping, which is the case
-+ */
-+int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
-+{
-+ int i;
-+
-+#ifndef CONFIG_XEN
-+ for (i = 0; i < e820.nr_map; i++) {
-+ struct e820entry *ei = &e820.map[i];
-+#else
-+ if (!is_initial_xendomain())
-+ return 0;
-+ for (i = 0; i < machine_e820.nr_map; i++) {
-+ const struct e820entry *ei = &machine_e820.map[i];
-+#endif
-+
-+ if (type && ei->type != type)
-+ continue;
-+ /* is the region (part) in overlap with the current region ?*/
-+ if (ei->addr >= end || ei->addr + ei->size <= start)
-+ continue;
-+
-+ /* if the region is at the beginning of <start,end> we move
-+ * start to the end of the region since it's ok until there
-+ */
-+ if (ei->addr <= start)
-+ start = ei->addr + ei->size;
-+ /* if start is now at or beyond end, we're done, full coverage */
-+ if (start >= end)
-+ return 1; /* we're done */
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Find a free area in a specific range.
-+ */
-+unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size)
-+{
-+ int i;
-+ for (i = 0; i < e820.nr_map; i++) {
-+ struct e820entry *ei = &e820.map[i];
-+ unsigned long addr = ei->addr, last;
-+ if (ei->type != E820_RAM)
-+ continue;
-+ if (addr < start)
-+ addr = start;
-+ if (addr > ei->addr + ei->size)
-+ continue;
-+ while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
-+ ;
-+ last = PAGE_ALIGN(addr) + size;
-+ if (last > ei->addr + ei->size)
-+ continue;
-+ if (last > end)
-+ continue;
-+ return addr;
-+ }
-+ return -1UL;
-+}
-+
-+/*
-+ * Find the highest page frame number we have available
-+ */
-+unsigned long __init e820_end_of_ram(void)
-+{
-+ unsigned long end_pfn = 0;
-+ end_pfn = find_max_pfn_with_active_regions();
-+
-+ if (end_pfn > end_pfn_map)
-+ end_pfn_map = end_pfn;
-+ if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
-+ end_pfn_map = MAXMEM>>PAGE_SHIFT;
-+ if (end_pfn > end_user_pfn)
-+ end_pfn = end_user_pfn;
-+ if (end_pfn > end_pfn_map)
-+ end_pfn = end_pfn_map;
-+
-+ printk("end_pfn_map = %lu\n", end_pfn_map);
-+ return end_pfn;
-+}
-+
-+/*
-+ * Find the hole size in the range.
-+ */
-+unsigned long __init e820_hole_size(unsigned long start, unsigned long end)
-+{
-+ unsigned long ram = 0;
-+ int i;
-+
-+ for (i = 0; i < e820.nr_map; i++) {
-+ struct e820entry *ei = &e820.map[i];
-+ unsigned long last, addr;
-+
-+ if (ei->type != E820_RAM ||
-+ ei->addr+ei->size <= start ||
-+ ei->addr >= end)
-+ continue;
-+
-+ addr = round_up(ei->addr, PAGE_SIZE);
-+ if (addr < start)
-+ addr = start;
-+
-+ last = round_down(ei->addr + ei->size, PAGE_SIZE);
-+ if (last >= end)
-+ last = end;
-+
-+ if (last > addr)
-+ ram += last - addr;
-+ }
-+ return ((end - start) - ram);
-+}
-+
-+/*
-+ * Mark e820 reserved areas as busy for the resource manager.
-+ */
-+void __init e820_reserve_resources(struct e820entry *e820, int nr_map)
-+{
-+ int i;
-+ for (i = 0; i < nr_map; i++) {
-+ struct resource *res;
-+ res = alloc_bootmem_low(sizeof(struct resource));
-+ switch (e820[i].type) {
-+ case E820_RAM: res->name = "System RAM"; break;
-+ case E820_ACPI: res->name = "ACPI Tables"; break;
-+ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
-+ default: res->name = "reserved";
-+ }
-+ res->start = e820[i].addr;
-+ res->end = res->start + e820[i].size - 1;
-+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-+ request_resource(&iomem_resource, res);
-+ if (e820[i].type == E820_RAM) {
-+ /*
-+ * We don't know which RAM region contains kernel data,
-+ * so we try it repeatedly and let the resource manager
-+ * test it.
-+ */
-+#ifndef CONFIG_XEN
-+ request_resource(res, &code_resource);
-+ request_resource(res, &data_resource);
-+#endif
-+#ifdef CONFIG_KEXEC
-+ if (crashk_res.start != crashk_res.end)
-+ request_resource(res, &crashk_res);
-+#ifdef CONFIG_XEN
-+ xen_machine_kexec_register_resources(res);
-+#endif
-+#endif
-+ }
-+ }
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Find the ranges of physical addresses that do not correspond to
-+ * e820 RAM areas and mark the corresponding pages as nosave for software
-+ * suspend and suspend to RAM.
-+ *
-+ * This function requires the e820 map to be sorted and without any
-+ * overlapping entries and assumes the first e820 area to be RAM.
-+ */
-+void __init e820_mark_nosave_regions(void)
-+{
-+ int i;
-+ unsigned long paddr;
-+
-+ paddr = round_down(e820.map[0].addr + e820.map[0].size, PAGE_SIZE);
-+ for (i = 1; i < e820.nr_map; i++) {
-+ struct e820entry *ei = &e820.map[i];
-+
-+ if (paddr < ei->addr)
-+ register_nosave_region(PFN_DOWN(paddr),
-+ PFN_UP(ei->addr));
-+
-+ paddr = round_down(ei->addr + ei->size, PAGE_SIZE);
-+ if (ei->type != E820_RAM)
-+ register_nosave_region(PFN_UP(ei->addr),
-+ PFN_DOWN(paddr));
-+
-+ if (paddr >= (end_pfn << PAGE_SHIFT))
-+ break;
-+ }
-+}
-+#endif
-+
-+/* Walk the e820 map and register active regions within a node */
-+void __init
-+e820_register_active_regions(int nid, unsigned long start_pfn,
-+ unsigned long end_pfn)
-+{
-+ int i;
-+ unsigned long ei_startpfn, ei_endpfn;
-+ for (i = 0; i < e820.nr_map; i++) {
-+ struct e820entry *ei = &e820.map[i];
-+ ei_startpfn = round_up(ei->addr, PAGE_SIZE) >> PAGE_SHIFT;
-+ ei_endpfn = round_down(ei->addr + ei->size, PAGE_SIZE)
-+ >> PAGE_SHIFT;
-+
-+ /* Skip map entries smaller than a page */
-+ if (ei_startpfn >= ei_endpfn)
-+ continue;
-+
-+ /* Check if end_pfn_map should be updated */
-+ if (ei->type != E820_RAM && ei_endpfn > end_pfn_map)
-+ end_pfn_map = ei_endpfn;
-+
-+ /* Skip if map is outside the node */
-+ if (ei->type != E820_RAM ||
-+ ei_endpfn <= start_pfn ||
-+ ei_startpfn >= end_pfn)
-+ continue;
-+
-+ /* Check for overlaps */
-+ if (ei_startpfn < start_pfn)
-+ ei_startpfn = start_pfn;
-+ if (ei_endpfn > end_pfn)
-+ ei_endpfn = end_pfn;
-+
-+ /* Obey end_user_pfn to save on memmap */
-+ if (ei_startpfn >= end_user_pfn)
-+ continue;
-+ if (ei_endpfn > end_user_pfn)
-+ ei_endpfn = end_user_pfn;
-+
-+ add_active_range(nid, ei_startpfn, ei_endpfn);
-+ }
-+}
-+
-+/*
-+ * Add a memory region to the kernel e820 map.
-+ */
-+void __init add_memory_region(unsigned long start, unsigned long size, int type)
-+{
-+ int x = e820.nr_map;
-+
-+ if (x == E820MAX) {
-+ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
-+ return;
-+ }
-+
-+ e820.map[x].addr = start;
-+ e820.map[x].size = size;
-+ e820.map[x].type = type;
-+ e820.nr_map++;
-+}
-+
-+void __init e820_print_map(char *who)
-+{
-+ int i;
-+
-+ for (i = 0; i < e820.nr_map; i++) {
-+ printk(" %s: %016Lx - %016Lx ", who,
-+ (unsigned long long) e820.map[i].addr,
-+ (unsigned long long) (e820.map[i].addr + e820.map[i].size));
-+ switch (e820.map[i].type) {
-+ case E820_RAM: printk("(usable)\n");
-+ break;
-+ case E820_RESERVED:
-+ printk("(reserved)\n");
-+ break;
-+ case E820_ACPI:
-+ printk("(ACPI data)\n");
-+ break;
-+ case E820_NVS:
-+ printk("(ACPI NVS)\n");
-+ break;
-+ default: printk("type %u\n", e820.map[i].type);
-+ break;
-+ }
-+ }
-+}
-+
-+/*
-+ * Sanitize the BIOS e820 map.
-+ *
-+ * Some e820 responses include overlapping entries. The following
-+ * replaces the original e820 map with a new one, removing overlaps.
-+ *
-+ */
-+static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
-+{
-+ struct change_member {
-+ struct e820entry *pbios; /* pointer to original bios entry */
-+ unsigned long long addr; /* address for this change point */
-+ };
-+ static struct change_member change_point_list[2*E820MAX] __initdata;
-+ static struct change_member *change_point[2*E820MAX] __initdata;
-+ static struct e820entry *overlap_list[E820MAX] __initdata;
-+ static struct e820entry new_bios[E820MAX] __initdata;
-+ struct change_member *change_tmp;
-+ unsigned long current_type, last_type;
-+ unsigned long long last_addr;
-+ int chgidx, still_changing;
-+ int overlap_entries;
-+ int new_bios_entry;
-+ int old_nr, new_nr, chg_nr;
-+ int i;
-+
-+ /*
-+ Visually we're performing the following (1,2,3,4 = memory types)...
-+
-+ Sample memory map (w/overlaps):
-+ ____22__________________
-+ ______________________4_
-+ ____1111________________
-+ _44_____________________
-+ 11111111________________
-+ ____________________33__
-+ ___________44___________
-+ __________33333_________
-+ ______________22________
-+ ___________________2222_
-+ _________111111111______
-+ _____________________11_
-+ _________________4______
-+
-+ Sanitized equivalent (no overlap):
-+ 1_______________________
-+ _44_____________________
-+ ___1____________________
-+ ____22__________________
-+ ______11________________
-+ _________1______________
-+ __________3_____________
-+ ___________44___________
-+ _____________33_________
-+ _______________2________
-+ ________________1_______
-+ _________________4______
-+ ___________________2____
-+ ____________________33__
-+ ______________________4_
-+ */
-+
-+ /* if there's only one memory region, don't bother */
-+ if (*pnr_map < 2)
-+ return -1;
-+
-+ old_nr = *pnr_map;
-+
-+ /* bail out if we find any unreasonable addresses in bios map */
-+ for (i=0; i<old_nr; i++)
-+ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
-+ return -1;
-+
-+ /* create pointers for initial change-point information (for sorting) */
-+ for (i=0; i < 2*old_nr; i++)
-+ change_point[i] = &change_point_list[i];
-+
-+ /* record all known change-points (starting and ending addresses),
-+ omitting those that are for empty memory regions */
-+ chgidx = 0;
-+ for (i=0; i < old_nr; i++) {
-+ if (biosmap[i].size != 0) {
-+ change_point[chgidx]->addr = biosmap[i].addr;
-+ change_point[chgidx++]->pbios = &biosmap[i];
-+ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
-+ change_point[chgidx++]->pbios = &biosmap[i];
-+ }
-+ }
-+ chg_nr = chgidx;
-+
-+ /* sort change-point list by memory addresses (low -> high) */
-+ still_changing = 1;
-+ while (still_changing) {
-+ still_changing = 0;
-+ for (i=1; i < chg_nr; i++) {
-+ /* if <current_addr> > <last_addr>, swap */
-+ /* or, if current=<start_addr> & last=<end_addr>, swap */
-+ if ((change_point[i]->addr < change_point[i-1]->addr) ||
-+ ((change_point[i]->addr == change_point[i-1]->addr) &&
-+ (change_point[i]->addr == change_point[i]->pbios->addr) &&
-+ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
-+ )
-+ {
-+ change_tmp = change_point[i];
-+ change_point[i] = change_point[i-1];
-+ change_point[i-1] = change_tmp;
-+ still_changing=1;
-+ }
-+ }
-+ }
-+
-+ /* create a new bios memory map, removing overlaps */
-+ overlap_entries=0; /* number of entries in the overlap table */
-+ new_bios_entry=0; /* index for creating new bios map entries */
-+ last_type = 0; /* start with undefined memory type */
-+ last_addr = 0; /* start with 0 as last starting address */
-+	/* loop through change-points, determining effect on the new bios map */
-+ for (chgidx=0; chgidx < chg_nr; chgidx++)
-+ {
-+ /* keep track of all overlapping bios entries */
-+ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
-+ {
-+ /* add map entry to overlap list (> 1 entry implies an overlap) */
-+ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
-+ }
-+ else
-+ {
-+ /* remove entry from list (order independent, so swap with last) */
-+ for (i=0; i<overlap_entries; i++)
-+ {
-+ if (overlap_list[i] == change_point[chgidx]->pbios)
-+ overlap_list[i] = overlap_list[overlap_entries-1];
-+ }
-+ overlap_entries--;
-+ }
-+ /* if there are overlapping entries, decide which "type" to use */
-+ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
-+ current_type = 0;
-+ for (i=0; i<overlap_entries; i++)
-+ if (overlap_list[i]->type > current_type)
-+ current_type = overlap_list[i]->type;
-+ /* continue building up new bios map based on this information */
-+ if (current_type != last_type) {
-+ if (last_type != 0) {
-+ new_bios[new_bios_entry].size =
-+ change_point[chgidx]->addr - last_addr;
-+ /* move forward only if the new size was non-zero */
-+ if (new_bios[new_bios_entry].size != 0)
-+ if (++new_bios_entry >= E820MAX)
-+ break; /* no more space left for new bios entries */
-+ }
-+ if (current_type != 0) {
-+ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
-+ new_bios[new_bios_entry].type = current_type;
-+ last_addr=change_point[chgidx]->addr;
-+ }
-+ last_type = current_type;
-+ }
-+ }
-+ new_nr = new_bios_entry; /* retain count for new bios entries */
-+
-+ /* copy new bios mapping into original location */
-+ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
-+ *pnr_map = new_nr;
-+
-+ return 0;
-+}
-+
-+/*
-+ * Copy the BIOS e820 map into a safe place.
-+ *
-+ * Sanity-check it while we're at it..
-+ *
-+ * If we're lucky and live on a modern system, the setup code
-+ * will have given us a memory map that we can use to properly
-+ * set up memory. If we aren't, we'll fake a memory map.
-+ */
-+static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
-+{
-+#ifndef CONFIG_XEN
-+ /* Only one memory region (or negative)? Ignore it */
-+ if (nr_map < 2)
-+ return -1;
-+#else
-+ BUG_ON(nr_map < 1);
-+#endif
-+
-+ do {
-+ unsigned long start = biosmap->addr;
-+ unsigned long size = biosmap->size;
-+ unsigned long end = start + size;
-+ unsigned long type = biosmap->type;
-+
-+ /* Overflow in 64 bits? Ignore the memory map. */
-+ if (start > end)
-+ return -1;
-+
-+ add_memory_region(start, size, type);
-+ } while (biosmap++,--nr_map);
-+ return 0;
-+}
-+
-+void early_panic(char *msg)
-+{
-+ early_printk(msg);
-+ panic(msg);
-+}
-+
-+#ifndef CONFIG_XEN
-+void __init setup_memory_region(void)
-+{
-+ /*
-+ * Try to copy the BIOS-supplied E820-map.
-+ *
-+ * Otherwise fake a memory map; one section from 0k->640k,
-+ * the next section from 1mb->appropriate_mem_k
-+ */
-+ sanitize_e820_map(E820_MAP, &E820_MAP_NR);
-+ if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0)
-+ early_panic("Cannot find a valid memory map");
-+ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-+ e820_print_map("BIOS-e820");
-+}
-+
-+#else /* CONFIG_XEN */
-+
-+void __init setup_memory_region(void)
-+{
-+ int rc;
-+ struct xen_memory_map memmap;
-+ /*
-+ * This is rather large for a stack variable but this early in
-+	 * the boot process we know we have plenty of slack space.
-+ */
-+ struct e820entry map[E820MAX];
-+
-+ memmap.nr_entries = E820MAX;
-+ set_xen_guest_handle(memmap.buffer, map);
-+
-+ rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
-+ if ( rc == -ENOSYS ) {
-+ memmap.nr_entries = 1;
-+ map[0].addr = 0ULL;
-+ map[0].size = xen_start_info->nr_pages << PAGE_SHIFT;
-+ /* 8MB slack (to balance backend allocations). */
-+ map[0].size += 8 << 20;
-+ map[0].type = E820_RAM;
-+ rc = 0;
-+ }
-+ BUG_ON(rc);
-+
-+ sanitize_e820_map(map, (char *)&memmap.nr_entries);
-+
-+ if (copy_e820_map(map, (char)memmap.nr_entries) < 0)
-+ early_panic("Cannot find a valid memory map");
-+
-+ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-+ e820_print_map("Xen");
-+}
-+#endif
-+
-+static int __init parse_memopt(char *p)
-+{
-+ int i;
-+ unsigned long current_end;
-+ unsigned long end;
-+
-+ if (!p)
-+ return -EINVAL;
-+ end_user_pfn = memparse(p, &p);
-+ end_user_pfn >>= PAGE_SHIFT;
-+
-+ end = end_user_pfn<<PAGE_SHIFT;
-+ i = e820.nr_map-1;
-+ current_end = e820.map[i].addr + e820.map[i].size;
-+
-+ if (current_end < end) {
-+ /*
-+ * The e820 map ends before our requested size so
-+ * extend the final entry to the requested address.
-+ */
-+ if (e820.map[i].type == E820_RAM)
-+ e820.map[i].size = end - e820.map[i].addr;
-+ else
-+ add_memory_region(current_end, end - current_end, E820_RAM);
-+ }
-+
-+ return 0;
-+}
-+early_param("mem", parse_memopt);
-+
-+static int userdef __initdata;
-+
-+static int __init parse_memmap_opt(char *p)
-+{
-+ char *oldp;
-+ unsigned long long start_at, mem_size;
-+
-+ if (!strcmp(p, "exactmap")) {
-+#ifdef CONFIG_CRASH_DUMP
-+ /* If we are doing a crash dump, we
-+ * still need to know the real mem
-+ * size before original memory map is
-+ * reset.
-+ */
-+ e820_register_active_regions(0, 0, -1UL);
-+ saved_max_pfn = e820_end_of_ram();
-+ remove_all_active_ranges();
-+#endif
-+ end_pfn_map = 0;
-+ e820.nr_map = 0;
-+ userdef = 1;
-+ return 0;
-+ }
-+
-+ oldp = p;
-+ mem_size = memparse(p, &p);
-+ if (p == oldp)
-+ return -EINVAL;
-+ if (*p == '@') {
-+ start_at = memparse(p+1, &p);
-+ add_memory_region(start_at, mem_size, E820_RAM);
-+ } else if (*p == '#') {
-+ start_at = memparse(p+1, &p);
-+ add_memory_region(start_at, mem_size, E820_ACPI);
-+ } else if (*p == '$') {
-+ start_at = memparse(p+1, &p);
-+ add_memory_region(start_at, mem_size, E820_RESERVED);
-+ } else {
-+ end_user_pfn = (mem_size >> PAGE_SHIFT);
-+ }
-+ return *p == '\0' ? 0 : -EINVAL;
-+}
-+early_param("memmap", parse_memmap_opt);
-+
-+void __init finish_e820_parsing(void)
-+{
-+ if (userdef) {
-+ printk(KERN_INFO "user-defined physical RAM map:\n");
-+ e820_print_map("user");
-+ }
-+}
-+
-+unsigned long pci_mem_start = 0xaeedbabe;
-+EXPORT_SYMBOL(pci_mem_start);
-+
-+/*
-+ * Search for the biggest gap in the low 32 bits of the e820
-+ * memory space. We pass this space to PCI to assign MMIO resources
-+ * for hotplug or unconfigured devices in.
-+ * Hopefully the BIOS left enough space for it.
-+ */
-+__init void e820_setup_gap(struct e820entry *e820, int nr_map)
-+{
-+ unsigned long gapstart, gapsize, round;
-+ unsigned long last;
-+ int i;
-+ int found = 0;
-+
-+ last = 0x100000000ull;
-+ gapstart = 0x10000000;
-+ gapsize = 0x400000;
-+ i = nr_map;
-+ while (--i >= 0) {
-+ unsigned long long start = e820[i].addr;
-+ unsigned long long end = start + e820[i].size;
-+
-+ /*
-+ * Since "last" is at most 4GB, we know we'll
-+ * fit in 32 bits if this condition is true
-+ */
-+ if (last > end) {
-+ unsigned long gap = last - end;
-+
-+ if (gap > gapsize) {
-+ gapsize = gap;
-+ gapstart = end;
-+ found = 1;
-+ }
-+ }
-+ if (start < last)
-+ last = start;
-+ }
-+
-+ if (!found) {
-+ gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
-+ printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
-+ KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
-+ }
-+
-+ /*
-+ * See how much we want to round up: start off with
-+ * rounding to the next 1MB area.
-+ */
-+ round = 0x100000;
-+ while ((gapsize >> 4) > round)
-+ round += round;
-+ /* Fun with two's complement */
-+ pci_mem_start = (gapstart + round) & -round;
-+
-+ printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
-+ pci_mem_start, gapstart, gapsize);
-+}
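
The rounding at the end of e820_setup_gap() above is a two's-complement trick:
for a power-of-two "round", -round equals ~(round - 1), so
(gapstart + round) & -round aligns the gap start up to the next multiple of
"round". A stand-alone sketch of the same computation, with made-up values
(illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned long gapstart = 0xe0000000UL;	/* hypothetical gap start */
	unsigned long gapsize  = 0x20000000UL;	/* hypothetical 512MB gap */
	unsigned long round = 0x100000;		/* start at 1MB, as in the patch */

	/* grow "round" until it is at least gapsize/16 (stays a power of two) */
	while ((gapsize >> 4) > round)
		round += round;

	/* power-of-two round: -round == ~(round - 1), so this rounds up */
	unsigned long pci_mem_start = (gapstart + round) & -round;

	printf("round=%#lx pci_mem_start=%#lx\n", round, pci_mem_start);
	return 0;
}
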
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/early_printk-xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/early_printk-xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/early_printk-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/early_printk-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,287 @@
-+#include <linux/console.h>
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/string.h>
-+#include <linux/screen_info.h>
-+#include <asm/io.h>
-+#include <asm/processor.h>
-+#include <asm/fcntl.h>
-+
-+/* Simple VGA output */
-+
-+#ifdef __i386__
-+#include <asm/setup.h>
-+#else
-+#include <asm/bootsetup.h>
-+#endif
-+#define VGABASE (__ISA_IO_base + 0xb8000)
-+
-+#ifndef CONFIG_XEN
-+static int max_ypos = 25, max_xpos = 80;
-+static int current_ypos = 25, current_xpos = 0;
-+
-+static void early_vga_write(struct console *con, const char *str, unsigned n)
-+{
-+ char c;
-+ int i, k, j;
-+
-+ while ((c = *str++) != '\0' && n-- > 0) {
-+ if (current_ypos >= max_ypos) {
-+ /* scroll 1 line up */
-+ for (k = 1, j = 0; k < max_ypos; k++, j++) {
-+ for (i = 0; i < max_xpos; i++) {
-+ writew(readw(VGABASE+2*(max_xpos*k+i)),
-+ VGABASE + 2*(max_xpos*j + i));
-+ }
-+ }
-+ for (i = 0; i < max_xpos; i++)
-+ writew(0x720, VGABASE + 2*(max_xpos*j + i));
-+ current_ypos = max_ypos-1;
-+ }
-+ if (c == '\n') {
-+ current_xpos = 0;
-+ current_ypos++;
-+ } else if (c != '\r') {
-+ writew(((0x7 << 8) | (unsigned short) c),
-+ VGABASE + 2*(max_xpos*current_ypos +
-+ current_xpos++));
-+ if (current_xpos >= max_xpos) {
-+ current_xpos = 0;
-+ current_ypos++;
-+ }
-+ }
-+ }
-+}
-+
-+static struct console early_vga_console = {
-+ .name = "earlyvga",
-+ .write = early_vga_write,
-+ .flags = CON_PRINTBUFFER,
-+ .index = -1,
-+};
-+
-+/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
-+
-+static int early_serial_base = 0x3f8; /* ttyS0 */
-+
-+#define XMTRDY 0x20
-+
-+#define DLAB 0x80
-+
-+#define TXR 0 /* Transmit register (WRITE) */
-+#define RXR 0 /* Receive register (READ) */
-+#define IER 1 /* Interrupt Enable */
-+#define IIR 2 /* Interrupt ID */
-+#define FCR 2 /* FIFO control */
-+#define LCR 3 /* Line control */
-+#define MCR 4 /* Modem control */
-+#define LSR 5 /* Line Status */
-+#define MSR 6 /* Modem Status */
-+#define DLL 0 /* Divisor Latch Low */
-+#define DLH 1 /* Divisor latch High */
-+
-+static int early_serial_putc(unsigned char ch)
-+{
-+ unsigned timeout = 0xffff;
-+ while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
-+ cpu_relax();
-+ outb(ch, early_serial_base + TXR);
-+ return timeout ? 0 : -1;
-+}
-+
-+static void early_serial_write(struct console *con, const char *s, unsigned n)
-+{
-+ while (*s && n-- > 0) {
-+ if (*s == '\n')
-+ early_serial_putc('\r');
-+ early_serial_putc(*s);
-+ s++;
-+ }
-+}
-+
-+#define DEFAULT_BAUD 9600
-+
-+static __init void early_serial_init(char *s)
-+{
-+ unsigned char c;
-+ unsigned divisor;
-+ unsigned baud = DEFAULT_BAUD;
-+ char *e;
-+
-+ if (*s == ',')
-+ ++s;
-+
-+ if (*s) {
-+ unsigned port;
-+ if (!strncmp(s,"0x",2)) {
-+ early_serial_base = simple_strtoul(s, &e, 16);
-+ } else {
-+ static int bases[] = { 0x3f8, 0x2f8 };
-+
-+ if (!strncmp(s,"ttyS",4))
-+ s += 4;
-+ port = simple_strtoul(s, &e, 10);
-+ if (port > 1 || s == e)
-+ port = 0;
-+ early_serial_base = bases[port];
-+ }
-+ s += strcspn(s, ",");
-+ if (*s == ',')
-+ s++;
-+ }
-+
-+ outb(0x3, early_serial_base + LCR); /* 8n1 */
-+ outb(0, early_serial_base + IER); /* no interrupt */
-+ outb(0, early_serial_base + FCR); /* no fifo */
-+ outb(0x3, early_serial_base + MCR); /* DTR + RTS */
-+
-+ if (*s) {
-+ baud = simple_strtoul(s, &e, 0);
-+ if (baud == 0 || s == e)
-+ baud = DEFAULT_BAUD;
-+ }
-+
-+ divisor = 115200 / baud;
-+ c = inb(early_serial_base + LCR);
-+ outb(c | DLAB, early_serial_base + LCR);
-+ outb(divisor & 0xff, early_serial_base + DLL);
-+ outb((divisor >> 8) & 0xff, early_serial_base + DLH);
-+ outb(c & ~DLAB, early_serial_base + LCR);
-+}
-+
-+#else /* CONFIG_XEN */
-+
-+static void
-+early_serial_write(struct console *con, const char *s, unsigned count)
-+{
-+ int n;
-+
-+ while (count > 0) {
-+ n = HYPERVISOR_console_io(CONSOLEIO_write, count, (char *)s);
-+ if (n <= 0)
-+ break;
-+ count -= n;
-+ s += n;
-+ }
-+}
-+
-+static __init void early_serial_init(char *s)
-+{
-+}
-+
-+/*
-+ * No early VGA console on Xen, as we do not have convenient ISA-space
-+ * mappings. Someone should fix this for domain 0. For now, use fake serial.
-+ */
-+#define early_vga_console early_serial_console
-+
-+#endif
-+
-+static struct console early_serial_console = {
-+ .name = "earlyser",
-+ .write = early_serial_write,
-+ .flags = CON_PRINTBUFFER,
-+ .index = -1,
-+};
-+
-+/* Console interface to a host file on AMD's SimNow! */
-+
-+static int simnow_fd;
-+
-+enum {
-+ MAGIC1 = 0xBACCD00A,
-+ MAGIC2 = 0xCA110000,
-+ XOPEN = 5,
-+ XWRITE = 4,
-+};
-+
-+static noinline long simnow(long cmd, long a, long b, long c)
-+{
-+ long ret;
-+ asm volatile("cpuid" :
-+ "=a" (ret) :
-+ "b" (a), "c" (b), "d" (c), "0" (MAGIC1), "D" (cmd + MAGIC2));
-+ return ret;
-+}
-+
-+static void __init simnow_init(char *str)
-+{
-+ char *fn = "klog";
-+ if (*str == '=')
-+ fn = ++str;
-+ /* error ignored */
-+ simnow_fd = simnow(XOPEN, (unsigned long)fn, O_WRONLY|O_APPEND|O_CREAT, 0644);
-+}
-+
-+static void simnow_write(struct console *con, const char *s, unsigned n)
-+{
-+ simnow(XWRITE, simnow_fd, (unsigned long)s, n);
-+}
-+
-+static struct console simnow_console = {
-+ .name = "simnow",
-+ .write = simnow_write,
-+ .flags = CON_PRINTBUFFER,
-+ .index = -1,
-+};
-+
-+/* Direct interface for emergencies */
-+struct console *early_console = &early_vga_console;
-+static int early_console_initialized = 0;
-+
-+void early_printk(const char *fmt, ...)
-+{
-+ char buf[512];
-+ int n;
-+ va_list ap;
-+
-+ va_start(ap,fmt);
-+ n = vscnprintf(buf,512,fmt,ap);
-+ early_console->write(early_console,buf,n);
-+ va_end(ap);
-+}
-+
-+static int __initdata keep_early;
-+
-+static int __init setup_early_printk(char *buf)
-+{
-+ if (!buf)
-+ return 0;
-+
-+ if (early_console_initialized)
-+ return 0;
-+ early_console_initialized = 1;
-+
-+ if (strstr(buf, "keep"))
-+ keep_early = 1;
-+
-+ if (!strncmp(buf, "serial", 6)) {
-+ early_serial_init(buf + 6);
-+ early_console = &early_serial_console;
-+ } else if (!strncmp(buf, "ttyS", 4)) {
-+ early_serial_init(buf);
-+ early_console = &early_serial_console;
-+ } else if (!strncmp(buf, "vga", 3)
-+#ifndef CONFIG_XEN
-+ && SCREEN_INFO.orig_video_isVGA == 1) {
-+ max_xpos = SCREEN_INFO.orig_video_cols;
-+ max_ypos = SCREEN_INFO.orig_video_lines;
-+ current_ypos = SCREEN_INFO.orig_y;
-+#else
-+ || !strncmp(buf, "xen", 3)) {
-+#endif
-+ early_console = &early_vga_console;
-+ } else if (!strncmp(buf, "simnow", 6)) {
-+ simnow_init(buf + 6);
-+ early_console = &simnow_console;
-+ keep_early = 1;
-+ }
-+
-+ if (keep_early)
-+ early_console->flags &= ~CON_BOOT;
-+ else
-+ early_console->flags |= CON_BOOT;
-+ register_console(early_console);
-+ return 0;
-+}
-+early_param("earlyprintk", setup_early_printk);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/entry-xen.S ubuntu-gutsy-xen/arch/x86_64/kernel/entry-xen.S
---- ubuntu-gutsy/arch/x86_64/kernel/entry-xen.S 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/entry-xen.S 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,1270 @@
-+/*
-+ * linux/arch/x86_64/entry.S
-+ *
-+ * Copyright (C) 1991, 1992 Linus Torvalds
-+ * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
-+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
-+ * Jun Nakajima <jun.nakajima@intel.com>
-+ * Asit Mallick <asit.k.mallick@intel.com>
-+ * Modified for Xen
-+ */
-+
-+/*
-+ * entry.S contains the system-call and fault low-level handling routines.
-+ *
-+ * NOTE: This code handles signal-recognition, which happens every time
-+ * after an interrupt and after each system call.
-+ *
-+ * Normal syscalls and interrupts don't save a full stack frame; this is
-+ * only done for syscall tracing, signals or fork/exec et al.
-+ *
-+ * A note on terminology:
-+ * - top of stack: Architecture defined interrupt frame from SS to RIP
-+ * at the top of the kernel process stack.
-+ * - partial stack frame: partially saved registers up to R11.
-+ * - full stack frame: Like partial stack frame, but all registers saved.
-+ *
-+ * Some macro usage:
-+ * - CFI macros are used to generate dwarf2 unwind information for better
-+ * backtraces. They don't change any code.
-+ * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
-+ * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
-+ * There are unfortunately lots of special cases where some registers
-+ * are not touched. The macro is a big mess that should be cleaned up.
-+ * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
-+ * Gives a full stack frame.
-+ * - ENTRY/END Define functions in the symbol table.
-+ * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
-+ * frame that is otherwise undefined after a SYSCALL
-+ * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
-+ * - errorentry/paranoidentry/zeroentry - Define exception entry points.
-+ */
-+
-+#include <linux/linkage.h>
-+#include <asm/segment.h>
-+#include <asm/cache.h>
-+#include <asm/errno.h>
-+#include <asm/dwarf2.h>
-+#include <asm/calling.h>
-+#include <asm/asm-offsets.h>
-+#include <asm/msr.h>
-+#include <asm/unistd.h>
-+#include <asm/thread_info.h>
-+#include <asm/hw_irq.h>
-+#include <asm/page.h>
-+#include <asm/irqflags.h>
-+#include <asm/errno.h>
-+#include <xen/interface/arch-x86_64.h>
-+#include <xen/interface/features.h>
-+
-+#include "irq_vectors.h"
-+
-+#include "xen_entry.S"
-+
-+ .code64
-+
-+#ifndef CONFIG_PREEMPT
-+#define retint_kernel retint_restore_args
-+#endif
-+
-+
-+.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
-+#ifdef CONFIG_TRACE_IRQFLAGS
-+ bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
-+ jnc 1f
-+ TRACE_IRQS_ON
-+1:
-+#endif
-+.endm
-+
-+NMI_MASK = 0x80000000
-+
-+/*
-+ * C code is not supposed to know about undefined top of stack. Every time
-+ * a C function with a pt_regs argument is called from the SYSCALL based
-+ * fast path FIXUP_TOP_OF_STACK is needed.
-+ * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
-+ * manipulation.
-+ */
-+
-+ /* %rsp:at FRAMEEND */
-+ .macro FIXUP_TOP_OF_STACK tmp
-+ movq $__USER_CS,CS(%rsp)
-+ movq $-1,RCX(%rsp)
-+ .endm
-+
-+ .macro RESTORE_TOP_OF_STACK tmp,offset=0
-+ .endm
-+
-+ .macro FAKE_STACK_FRAME child_rip
-+ /* push in order ss, rsp, eflags, cs, rip */
-+ xorl %eax, %eax
-+ pushq %rax /* ss */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ /*CFI_REL_OFFSET ss,0*/
-+ pushq %rax /* rsp */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ CFI_REL_OFFSET rsp,0
-+ pushq $(1<<9) /* eflags - interrupts on */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ /*CFI_REL_OFFSET rflags,0*/
-+ pushq $__KERNEL_CS /* cs */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ /*CFI_REL_OFFSET cs,0*/
-+ pushq \child_rip /* rip */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ CFI_REL_OFFSET rip,0
-+ pushq %rax /* orig rax */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ .endm
-+
-+ .macro UNFAKE_STACK_FRAME
-+ addq $8*6, %rsp
-+ CFI_ADJUST_CFA_OFFSET -(6*8)
-+ .endm
-+
-+ .macro CFI_DEFAULT_STACK start=1,adj=0
-+ .if \start
-+ CFI_STARTPROC simple
-+ CFI_SIGNAL_FRAME
-+ CFI_DEF_CFA rsp,SS+8-(\adj*ARGOFFSET)
-+ .else
-+ CFI_DEF_CFA_OFFSET SS+8-(\adj*ARGOFFSET)
-+ .endif
-+ .if \adj == 0
-+ CFI_REL_OFFSET r15,R15
-+ CFI_REL_OFFSET r14,R14
-+ CFI_REL_OFFSET r13,R13
-+ CFI_REL_OFFSET r12,R12
-+ CFI_REL_OFFSET rbp,RBP
-+ CFI_REL_OFFSET rbx,RBX
-+ .endif
-+ CFI_REL_OFFSET r11,R11
-+ CFI_REL_OFFSET r10,R10
-+ CFI_REL_OFFSET r9,R9
-+ CFI_REL_OFFSET r8,R8
-+ CFI_REL_OFFSET rax,RAX
-+ CFI_REL_OFFSET rcx,RCX
-+ CFI_REL_OFFSET rdx,RDX
-+ CFI_REL_OFFSET rsi,RSI
-+ CFI_REL_OFFSET rdi,RDI
-+ CFI_REL_OFFSET rip,RIP
-+ /*CFI_REL_OFFSET cs,CS*/
-+ /*CFI_REL_OFFSET rflags,EFLAGS*/
-+ CFI_REL_OFFSET rsp,RSP
-+ /*CFI_REL_OFFSET ss,SS*/
-+ .endm
-+
-+ /*
-+ * Must be consistent with the definition in arch-x86/xen-x86_64.h:
-+ * struct iret_context {
-+ * u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
-+ * };
-+ * with rax, r11, and rcx being taken care of in the hypercall stub.
-+ */
-+ .macro HYPERVISOR_IRET flag
-+ testb $3,1*8(%rsp)
-+ jnz 2f
-+ testl $NMI_MASK,2*8(%rsp)
-+ jnz 2f
-+
-+ cmpb $0,(xen_features+XENFEAT_supervisor_mode_kernel)(%rip)
-+ jne 1f
-+
-+ /* Direct iret to kernel space. Correct CS and SS. */
-+ orl $3,1*8(%rsp)
-+ orl $3,4*8(%rsp)
-+1: iretq
-+
-+2: /* Slow iret via hypervisor. */
-+ andl $~NMI_MASK, 2*8(%rsp)
-+ pushq $\flag
-+ jmp hypercall_page + (__HYPERVISOR_iret * 32)
-+ .endm
-+
-+/*
-+ * A newly forked process directly context switches into this.
-+ */
-+/* rdi: prev */
-+ENTRY(ret_from_fork)
-+ CFI_DEFAULT_STACK
-+ push kernel_eflags(%rip)
-+ CFI_ADJUST_CFA_OFFSET 4
-+ popf # reset kernel eflags
-+ CFI_ADJUST_CFA_OFFSET -4
-+ call schedule_tail
-+ GET_THREAD_INFO(%rcx)
-+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
-+ jnz rff_trace
-+rff_action:
-+ RESTORE_REST
-+ testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
-+ je int_ret_from_sys_call
-+ testl $_TIF_IA32,threadinfo_flags(%rcx)
-+ jnz int_ret_from_sys_call
-+ RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
-+ jmp ret_from_sys_call
-+rff_trace:
-+ movq %rsp,%rdi
-+ call syscall_trace_leave
-+ GET_THREAD_INFO(%rcx)
-+ jmp rff_action
-+ CFI_ENDPROC
-+END(ret_from_fork)
-+
-+/*
-+ * initial frame state for interrupts and exceptions
-+ */
-+ .macro _frame ref
-+ CFI_STARTPROC simple
-+ CFI_SIGNAL_FRAME
-+ CFI_DEF_CFA rsp,SS+8-\ref
-+ /*CFI_REL_OFFSET ss,SS-\ref*/
-+ CFI_REL_OFFSET rsp,RSP-\ref
-+ /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
-+ /*CFI_REL_OFFSET cs,CS-\ref*/
-+ CFI_REL_OFFSET rip,RIP-\ref
-+ .endm
-+
-+/*
-+ * System call entry. Up to 6 arguments in registers are supported.
-+ *
-+ * SYSCALL does not save anything on the stack and does not change the
-+ * stack pointer.
-+ */
-+
-+/*
-+ * Register setup:
-+ * rax system call number
-+ * rdi arg0
-+ * rcx return address for syscall/sysret, C arg3
-+ * rsi arg1
-+ * rdx arg2
-+ * r10 arg3 (--> moved to rcx for C)
-+ * r8 arg4
-+ * r9 arg5
-+ * r11 eflags for syscall/sysret, temporary for C
-+ * r12-r15,rbp,rbx saved by C code, not touched.
-+ *
-+ * Interrupts are off on entry.
-+ * Only called from user space.
-+ *
-+ * XXX if we had a free scratch register we could save the RSP into the stack frame
-+ * and report it properly in ps. Unfortunately we haven't.
-+ *
-+ * When the user can change the frames, always force IRET. That is because
-+ * it deals with uncanonical addresses better. SYSRET has trouble
-+ * with them due to bugs in both AMD and Intel CPUs.
-+ */
-+
-+ENTRY(system_call)
-+ _frame (RIP-0x10)
-+ SAVE_ARGS -8,0
-+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
-+ /*
-+ * No need to follow this irqs off/on section - it's straight
-+ * and short:
-+ */
-+ XEN_UNBLOCK_EVENTS(%r11)
-+ GET_THREAD_INFO(%rcx)
-+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
-+ jnz tracesys
-+ cmpq $__NR_syscall_max,%rax
-+ ja badsys
-+ movq %r10,%rcx
-+ call *sys_call_table(,%rax,8) # XXX: rip relative
-+ movq %rax,RAX-ARGOFFSET(%rsp)
-+/*
-+ * Syscall return path ending with SYSRET (fast path)
-+ * Has incomplete stack frame and undefined top of stack.
-+ */
-+ret_from_sys_call:
-+ movl $_TIF_ALLWORK_MASK,%edi
-+ /* edi: flagmask */
-+sysret_check:
-+ GET_THREAD_INFO(%rcx)
-+ XEN_BLOCK_EVENTS(%rsi)
-+ TRACE_IRQS_OFF
-+ movl threadinfo_flags(%rcx),%edx
-+ andl %edi,%edx
-+ jnz sysret_careful
-+ CFI_REMEMBER_STATE
-+ /*
-+ * sysretq will re-enable interrupts:
-+ */
-+ TRACE_IRQS_ON
-+ XEN_UNBLOCK_EVENTS(%rsi)
-+ RESTORE_ARGS 0,8,0
-+ HYPERVISOR_IRET VGCF_IN_SYSCALL
-+
-+ CFI_RESTORE_STATE
-+ /* Handle reschedules */
-+ /* edx: work, edi: workmask */
-+sysret_careful:
-+ bt $TIF_NEED_RESCHED,%edx
-+ jnc sysret_signal
-+ TRACE_IRQS_ON
-+ XEN_UNBLOCK_EVENTS(%rsi)
-+ pushq %rdi
-+ CFI_ADJUST_CFA_OFFSET 8
-+ call schedule
-+ popq %rdi
-+ CFI_ADJUST_CFA_OFFSET -8
-+ jmp sysret_check
-+
-+ /* Handle a signal */
-+sysret_signal:
-+ TRACE_IRQS_ON
-+/* sti */
-+ XEN_UNBLOCK_EVENTS(%rsi)
-+ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
-+ jz 1f
-+
-+ /* Really a signal */
-+ /* edx: work flags (arg3) */
-+ leaq do_notify_resume(%rip),%rax
-+ leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
-+ xorl %esi,%esi # oldset -> arg2
-+ call ptregscall_common
-+1: movl $_TIF_NEED_RESCHED,%edi
-+ /* Use IRET because user could have changed frame. This
-+ works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
-+ XEN_BLOCK_EVENTS(%rsi)
-+ TRACE_IRQS_OFF
-+ jmp int_with_check
-+
-+badsys:
-+ movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
-+ jmp ret_from_sys_call
-+
-+ /* Do syscall tracing */
-+tracesys:
-+ SAVE_REST
-+ movq $-ENOSYS,RAX(%rsp)
-+ FIXUP_TOP_OF_STACK %rdi
-+ movq %rsp,%rdi
-+ call syscall_trace_enter
-+ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
-+ RESTORE_REST
-+ cmpq $__NR_syscall_max,%rax
-+ movq $-ENOSYS,%rcx
-+ cmova %rcx,%rax
-+ ja 1f
-+ movq %r10,%rcx /* fixup for C */
-+ call *sys_call_table(,%rax,8)
-+1: movq %rax,RAX-ARGOFFSET(%rsp)
-+ /* Use IRET because user could have changed frame */
-+
-+/*
-+ * Syscall return path ending with IRET.
-+ * Has correct top of stack, but partial stack frame.
-+ */
-+ .globl int_ret_from_sys_call
-+int_ret_from_sys_call:
-+ XEN_BLOCK_EVENTS(%rsi)
-+ TRACE_IRQS_OFF
-+ testb $3,CS-ARGOFFSET(%rsp)
-+ jnz 1f
-+ /* Need to set the proper %ss (not NULL) for ring 3 iretq */
-+ movl $__KERNEL_DS,SS-ARGOFFSET(%rsp)
-+	jmp retint_restore_args	# return from ring3 kernel
-+1:
-+ movl $_TIF_ALLWORK_MASK,%edi
-+ /* edi: mask to check */
-+int_with_check:
-+ GET_THREAD_INFO(%rcx)
-+ movl threadinfo_flags(%rcx),%edx
-+ andl %edi,%edx
-+ jnz int_careful
-+ andl $~TS_COMPAT,threadinfo_status(%rcx)
-+ jmp retint_restore_args
-+
-+ /* Either reschedule or signal or syscall exit tracking needed. */
-+ /* First do a reschedule test. */
-+ /* edx: work, edi: workmask */
-+int_careful:
-+ bt $TIF_NEED_RESCHED,%edx
-+ jnc int_very_careful
-+ TRACE_IRQS_ON
-+/* sti */
-+ XEN_UNBLOCK_EVENTS(%rsi)
-+ pushq %rdi
-+ CFI_ADJUST_CFA_OFFSET 8
-+ call schedule
-+ popq %rdi
-+ CFI_ADJUST_CFA_OFFSET -8
-+ XEN_BLOCK_EVENTS(%rsi)
-+ TRACE_IRQS_OFF
-+ jmp int_with_check
-+
-+ /* handle signals and tracing -- both require a full stack frame */
-+int_very_careful:
-+ TRACE_IRQS_ON
-+/* sti */
-+ XEN_UNBLOCK_EVENTS(%rsi)
-+ SAVE_REST
-+ /* Check for syscall exit trace */
-+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
-+ jz int_signal
-+ pushq %rdi
-+ CFI_ADJUST_CFA_OFFSET 8
-+ leaq 8(%rsp),%rdi # &ptregs -> arg1
-+ call syscall_trace_leave
-+ popq %rdi
-+ CFI_ADJUST_CFA_OFFSET -8
-+ andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
-+ jmp int_restore_rest
-+
-+int_signal:
-+ testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
-+ jz 1f
-+ movq %rsp,%rdi # &ptregs -> arg1
-+ xorl %esi,%esi # oldset -> arg2
-+ call do_notify_resume
-+1: movl $_TIF_NEED_RESCHED,%edi
-+int_restore_rest:
-+ RESTORE_REST
-+ XEN_BLOCK_EVENTS(%rsi)
-+ TRACE_IRQS_OFF
-+ jmp int_with_check
-+ CFI_ENDPROC
-+END(system_call)
-+
-+/*
-+ * Certain special system calls that need to save a complete full stack frame.
-+ */
-+
-+ .macro PTREGSCALL label,func,arg
-+ .globl \label
-+\label:
-+ leaq \func(%rip),%rax
-+ leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
-+ jmp ptregscall_common
-+END(\label)
-+ .endm
-+
-+ CFI_STARTPROC
-+
-+ PTREGSCALL stub_clone, sys_clone, %r8
-+ PTREGSCALL stub_fork, sys_fork, %rdi
-+ PTREGSCALL stub_vfork, sys_vfork, %rdi
-+ PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
-+ PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
-+ PTREGSCALL stub_iopl, sys_iopl, %rsi
-+
-+ENTRY(ptregscall_common)
-+ popq %r11
-+ CFI_ADJUST_CFA_OFFSET -8
-+ CFI_REGISTER rip, r11
-+ SAVE_REST
-+ movq %r11, %r15
-+ CFI_REGISTER rip, r15
-+ FIXUP_TOP_OF_STACK %r11
-+ call *%rax
-+ RESTORE_TOP_OF_STACK %r11
-+ movq %r15, %r11
-+ CFI_REGISTER rip, r11
-+ RESTORE_REST
-+ pushq %r11
-+ CFI_ADJUST_CFA_OFFSET 8
-+ CFI_REL_OFFSET rip, 0
-+ ret
-+ CFI_ENDPROC
-+END(ptregscall_common)
-+
-+ENTRY(stub_execve)
-+ CFI_STARTPROC
-+ popq %r11
-+ CFI_ADJUST_CFA_OFFSET -8
-+ CFI_REGISTER rip, r11
-+ SAVE_REST
-+ FIXUP_TOP_OF_STACK %r11
-+ call sys_execve
-+ RESTORE_TOP_OF_STACK %r11
-+ movq %rax,RAX(%rsp)
-+ RESTORE_REST
-+ jmp int_ret_from_sys_call
-+ CFI_ENDPROC
-+END(stub_execve)
-+
-+/*
-+ * sigreturn is special because it needs to restore all registers on return.
-+ * This cannot be done with SYSRET, so use the IRET return path instead.
-+ */
-+ENTRY(stub_rt_sigreturn)
-+ CFI_STARTPROC
-+ addq $8, %rsp
-+ CFI_ADJUST_CFA_OFFSET -8
-+ SAVE_REST
-+ movq %rsp,%rdi
-+ FIXUP_TOP_OF_STACK %r11
-+ call sys_rt_sigreturn
-+ movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
-+ RESTORE_REST
-+ jmp int_ret_from_sys_call
-+ CFI_ENDPROC
-+END(stub_rt_sigreturn)
-+
-+/* initial frame state for interrupts (and exceptions without error code) */
-+#define INTR_FRAME _frame (RIP-0x10); \
-+ CFI_REL_OFFSET rcx,0; \
-+ CFI_REL_OFFSET r11,8
-+
-+/* initial frame state for exceptions with error code (and interrupts with
-+ vector already pushed) */
-+#define XCPT_FRAME _frame (RIP-0x18); \
-+ CFI_REL_OFFSET rcx,0; \
-+ CFI_REL_OFFSET r11,8
-+
-+/*
-+ * Interrupt exit.
-+ *
-+ */
-+
-+retint_check:
-+ CFI_DEFAULT_STACK adj=1
-+ movl threadinfo_flags(%rcx),%edx
-+ andl %edi,%edx
-+ CFI_REMEMBER_STATE
-+ jnz retint_careful
-+retint_restore_args:
-+ movl EFLAGS-REST_SKIP(%rsp), %eax
-+ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
-+ XEN_GET_VCPU_INFO(%rsi)
-+ andb evtchn_upcall_mask(%rsi),%al
-+ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
-+ jnz restore_all_enable_events # != 0 => enable event delivery
-+ XEN_PUT_VCPU_INFO(%rsi)
-+
-+ RESTORE_ARGS 0,8,0
-+ HYPERVISOR_IRET 0
-+
-+ /* edi: workmask, edx: work */
-+retint_careful:
-+ CFI_RESTORE_STATE
-+ bt $TIF_NEED_RESCHED,%edx
-+ jnc retint_signal
-+ TRACE_IRQS_ON
-+ XEN_UNBLOCK_EVENTS(%rsi)
-+/* sti */
-+ pushq %rdi
-+ CFI_ADJUST_CFA_OFFSET 8
-+ call schedule
-+ popq %rdi
-+ CFI_ADJUST_CFA_OFFSET -8
-+ GET_THREAD_INFO(%rcx)
-+ XEN_BLOCK_EVENTS(%rsi)
-+/* cli */
-+ TRACE_IRQS_OFF
-+ jmp retint_check
-+
-+retint_signal:
-+ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
-+ jz retint_restore_args
-+ TRACE_IRQS_ON
-+ XEN_UNBLOCK_EVENTS(%rsi)
-+ SAVE_REST
-+ movq $-1,ORIG_RAX(%rsp)
-+ xorl %esi,%esi # oldset
-+ movq %rsp,%rdi # &pt_regs
-+ call do_notify_resume
-+ RESTORE_REST
-+ XEN_BLOCK_EVENTS(%rsi)
-+ TRACE_IRQS_OFF
-+ movl $_TIF_NEED_RESCHED,%edi
-+ GET_THREAD_INFO(%rcx)
-+ jmp retint_check
-+
-+#ifdef CONFIG_PREEMPT
-+ /* Returning to kernel space. Check if we need preemption */
-+ /* rcx: threadinfo. interrupts off. */
-+ENTRY(retint_kernel)
-+ cmpl $0,threadinfo_preempt_count(%rcx)
-+ jnz retint_restore_args
-+ bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
-+ jnc retint_restore_args
-+ bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
-+ jnc retint_restore_args
-+ call preempt_schedule_irq
-+ jmp retint_kernel /* check again */
-+#endif
-+
-+ CFI_ENDPROC
-+END(retint_check)
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * APIC interrupts.
-+ */
-+ .macro apicinterrupt num,func
-+ INTR_FRAME
-+ pushq $~(\num)
-+ CFI_ADJUST_CFA_OFFSET 8
-+ interrupt \func
-+ jmp error_entry
-+ CFI_ENDPROC
-+ .endm
-+
-+ENTRY(thermal_interrupt)
-+ apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
-+END(thermal_interrupt)
-+
-+ENTRY(threshold_interrupt)
-+ apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
-+END(threshold_interrupt)
-+
-+#ifdef CONFIG_SMP
-+ENTRY(reschedule_interrupt)
-+ apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
-+END(reschedule_interrupt)
-+
-+ .macro INVALIDATE_ENTRY num
-+ENTRY(invalidate_interrupt\num)
-+ apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
-+END(invalidate_interrupt\num)
-+ .endm
-+
-+ INVALIDATE_ENTRY 0
-+ INVALIDATE_ENTRY 1
-+ INVALIDATE_ENTRY 2
-+ INVALIDATE_ENTRY 3
-+ INVALIDATE_ENTRY 4
-+ INVALIDATE_ENTRY 5
-+ INVALIDATE_ENTRY 6
-+ INVALIDATE_ENTRY 7
-+
-+ENTRY(call_function_interrupt)
-+ apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
-+END(call_function_interrupt)
-+ENTRY(irq_move_cleanup_interrupt)
-+ apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
-+END(irq_move_cleanup_interrupt)
-+#endif
-+
-+ENTRY(apic_timer_interrupt)
-+ apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
-+END(apic_timer_interrupt)
-+
-+ENTRY(error_interrupt)
-+ apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
-+END(error_interrupt)
-+
-+ENTRY(spurious_interrupt)
-+ apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
-+END(spurious_interrupt)
-+#endif /* !CONFIG_XEN */
-+
-+/*
-+ * Exception entry points.
-+ */
-+ .macro zeroentry sym
-+ INTR_FRAME
-+ movq (%rsp),%rcx
-+ CFI_RESTORE rcx
-+ movq 8(%rsp),%r11
-+ CFI_RESTORE r11
-+ addq $0x10,%rsp /* skip rcx and r11 */
-+ CFI_ADJUST_CFA_OFFSET -0x10
-+ pushq $0 /* push error code/oldrax */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ pushq %rax /* push real oldrax to the rdi slot */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ CFI_REL_OFFSET rax,0
-+ leaq \sym(%rip),%rax
-+ jmp error_entry
-+ CFI_ENDPROC
-+ .endm
-+
-+ .macro errorentry sym
-+ XCPT_FRAME
-+ movq (%rsp),%rcx
-+ CFI_RESTORE rcx
-+ movq 8(%rsp),%r11
-+ CFI_RESTORE r11
-+ addq $0x10,%rsp /* rsp points to the error code */
-+ CFI_ADJUST_CFA_OFFSET -0x10
-+ pushq %rax
-+ CFI_ADJUST_CFA_OFFSET 8
-+ CFI_REL_OFFSET rax,0
-+ leaq \sym(%rip),%rax
-+ jmp error_entry
-+ CFI_ENDPROC
-+ .endm
-+
-+#if 0 /* not XEN */
-+ /* error code is on the stack already */
-+ /* handle NMI like exceptions that can happen everywhere */
-+ .macro paranoidentry sym, ist=0, irqtrace=1
-+ movq (%rsp),%rcx
-+ movq 8(%rsp),%r11
-+ addq $0x10,%rsp /* skip rcx and r11 */
-+ SAVE_ALL
-+ cld
-+#if 0 /* not XEN */
-+ movl $1,%ebx
-+ movl $MSR_GS_BASE,%ecx
-+ rdmsr
-+ testl %edx,%edx
-+ js 1f
-+ swapgs
-+ xorl %ebx,%ebx
-+1:
-+#endif
-+ .if \ist
-+ movq %gs:pda_data_offset, %rbp
-+ .endif
-+ movq %rsp,%rdi
-+ movq ORIG_RAX(%rsp),%rsi
-+ movq $-1,ORIG_RAX(%rsp)
-+ .if \ist
-+ subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
-+ .endif
-+ call \sym
-+ .if \ist
-+ addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
-+ .endif
-+/* cli */
-+ XEN_BLOCK_EVENTS(%rsi)
-+ .if \irqtrace
-+ TRACE_IRQS_OFF
-+ .endif
-+ .endm
-+
-+ /*
-+ * "Paranoid" exit path from exception stack.
-+ * Paranoid because this is used by NMIs and cannot take
-+ * any kernel state for granted.
-+ * We don't do kernel preemption checks here, because only
-+ * NMI should be common and it does not enable IRQs and
-+ * cannot get reschedule ticks.
-+ *
-+ * "trace" is 0 for the NMI handler only, because irq-tracing
-+ * is fundamentally NMI-unsafe. (we cannot change the soft and
-+ * hard flags at once, atomically)
-+ */
-+ .macro paranoidexit trace=1
-+ /* ebx: no swapgs flag */
-+paranoid_exit\trace:
-+ testl %ebx,%ebx /* swapgs needed? */
-+ jnz paranoid_restore\trace
-+ testl $3,CS(%rsp)
-+ jnz paranoid_userspace\trace
-+paranoid_swapgs\trace:
-+ .if \trace
-+ TRACE_IRQS_IRETQ 0
-+ .endif
-+ swapgs
-+paranoid_restore\trace:
-+ RESTORE_ALL 8
-+ iretq
-+paranoid_userspace\trace:
-+ GET_THREAD_INFO(%rcx)
-+ movl threadinfo_flags(%rcx),%ebx
-+ andl $_TIF_WORK_MASK,%ebx
-+ jz paranoid_swapgs\trace
-+ movq %rsp,%rdi /* &pt_regs */
-+ call sync_regs
-+ movq %rax,%rsp /* switch stack for scheduling */
-+ testl $_TIF_NEED_RESCHED,%ebx
-+ jnz paranoid_schedule\trace
-+ movl %ebx,%edx /* arg3: thread flags */
-+ .if \trace
-+ TRACE_IRQS_ON
-+ .endif
-+ sti
-+ xorl %esi,%esi /* arg2: oldset */
-+ movq %rsp,%rdi /* arg1: &pt_regs */
-+ call do_notify_resume
-+ cli
-+ .if \trace
-+ TRACE_IRQS_OFF
-+ .endif
-+ jmp paranoid_userspace\trace
-+paranoid_schedule\trace:
-+ .if \trace
-+ TRACE_IRQS_ON
-+ .endif
-+ sti
-+ call schedule
-+ cli
-+ .if \trace
-+ TRACE_IRQS_OFF
-+ .endif
-+ jmp paranoid_userspace\trace
-+ CFI_ENDPROC
-+ .endm
-+#endif
-+
-+/*
-+ * Exception entry point. This expects an error code/orig_rax on the stack
-+ * and the exception handler in %rax.
-+ */
-+KPROBE_ENTRY(error_entry)
-+ _frame RDI
-+ CFI_REL_OFFSET rax,0
-+ /* rdi slot contains rax, oldrax contains error code */
-+ cld
-+ subq $14*8,%rsp
-+ CFI_ADJUST_CFA_OFFSET (14*8)
-+ movq %rsi,13*8(%rsp)
-+ CFI_REL_OFFSET rsi,RSI
-+ movq 14*8(%rsp),%rsi /* load rax from rdi slot */
-+ CFI_REGISTER rax,rsi
-+ movq %rdx,12*8(%rsp)
-+ CFI_REL_OFFSET rdx,RDX
-+ movq %rcx,11*8(%rsp)
-+ CFI_REL_OFFSET rcx,RCX
-+ movq %rsi,10*8(%rsp) /* store rax */
-+ CFI_REL_OFFSET rax,RAX
-+ movq %r8, 9*8(%rsp)
-+ CFI_REL_OFFSET r8,R8
-+ movq %r9, 8*8(%rsp)
-+ CFI_REL_OFFSET r9,R9
-+ movq %r10,7*8(%rsp)
-+ CFI_REL_OFFSET r10,R10
-+ movq %r11,6*8(%rsp)
-+ CFI_REL_OFFSET r11,R11
-+ movq %rbx,5*8(%rsp)
-+ CFI_REL_OFFSET rbx,RBX
-+ movq %rbp,4*8(%rsp)
-+ CFI_REL_OFFSET rbp,RBP
-+ movq %r12,3*8(%rsp)
-+ CFI_REL_OFFSET r12,R12
-+ movq %r13,2*8(%rsp)
-+ CFI_REL_OFFSET r13,R13
-+ movq %r14,1*8(%rsp)
-+ CFI_REL_OFFSET r14,R14
-+ movq %r15,(%rsp)
-+ CFI_REL_OFFSET r15,R15
-+#if 0
-+ cmpl $__KERNEL_CS,CS(%rsp)
-+ CFI_REMEMBER_STATE
-+ je error_kernelspace
-+#endif
-+error_call_handler:
-+ movq %rdi, RDI(%rsp)
-+ CFI_REL_OFFSET rdi,RDI
-+ movq %rsp,%rdi
-+ movq ORIG_RAX(%rsp),%rsi # get error code
-+ movq $-1,ORIG_RAX(%rsp)
-+ call *%rax
-+error_exit:
-+ RESTORE_REST
-+/* cli */
-+ XEN_BLOCK_EVENTS(%rsi)
-+ TRACE_IRQS_OFF
-+ GET_THREAD_INFO(%rcx)
-+ testb $3,CS-ARGOFFSET(%rsp)
-+ jz retint_kernel
-+ movl threadinfo_flags(%rcx),%edx
-+ movl $_TIF_WORK_MASK,%edi
-+ andl %edi,%edx
-+ jnz retint_careful
-+ /*
-+ * The iret might restore flags:
-+ */
-+ TRACE_IRQS_IRETQ
-+ jmp retint_restore_args
-+
-+#if 0
-+ /*
-+ * We need to re-write the logic here because we don't do iretq to
-+ * to return to user mode. It's still possible that we get trap/fault
-+ * in the kernel (when accessing buffers pointed to by system calls,
-+ * for example).
-+ *
-+ */
-+ CFI_RESTORE_STATE
-+error_kernelspace:
-+ incl %ebx
-+ /* There are two places in the kernel that can potentially fault with
-+ usergs. Handle them here. The exception handlers after
-+ iret run with kernel gs again, so don't set the user space flag.
-+	   B stepping K8s sometimes report a truncated RIP for IRET
-+ exceptions returning to compat mode. Check for these here too. */
-+ leaq iret_label(%rip),%rbp
-+ cmpq %rbp,RIP(%rsp)
-+ je error_swapgs
-+ movl %ebp,%ebp /* zero extend */
-+ cmpq %rbp,RIP(%rsp)
-+ je error_swapgs
-+ cmpq $gs_change,RIP(%rsp)
-+ je error_swapgs
-+ jmp error_sti
-+#endif
-+ CFI_ENDPROC
-+KPROBE_END(error_entry)
-+
-+ENTRY(hypervisor_callback)
-+ zeroentry do_hypervisor_callback
-+END(hypervisor_callback)
-+
-+/*
-+ * Copied from arch/xen/i386/kernel/entry.S
-+ */
-+# A note on the "critical region" in our callback handler.
-+# We want to avoid stacking callback handlers due to events occurring
-+# during handling of the last event. To do this, we keep events disabled
-+# until we've done all processing. HOWEVER, we must enable events before
-+# popping the stack frame (can't be done atomically) and so it would still
-+# be possible to get enough handler activations to overflow the stack.
-+# Although unlikely, bugs of that kind are hard to track down, so we'd
-+# like to avoid the possibility.
-+# So, on entry to the handler we detect whether we interrupted an
-+# existing activation in its critical region -- if so, we pop the current
-+# activation and restart the handler using the previous one.
-+ENTRY(do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
-+ CFI_STARTPROC
-+# Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
-+# see the correct pointer to the pt_regs
-+ movq %rdi, %rsp # we don't return, adjust the stack frame
-+ CFI_ENDPROC
-+ CFI_DEFAULT_STACK
-+11: incl %gs:pda_irqcount
-+ movq %rsp,%rbp
-+ CFI_DEF_CFA_REGISTER rbp
-+ cmovzq %gs:pda_irqstackptr,%rsp
-+ pushq %rbp # backlink for old unwinder
-+ call evtchn_do_upcall
-+ popq %rsp
-+ CFI_DEF_CFA_REGISTER rsp
-+ decl %gs:pda_irqcount
-+ jmp error_exit
-+ CFI_ENDPROC
-+END(do_hypervisor_callback)
-+
-+ ALIGN
-+restore_all_enable_events:
-+ CFI_DEFAULT_STACK adj=1
-+ TRACE_IRQS_ON
-+ XEN_UNBLOCK_EVENTS(%rsi) # %rsi is already set up...
-+
-+scrit: /**** START OF CRITICAL REGION ****/
-+ XEN_TEST_PENDING(%rsi)
-+ CFI_REMEMBER_STATE
-+ jnz 14f # process more events if necessary...
-+ XEN_PUT_VCPU_INFO(%rsi)
-+ RESTORE_ARGS 0,8,0
-+ HYPERVISOR_IRET 0
-+
-+ CFI_RESTORE_STATE
-+14: XEN_LOCKED_BLOCK_EVENTS(%rsi)
-+ XEN_PUT_VCPU_INFO(%rsi)
-+ SAVE_REST
-+ movq %rsp,%rdi # set the argument again
-+ jmp 11b
-+ CFI_ENDPROC
-+ecrit: /**** END OF CRITICAL REGION ****/
-+# At this point, unlike on x86-32, we don't do the fixup to simplify the
-+# code and the stack frame is more complex on x86-64.
-+# When the kernel is interrupted in the critical section, the kernel
-+# will do IRET in that case, and everything will be restored at that point,
-+# i.e. it just resumes from the next instruction interrupted with the same context.
-+
-+# Hypervisor uses this for application faults while it executes.
-+# We get here for two reasons:
-+# 1. Fault while reloading DS, ES, FS or GS
-+# 2. Fault while executing IRET
-+# Category 1 we do not need to fix up as Xen has already reloaded all segment
-+# registers that could be reloaded and zeroed the others.
-+# Category 2 we fix up by killing the current process. We cannot use the
-+# normal Linux return path in this case because if we use the IRET hypercall
-+# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
-+# We distinguish between categories by comparing each saved segment register
-+# with its current contents: any discrepancy means we are in category 1.
-+ENTRY(failsafe_callback)
-+ _frame (RIP-0x30)
-+ CFI_REL_OFFSET rcx, 0
-+ CFI_REL_OFFSET r11, 8
-+ movw %ds,%cx
-+ cmpw %cx,0x10(%rsp)
-+ CFI_REMEMBER_STATE
-+ jne 1f
-+ movw %es,%cx
-+ cmpw %cx,0x18(%rsp)
-+ jne 1f
-+ movw %fs,%cx
-+ cmpw %cx,0x20(%rsp)
-+ jne 1f
-+ movw %gs,%cx
-+ cmpw %cx,0x28(%rsp)
-+ jne 1f
-+ /* All segments match their saved values => Category 2 (Bad IRET). */
-+ movq (%rsp),%rcx
-+ CFI_RESTORE rcx
-+ movq 8(%rsp),%r11
-+ CFI_RESTORE r11
-+ addq $0x30,%rsp
-+ CFI_ADJUST_CFA_OFFSET -0x30
-+ movq $11,%rdi /* SIGSEGV */
-+ jmp do_exit
-+ CFI_RESTORE_STATE
-+1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
-+ movq (%rsp),%rcx
-+ CFI_RESTORE rcx
-+ movq 8(%rsp),%r11
-+ CFI_RESTORE r11
-+ addq $0x30,%rsp
-+ CFI_ADJUST_CFA_OFFSET -0x30
-+ pushq $0
-+ CFI_ADJUST_CFA_OFFSET 8
-+ SAVE_ALL
-+ jmp error_exit
-+ CFI_ENDPROC
-+#if 0
-+ .section __ex_table,"a"
-+ .align 8
-+ .quad gs_change,bad_gs
-+ .previous
-+ .section .fixup,"ax"
-+ /* running with kernelgs */
-+bad_gs:
-+/* swapgs */ /* switch back to user gs */
-+ xorl %eax,%eax
-+ movl %eax,%gs
-+ jmp 2b
-+ .previous
-+#endif
-+
-+/*
-+ * Create a kernel thread.
-+ *
-+ * C extern interface:
-+ * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-+ *
-+ * asm input arguments:
-+ * rdi: fn, rsi: arg, rdx: flags
-+ */
-+ENTRY(kernel_thread)
-+ CFI_STARTPROC
-+ FAKE_STACK_FRAME $child_rip
-+ SAVE_ALL
-+
-+ # rdi: flags, rsi: usp, rdx: will be &pt_regs
-+ movq %rdx,%rdi
-+ orq kernel_thread_flags(%rip),%rdi
-+ movq $-1, %rsi
-+ movq %rsp, %rdx
-+
-+ xorl %r8d,%r8d
-+ xorl %r9d,%r9d
-+
-+ # clone now
-+ call do_fork
-+ movq %rax,RAX(%rsp)
-+ xorl %edi,%edi
-+
-+ /*
-+	 * It isn't worth checking for reschedule here,
-+ * so internally to the x86_64 port you can rely on kernel_thread()
-+ * not to reschedule the child before returning, this avoids the need
-+ * of hacks for example to fork off the per-CPU idle tasks.
-+ * [Hopefully no generic code relies on the reschedule -AK]
-+ */
-+ RESTORE_ALL
-+ UNFAKE_STACK_FRAME
-+ ret
-+ CFI_ENDPROC
-+ENDPROC(kernel_thread)
-+
-+child_rip:
-+ pushq $0 # fake return address
-+ CFI_STARTPROC
-+ /*
-+ * Here we are in the child and the registers are set as they were
-+ * at kernel_thread() invocation in the parent.
-+ */
-+ movq %rdi, %rax
-+ movq %rsi, %rdi
-+ call *%rax
-+ # exit
-+ xorl %edi, %edi
-+ call do_exit
-+ CFI_ENDPROC
-+ENDPROC(child_rip)
-+
-+/*
-+ * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
-+ *
-+ * C extern interface:
-+ * extern long execve(char *name, char **argv, char **envp)
-+ *
-+ * asm input arguments:
-+ * rdi: name, rsi: argv, rdx: envp
-+ *
-+ * We want to fallback into:
-+ * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
-+ *
-+ * do_sys_execve asm fallback arguments:
-+ * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
-+ */
-+ENTRY(kernel_execve)
-+ CFI_STARTPROC
-+ FAKE_STACK_FRAME $0
-+ SAVE_ALL
-+ call sys_execve
-+ movq %rax, RAX(%rsp)
-+ RESTORE_REST
-+ testq %rax,%rax
-+ jne 1f
-+ jmp int_ret_from_sys_call
-+1: RESTORE_ARGS
-+ UNFAKE_STACK_FRAME
-+ ret
-+ CFI_ENDPROC
-+ENDPROC(kernel_execve)
-+
-+KPROBE_ENTRY(page_fault)
-+ errorentry do_page_fault
-+KPROBE_END(page_fault)
-+
-+ENTRY(coprocessor_error)
-+ zeroentry do_coprocessor_error
-+END(coprocessor_error)
-+
-+ENTRY(simd_coprocessor_error)
-+ zeroentry do_simd_coprocessor_error
-+END(simd_coprocessor_error)
-+
-+ENTRY(device_not_available)
-+ zeroentry math_state_restore
-+END(device_not_available)
-+
-+ /* runs on exception stack */
-+KPROBE_ENTRY(debug)
-+/* INTR_FRAME
-+ pushq $0
-+ CFI_ADJUST_CFA_OFFSET 8 */
-+ zeroentry do_debug
-+/* paranoidexit
-+ CFI_ENDPROC */
-+KPROBE_END(debug)
-+
-+KPROBE_ENTRY(nmi)
-+ zeroentry do_nmi_callback
-+KPROBE_END(nmi)
-+do_nmi_callback:
-+ CFI_STARTPROC
-+ addq $8, %rsp
-+ CFI_ENDPROC
-+ CFI_DEFAULT_STACK
-+ call do_nmi
-+ orl $NMI_MASK,EFLAGS(%rsp)
-+ RESTORE_REST
-+ XEN_BLOCK_EVENTS(%rsi)
-+ TRACE_IRQS_OFF
-+ GET_THREAD_INFO(%rcx)
-+ jmp retint_restore_args
-+ CFI_ENDPROC
-+END(do_nmi_callback)
-+
-+KPROBE_ENTRY(int3)
-+/* INTR_FRAME
-+ pushq $0
-+ CFI_ADJUST_CFA_OFFSET 8 */
-+ zeroentry do_int3
-+/* jmp paranoid_exit1
-+ CFI_ENDPROC */
-+KPROBE_END(int3)
-+
-+ENTRY(overflow)
-+ zeroentry do_overflow
-+END(overflow)
-+
-+ENTRY(bounds)
-+ zeroentry do_bounds
-+END(bounds)
-+
-+ENTRY(invalid_op)
-+ zeroentry do_invalid_op
-+END(invalid_op)
-+
-+ENTRY(coprocessor_segment_overrun)
-+ zeroentry do_coprocessor_segment_overrun
-+END(coprocessor_segment_overrun)
-+
-+ENTRY(reserved)
-+ zeroentry do_reserved
-+END(reserved)
-+
-+#if 0
-+ /* runs on exception stack */
-+ENTRY(double_fault)
-+ XCPT_FRAME
-+ paranoidentry do_double_fault
-+ jmp paranoid_exit1
-+ CFI_ENDPROC
-+END(double_fault)
-+#endif
-+
-+ENTRY(invalid_TSS)
-+ errorentry do_invalid_TSS
-+END(invalid_TSS)
-+
-+ENTRY(segment_not_present)
-+ errorentry do_segment_not_present
-+END(segment_not_present)
-+
-+ /* runs on exception stack */
-+ENTRY(stack_segment)
-+/* XCPT_FRAME
-+ paranoidentry do_stack_segment */
-+ errorentry do_stack_segment
-+/* jmp paranoid_exit1
-+ CFI_ENDPROC */
-+END(stack_segment)
-+
-+KPROBE_ENTRY(general_protection)
-+ errorentry do_general_protection
-+KPROBE_END(general_protection)
-+
-+ENTRY(alignment_check)
-+ errorentry do_alignment_check
-+END(alignment_check)
-+
-+ENTRY(divide_error)
-+ zeroentry do_divide_error
-+END(divide_error)
-+
-+ENTRY(spurious_interrupt_bug)
-+ zeroentry do_spurious_interrupt_bug
-+END(spurious_interrupt_bug)
-+
-+#ifdef CONFIG_X86_MCE
-+ /* runs on exception stack */
-+ENTRY(machine_check)
-+ INTR_FRAME
-+ pushq $0
-+ CFI_ADJUST_CFA_OFFSET 8
-+ paranoidentry do_machine_check
-+ jmp paranoid_exit1
-+ CFI_ENDPROC
-+END(machine_check)
-+#endif
-+
-+/* Call softirq on interrupt stack. Interrupts are off. */
-+ENTRY(call_softirq)
-+ CFI_STARTPROC
-+ push %rbp
-+ CFI_ADJUST_CFA_OFFSET 8
-+ CFI_REL_OFFSET rbp,0
-+ mov %rsp,%rbp
-+ CFI_DEF_CFA_REGISTER rbp
-+ incl %gs:pda_irqcount
-+ cmove %gs:pda_irqstackptr,%rsp
-+ push %rbp # backlink for old unwinder
-+ call __do_softirq
-+ leaveq
-+ CFI_DEF_CFA_REGISTER rsp
-+ CFI_ADJUST_CFA_OFFSET -8
-+ decl %gs:pda_irqcount
-+ ret
-+ CFI_ENDPROC
-+ENDPROC(call_softirq)
-+
-+KPROBE_ENTRY(ignore_sysret)
-+ CFI_STARTPROC
-+ mov $-ENOSYS,%eax
-+ sysret
-+ CFI_ENDPROC
-+ENDPROC(ignore_sysret)
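
The register layout documented above ENTRY(system_call) (rax = syscall number;
rdi, rsi, rdx, r10, r8, r9 = arguments; rcx and r11 clobbered by
syscall/sysret) is the standard x86-64 Linux syscall convention. A minimal
userspace illustration of that convention, issuing write(2) directly via the
syscall instruction (a sketch, not part of the patch):

#include <stddef.h>

/* Raw 3-argument syscall: number in rax, args in rdi/rsi/rdx, result in rax.
 * rcx and r11 are clobbered by the syscall instruction itself, exactly as
 * the entry-xen.S comments describe. */
static long raw_syscall3(long nr, long a1, long a2, long a3)
{
	long ret;

	__asm__ volatile ("syscall"
			  : "=a" (ret)
			  : "a" (nr), "D" (a1), "S" (a2), "d" (a3)
			  : "rcx", "r11", "memory");
	return ret;
}

int main(void)
{
	static const char msg[] = "syscall register demo\n";

	return raw_syscall3(1 /* __NR_write on x86-64 */, 1, (long)msg,
			    sizeof(msg) - 1) < 0;
}
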
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/genapic_xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/genapic_xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/genapic_xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/genapic_xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,166 @@
-+/*
-+ * Copyright 2004 James Cleverdon, IBM.
-+ * Subject to the GNU Public License, v.2
-+ *
-+ * Xen APIC subarch code. Maximum 8 CPUs, logical delivery.
-+ *
-+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
-+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
-+ * James Cleverdon.
-+ *
-+ * Hacked to pieces for Xen by Chris Wright.
-+ */
-+#include <linux/threads.h>
-+#include <linux/cpumask.h>
-+#include <linux/string.h>
-+#include <linux/kernel.h>
-+#include <linux/ctype.h>
-+#include <linux/init.h>
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+#include <asm/smp.h>
-+#else
-+#include <asm/apic.h>
-+#endif
-+#include <asm/genapic.h>
-+#include <xen/evtchn.h>
-+
-+DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
-+
-+static inline void __send_IPI_one(unsigned int cpu, int vector)
-+{
-+ int irq = per_cpu(ipi_to_irq, cpu)[vector];
-+ BUG_ON(irq < 0);
-+ notify_remote_via_irq(irq);
-+}
-+
-+void xen_send_IPI_shortcut(unsigned int shortcut, int vector)
-+{
-+ int cpu;
-+
-+ switch (shortcut) {
-+ case APIC_DEST_SELF:
-+ __send_IPI_one(smp_processor_id(), vector);
-+ break;
-+ case APIC_DEST_ALLBUT:
-+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+ if (cpu == smp_processor_id())
-+ continue;
-+ if (cpu_isset(cpu, cpu_online_map)) {
-+ __send_IPI_one(cpu, vector);
-+ }
-+ }
-+ break;
-+ case APIC_DEST_ALLINC:
-+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+ if (cpu_isset(cpu, cpu_online_map)) {
-+ __send_IPI_one(cpu, vector);
-+ }
-+ }
-+ break;
-+ default:
-+ printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
-+ vector);
-+ break;
-+ }
-+}
-+
-+static cpumask_t xen_target_cpus(void)
-+{
-+ return cpu_online_map;
-+}
-+
-+static cpumask_t xen_vector_allocation_domain(int cpu)
-+{
-+ cpumask_t domain = CPU_MASK_NONE;
-+ cpu_set(cpu, domain);
-+ return domain;
-+}
-+
-+/*
-+ * Set up the logical destination ID.
-+ * Do nothing, not called now.
-+ */
-+static void xen_init_apic_ldr(void)
-+{
-+ Dprintk("%s\n", __FUNCTION__);
-+ return;
-+}
-+
-+static void xen_send_IPI_allbutself(int vector)
-+{
-+ /*
-+ * if there are no other CPUs in the system then
-+ * we get an APIC send error if we try to broadcast.
-+ * thus we have to avoid sending IPIs in this case.
-+ */
-+ Dprintk("%s\n", __FUNCTION__);
-+ if (num_online_cpus() > 1)
-+ xen_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
-+}
-+
-+static void xen_send_IPI_all(int vector)
-+{
-+ Dprintk("%s\n", __FUNCTION__);
-+ xen_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
-+}
-+
-+static void xen_send_IPI_mask(cpumask_t cpumask, int vector)
-+{
-+ unsigned long mask = cpus_addr(cpumask)[0];
-+ unsigned int cpu;
-+ unsigned long flags;
-+
-+ Dprintk("%s\n", __FUNCTION__);
-+ local_irq_save(flags);
-+ WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
-+
-+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+ if (cpu_isset(cpu, cpumask)) {
-+ __send_IPI_one(cpu, vector);
-+ }
-+ }
-+ local_irq_restore(flags);
-+}
-+
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+static int xen_apic_id_registered(void)
-+{
-+ /* better be set */
-+ Dprintk("%s\n", __FUNCTION__);
-+ return physid_isset(smp_processor_id(), phys_cpu_present_map);
-+}
-+#endif
-+
-+static unsigned int xen_cpu_mask_to_apicid(cpumask_t cpumask)
-+{
-+ Dprintk("%s\n", __FUNCTION__);
-+ return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
-+}
-+
-+static unsigned int phys_pkg_id(int index_msb)
-+{
-+ u32 ebx;
-+
-+ Dprintk("%s\n", __FUNCTION__);
-+ ebx = cpuid_ebx(1);
-+ return ((ebx >> 24) & 0xFF) >> index_msb;
-+}
-+
-+struct genapic apic_xen = {
-+ .name = "xen",
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+ .int_delivery_mode = dest_LowestPrio,
-+#endif
-+ .int_dest_mode = 1,
-+ .target_cpus = xen_target_cpus,
-+ .vector_allocation_domain = xen_vector_allocation_domain,
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+ .apic_id_registered = xen_apic_id_registered,
-+#endif
-+ .init_apic_ldr = xen_init_apic_ldr,
-+ .send_IPI_all = xen_send_IPI_all,
-+ .send_IPI_allbutself = xen_send_IPI_allbutself,
-+ .send_IPI_mask = xen_send_IPI_mask,
-+ .cpu_mask_to_apicid = xen_cpu_mask_to_apicid,
-+ .phys_pkg_id = phys_pkg_id,
-+};
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/genapic-xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/genapic-xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/genapic-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/genapic-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,84 @@
-+/*
-+ * Copyright 2004 James Cleverdon, IBM.
-+ * Subject to the GNU Public License, v.2
-+ *
-+ * Generic APIC sub-arch probe layer.
-+ *
-+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
-+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
-+ * James Cleverdon.
-+ */
-+#include <linux/threads.h>
-+#include <linux/cpumask.h>
-+#include <linux/string.h>
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/ctype.h>
-+#include <linux/init.h>
-+
-+#include <asm/smp.h>
-+#include <asm/genapic.h>
-+
-+#ifdef CONFIG_ACPI
-+#include <acpi/acpi_bus.h>
-+#endif
-+
-+/* which logical CPU number maps to which CPU (physical APIC ID) */
-+u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly
-+ = { [0 ... NR_CPUS-1] = BAD_APICID };
-+EXPORT_SYMBOL(x86_cpu_to_apicid);
-+
-+u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-+
-+#ifndef CONFIG_XEN
-+struct genapic __read_mostly *genapic = &apic_flat;
-+#else
-+extern struct genapic apic_xen;
-+struct genapic __read_mostly *genapic = &apic_xen;
-+#endif
-+
-+
-+/*
-+ * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
-+ */
-+void __init setup_apic_routing(void)
-+{
-+#ifndef CONFIG_XEN
-+#ifdef CONFIG_ACPI
-+ /*
-+ * Quirk: some x86_64 machines can only use physical APIC mode
-+ * regardless of how many processors are present (x86_64 ES7000
-+ * is an example).
-+ */
-+ if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
-+ (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL))
-+ genapic = &apic_physflat;
-+ else
-+#endif
-+
-+ if (cpus_weight(cpu_possible_map) <= 8)
-+ genapic = &apic_flat;
-+ else
-+ genapic = &apic_physflat;
-+
-+#else
-+ /* hardcode to xen apic functions */
-+ genapic = &apic_xen;
-+#endif
-+ printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
-+}
-+
-+/* Same for both flat and physical. */
-+
-+#ifdef CONFIG_XEN
-+extern void xen_send_IPI_shortcut(unsigned int shortcut, int vector);
-+#endif
-+
-+void send_IPI_self(int vector)
-+{
-+#ifndef CONFIG_XEN
-+ __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
-+#else
-+ xen_send_IPI_shortcut(APIC_DEST_SELF, vector);
-+#endif
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/head64-xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/head64-xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/head64-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/head64-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,132 @@
-+/*
-+ * linux/arch/x86_64/kernel/head64.c -- prepare to run common code
-+ *
-+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
-+ *
-+ * Jun Nakajima <jun.nakajima@intel.com>
-+ * Modified for Xen.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/linkage.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/percpu.h>
-+#include <linux/module.h>
-+
-+#include <asm/processor.h>
-+#include <asm/proto.h>
-+#include <asm/smp.h>
-+#include <asm/bootsetup.h>
-+#include <asm/setup.h>
-+#include <asm/desc.h>
-+#include <asm/pgtable.h>
-+#include <asm/tlbflush.h>
-+#include <asm/sections.h>
-+
-+unsigned long start_pfn;
-+
-+#ifndef CONFIG_XEN
-+static void __init zap_identity_mappings(void)
-+{
-+ pgd_t *pgd = pgd_offset_k(0UL);
-+ pgd_clear(pgd);
-+ __flush_tlb();
-+}
-+
-+/* Don't add a printk in there. printk relies on the PDA which is not initialized
-+ yet. */
-+static void __init clear_bss(void)
-+{
-+ memset(__bss_start, 0,
-+ (unsigned long) __bss_stop - (unsigned long) __bss_start);
-+}
-+#endif
-+
-+#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
-+#define OLD_CL_MAGIC_ADDR 0x20
-+#define OLD_CL_MAGIC 0xA33F
-+#define OLD_CL_BASE_ADDR 0x90000
-+#define OLD_CL_OFFSET 0x90022
-+
-+static void __init copy_bootdata(char *real_mode_data)
-+{
-+#ifndef CONFIG_XEN
-+ unsigned long new_data;
-+ char * command_line;
-+
-+ memcpy(x86_boot_params, real_mode_data, BOOT_PARAM_SIZE);
-+ new_data = *(u32 *) (x86_boot_params + NEW_CL_POINTER);
-+ if (!new_data) {
-+ if (OLD_CL_MAGIC != *(u16 *)(real_mode_data + OLD_CL_MAGIC_ADDR)) {
-+ return;
-+ }
-+ new_data = __pa(real_mode_data) + *(u16 *)(real_mode_data + OLD_CL_OFFSET);
-+ }
-+ command_line = __va(new_data);
-+ memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
-+#else
-+ int max_cmdline;
-+
-+ if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
-+ max_cmdline = COMMAND_LINE_SIZE;
-+ memcpy(boot_command_line, xen_start_info->cmd_line, max_cmdline);
-+ boot_command_line[max_cmdline-1] = '\0';
-+#endif
-+}
-+
-+#include <xen/interface/memory.h>
-+unsigned long *machine_to_phys_mapping;
-+EXPORT_SYMBOL(machine_to_phys_mapping);
-+unsigned int machine_to_phys_order;
-+EXPORT_SYMBOL(machine_to_phys_order);
-+
-+void __init x86_64_start_kernel(char * real_mode_data)
-+{
-+ struct xen_machphys_mapping mapping;
-+ unsigned long machine_to_phys_nr_ents;
-+ int i;
-+
-+ setup_xen_features();
-+
-+ xen_start_info = (struct start_info *)real_mode_data;
-+ if (!xen_feature(XENFEAT_auto_translated_physmap))
-+ phys_to_machine_mapping =
-+ (unsigned long *)xen_start_info->mfn_list;
-+ start_pfn = (__pa(xen_start_info->pt_base) >> PAGE_SHIFT) +
-+ xen_start_info->nr_pt_frames;
-+
-+ machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
-+ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
-+ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
-+ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
-+ machine_to_phys_nr_ents = mapping.max_mfn + 1;
-+ }
-+ while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
-+ machine_to_phys_order++;
-+
-+#ifndef CONFIG_XEN
-+ /* clear bss before set_intr_gate with early_idt_handler */
-+ clear_bss();
-+
-+ /* Make NULL pointers segfault */
-+ zap_identity_mappings();
-+
-+ for (i = 0; i < IDT_ENTRIES; i++)
-+ set_intr_gate(i, early_idt_handler);
-+ asm volatile("lidt %0" :: "m" (idt_descr));
-+#endif
-+
-+ early_printk("Kernel alive\n");
-+
-+ for (i = 0; i < NR_CPUS; i++)
-+ cpu_pda(i) = &boot_cpu_pda[i];
-+
-+ pda_init(0);
-+ copy_bootdata(__va(real_mode_data));
-+#ifdef CONFIG_SMP
-+ cpu_set(0, cpu_online_map);
-+#endif
-+ start_kernel();
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/head-xen.S ubuntu-gutsy-xen/arch/x86_64/kernel/head-xen.S
---- ubuntu-gutsy/arch/x86_64/kernel/head-xen.S 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/head-xen.S 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,189 @@
-+/*
-+ * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
-+ *
-+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
-+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
-+ * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
-+ * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
-+ * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
-+ * Jun Nakajima <jun.nakajima@intel.com>
-+ * Modified for Xen
-+ */
-+
-+
-+#include <linux/linkage.h>
-+#include <linux/threads.h>
-+#include <linux/init.h>
-+#include <linux/elfnote.h>
-+#include <asm/desc.h>
-+#include <asm/segment.h>
-+#include <asm/page.h>
-+#include <asm/msr.h>
-+#include <asm/cache.h>
-+#include <asm/dwarf2.h>
-+#include <xen/interface/elfnote.h>
-+
-+ .section .bootstrap.text, "ax", @progbits
-+ .code64
-+ .globl startup_64
-+startup_64:
-+ movq $(init_thread_union+THREAD_SIZE-8),%rsp
-+
-+ /* rsi is pointer to startup info structure.
-+ pass it to C */
-+ movq %rsi,%rdi
-+ pushq $0 # fake return address
-+ jmp x86_64_start_kernel
-+
-+.balign PAGE_SIZE
-+
-+#define NEXT_PAGE(name) \
-+ .balign PAGE_SIZE; \
-+ phys_##name = . - .bootstrap.text; \
-+ENTRY(name)
-+
-+NEXT_PAGE(init_level4_pgt)
-+ .fill 512,8,0
-+ /*
-+ * We update two pgd entries to make kernel and user pgd consistent
-+ * at pgd_populate(). It can be used for kernel modules. So we place
-+ * this page here for those cases to avoid memory corruption.
-+ * We also use this page to establish the initial mapping for the
-+ * vsyscall area.
-+ */
-+ .fill 512,8,0
-+
-+NEXT_PAGE(level3_kernel_pgt)
-+ .fill 512,8,0
-+
-+ /*
-+ * This is used for vsyscall area mapping as we have a different
-+ * level4 page table for user.
-+ */
-+NEXT_PAGE(level3_user_pgt)
-+ .fill 512,8,0
-+
-+NEXT_PAGE(level2_kernel_pgt)
-+ .fill 512,8,0
-+
-+NEXT_PAGE(hypercall_page)
-+ CFI_STARTPROC
-+ .rept 0x1000 / 0x20
-+ .skip 1 /* push %rcx */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ CFI_REL_OFFSET rcx,0
-+ .skip 2 /* push %r11 */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ CFI_REL_OFFSET rcx,0
-+ .skip 5 /* mov $#,%eax */
-+ .skip 2 /* syscall */
-+ .skip 2 /* pop %r11 */
-+ CFI_ADJUST_CFA_OFFSET -8
-+ CFI_RESTORE r11
-+ .skip 1 /* pop %rcx */
-+ CFI_ADJUST_CFA_OFFSET -8
-+ CFI_RESTORE rcx
-+ .align 0x20,0 /* ret */
-+ .endr
-+ CFI_ENDPROC
-+
-+#undef NEXT_PAGE
-+
-+ .data
-+
-+ .align 16
-+ .globl cpu_gdt_descr
-+cpu_gdt_descr:
-+ .word gdt_end-cpu_gdt_table-1
-+gdt:
-+ .quad cpu_gdt_table
-+#ifdef CONFIG_SMP
-+ .rept NR_CPUS-1
-+ .word 0
-+ .quad 0
-+ .endr
-+#endif
-+
-+/* We need valid kernel segments for data and code in long mode too
-+ * IRET will check the segment types kkeil 2000/10/28
-+ * Also sysret mandates a special GDT layout
-+ */
-+
-+ .section .data.page_aligned, "aw"
-+ .align PAGE_SIZE
-+
-+/* The TLS descriptors are currently at a different place compared to i386.
-+ Hopefully nobody expects them at a fixed place (Wine?) */
-+
-+ENTRY(cpu_gdt_table)
-+ .quad 0x0000000000000000 /* NULL descriptor */
-+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
-+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
-+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
-+ .quad 0x00cffb000000ffff /* __USER32_CS */
-+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
-+ .quad 0x00affb000000ffff /* __USER_CS */
-+ .quad 0x0 /* unused */
-+ .quad 0,0 /* TSS */
-+ .quad 0,0 /* LDT */
-+ .quad 0,0,0 /* three TLS descriptors */
-+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
-+gdt_end:
-+ /* asm/segment.h:GDT_ENTRIES must match this */
-+ /* This should be a multiple of the cache line size */
-+ /* GDTs of other CPUs are now dynamically allocated */
-+
-+ /* zero the remaining page */
-+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
-+
-+ .section .bss.page_aligned, "aw", @nobits
-+ .align PAGE_SIZE
-+ENTRY(empty_zero_page)
-+ .skip PAGE_SIZE
-+
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+/*
-+ * __xen_guest information
-+ */
-+.macro utoh value
-+ i = 64
-+ .rept 16
-+ i = i - 4
-+ .byte '0' + ((((\value) >> i) & 0xf) > 9) * ('0' - 'A' + 10) + (((\value) >> i) & 0xf)
-+ .endr
-+.endm
-+
-+.section __xen_guest
-+ .ascii "GUEST_OS=linux,GUEST_VER=2.6"
-+ .ascii ",XEN_VER=xen-3.0"
-+ .ascii ",VIRT_BASE=0x"
-+ utoh __START_KERNEL_map
-+ .ascii ",ELF_PADDR_OFFSET=0x"
-+ utoh __START_KERNEL_map
-+ .ascii ",VIRT_ENTRY=0x"
-+ utoh (__START_KERNEL_map + __PHYSICAL_START + startup_64 - .bootstrap.text)
-+ .ascii ",HYPERCALL_PAGE=0x"
-+ utoh (phys_hypercall_page >> PAGE_SHIFT)
-+ .ascii ",FEATURES=writable_page_tables"
-+ .ascii "|writable_descriptor_tables"
-+ .ascii "|auto_translated_physmap"
-+ .ascii "|supervisor_mode_kernel"
-+ .ascii ",LOADER=generic"
-+ .byte 0
-+#endif /* CONFIG_XEN_COMPAT <= 0x030002 */
-+
-+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "linux")
-+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "2.6")
-+ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
-+ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .quad, __START_KERNEL_map)
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .quad, __START_KERNEL_map)
-+#else
-+ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .quad, 0)
-+#endif
-+ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .quad, startup_64)
-+ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad, hypercall_page)
-+ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT)
-+ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
-+ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
-+ ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1)
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/init_task.c ubuntu-gutsy-xen/arch/x86_64/kernel/init_task.c
---- ubuntu-gutsy/arch/x86_64/kernel/init_task.c 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/init_task.c 2007-08-18 12:38:02.000000000 -0400
-@@ -37,6 +37,8 @@
- struct task_struct init_task = INIT_TASK(init_task);
-
- EXPORT_SYMBOL(init_task);
-+
-+#ifndef CONFIG_X86_NO_TSS
- /*
- * per-CPU TSS segments. Threads are completely 'soft' on Linux,
- * no more per-task TSS's. The TSS size is kept cacheline-aligned
-@@ -50,5 +52,6 @@
- * debugging, no special alignment required.
- */
- DEFINE_PER_CPU(struct orig_ist, orig_ist);
-+#endif
-
- #define ALIGN_TO_4K __attribute__((section(".data.init_task")))
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/io_apic-xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/io_apic-xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/io_apic-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/io_apic-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,2162 @@
-+/*
-+ * Intel IO-APIC support for multi-Pentium hosts.
-+ *
-+ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
-+ *
-+ * Many thanks to Stig Venaas for trying out countless experimental
-+ * patches and reporting/debugging problems patiently!
-+ *
-+ * (c) 1999, Multiple IO-APIC support, developed by
-+ * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
-+ * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
-+ * further tested and cleaned up by Zach Brown <zab@redhat.com>
-+ * and Ingo Molnar <mingo@redhat.com>
-+ *
-+ * Fixes
-+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
-+ * thanks to Eric Gilmore
-+ * and Rolf G. Tews
-+ * for testing these extensively
-+ * Paul Diefenbaugh : Added full ACPI support
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/sched.h>
-+#include <linux/pci.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/acpi.h>
-+#include <linux/sysdev.h>
-+#include <linux/msi.h>
-+#include <linux/htirq.h>
-+#ifdef CONFIG_ACPI
-+#include <acpi/acpi_bus.h>
-+#endif
-+
-+#include <asm/idle.h>
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/desc.h>
-+#include <asm/proto.h>
-+#include <asm/mach_apic.h>
-+#include <asm/acpi.h>
-+#include <asm/dma.h>
-+#include <asm/nmi.h>
-+#include <asm/msidef.h>
-+#include <asm/hypertransport.h>
-+
-+struct irq_cfg {
-+#ifndef CONFIG_XEN
-+ cpumask_t domain;
-+ cpumask_t old_domain;
-+#endif
-+ unsigned move_cleanup_count;
-+ u8 vector;
-+ u8 move_in_progress : 1;
-+};
-+
-+/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
-+struct irq_cfg irq_cfg[NR_IRQS] __read_mostly;
-+
-+static int assign_irq_vector(int irq, cpumask_t mask);
-+
-+#define __apicdebuginit __init
-+
-+int sis_apic_bug; /* not actually supported, dummy for compile */
-+
-+#ifdef CONFIG_XEN
-+#include <xen/interface/xen.h>
-+#include <xen/interface/physdev.h>
-+
-+/* Fake i8259 */
-+#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
-+#define disable_8259A_irq(_irq) ((void)0)
-+#define i8259A_irq_pending(_irq) (0)
-+
-+unsigned long io_apic_irqs;
-+
-+#define clear_IO_APIC() ((void)0)
-+#else
-+static int no_timer_check;
-+
-+static int disable_timer_pin_1 __initdata;
-+
-+int timer_over_8254 __initdata = 1;
-+
-+/* Where, if anywhere, the i8259 is connected in external int mode */
-+static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
-+#endif
-+
-+static DEFINE_SPINLOCK(ioapic_lock);
-+DEFINE_SPINLOCK(vector_lock);
-+
-+/*
-+ * # of IRQ routing registers
-+ */
-+int nr_ioapic_registers[MAX_IO_APICS];
-+
-+/*
-+ * Rough estimation of how many shared IRQs there are, can
-+ * be changed anytime.
-+ */
-+#define MAX_PLUS_SHARED_IRQS NR_IRQS
-+#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
-+
-+/*
-+ * This is performance-critical, we want to do it O(1)
-+ *
-+ * the indexing order of this array favors 1:1 mappings
-+ * between pins and IRQs.
-+ */
-+
-+static struct irq_pin_list {
-+ short apic, pin, next;
-+} irq_2_pin[PIN_MAP_SIZE];
-+
-+#ifndef CONFIG_XEN
-+struct io_apic {
-+ unsigned int index;
-+ unsigned int unused[3];
-+ unsigned int data;
-+};
-+
-+static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
-+{
-+ return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
-+ + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK);
-+}
-+#endif
-+
-+static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
-+{
-+#ifndef CONFIG_XEN
-+ struct io_apic __iomem *io_apic = io_apic_base(apic);
-+ writel(reg, &io_apic->index);
-+ return readl(&io_apic->data);
-+#else
-+ struct physdev_apic apic_op;
-+ int ret;
-+
-+ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
-+ apic_op.reg = reg;
-+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
-+ if (ret)
-+ return ret;
-+ return apic_op.value;
-+#endif
-+}
-+
-+static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
-+{
-+#ifndef CONFIG_XEN
-+ struct io_apic __iomem *io_apic = io_apic_base(apic);
-+ writel(reg, &io_apic->index);
-+ writel(value, &io_apic->data);
-+#else
-+ struct physdev_apic apic_op;
-+
-+ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
-+ apic_op.reg = reg;
-+ apic_op.value = value;
-+ HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
-+#endif
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Re-write a value: to be used for read-modify-write
-+ * cycles where the read already set up the index register.
-+ */
-+static inline void io_apic_modify(unsigned int apic, unsigned int value)
-+{
-+ struct io_apic __iomem *io_apic = io_apic_base(apic);
-+ writel(value, &io_apic->data);
-+}
-+#else
-+#define io_apic_modify io_apic_write
-+#endif
-+
-+/*
-+ * Synchronize the IO-APIC and the CPU by doing
-+ * a dummy read from the IO-APIC
-+ */
-+static inline void io_apic_sync(unsigned int apic)
-+{
-+#ifndef CONFIG_XEN
-+ struct io_apic __iomem *io_apic = io_apic_base(apic);
-+ readl(&io_apic->data);
-+#endif
-+}
-+
-+union entry_union {
-+ struct { u32 w1, w2; };
-+ struct IO_APIC_route_entry entry;
-+};
-+
-+static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
-+{
-+ union entry_union eu;
-+ unsigned long flags;
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
-+ eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ return eu.entry;
-+}
-+
-+/*
-+ * When we write a new IO APIC routing entry, we need to write the high
-+ * word first! If the mask bit in the low word is clear, we will enable
-+ * the interrupt, and we need to make sure the entry is fully populated
-+ * before that happens.
-+ */
-+static void
-+__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
-+{
-+ union entry_union eu;
-+ eu.entry = e;
-+ io_apic_write(apic, 0x11 + 2*pin, eu.w2);
-+ io_apic_write(apic, 0x10 + 2*pin, eu.w1);
-+}
-+
-+static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
-+{
-+ unsigned long flags;
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ __ioapic_write_entry(apic, pin, e);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * When we mask an IO APIC routing entry, we need to write the low
-+ * word first, in order to set the mask bit before we change the
-+ * high bits!
-+ */
-+static void ioapic_mask_entry(int apic, int pin)
-+{
-+ unsigned long flags;
-+ union entry_union eu = { .entry.mask = 1 };
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0x10 + 2*pin, eu.w1);
-+ io_apic_write(apic, 0x11 + 2*pin, eu.w2);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+#ifdef CONFIG_SMP
-+static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
-+{
-+ int apic, pin;
-+ struct irq_pin_list *entry = irq_2_pin + irq;
-+
-+ BUG_ON(irq >= NR_IRQS);
-+ for (;;) {
-+ unsigned int reg;
-+ apic = entry->apic;
-+ pin = entry->pin;
-+ if (pin == -1)
-+ break;
-+ io_apic_write(apic, 0x11 + pin*2, dest);
-+ reg = io_apic_read(apic, 0x10 + pin*2);
-+ reg &= ~0x000000ff;
-+ reg |= vector;
-+ io_apic_modify(apic, reg);
-+ if (!entry->next)
-+ break;
-+ entry = irq_2_pin + entry->next;
-+ }
-+}
-+
-+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
-+{
-+ struct irq_cfg *cfg = irq_cfg + irq;
-+ unsigned long flags;
-+ unsigned int dest;
-+ cpumask_t tmp;
-+
-+ cpus_and(tmp, mask, cpu_online_map);
-+ if (cpus_empty(tmp))
-+ return;
-+
-+ if (assign_irq_vector(irq, mask))
-+ return;
-+
-+ cpus_and(tmp, cfg->domain, mask);
-+ dest = cpu_mask_to_apicid(tmp);
-+
-+ /*
-+ * Only the high 8 bits are valid.
-+ */
-+ dest = SET_APIC_LOGICAL_ID(dest);
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ __target_IO_APIC_irq(irq, dest, cfg->vector);
-+ irq_desc[irq].affinity = mask;
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+#endif
-+#endif
-+
-+/*
-+ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
-+ * shared ISA-space IRQs, so we have to support them. We are super
-+ * fast in the common case, and fast for shared ISA-space IRQs.
-+ */
-+static void add_pin_to_irq(unsigned int irq, int apic, int pin)
-+{
-+ static int first_free_entry = NR_IRQS;
-+ struct irq_pin_list *entry = irq_2_pin + irq;
-+
-+ BUG_ON(irq >= NR_IRQS);
-+ while (entry->next)
-+ entry = irq_2_pin + entry->next;
-+
-+ if (entry->pin != -1) {
-+ entry->next = first_free_entry;
-+ entry = irq_2_pin + entry->next;
-+ if (++first_free_entry >= PIN_MAP_SIZE)
-+ panic("io_apic.c: ran out of irq_2_pin entries!");
-+ }
-+ entry->apic = apic;
-+ entry->pin = pin;
-+}
-+
-+#ifndef CONFIG_XEN
-+#define __DO_ACTION(R, ACTION, FINAL) \
-+ \
-+{ \
-+ int pin; \
-+ struct irq_pin_list *entry = irq_2_pin + irq; \
-+ \
-+ BUG_ON(irq >= NR_IRQS); \
-+ for (;;) { \
-+ unsigned int reg; \
-+ pin = entry->pin; \
-+ if (pin == -1) \
-+ break; \
-+ reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
-+ reg ACTION; \
-+ io_apic_modify(entry->apic, reg); \
-+ FINAL; \
-+ if (!entry->next) \
-+ break; \
-+ entry = irq_2_pin + entry->next; \
-+ } \
-+}
-+
-+#define DO_ACTION(name,R,ACTION, FINAL) \
-+ \
-+ static void name##_IO_APIC_irq (unsigned int irq) \
-+ __DO_ACTION(R, ACTION, FINAL)
-+
-+DO_ACTION( __mask, 0, |= 0x00010000, io_apic_sync(entry->apic) )
-+ /* mask = 1 */
-+DO_ACTION( __unmask, 0, &= 0xfffeffff, )
-+ /* mask = 0 */
-+
-+static void mask_IO_APIC_irq (unsigned int irq)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ __mask_IO_APIC_irq(irq);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void unmask_IO_APIC_irq (unsigned int irq)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ __unmask_IO_APIC_irq(irq);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
-+{
-+ struct IO_APIC_route_entry entry;
-+
-+ /* Check delivery_mode to be sure we're not clearing an SMI pin */
-+ entry = ioapic_read_entry(apic, pin);
-+ if (entry.delivery_mode == dest_SMI)
-+ return;
-+ /*
-+ * Disable it in the IO-APIC irq-routing table:
-+ */
-+ ioapic_mask_entry(apic, pin);
-+}
-+
-+static void clear_IO_APIC (void)
-+{
-+ int apic, pin;
-+
-+ for (apic = 0; apic < nr_ioapics; apic++)
-+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
-+ clear_IO_APIC_pin(apic, pin);
-+}
-+
-+#endif /* !CONFIG_XEN */
-+
-+int skip_ioapic_setup;
-+int ioapic_force;
-+
-+/* dummy parsing: see setup.c */
-+
-+static int __init disable_ioapic_setup(char *str)
-+{
-+ skip_ioapic_setup = 1;
-+ return 0;
-+}
-+early_param("noapic", disable_ioapic_setup);
-+
-+#ifndef CONFIG_XEN
-+/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
-+static int __init disable_timer_pin_setup(char *arg)
-+{
-+ disable_timer_pin_1 = 1;
-+ return 1;
-+}
-+__setup("disable_timer_pin_1", disable_timer_pin_setup);
-+
-+static int __init setup_disable_8254_timer(char *s)
-+{
-+ timer_over_8254 = -1;
-+ return 1;
-+}
-+static int __init setup_enable_8254_timer(char *s)
-+{
-+ timer_over_8254 = 2;
-+ return 1;
-+}
-+
-+__setup("disable_8254_timer", setup_disable_8254_timer);
-+__setup("enable_8254_timer", setup_enable_8254_timer);
-+#endif /* !CONFIG_XEN */
-+
-+
-+/*
-+ * Find the IRQ entry number of a certain pin.
-+ */
-+static int find_irq_entry(int apic, int pin, int type)
-+{
-+ int i;
-+
-+ for (i = 0; i < mp_irq_entries; i++)
-+ if (mp_irqs[i].mpc_irqtype == type &&
-+ (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
-+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
-+ mp_irqs[i].mpc_dstirq == pin)
-+ return i;
-+
-+ return -1;
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Find the pin to which IRQ[irq] (ISA) is connected
-+ */
-+static int __init find_isa_irq_pin(int irq, int type)
-+{
-+ int i;
-+
-+ for (i = 0; i < mp_irq_entries; i++) {
-+ int lbus = mp_irqs[i].mpc_srcbus;
-+
-+ if (test_bit(lbus, mp_bus_not_pci) &&
-+ (mp_irqs[i].mpc_irqtype == type) &&
-+ (mp_irqs[i].mpc_srcbusirq == irq))
-+
-+ return mp_irqs[i].mpc_dstirq;
-+ }
-+ return -1;
-+}
-+
-+static int __init find_isa_irq_apic(int irq, int type)
-+{
-+ int i;
-+
-+ for (i = 0; i < mp_irq_entries; i++) {
-+ int lbus = mp_irqs[i].mpc_srcbus;
-+
-+ if (test_bit(lbus, mp_bus_not_pci) &&
-+ (mp_irqs[i].mpc_irqtype == type) &&
-+ (mp_irqs[i].mpc_srcbusirq == irq))
-+ break;
-+ }
-+ if (i < mp_irq_entries) {
-+ int apic;
-+ for(apic = 0; apic < nr_ioapics; apic++) {
-+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
-+ return apic;
-+ }
-+ }
-+
-+ return -1;
-+}
-+#endif
-+
-+/*
-+ * Find a specific PCI IRQ entry.
-+ * Not an __init, possibly needed by modules
-+ */
-+static int pin_2_irq(int idx, int apic, int pin);
-+
-+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
-+{
-+ int apic, i, best_guess = -1;
-+
-+ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
-+ bus, slot, pin);
-+ if (mp_bus_id_to_pci_bus[bus] == -1) {
-+ apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
-+ return -1;
-+ }
-+ for (i = 0; i < mp_irq_entries; i++) {
-+ int lbus = mp_irqs[i].mpc_srcbus;
-+
-+ for (apic = 0; apic < nr_ioapics; apic++)
-+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
-+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
-+ break;
-+
-+ if (!test_bit(lbus, mp_bus_not_pci) &&
-+ !mp_irqs[i].mpc_irqtype &&
-+ (bus == lbus) &&
-+ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
-+ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
-+
-+ if (!(apic || IO_APIC_IRQ(irq)))
-+ continue;
-+
-+ if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
-+ return irq;
-+ /*
-+ * Use the first all-but-pin matching entry as a
-+ * best-guess fuzzy result for broken mptables.
-+ */
-+ if (best_guess < 0)
-+ best_guess = irq;
-+ }
-+ }
-+ BUG_ON(best_guess >= NR_IRQS);
-+ return best_guess;
-+}
-+
-+/* ISA interrupts are always polarity zero edge triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_ISA_trigger(idx) (0)
-+#define default_ISA_polarity(idx) (0)
-+
-+/* PCI interrupts are always polarity one level triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_PCI_trigger(idx) (1)
-+#define default_PCI_polarity(idx) (1)
-+
-+static int __init MPBIOS_polarity(int idx)
-+{
-+ int bus = mp_irqs[idx].mpc_srcbus;
-+ int polarity;
-+
-+ /*
-+ * Determine IRQ line polarity (high active or low active):
-+ */
-+ switch (mp_irqs[idx].mpc_irqflag & 3)
-+ {
-+ case 0: /* conforms, ie. bus-type dependent polarity */
-+ if (test_bit(bus, mp_bus_not_pci))
-+ polarity = default_ISA_polarity(idx);
-+ else
-+ polarity = default_PCI_polarity(idx);
-+ break;
-+ case 1: /* high active */
-+ {
-+ polarity = 0;
-+ break;
-+ }
-+ case 2: /* reserved */
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ polarity = 1;
-+ break;
-+ }
-+ case 3: /* low active */
-+ {
-+ polarity = 1;
-+ break;
-+ }
-+ default: /* invalid */
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ polarity = 1;
-+ break;
-+ }
-+ }
-+ return polarity;
-+}
-+
-+static int MPBIOS_trigger(int idx)
-+{
-+ int bus = mp_irqs[idx].mpc_srcbus;
-+ int trigger;
-+
-+ /*
-+ * Determine IRQ trigger mode (edge or level sensitive):
-+ */
-+ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
-+ {
-+ case 0: /* conforms, ie. bus-type dependent */
-+ if (test_bit(bus, mp_bus_not_pci))
-+ trigger = default_ISA_trigger(idx);
-+ else
-+ trigger = default_PCI_trigger(idx);
-+ break;
-+ case 1: /* edge */
-+ {
-+ trigger = 0;
-+ break;
-+ }
-+ case 2: /* reserved */
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ trigger = 1;
-+ break;
-+ }
-+ case 3: /* level */
-+ {
-+ trigger = 1;
-+ break;
-+ }
-+ default: /* invalid */
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ trigger = 0;
-+ break;
-+ }
-+ }
-+ return trigger;
-+}
-+
-+static inline int irq_polarity(int idx)
-+{
-+ return MPBIOS_polarity(idx);
-+}
-+
-+static inline int irq_trigger(int idx)
-+{
-+ return MPBIOS_trigger(idx);
-+}
-+
-+static int pin_2_irq(int idx, int apic, int pin)
-+{
-+ int irq, i;
-+ int bus = mp_irqs[idx].mpc_srcbus;
-+
-+ /*
-+ * Debugging check, we are in big trouble if this message pops up!
-+ */
-+ if (mp_irqs[idx].mpc_dstirq != pin)
-+ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
-+
-+ if (test_bit(bus, mp_bus_not_pci)) {
-+ irq = mp_irqs[idx].mpc_srcbusirq;
-+ } else {
-+ /*
-+ * PCI IRQs are mapped in order
-+ */
-+ i = irq = 0;
-+ while (i < apic)
-+ irq += nr_ioapic_registers[i++];
-+ irq += pin;
-+ }
-+ BUG_ON(irq >= NR_IRQS);
-+ return irq;
-+}
-+
-+static int __assign_irq_vector(int irq, cpumask_t mask)
-+{
-+ struct physdev_irq irq_op;
-+ struct irq_cfg *cfg;
-+
-+ BUG_ON((unsigned)irq >= NR_IRQS);
-+ cfg = &irq_cfg[irq];
-+
-+ if ((cfg->move_in_progress) || cfg->move_cleanup_count)
-+ return -EBUSY;
-+
-+ if (cfg->vector)
-+ return 0;
-+
-+ irq_op.irq = irq;
-+ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
-+ return -ENOSPC;
-+
-+ cfg->vector = irq_op.vector;
-+
-+ return 0;
-+}
-+
-+static int assign_irq_vector(int irq, cpumask_t mask)
-+{
-+ int err;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&vector_lock, flags);
-+ err = __assign_irq_vector(irq, mask);
-+ spin_unlock_irqrestore(&vector_lock, flags);
-+ return err;
-+}
-+
-+#ifndef CONFIG_XEN
-+static void __clear_irq_vector(int irq)
-+{
-+ struct irq_cfg *cfg;
-+ cpumask_t mask;
-+ int cpu, vector;
-+
-+ BUG_ON((unsigned)irq >= NR_IRQS);
-+ cfg = &irq_cfg[irq];
-+ BUG_ON(!cfg->vector);
-+
-+ vector = cfg->vector;
-+ cpus_and(mask, cfg->domain, cpu_online_map);
-+ for_each_cpu_mask(cpu, mask)
-+ per_cpu(vector_irq, cpu)[vector] = -1;
-+
-+ cfg->vector = 0;
-+ cfg->domain = CPU_MASK_NONE;
-+}
-+
-+void __setup_vector_irq(int cpu)
-+{
-+ /* Initialize vector_irq on a new cpu */
-+ /* This function must be called with vector_lock held */
-+ int irq, vector;
-+
-+ /* Mark the inuse vectors */
-+ for (irq = 0; irq < NR_IRQS; ++irq) {
-+ if (!cpu_isset(cpu, irq_cfg[irq].domain))
-+ continue;
-+ vector = irq_cfg[irq].vector;
-+ per_cpu(vector_irq, cpu)[vector] = irq;
-+ }
-+ /* Mark the free vectors */
-+ for (vector = 0; vector < NR_VECTORS; ++vector) {
-+ irq = per_cpu(vector_irq, cpu)[vector];
-+ if (irq < 0)
-+ continue;
-+ if (!cpu_isset(cpu, irq_cfg[irq].domain))
-+ per_cpu(vector_irq, cpu)[vector] = -1;
-+ }
-+}
-+
-+static struct irq_chip ioapic_chip;
-+
-+static void ioapic_register_intr(int irq, unsigned long trigger)
-+{
-+ if (trigger)
-+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
-+ handle_fasteoi_irq, "fasteoi");
-+ else
-+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
-+ handle_edge_irq, "edge");
-+}
-+#else
-+#define ioapic_register_intr(irq,trigger) ((void)0)
-+#endif /* !CONFIG_XEN */
-+
-+static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
-+ int trigger, int polarity)
-+{
-+ struct irq_cfg *cfg = irq_cfg + irq;
-+ struct IO_APIC_route_entry entry;
-+ cpumask_t mask;
-+
-+ if (!IO_APIC_IRQ(irq))
-+ return;
-+
-+ mask = TARGET_CPUS;
-+ if (assign_irq_vector(irq, mask))
-+ return;
-+
-+#ifndef CONFIG_XEN
-+ cpus_and(mask, cfg->domain, mask);
-+#endif
-+
-+ apic_printk(APIC_VERBOSE,KERN_DEBUG
-+ "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
-+ "IRQ %d Mode:%i Active:%i)\n",
-+ apic, mp_ioapics[apic].mpc_apicid, pin, cfg->vector,
-+ irq, trigger, polarity);
-+
-+ /*
-+ * add it to the IO-APIC irq-routing table:
-+ */
-+ memset(&entry,0,sizeof(entry));
-+
-+ entry.delivery_mode = INT_DELIVERY_MODE;
-+ entry.dest_mode = INT_DEST_MODE;
-+ entry.dest = cpu_mask_to_apicid(mask);
-+ entry.mask = 0; /* enable IRQ */
-+ entry.trigger = trigger;
-+ entry.polarity = polarity;
-+ entry.vector = cfg->vector;
-+
-+ /* Mask level triggered irqs.
-+ * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
-+ */
-+ if (trigger)
-+ entry.mask = 1;
-+
-+ ioapic_register_intr(irq, trigger);
-+ if (irq < 16)
-+ disable_8259A_irq(irq);
-+
-+ ioapic_write_entry(apic, pin, entry);
-+}
-+
-+static void __init setup_IO_APIC_irqs(void)
-+{
-+ int apic, pin, idx, irq, first_notcon = 1;
-+
-+ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
-+
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+
-+ idx = find_irq_entry(apic,pin,mp_INT);
-+ if (idx == -1) {
-+ if (first_notcon) {
-+ apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
-+ first_notcon = 0;
-+ } else
-+ apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
-+ continue;
-+ }
-+
-+ irq = pin_2_irq(idx, apic, pin);
-+ add_pin_to_irq(irq, apic, pin);
-+
-+ setup_IO_APIC_irq(apic, pin, irq,
-+ irq_trigger(idx), irq_polarity(idx));
-+ }
-+ }
-+
-+ if (!first_notcon)
-+ apic_printk(APIC_VERBOSE," not connected.\n");
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Set up the 8259A-master output pin as broadcast to all
-+ * CPUs.
-+ */
-+static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
-+{
-+ struct IO_APIC_route_entry entry;
-+ unsigned long flags;
-+
-+ memset(&entry,0,sizeof(entry));
-+
-+ disable_8259A_irq(0);
-+
-+ /* mask LVT0 */
-+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-+
-+ /*
-+ * We use logical delivery to get the timer IRQ
-+ * to the first CPU.
-+ */
-+ entry.dest_mode = INT_DEST_MODE;
-+ entry.mask = 0; /* unmask IRQ now */
-+ entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
-+ entry.delivery_mode = INT_DELIVERY_MODE;
-+ entry.polarity = 0;
-+ entry.trigger = 0;
-+ entry.vector = vector;
-+
-+ /*
-+ * The timer IRQ doesn't have to know that behind the
-+ * scene we have a 8259A-master in AEOI mode ...
-+ */
-+ set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
-+
-+ /*
-+ * Add it to the IO-APIC irq-routing table:
-+ */
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
-+ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ enable_8259A_irq(0);
-+}
-+
-+void __apicdebuginit print_IO_APIC(void)
-+{
-+ int apic, i;
-+ union IO_APIC_reg_00 reg_00;
-+ union IO_APIC_reg_01 reg_01;
-+ union IO_APIC_reg_02 reg_02;
-+ unsigned long flags;
-+
-+ if (apic_verbosity == APIC_QUIET)
-+ return;
-+
-+ printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
-+ for (i = 0; i < nr_ioapics; i++)
-+ printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
-+ mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
-+
-+ /*
-+ * We are a bit conservative about what we expect. We have to
-+ * know about every hardware change ASAP.
-+ */
-+ printk(KERN_INFO "testing the IO APIC.......................\n");
-+
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_00.raw = io_apic_read(apic, 0);
-+ reg_01.raw = io_apic_read(apic, 1);
-+ if (reg_01.bits.version >= 0x10)
-+ reg_02.raw = io_apic_read(apic, 2);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ printk("\n");
-+ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
-+ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
-+ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
-+
-+ printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
-+ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
-+
-+ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
-+ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
-+
-+ if (reg_01.bits.version >= 0x10) {
-+ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
-+ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
-+ }
-+
-+ printk(KERN_DEBUG ".... IRQ redirection table:\n");
-+
-+ printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
-+ " Stat Dmod Deli Vect: \n");
-+
-+ for (i = 0; i <= reg_01.bits.entries; i++) {
-+ struct IO_APIC_route_entry entry;
-+
-+ entry = ioapic_read_entry(apic, i);
-+
-+ printk(KERN_DEBUG " %02x %03X ",
-+ i,
-+ entry.dest
-+ );
-+
-+ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
-+ entry.mask,
-+ entry.trigger,
-+ entry.irr,
-+ entry.polarity,
-+ entry.delivery_status,
-+ entry.dest_mode,
-+ entry.delivery_mode,
-+ entry.vector
-+ );
-+ }
-+ }
-+ printk(KERN_DEBUG "IRQ to pin mappings:\n");
-+ for (i = 0; i < NR_IRQS; i++) {
-+ struct irq_pin_list *entry = irq_2_pin + i;
-+ if (entry->pin < 0)
-+ continue;
-+ printk(KERN_DEBUG "IRQ%d ", i);
-+ for (;;) {
-+ printk("-> %d:%d", entry->apic, entry->pin);
-+ if (!entry->next)
-+ break;
-+ entry = irq_2_pin + entry->next;
-+ }
-+ printk("\n");
-+ }
-+
-+ printk(KERN_INFO ".................................... done.\n");
-+
-+ return;
-+}
-+
-+#if 0
-+
-+static __apicdebuginit void print_APIC_bitfield (int base)
-+{
-+ unsigned int v;
-+ int i, j;
-+
-+ if (apic_verbosity == APIC_QUIET)
-+ return;
-+
-+ printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
-+ for (i = 0; i < 8; i++) {
-+ v = apic_read(base + i*0x10);
-+ for (j = 0; j < 32; j++) {
-+ if (v & (1<<j))
-+ printk("1");
-+ else
-+ printk("0");
-+ }
-+ printk("\n");
-+ }
-+}
-+
-+void __apicdebuginit print_local_APIC(void * dummy)
-+{
-+ unsigned int v, ver, maxlvt;
-+
-+ if (apic_verbosity == APIC_QUIET)
-+ return;
-+
-+ printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
-+ smp_processor_id(), hard_smp_processor_id());
-+ v = apic_read(APIC_ID);
-+ printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
-+ v = apic_read(APIC_LVR);
-+ printk(KERN_INFO "... APIC VERSION: %08x\n", v);
-+ ver = GET_APIC_VERSION(v);
-+ maxlvt = get_maxlvt();
-+
-+ v = apic_read(APIC_TASKPRI);
-+ printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
-+
-+ v = apic_read(APIC_ARBPRI);
-+ printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
-+ v & APIC_ARBPRI_MASK);
-+ v = apic_read(APIC_PROCPRI);
-+ printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
-+
-+ v = apic_read(APIC_EOI);
-+ printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
-+ v = apic_read(APIC_RRR);
-+ printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
-+ v = apic_read(APIC_LDR);
-+ printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
-+ v = apic_read(APIC_DFR);
-+ printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
-+ v = apic_read(APIC_SPIV);
-+ printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
-+
-+ printk(KERN_DEBUG "... APIC ISR field:\n");
-+ print_APIC_bitfield(APIC_ISR);
-+ printk(KERN_DEBUG "... APIC TMR field:\n");
-+ print_APIC_bitfield(APIC_TMR);
-+ printk(KERN_DEBUG "... APIC IRR field:\n");
-+ print_APIC_bitfield(APIC_IRR);
-+
-+ v = apic_read(APIC_ESR);
-+ printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
-+
-+ v = apic_read(APIC_ICR);
-+ printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
-+ v = apic_read(APIC_ICR2);
-+ printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
-+
-+ v = apic_read(APIC_LVTT);
-+ printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
-+
-+ if (maxlvt > 3) { /* PC is LVT#4. */
-+ v = apic_read(APIC_LVTPC);
-+ printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
-+ }
-+ v = apic_read(APIC_LVT0);
-+ printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
-+ v = apic_read(APIC_LVT1);
-+ printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
-+
-+ if (maxlvt > 2) { /* ERR is LVT#3. */
-+ v = apic_read(APIC_LVTERR);
-+ printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
-+ }
-+
-+ v = apic_read(APIC_TMICT);
-+ printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
-+ v = apic_read(APIC_TMCCT);
-+ printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
-+ v = apic_read(APIC_TDCR);
-+ printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
-+ printk("\n");
-+}
-+
-+void print_all_local_APICs (void)
-+{
-+ on_each_cpu(print_local_APIC, NULL, 1, 1);
-+}
-+
-+void __apicdebuginit print_PIC(void)
-+{
-+ unsigned int v;
-+ unsigned long flags;
-+
-+ if (apic_verbosity == APIC_QUIET)
-+ return;
-+
-+ printk(KERN_DEBUG "\nprinting PIC contents\n");
-+
-+ spin_lock_irqsave(&i8259A_lock, flags);
-+
-+ v = inb(0xa1) << 8 | inb(0x21);
-+ printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
-+
-+ v = inb(0xa0) << 8 | inb(0x20);
-+ printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
-+
-+ outb(0x0b,0xa0);
-+ outb(0x0b,0x20);
-+ v = inb(0xa0) << 8 | inb(0x20);
-+ outb(0x0a,0xa0);
-+ outb(0x0a,0x20);
-+
-+ spin_unlock_irqrestore(&i8259A_lock, flags);
-+
-+ printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
-+
-+ v = inb(0x4d1) << 8 | inb(0x4d0);
-+ printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
-+}
-+
-+#endif /* 0 */
-+
-+#else
-+void __init print_IO_APIC(void) { }
-+#endif /* !CONFIG_XEN */
-+
-+static void __init enable_IO_APIC(void)
-+{
-+ union IO_APIC_reg_01 reg_01;
-+#ifndef CONFIG_XEN
-+ int i8259_apic, i8259_pin;
-+#endif
-+ int i, apic;
-+ unsigned long flags;
-+
-+ for (i = 0; i < PIN_MAP_SIZE; i++) {
-+ irq_2_pin[i].pin = -1;
-+ irq_2_pin[i].next = 0;
-+ }
-+
-+ /*
-+ * The number of IO-APIC IRQ registers (== #pins):
-+ */
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_01.raw = io_apic_read(apic, 1);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
-+ }
-+#ifndef CONFIG_XEN
-+ for(apic = 0; apic < nr_ioapics; apic++) {
-+ int pin;
-+ /* See if any of the pins is in ExtINT mode */
-+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+ struct IO_APIC_route_entry entry;
-+ entry = ioapic_read_entry(apic, pin);
-+
-+ /* If the interrupt line is enabled and in ExtInt mode
-+ * I have found the pin where the i8259 is connected.
-+ */
-+ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
-+ ioapic_i8259.apic = apic;
-+ ioapic_i8259.pin = pin;
-+ goto found_i8259;
-+ }
-+ }
-+ }
-+ found_i8259:
-+	/* Look to see if the MP table has reported the ExtINT */
-+ i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
-+ i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
-+ /* Trust the MP table if nothing is setup in the hardware */
-+ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
-+ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
-+ ioapic_i8259.pin = i8259_pin;
-+ ioapic_i8259.apic = i8259_apic;
-+ }
-+ /* Complain if the MP table and the hardware disagree */
-+ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
-+ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
-+ {
-+ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
-+ }
-+#endif
-+
-+ /*
-+ * Do not trust the IO-APIC being empty at bootup
-+ */
-+ clear_IO_APIC();
-+}
-+
-+/*
-+ * Not an __init, needed by the reboot code
-+ */
-+void disable_IO_APIC(void)
-+{
-+ /*
-+ * Clear the IO-APIC before rebooting:
-+ */
-+ clear_IO_APIC();
-+
-+#ifndef CONFIG_XEN
-+ /*
-+ * If the i8259 is routed through an IOAPIC
-+ * Put that IOAPIC in virtual wire mode
-+ * so legacy interrupts can be delivered.
-+ */
-+ if (ioapic_i8259.pin != -1) {
-+ struct IO_APIC_route_entry entry;
-+
-+ memset(&entry, 0, sizeof(entry));
-+ entry.mask = 0; /* Enabled */
-+ entry.trigger = 0; /* Edge */
-+ entry.irr = 0;
-+ entry.polarity = 0; /* High */
-+ entry.delivery_status = 0;
-+ entry.dest_mode = 0; /* Physical */
-+ entry.delivery_mode = dest_ExtINT; /* ExtInt */
-+ entry.vector = 0;
-+ entry.dest = GET_APIC_ID(apic_read(APIC_ID));
-+
-+ /*
-+ * Add it to the IO-APIC irq-routing table:
-+ */
-+ ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
-+ }
-+
-+ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
-+#endif
-+}
-+
-+/*
-+ * There is a nasty bug in some older SMP boards; their mptable lies
-+ * about the timer IRQ. We do the following to work around the situation:
-+ *
-+ * - timer IRQ defaults to IO-APIC IRQ
-+ * - if this function detects that timer IRQs are defunct, then we fall
-+ * back to ISA timer IRQs
-+ */
-+#ifndef CONFIG_XEN
-+static int __init timer_irq_works(void)
-+{
-+ unsigned long t1 = jiffies;
-+
-+ local_irq_enable();
-+ /* Let ten ticks pass... */
-+ mdelay((10 * 1000) / HZ);
-+
-+ /*
-+ * Expect a few ticks at least, to be sure some possible
-+ * glue logic does not lock up after one or two first
-+ * ticks in a non-ExtINT mode. Also the local APIC
-+ * might have cached one ExtINT interrupt. Finally, at
-+ * least one tick may be lost due to delays.
-+ */
-+
-+ /* jiffies wrap? */
-+ if (jiffies - t1 > 4)
-+ return 1;
-+ return 0;
-+}
-+
-+/*
-+ * In the SMP+IOAPIC case it might happen that there are an unspecified
-+ * number of pending IRQ events unhandled. These cases are very rare,
-+ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
-+ * better to do it this way as thus we do not have to be aware of
-+ * 'pending' interrupts in the IRQ path, except at this point.
-+ */
-+/*
-+ * Edge triggered needs to resend any interrupt
-+ * that was delayed but this is now handled in the device
-+ * independent code.
-+ */
-+
-+/*
-+ * Starting up an edge-triggered IO-APIC interrupt is
-+ * nasty - we need to make sure that we get the edge.
-+ * If it is already asserted for some reason, we need to
-+ * return 1 to indicate that it was pending.
-+ *
-+ * This is not complete - we should be able to fake
-+ * an edge even if it isn't on the 8259A...
-+ */
-+
-+static unsigned int startup_ioapic_irq(unsigned int irq)
-+{
-+ int was_pending = 0;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ if (irq < 16) {
-+ disable_8259A_irq(irq);
-+ if (i8259A_irq_pending(irq))
-+ was_pending = 1;
-+ }
-+ __unmask_IO_APIC_irq(irq);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return was_pending;
-+}
-+
-+static int ioapic_retrigger_irq(unsigned int irq)
-+{
-+ struct irq_cfg *cfg = &irq_cfg[irq];
-+ cpumask_t mask;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&vector_lock, flags);
-+ cpus_clear(mask);
-+ cpu_set(first_cpu(cfg->domain), mask);
-+
-+ send_IPI_mask(mask, cfg->vector);
-+ spin_unlock_irqrestore(&vector_lock, flags);
-+
-+ return 1;
-+}
-+
-+/*
-+ * Level and edge triggered IO-APIC interrupts need different handling,
-+ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
-+ * handled with the level-triggered descriptor, but that one has slightly
-+ * more overhead. Level-triggered interrupts cannot be handled with the
-+ * edge-triggered handler, without risking IRQ storms and other ugly
-+ * races.
-+ */
-+
-+#ifdef CONFIG_SMP
-+asmlinkage void smp_irq_move_cleanup_interrupt(void)
-+{
-+ unsigned vector, me;
-+ ack_APIC_irq();
-+ exit_idle();
-+ irq_enter();
-+
-+ me = smp_processor_id();
-+ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-+ unsigned int irq;
-+ struct irq_desc *desc;
-+ struct irq_cfg *cfg;
-+ irq = __get_cpu_var(vector_irq)[vector];
-+ if (irq >= NR_IRQS)
-+ continue;
-+
-+ desc = irq_desc + irq;
-+ cfg = irq_cfg + irq;
-+ spin_lock(&desc->lock);
-+ if (!cfg->move_cleanup_count)
-+ goto unlock;
-+
-+ if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
-+ goto unlock;
-+
-+ __get_cpu_var(vector_irq)[vector] = -1;
-+ cfg->move_cleanup_count--;
-+unlock:
-+ spin_unlock(&desc->lock);
-+ }
-+
-+ irq_exit();
-+}
-+
-+static void irq_complete_move(unsigned int irq)
-+{
-+ struct irq_cfg *cfg = irq_cfg + irq;
-+ unsigned vector, me;
-+
-+ if (likely(!cfg->move_in_progress))
-+ return;
-+
-+ vector = ~get_irq_regs()->orig_rax;
-+ me = smp_processor_id();
-+ if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
-+ cpumask_t cleanup_mask;
-+
-+ cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-+ cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-+ send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-+ cfg->move_in_progress = 0;
-+ }
-+}
-+#else
-+static inline void irq_complete_move(unsigned int irq) {}
-+#endif
-+
-+static void ack_apic_edge(unsigned int irq)
-+{
-+ irq_complete_move(irq);
-+ move_native_irq(irq);
-+ ack_APIC_irq();
-+}
-+
-+static void ack_apic_level(unsigned int irq)
-+{
-+ int do_unmask_irq = 0;
-+
-+ irq_complete_move(irq);
-+#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
-+ /* If we are moving the irq we need to mask it */
-+ if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
-+ do_unmask_irq = 1;
-+ mask_IO_APIC_irq(irq);
-+ }
-+#endif
-+
-+ /*
-+ * We must acknowledge the irq before we move it or the acknowledge will
-+ * not propagate properly.
-+ */
-+ ack_APIC_irq();
-+
-+	/* Now we can move and re-enable the irq */
-+ move_masked_irq(irq);
-+ if (unlikely(do_unmask_irq))
-+ unmask_IO_APIC_irq(irq);
-+}
-+
-+static struct irq_chip ioapic_chip __read_mostly = {
-+ .name = "IO-APIC",
-+ .startup = startup_ioapic_irq,
-+ .mask = mask_IO_APIC_irq,
-+ .unmask = unmask_IO_APIC_irq,
-+ .ack = ack_apic_edge,
-+ .eoi = ack_apic_level,
-+#ifdef CONFIG_SMP
-+ .set_affinity = set_ioapic_affinity_irq,
-+#endif
-+ .retrigger = ioapic_retrigger_irq,
-+};
-+#endif /* !CONFIG_XEN */
-+
-+static inline void init_IO_APIC_traps(void)
-+{
-+ int irq;
-+
-+ /*
-+ * NOTE! The local APIC isn't very good at handling
-+ * multiple interrupts at the same interrupt level.
-+ * As the interrupt level is determined by taking the
-+ * vector number and shifting that right by 4, we
-+ * want to spread these out a bit so that they don't
-+ * all fall in the same interrupt level.
-+ *
-+ * Also, we've got to be careful not to trash gate
-+ * 0x80, because int 0x80 is hm, kind of importantish. ;)
-+ */
-+ for (irq = 0; irq < NR_IRQS ; irq++) {
-+ int tmp = irq;
-+ if (IO_APIC_IRQ(tmp) && !irq_cfg[tmp].vector) {
-+ /*
-+ * Hmm.. We don't have an entry for this,
-+ * so default to an old-fashioned 8259
-+ * interrupt if we can..
-+ */
-+ if (irq < 16)
-+ make_8259A_irq(irq);
-+#ifndef CONFIG_XEN
-+ else
-+ /* Strange. Oh, well.. */
-+ irq_desc[irq].chip = &no_irq_chip;
-+#endif
-+ }
-+ }
-+}
-+
-+#ifndef CONFIG_XEN
-+static void enable_lapic_irq (unsigned int irq)
-+{
-+ unsigned long v;
-+
-+ v = apic_read(APIC_LVT0);
-+ apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
-+}
-+
-+static void disable_lapic_irq (unsigned int irq)
-+{
-+ unsigned long v;
-+
-+ v = apic_read(APIC_LVT0);
-+ apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
-+}
-+
-+static void ack_lapic_irq (unsigned int irq)
-+{
-+ ack_APIC_irq();
-+}
-+
-+static void end_lapic_irq (unsigned int i) { /* nothing */ }
-+
-+static struct hw_interrupt_type lapic_irq_type __read_mostly = {
-+ .name = "local-APIC",
-+ .typename = "local-APIC-edge",
-+ .startup = NULL, /* startup_irq() not used for IRQ0 */
-+ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
-+ .enable = enable_lapic_irq,
-+ .disable = disable_lapic_irq,
-+ .ack = ack_lapic_irq,
-+ .end = end_lapic_irq,
-+};
-+
-+static void setup_nmi (void)
-+{
-+ /*
-+ * Dirty trick to enable the NMI watchdog ...
-+ * We put the 8259A master into AEOI mode and
-+ * unmask on all local APICs LVT0 as NMI.
-+ *
-+ * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
-+ * is from Maciej W. Rozycki - so we do not have to EOI from
-+ * the NMI handler or the timer interrupt.
-+ */
-+ printk(KERN_INFO "activating NMI Watchdog ...");
-+
-+ enable_NMI_through_LVT0(NULL);
-+
-+ printk(" done.\n");
-+}
-+
-+/*
-+ * This looks a bit hackish but it's about the only way of sending
-+ * a few INTA cycles to 8259As and any associated glue logic. ICR does
-+ * not support the ExtINT mode, unfortunately. We need to send these
-+ * cycles as some i82489DX-based boards have glue logic that keeps the
-+ * 8259A interrupt line asserted until INTA. --macro
-+ */
-+static inline void unlock_ExtINT_logic(void)
-+{
-+ int apic, pin, i;
-+ struct IO_APIC_route_entry entry0, entry1;
-+ unsigned char save_control, save_freq_select;
-+ unsigned long flags;
-+
-+ pin = find_isa_irq_pin(8, mp_INT);
-+ apic = find_isa_irq_apic(8, mp_INT);
-+ if (pin == -1)
-+ return;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+ *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ clear_IO_APIC_pin(apic, pin);
-+
-+ memset(&entry1, 0, sizeof(entry1));
-+
-+ entry1.dest_mode = 0; /* physical delivery */
-+ entry1.mask = 0; /* unmask IRQ now */
-+ entry1.dest = hard_smp_processor_id();
-+ entry1.delivery_mode = dest_ExtINT;
-+ entry1.polarity = entry0.polarity;
-+ entry1.trigger = 0;
-+ entry1.vector = 0;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
-+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ save_control = CMOS_READ(RTC_CONTROL);
-+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-+ CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
-+ RTC_FREQ_SELECT);
-+ CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
-+
-+ i = 100;
-+ while (i-- > 0) {
-+ mdelay(10);
-+ if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
-+ i -= 10;
-+ }
-+
-+ CMOS_WRITE(save_control, RTC_CONTROL);
-+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-+ clear_IO_APIC_pin(apic, pin);
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
-+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+/*
-+ * This code may look a bit paranoid, but it's supposed to cooperate with
-+ * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
-+ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
-+ * fanatically on his truly buggy board.
-+ *
-+ * FIXME: really need to revamp this for modern platforms only.
-+ */
-+static inline void check_timer(void)
-+{
-+ struct irq_cfg *cfg = irq_cfg + 0;
-+ int apic1, pin1, apic2, pin2;
-+
-+ /*
-+ * get/set the timer IRQ vector:
-+ */
-+ disable_8259A_irq(0);
-+ assign_irq_vector(0, TARGET_CPUS);
-+
-+ /*
-+ * Subtle, code in do_timer_interrupt() expects an AEOI
-+ * mode for the 8259A whenever interrupts are routed
-+ * through I/O APICs. Also IRQ0 has to be enabled in
-+ * the 8259A which implies the virtual wire has to be
-+ * disabled in the local APIC.
-+ */
-+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-+ init_8259A(1);
-+ if (timer_over_8254 > 0)
-+ enable_8259A_irq(0);
-+
-+ pin1 = find_isa_irq_pin(0, mp_INT);
-+ apic1 = find_isa_irq_apic(0, mp_INT);
-+ pin2 = ioapic_i8259.pin;
-+ apic2 = ioapic_i8259.apic;
-+
-+ apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
-+ cfg->vector, apic1, pin1, apic2, pin2);
-+
-+ if (pin1 != -1) {
-+ /*
-+ * Ok, does IRQ0 through the IOAPIC work?
-+ */
-+ unmask_IO_APIC_irq(0);
-+ if (!no_timer_check && timer_irq_works()) {
-+ nmi_watchdog_default();
-+ if (nmi_watchdog == NMI_IO_APIC) {
-+ disable_8259A_irq(0);
-+ setup_nmi();
-+ enable_8259A_irq(0);
-+ }
-+ if (disable_timer_pin_1 > 0)
-+ clear_IO_APIC_pin(0, pin1);
-+ return;
-+ }
-+ clear_IO_APIC_pin(apic1, pin1);
-+ apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
-+ "connected to IO-APIC\n");
-+ }
-+
-+ apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
-+ "through the 8259A ... ");
-+ if (pin2 != -1) {
-+ apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
-+ apic2, pin2);
-+ /*
-+ * legacy devices should be connected to IO APIC #0
-+ */
-+ setup_ExtINT_IRQ0_pin(apic2, pin2, cfg->vector);
-+ if (timer_irq_works()) {
-+ apic_printk(APIC_VERBOSE," works.\n");
-+ nmi_watchdog_default();
-+ if (nmi_watchdog == NMI_IO_APIC) {
-+ setup_nmi();
-+ }
-+ return;
-+ }
-+ /*
-+ * Cleanup, just in case ...
-+ */
-+ clear_IO_APIC_pin(apic2, pin2);
-+ }
-+ apic_printk(APIC_VERBOSE," failed.\n");
-+
-+ if (nmi_watchdog == NMI_IO_APIC) {
-+ printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
-+ nmi_watchdog = 0;
-+ }
-+
-+ apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
-+
-+ disable_8259A_irq(0);
-+ irq_desc[0].chip = &lapic_irq_type;
-+ apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
-+ enable_8259A_irq(0);
-+
-+ if (timer_irq_works()) {
-+ apic_printk(APIC_VERBOSE," works.\n");
-+ return;
-+ }
-+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
-+ apic_printk(APIC_VERBOSE," failed.\n");
-+
-+ apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
-+
-+ init_8259A(0);
-+ make_8259A_irq(0);
-+ apic_write(APIC_LVT0, APIC_DM_EXTINT);
-+
-+ unlock_ExtINT_logic();
-+
-+ if (timer_irq_works()) {
-+ apic_printk(APIC_VERBOSE," works.\n");
-+ return;
-+ }
-+ apic_printk(APIC_VERBOSE," failed :(.\n");
-+ panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
-+}
-+
-+static int __init notimercheck(char *s)
-+{
-+ no_timer_check = 1;
-+ return 1;
-+}
-+__setup("no_timer_check", notimercheck);
-+#else
-+#define check_timer() ((void)0)
-+int timer_uses_ioapic_pin_0 = 0;
-+#endif /* !CONFIG_XEN */
-+
-+/*
-+ *
-+ * IRQs that are handled by the PIC in the MPS IOAPIC case.
-+ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
-+ * Linux doesn't really care, as it's not actually used
-+ * for any interrupt handling anyway.
-+ */
-+#define PIC_IRQS (1<<2)
-+
-+void __init setup_IO_APIC(void)
-+{
-+ enable_IO_APIC();
-+
-+ if (acpi_ioapic)
-+ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
-+ else
-+ io_apic_irqs = ~PIC_IRQS;
-+
-+ apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
-+
-+#ifndef CONFIG_XEN
-+ sync_Arb_IDs();
-+#endif /* !CONFIG_XEN */
-+ setup_IO_APIC_irqs();
-+ init_IO_APIC_traps();
-+ check_timer();
-+ if (!acpi_ioapic)
-+ print_IO_APIC();
-+}
-+
-+struct sysfs_ioapic_data {
-+ struct sys_device dev;
-+ struct IO_APIC_route_entry entry[0];
-+};
-+static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
-+
-+static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
-+{
-+ struct IO_APIC_route_entry *entry;
-+ struct sysfs_ioapic_data *data;
-+ int i;
-+
-+ data = container_of(dev, struct sysfs_ioapic_data, dev);
-+ entry = data->entry;
-+ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ )
-+ *entry = ioapic_read_entry(dev->id, i);
-+
-+ return 0;
-+}
-+
-+static int ioapic_resume(struct sys_device *dev)
-+{
-+ struct IO_APIC_route_entry *entry;
-+ struct sysfs_ioapic_data *data;
-+ unsigned long flags;
-+ union IO_APIC_reg_00 reg_00;
-+ int i;
-+
-+ data = container_of(dev, struct sysfs_ioapic_data, dev);
-+ entry = data->entry;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_00.raw = io_apic_read(dev->id, 0);
-+ if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
-+ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
-+ io_apic_write(dev->id, 0, reg_00.raw);
-+ }
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
-+ ioapic_write_entry(dev->id, i, entry[i]);
-+
-+ return 0;
-+}
-+
-+static struct sysdev_class ioapic_sysdev_class = {
-+ set_kset_name("ioapic"),
-+ .suspend = ioapic_suspend,
-+ .resume = ioapic_resume,
-+};
-+
-+static int __init ioapic_init_sysfs(void)
-+{
-+ struct sys_device * dev;
-+ int i, size, error = 0;
-+
-+ error = sysdev_class_register(&ioapic_sysdev_class);
-+ if (error)
-+ return error;
-+
-+ for (i = 0; i < nr_ioapics; i++ ) {
-+ size = sizeof(struct sys_device) + nr_ioapic_registers[i]
-+ * sizeof(struct IO_APIC_route_entry);
-+ mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
-+ if (!mp_ioapic_data[i]) {
-+ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-+ continue;
-+ }
-+ memset(mp_ioapic_data[i], 0, size);
-+ dev = &mp_ioapic_data[i]->dev;
-+ dev->id = i;
-+ dev->cls = &ioapic_sysdev_class;
-+ error = sysdev_register(dev);
-+ if (error) {
-+ kfree(mp_ioapic_data[i]);
-+ mp_ioapic_data[i] = NULL;
-+ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-+ continue;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+device_initcall(ioapic_init_sysfs);
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Dynamic irq allocate and deallocation
-+ */
-+int create_irq(void)
-+{
-+ /* Allocate an unused irq */
-+ int irq;
-+ int new;
-+ unsigned long flags;
-+
-+ irq = -ENOSPC;
-+ spin_lock_irqsave(&vector_lock, flags);
-+ for (new = (NR_IRQS - 1); new >= 0; new--) {
-+ if (platform_legacy_irq(new))
-+ continue;
-+ if (irq_cfg[new].vector != 0)
-+ continue;
-+ if (__assign_irq_vector(new, TARGET_CPUS) == 0)
-+ irq = new;
-+ break;
-+ }
-+ spin_unlock_irqrestore(&vector_lock, flags);
-+
-+ if (irq >= 0) {
-+ dynamic_irq_init(irq);
-+ }
-+ return irq;
-+}
-+
-+void destroy_irq(unsigned int irq)
-+{
-+ unsigned long flags;
-+
-+ dynamic_irq_cleanup(irq);
-+
-+ spin_lock_irqsave(&vector_lock, flags);
-+ __clear_irq_vector(irq);
-+ spin_unlock_irqrestore(&vector_lock, flags);
-+}
-+#endif
-+
-+/*
-+ * MSI message composition
-+ */
-+#ifdef CONFIG_PCI_MSI
-+static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
-+{
-+ struct irq_cfg *cfg = irq_cfg + irq;
-+ int err;
-+ unsigned dest;
-+ cpumask_t tmp;
-+
-+ tmp = TARGET_CPUS;
-+ err = assign_irq_vector(irq, tmp);
-+ if (!err) {
-+ cpus_and(tmp, cfg->domain, tmp);
-+ dest = cpu_mask_to_apicid(tmp);
-+
-+ msg->address_hi = MSI_ADDR_BASE_HI;
-+ msg->address_lo =
-+ MSI_ADDR_BASE_LO |
-+ ((INT_DEST_MODE == 0) ?
-+ MSI_ADDR_DEST_MODE_PHYSICAL:
-+ MSI_ADDR_DEST_MODE_LOGICAL) |
-+ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
-+ MSI_ADDR_REDIRECTION_CPU:
-+ MSI_ADDR_REDIRECTION_LOWPRI) |
-+ MSI_ADDR_DEST_ID(dest);
-+
-+ msg->data =
-+ MSI_DATA_TRIGGER_EDGE |
-+ MSI_DATA_LEVEL_ASSERT |
-+ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
-+ MSI_DATA_DELIVERY_FIXED:
-+ MSI_DATA_DELIVERY_LOWPRI) |
-+ MSI_DATA_VECTOR(cfg->vector);
-+ }
-+ return err;
-+}
-+
-+#ifdef CONFIG_SMP
-+static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
-+{
-+ struct irq_cfg *cfg = irq_cfg + irq;
-+ struct msi_msg msg;
-+ unsigned int dest;
-+ cpumask_t tmp;
-+
-+ cpus_and(tmp, mask, cpu_online_map);
-+ if (cpus_empty(tmp))
-+ return;
-+
-+ if (assign_irq_vector(irq, mask))
-+ return;
-+
-+ cpus_and(tmp, cfg->domain, mask);
-+ dest = cpu_mask_to_apicid(tmp);
-+
-+ read_msi_msg(irq, &msg);
-+
-+ msg.data &= ~MSI_DATA_VECTOR_MASK;
-+ msg.data |= MSI_DATA_VECTOR(cfg->vector);
-+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
-+ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-+
-+ write_msi_msg(irq, &msg);
-+ irq_desc[irq].affinity = mask;
-+}
-+#endif /* CONFIG_SMP */
-+
-+/*
-+ * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
-+ * which implement the MSI or MSI-X Capability Structure.
-+ */
-+static struct irq_chip msi_chip = {
-+ .name = "PCI-MSI",
-+ .unmask = unmask_msi_irq,
-+ .mask = mask_msi_irq,
-+ .ack = ack_apic_edge,
-+#ifdef CONFIG_SMP
-+ .set_affinity = set_msi_irq_affinity,
-+#endif
-+ .retrigger = ioapic_retrigger_irq,
-+};
-+
-+int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
-+{
-+ struct msi_msg msg;
-+ int irq, ret;
-+ irq = create_irq();
-+ if (irq < 0)
-+ return irq;
-+
-+ ret = msi_compose_msg(dev, irq, &msg);
-+ if (ret < 0) {
-+ destroy_irq(irq);
-+ return ret;
-+ }
-+
-+ set_irq_msi(irq, desc);
-+ write_msi_msg(irq, &msg);
-+
-+ set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
-+
-+ return 0;
-+}
-+
-+void arch_teardown_msi_irq(unsigned int irq)
-+{
-+ destroy_irq(irq);
-+}
-+
-+#endif /* CONFIG_PCI_MSI */
-+
-+/*
-+ * Hypertransport interrupt support
-+ */
-+#ifdef CONFIG_HT_IRQ
-+
-+#ifdef CONFIG_SMP
-+
-+static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
-+{
-+ struct ht_irq_msg msg;
-+ fetch_ht_irq_msg(irq, &msg);
-+
-+ msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
-+ msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
-+
-+ msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
-+ msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
-+
-+ write_ht_irq_msg(irq, &msg);
-+}
-+
-+static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
-+{
-+ struct irq_cfg *cfg = irq_cfg + irq;
-+ unsigned int dest;
-+ cpumask_t tmp;
-+
-+ cpus_and(tmp, mask, cpu_online_map);
-+ if (cpus_empty(tmp))
-+ return;
-+
-+ if (assign_irq_vector(irq, mask))
-+ return;
-+
-+ cpus_and(tmp, cfg->domain, mask);
-+ dest = cpu_mask_to_apicid(tmp);
-+
-+ target_ht_irq(irq, dest, cfg->vector);
-+ irq_desc[irq].affinity = mask;
-+}
-+#endif
-+
-+static struct irq_chip ht_irq_chip = {
-+ .name = "PCI-HT",
-+ .mask = mask_ht_irq,
-+ .unmask = unmask_ht_irq,
-+ .ack = ack_apic_edge,
-+#ifdef CONFIG_SMP
-+ .set_affinity = set_ht_irq_affinity,
-+#endif
-+ .retrigger = ioapic_retrigger_irq,
-+};
-+
-+int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
-+{
-+ struct irq_cfg *cfg = irq_cfg + irq;
-+ int err;
-+ cpumask_t tmp;
-+
-+ tmp = TARGET_CPUS;
-+ err = assign_irq_vector(irq, tmp);
-+ if (!err) {
-+ struct ht_irq_msg msg;
-+ unsigned dest;
-+
-+ cpus_and(tmp, cfg->domain, tmp);
-+ dest = cpu_mask_to_apicid(tmp);
-+
-+ msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
-+
-+ msg.address_lo =
-+ HT_IRQ_LOW_BASE |
-+ HT_IRQ_LOW_DEST_ID(dest) |
-+ HT_IRQ_LOW_VECTOR(cfg->vector) |
-+ ((INT_DEST_MODE == 0) ?
-+ HT_IRQ_LOW_DM_PHYSICAL :
-+ HT_IRQ_LOW_DM_LOGICAL) |
-+ HT_IRQ_LOW_RQEOI_EDGE |
-+ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
-+ HT_IRQ_LOW_MT_FIXED :
-+ HT_IRQ_LOW_MT_ARBITRATED) |
-+ HT_IRQ_LOW_IRQ_MASKED;
-+
-+ write_ht_irq_msg(irq, &msg);
-+
-+ set_irq_chip_and_handler_name(irq, &ht_irq_chip,
-+ handle_edge_irq, "edge");
-+ }
-+ return err;
-+}
-+#endif /* CONFIG_HT_IRQ */
-+
-+/* --------------------------------------------------------------------------
-+ ACPI-based IOAPIC Configuration
-+ -------------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_ACPI
-+
-+#define IO_APIC_MAX_ID 0xFE
-+
-+int __init io_apic_get_redir_entries (int ioapic)
-+{
-+ union IO_APIC_reg_01 reg_01;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_01.raw = io_apic_read(ioapic, 1);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return reg_01.bits.entries;
-+}
-+
-+
-+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
-+{
-+ if (!IO_APIC_IRQ(irq)) {
-+ apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
-+ ioapic);
-+ return -EINVAL;
-+ }
-+
-+ /*
-+ * IRQs < 16 are already in the irq_2_pin[] map
-+ */
-+ if (irq >= 16)
-+ add_pin_to_irq(irq, ioapic, pin);
-+
-+ setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
-+
-+ return 0;
-+}
-+
-+#endif /* CONFIG_ACPI */
-+
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * This function is currently only a helper for the i386 smp boot process, where
-+ * we need to reprogram the ioredtbls to cater for the cpus which have come online,
-+ * so the mask in all cases should simply be TARGET_CPUS.
-+ */
-+#ifdef CONFIG_SMP
-+void __init setup_ioapic_dest(void)
-+{
-+ int pin, ioapic, irq, irq_entry;
-+
-+ if (skip_ioapic_setup == 1)
-+ return;
-+
-+ for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
-+ for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
-+ irq_entry = find_irq_entry(ioapic, pin, mp_INT);
-+ if (irq_entry == -1)
-+ continue;
-+ irq = pin_2_irq(irq_entry, ioapic, pin);
-+
-+ /* setup_IO_APIC_irqs could fail to get vector for some device
-+ * when you have too many devices, because at that time only boot
-+ * cpu is online.
-+ */
-+ if (!irq_cfg[irq].vector)
-+ setup_IO_APIC_irq(ioapic, pin, irq,
-+ irq_trigger(irq_entry),
-+ irq_polarity(irq_entry));
-+ else
-+ set_ioapic_affinity_irq(irq, TARGET_CPUS);
-+ }
-+
-+ }
-+}
-+#endif
-+#endif /* !CONFIG_XEN */
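
The msi_compose_msg() routine above packs the destination APIC ID and vector into the architectural x86 MSI address/data layout (0xFEE in address bits 31:20, destination ID in bits 19:12, vector in data bits 7:0). As a rough standalone sketch of that encoding — the constants below are local approximations of the architectural layout, not the kernel's MSI_ADDR_* / MSI_DATA_* macros:

/* Standalone sketch of the x86 MSI encoding used by msi_compose_msg().
 * The SKETCH_MSI_* constants are illustrative, not the kernel's own macros. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_MSI_ADDR_BASE   0xFEE00000u  /* address bits 31:20 are fixed   */
#define SKETCH_MSI_DEST_SHIFT  12           /* destination APIC ID, bits 19:12 */
#define SKETCH_MSI_DM_LOGICAL  (1u << 2)    /* address: logical destination    */
#define SKETCH_MSI_TRIG_LEVEL  (1u << 15)   /* data: level trigger (unused here) */

static void sketch_msi_compose(uint8_t dest_apic_id, uint8_t vector,
                               uint32_t *addr_lo, uint32_t *data)
{
    /* Physical destination, fixed delivery, edge trigger - the same choices
     * the code above makes for the non-lowest-priority case. */
    *addr_lo = SKETCH_MSI_ADDR_BASE |
               ((uint32_t)dest_apic_id << SKETCH_MSI_DEST_SHIFT);
    *data    = vector;   /* delivery mode 000 (fixed), edge-triggered */
}

int main(void)
{
    uint32_t addr_lo, data;

    sketch_msi_compose(3, 0x41, &addr_lo, &data);
    printf("address_lo=%#x data=%#x\n", addr_lo, data);
    return 0;
}
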
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/ioport-xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/ioport-xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/ioport-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/ioport-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,100 @@
-+/*
-+ * linux/arch/x86_64/kernel/ioport.c
-+ *
-+ * This contains the io-permission bitmap code - written by obz, with changes
-+ * by Linus.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/capability.h>
-+#include <linux/errno.h>
-+#include <linux/types.h>
-+#include <linux/ioport.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/stddef.h>
-+#include <linux/slab.h>
-+#include <linux/thread_info.h>
-+#include <linux/syscalls.h>
-+#include <xen/interface/physdev.h>
-+
-+/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
-+static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
-+{
-+ int i;
-+
-+ if (new_value)
-+ for (i = base; i < base + extent; i++)
-+ __set_bit(i, bitmap);
-+ else
-+ for (i = base; i < base + extent; i++)
-+ clear_bit(i, bitmap);
-+}
-+
-+/*
-+ * this changes the io permissions bitmap in the current task.
-+ */
-+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
-+{
-+ struct thread_struct * t = &current->thread;
-+ unsigned long *bitmap;
-+ struct physdev_set_iobitmap set_iobitmap;
-+
-+ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
-+ return -EINVAL;
-+ if (turn_on && !capable(CAP_SYS_RAWIO))
-+ return -EPERM;
-+
-+ /*
-+ * If it's the first ioperm() call in this thread's lifetime, set the
-+ * IO bitmap up. ioperm() is much less timing critical than clone(),
-+ * this is why we delay this operation until now:
-+ */
-+ if (!t->io_bitmap_ptr) {
-+ bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-+ if (!bitmap)
-+ return -ENOMEM;
-+
-+ memset(bitmap, 0xff, IO_BITMAP_BYTES);
-+ t->io_bitmap_ptr = bitmap;
-+ set_thread_flag(TIF_IO_BITMAP);
-+
-+ set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
-+ set_iobitmap.nr_ports = IO_BITMAP_BITS;
-+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
-+ }
-+
-+ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
-+
-+ return 0;
-+}
-+
-+/*
-+ * sys_iopl has to be used when you want to access the IO ports
-+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
-+ * you'd need 8kB of bitmaps/process, which is a bit excessive.
-+ *
-+ */
-+
-+asmlinkage long sys_iopl(unsigned int new_iopl, struct pt_regs *regs)
-+{
-+ unsigned int old_iopl = current->thread.iopl;
-+ struct physdev_set_iopl set_iopl;
-+
-+ if (new_iopl > 3)
-+ return -EINVAL;
-+
-+ /* Need "raw I/O" privileges for direct port access. */
-+ if ((new_iopl > old_iopl) && !capable(CAP_SYS_RAWIO))
-+ return -EPERM;
-+
-+ /* Change our version of the privilege levels. */
-+ current->thread.iopl = new_iopl;
-+
-+ /* Force the change at ring 0. */
-+ set_iopl.iopl = (new_iopl == 0) ? 1 : new_iopl;
-+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
-+
-+ return 0;
-+}
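
sys_ioperm() and sys_iopl() here are the standard x86 port-I/O permission interfaces; the only Xen-specific part is that the bitmap and IOPL are handed to the hypervisor via PHYSDEVOP_set_iobitmap / PHYSDEVOP_set_iopl instead of being loaded into the TSS. From userspace they are used exactly as on bare metal; a minimal sketch (needs CAP_SYS_RAWIO; port 0x80 is just an arbitrary, traditionally harmless choice):

/* Minimal userspace sketch of ioperm()/iopl(); requires CAP_SYS_RAWIO. */
#include <stdio.h>
#include <sys/io.h>

int main(void)
{
    /* Grant this process access to a single I/O port (0x80, the POST port). */
    if (ioperm(0x80, 1, 1) < 0) {
        perror("ioperm");
        return 1;
    }
    outb(0x42, 0x80);        /* harmless write to the POST port */
    ioperm(0x80, 1, 0);      /* drop the permission again */

    /* Alternatively, iopl(3) opens the whole port range at once. */
    if (iopl(3) == 0)
        iopl(0);
    return 0;
}
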
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/irq-xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/irq-xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/irq-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/irq-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,225 @@
-+/*
-+ * linux/arch/x86_64/kernel/irq.c
-+ *
-+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
-+ *
-+ * This file contains the lowest level x86_64-specific interrupt
-+ * entry and irq statistics code. All the remaining irq logic is
-+ * done by the generic kernel/irq/ code and in the
-+ * x86_64-specific irq controller code. (e.g. i8259.c and
-+ * io_apic.c.)
-+ */
-+
-+#include <linux/kernel_stat.h>
-+#include <linux/interrupt.h>
-+#include <linux/seq_file.h>
-+#include <linux/module.h>
-+#include <linux/delay.h>
-+#include <asm/uaccess.h>
-+#include <asm/io_apic.h>
-+#include <asm/idle.h>
-+#include <asm/smp.h>
-+
-+atomic_t irq_err_count;
-+
-+#ifdef CONFIG_DEBUG_STACKOVERFLOW
-+/*
-+ * Probabilistic stack overflow check:
-+ *
-+ * Only check the stack in process context, because everything else
-+ * runs on the big interrupt stacks. Checking reliably is too expensive,
-+ * so we just check from interrupts.
-+ */
-+static inline void stack_overflow_check(struct pt_regs *regs)
-+{
-+ u64 curbase = (u64)task_stack_page(current);
-+ static unsigned long warned = -60*HZ;
-+
-+ if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
-+ regs->rsp < curbase + sizeof(struct thread_info) + 128 &&
-+ time_after(jiffies, warned + 60*HZ)) {
-+ printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
-+ current->comm, curbase, regs->rsp);
-+ show_stack(NULL,NULL);
-+ warned = jiffies;
-+ }
-+}
-+#endif
-+
-+/*
-+ * Generic, controller-independent functions:
-+ */
-+
-+int show_interrupts(struct seq_file *p, void *v)
-+{
-+ int i = *(loff_t *) v, j;
-+ struct irqaction * action;
-+ unsigned long flags;
-+
-+ if (i == 0) {
-+ seq_printf(p, " ");
-+ for_each_online_cpu(j)
-+ seq_printf(p, "CPU%-8d",j);
-+ seq_putc(p, '\n');
-+ }
-+
-+ if (i < NR_IRQS) {
-+ spin_lock_irqsave(&irq_desc[i].lock, flags);
-+ action = irq_desc[i].action;
-+ if (!action)
-+ goto skip;
-+ seq_printf(p, "%3d: ",i);
-+#ifndef CONFIG_SMP
-+ seq_printf(p, "%10u ", kstat_irqs(i));
-+#else
-+ for_each_online_cpu(j)
-+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-+#endif
-+ seq_printf(p, " %8s", irq_desc[i].chip->name);
-+ seq_printf(p, "-%-8s", irq_desc[i].name);
-+
-+ seq_printf(p, " %s", action->name);
-+ for (action=action->next; action; action = action->next)
-+ seq_printf(p, ", %s", action->name);
-+ seq_putc(p, '\n');
-+skip:
-+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-+ } else if (i == NR_IRQS) {
-+ seq_printf(p, "NMI: ");
-+ for_each_online_cpu(j)
-+ seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
-+ seq_putc(p, '\n');
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ seq_printf(p, "LOC: ");
-+ for_each_online_cpu(j)
-+ seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
-+ seq_putc(p, '\n');
-+#endif
-+ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * do_IRQ handles all normal device IRQ's (the special
-+ * SMP cross-CPU interrupts have their own specific
-+ * handlers).
-+ */
-+asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
-+{
-+ struct pt_regs *old_regs = set_irq_regs(regs);
-+
-+ /* high bit used in ret_from_ code */
-+ unsigned irq = ~regs->orig_rax;
-+
-+ exit_idle();
-+ irq_enter();
-+
-+#ifdef CONFIG_DEBUG_STACKOVERFLOW
-+ stack_overflow_check(regs);
-+#endif
-+
-+ if (likely(irq < NR_IRQS))
-+ generic_handle_irq(irq);
-+ else {
-+#ifndef CONFIG_XEN
-+ if (!disable_apic)
-+ ack_APIC_irq();
-+#endif
-+ if (printk_ratelimit())
-+ printk(KERN_EMERG "%s: %d.%d No irq handler for irq\n",
-+ __func__, smp_processor_id(), irq);
-+ }
-+
-+ irq_exit();
-+
-+ set_irq_regs(old_regs);
-+ return 1;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+void fixup_irqs(cpumask_t map)
-+{
-+ unsigned int irq;
-+ static int warned;
-+
-+ for (irq = 0; irq < NR_IRQS; irq++) {
-+ cpumask_t mask;
-+ int break_affinity = 0;
-+ int set_affinity = 1;
-+
-+ if (irq == 2)
-+ continue;
-+
-+ /* interrupts are disabled at this point */
-+ spin_lock(&irq_desc[irq].lock);
-+
-+ if (!irq_has_action(irq) ||
-+ cpus_equal(irq_desc[irq].affinity, map)) {
-+ spin_unlock(&irq_desc[irq].lock);
-+ continue;
-+ }
-+
-+ cpus_and(mask, irq_desc[irq].affinity, map);
-+ if (cpus_empty(mask)) {
-+ break_affinity = 1;
-+ mask = map;
-+ }
-+
-+ if (irq_desc[irq].chip->mask)
-+ irq_desc[irq].chip->mask(irq);
-+
-+ if (irq_desc[irq].chip->set_affinity)
-+ irq_desc[irq].chip->set_affinity(irq, mask);
-+ else if (!(warned++))
-+ set_affinity = 0;
-+
-+ if (irq_desc[irq].chip->unmask)
-+ irq_desc[irq].chip->unmask(irq);
-+
-+ spin_unlock(&irq_desc[irq].lock);
-+
-+ if (break_affinity && set_affinity)
-+ printk("Broke affinity for irq %i\n", irq);
-+ else if (!set_affinity)
-+ printk("Cannot set affinity for irq %i\n", irq);
-+ }
-+
-+ /* That doesn't seem sufficient. Give it 1ms. */
-+ local_irq_enable();
-+ mdelay(1);
-+ local_irq_disable();
-+}
-+#endif
-+
-+extern void call_softirq(void);
-+
-+asmlinkage void do_softirq(void)
-+{
-+ __u32 pending;
-+ unsigned long flags;
-+
-+ if (in_interrupt())
-+ return;
-+
-+ local_irq_save(flags);
-+ pending = local_softirq_pending();
-+ /* Switch to interrupt stack */
-+ if (pending) {
-+ call_softirq();
-+ WARN_ON_ONCE(softirq_count());
-+ }
-+ local_irq_restore(flags);
-+}
-+EXPORT_SYMBOL(do_softirq);
-+
-+#ifndef CONFIG_X86_LOCAL_APIC
-+/*
-+ * 'what should we do if we get a hw irq event on an illegal vector'.
-+ * each architecture has to answer this themselves.
-+ */
-+void ack_bad_irq(unsigned int irq)
-+{
-+ printk("unexpected IRQ trap at irq %02x\n", irq);
-+}
-+#endif
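
show_interrupts() above is the backend for /proc/interrupts: per-CPU counts from kstat, then the NMI/LOC rows from the per-CPU PDA counters, then the ERR total. Reading the result back from userspace is plain file I/O, for example:

/* Dump /proc/interrupts, the view generated by show_interrupts(). */
#include <stdio.h>

int main(void)
{
    char line[512];
    FILE *f = fopen("/proc/interrupts", "r");

    if (!f) {
        perror("/proc/interrupts");
        return 1;
    }
    while (fgets(line, sizeof(line), f))
        fputs(line, stdout);
    fclose(f);
    return 0;
}
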
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/ldt-xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/ldt-xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/ldt-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/ldt-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,281 @@
-+/*
-+ * linux/arch/x86_64/kernel/ldt.c
-+ *
-+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
-+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
-+ * Copyright (C) 2002 Andi Kleen
-+ *
-+ * This handles calls from both 32bit and 64bit mode.
-+ */
-+
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/vmalloc.h>
-+#include <linux/slab.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/system.h>
-+#include <asm/ldt.h>
-+#include <asm/desc.h>
-+#include <asm/proto.h>
-+#include <asm/pgalloc.h>
-+
-+#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
-+static void flush_ldt(void *null)
-+{
-+ if (current->active_mm)
-+ load_LDT(&current->active_mm->context);
-+}
-+#endif
-+
-+static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
-+{
-+ void *oldldt;
-+ void *newldt;
-+ unsigned oldsize;
-+
-+ if (mincount <= (unsigned)pc->size)
-+ return 0;
-+ oldsize = pc->size;
-+ mincount = (mincount+511)&(~511);
-+ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
-+ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
-+ else
-+ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
-+
-+ if (!newldt)
-+ return -ENOMEM;
-+
-+ if (oldsize)
-+ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
-+ oldldt = pc->ldt;
-+ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
-+ wmb();
-+ pc->ldt = newldt;
-+ wmb();
-+ pc->size = mincount;
-+ wmb();
-+ if (reload) {
-+#ifdef CONFIG_SMP
-+ cpumask_t mask;
-+
-+ preempt_disable();
-+#endif
-+ make_pages_readonly(
-+ pc->ldt,
-+ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+ XENFEAT_writable_descriptor_tables);
-+ load_LDT(pc);
-+#ifdef CONFIG_SMP
-+ mask = cpumask_of_cpu(smp_processor_id());
-+ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-+ smp_call_function(flush_ldt, NULL, 1, 1);
-+ preempt_enable();
-+#endif
-+ }
-+ if (oldsize) {
-+ make_pages_writable(
-+ oldldt,
-+ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+ XENFEAT_writable_descriptor_tables);
-+ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
-+ vfree(oldldt);
-+ else
-+ kfree(oldldt);
-+ }
-+ return 0;
-+}
-+
-+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
-+{
-+ int err = alloc_ldt(new, old->size, 0);
-+ if (err < 0)
-+ return err;
-+ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
-+ make_pages_readonly(
-+ new->ldt,
-+ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+ XENFEAT_writable_descriptor_tables);
-+ return 0;
-+}
-+
-+/*
-+ * we do not have to muck with descriptors here, that is
-+ * done in switch_mm() as needed.
-+ */
-+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-+{
-+ struct mm_struct * old_mm;
-+ int retval = 0;
-+
-+ memset(&mm->context, 0, sizeof(mm->context));
-+ init_MUTEX(&mm->context.sem);
-+ old_mm = current->mm;
-+ if (old_mm && old_mm->context.size > 0) {
-+ down(&old_mm->context.sem);
-+ retval = copy_ldt(&mm->context, &old_mm->context);
-+ up(&old_mm->context.sem);
-+ }
-+ if (retval == 0) {
-+ spin_lock(&mm_unpinned_lock);
-+ list_add(&mm->context.unpinned, &mm_unpinned);
-+ spin_unlock(&mm_unpinned_lock);
-+ }
-+ return retval;
-+}
-+
-+/*
-+ *
-+ * Don't touch the LDT register - we're already in the next thread.
-+ */
-+void destroy_context(struct mm_struct *mm)
-+{
-+ if (mm->context.size) {
-+ if (mm == current->active_mm)
-+ clear_LDT();
-+ make_pages_writable(
-+ mm->context.ldt,
-+ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+ XENFEAT_writable_descriptor_tables);
-+ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
-+ vfree(mm->context.ldt);
-+ else
-+ kfree(mm->context.ldt);
-+ mm->context.size = 0;
-+ }
-+ if (!mm->context.pinned) {
-+ spin_lock(&mm_unpinned_lock);
-+ list_del(&mm->context.unpinned);
-+ spin_unlock(&mm_unpinned_lock);
-+ }
-+}
-+
-+static int read_ldt(void __user * ptr, unsigned long bytecount)
-+{
-+ int err;
-+ unsigned long size;
-+ struct mm_struct * mm = current->mm;
-+
-+ if (!mm->context.size)
-+ return 0;
-+ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
-+ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
-+
-+ down(&mm->context.sem);
-+ size = mm->context.size*LDT_ENTRY_SIZE;
-+ if (size > bytecount)
-+ size = bytecount;
-+
-+ err = 0;
-+ if (copy_to_user(ptr, mm->context.ldt, size))
-+ err = -EFAULT;
-+ up(&mm->context.sem);
-+ if (err < 0)
-+ goto error_return;
-+ if (size != bytecount) {
-+ /* zero-fill the rest */
-+ if (clear_user(ptr+size, bytecount-size) != 0) {
-+ err = -EFAULT;
-+ goto error_return;
-+ }
-+ }
-+ return bytecount;
-+error_return:
-+ return err;
-+}
-+
-+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
-+{
-+ /* Arbitrary number */
-+ /* x86-64 default LDT is all zeros */
-+ if (bytecount > 128)
-+ bytecount = 128;
-+ if (clear_user(ptr, bytecount))
-+ return -EFAULT;
-+ return bytecount;
-+}
-+
-+static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
-+{
-+ struct task_struct *me = current;
-+ struct mm_struct * mm = me->mm;
-+ __u32 entry_1, entry_2, *lp;
-+ unsigned long mach_lp;
-+ int error;
-+ struct user_desc ldt_info;
-+
-+ error = -EINVAL;
-+
-+ if (bytecount != sizeof(ldt_info))
-+ goto out;
-+ error = -EFAULT;
-+ if (copy_from_user(&ldt_info, ptr, bytecount))
-+ goto out;
-+
-+ error = -EINVAL;
-+ if (ldt_info.entry_number >= LDT_ENTRIES)
-+ goto out;
-+ if (ldt_info.contents == 3) {
-+ if (oldmode)
-+ goto out;
-+ if (ldt_info.seg_not_present == 0)
-+ goto out;
-+ }
-+
-+ down(&mm->context.sem);
-+ if (ldt_info.entry_number >= (unsigned)mm->context.size) {
-+ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
-+ if (error < 0)
-+ goto out_unlock;
-+ }
-+
-+ lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
-+ mach_lp = arbitrary_virt_to_machine(lp);
-+
-+ /* Allow LDTs to be cleared by the user. */
-+ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
-+ if (oldmode || LDT_empty(&ldt_info)) {
-+ entry_1 = 0;
-+ entry_2 = 0;
-+ goto install;
-+ }
-+ }
-+
-+ entry_1 = LDT_entry_a(&ldt_info);
-+ entry_2 = LDT_entry_b(&ldt_info);
-+ if (oldmode)
-+ entry_2 &= ~(1 << 20);
-+
-+ /* Install the new entry ... */
-+install:
-+ error = HYPERVISOR_update_descriptor(mach_lp, (unsigned long)((entry_1 | (unsigned long) entry_2 << 32)));
-+
-+out_unlock:
-+ up(&mm->context.sem);
-+out:
-+ return error;
-+}
-+
-+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
-+{
-+ int ret = -ENOSYS;
-+
-+ switch (func) {
-+ case 0:
-+ ret = read_ldt(ptr, bytecount);
-+ break;
-+ case 1:
-+ ret = write_ldt(ptr, bytecount, 1);
-+ break;
-+ case 2:
-+ ret = read_default_ldt(ptr, bytecount);
-+ break;
-+ case 0x11:
-+ ret = write_ldt(ptr, bytecount, 0);
-+ break;
-+ }
-+ return ret;
-+}
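
sys_modify_ldt() keeps the historical function codes (0 = read, 1 = legacy write, 2 = read the default LDT, 0x11 = write); the Xen-specific part is only that the descriptor is installed with HYPERVISOR_update_descriptor(), because the LDT pages are kept read-only. A userspace sketch of installing one entry — glibc has no wrapper, so it goes through syscall(2); the descriptor values below are arbitrary illustrative choices:

/* Sketch: install a flat 32-bit data segment into the LDT via modify_ldt(). */
#include <asm/ldt.h>        /* struct user_desc */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    struct user_desc d;

    memset(&d, 0, sizeof(d));
    d.entry_number   = 0;
    d.base_addr      = 0;          /* flat base */
    d.limit          = 0xfffff;    /* 4 GiB with page granularity */
    d.seg_32bit      = 1;
    d.limit_in_pages = 1;
    d.useable        = 1;

    if (syscall(SYS_modify_ldt, 0x11, &d, sizeof(d)) != 0) {
        perror("modify_ldt");
        return 1;
    }
    printf("LDT entry %u installed\n", d.entry_number);
    return 0;
}
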
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/machine_kexec.c ubuntu-gutsy-xen/arch/x86_64/kernel/machine_kexec.c
---- ubuntu-gutsy/arch/x86_64/kernel/machine_kexec.c 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/machine_kexec.c 2007-08-18 12:38:02.000000000 -0400
-@@ -24,6 +24,104 @@
- static u64 kexec_pmd1[512] PAGE_ALIGNED;
- static u64 kexec_pte1[512] PAGE_ALIGNED;
-
-+#ifdef CONFIG_XEN
-+
-+/* In the case of Xen, override the page-table setter functions so that we can
-+ * build a regular identity-mapping page table...
-+ */
-+
-+#include <xen/interface/kexec.h>
-+#include <xen/interface/memory.h>
-+
-+#define x__pmd(x) ((pmd_t) { (x) } )
-+#define x__pud(x) ((pud_t) { (x) } )
-+#define x__pgd(x) ((pgd_t) { (x) } )
-+
-+#define x_pmd_val(x) ((x).pmd)
-+#define x_pud_val(x) ((x).pud)
-+#define x_pgd_val(x) ((x).pgd)
-+
-+static inline void x_set_pmd(pmd_t *dst, pmd_t val)
-+{
-+ x_pmd_val(*dst) = x_pmd_val(val);
-+}
-+
-+static inline void x_set_pud(pud_t *dst, pud_t val)
-+{
-+ x_pud_val(*dst) = phys_to_machine(x_pud_val(val));
-+}
-+
-+static inline void x_pud_clear (pud_t *pud)
-+{
-+ x_pud_val(*pud) = 0;
-+}
-+
-+static inline void x_set_pgd(pgd_t *dst, pgd_t val)
-+{
-+ x_pgd_val(*dst) = phys_to_machine(x_pgd_val(val));
-+}
-+
-+static inline void x_pgd_clear (pgd_t * pgd)
-+{
-+ x_pgd_val(*pgd) = 0;
-+}
-+
-+#define X__PAGE_KERNEL_LARGE_EXEC \
-+ _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_PSE
-+#define X_KERNPG_TABLE _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY
-+
-+#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
-+
-+#if PAGES_NR > KEXEC_XEN_NO_PAGES
-+#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
-+#endif
-+
-+#if PA_CONTROL_PAGE != 0
-+#error PA_CONTROL_PAGE is non zero - Xen support will break
-+#endif
-+
-+void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
-+{
-+ void *control_page;
-+ void *table_page;
-+
-+ memset(xki->page_list, 0, sizeof(xki->page_list));
-+
-+ control_page = page_address(image->control_code_page) + PAGE_SIZE;
-+ memcpy(control_page, relocate_kernel, PAGE_SIZE);
-+
-+ table_page = page_address(image->control_code_page);
-+
-+ xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
-+ xki->page_list[PA_TABLE_PAGE] = __ma(table_page);
-+
-+ xki->page_list[PA_PGD] = __ma(kexec_pgd);
-+ xki->page_list[PA_PUD_0] = __ma(kexec_pud0);
-+ xki->page_list[PA_PUD_1] = __ma(kexec_pud1);
-+ xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
-+ xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
-+ xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
-+ xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
-+}
-+
-+#else /* CONFIG_XEN */
-+
-+#define x__pmd(x) __pmd(x)
-+#define x__pud(x) __pud(x)
-+#define x__pgd(x) __pgd(x)
-+
-+#define x_set_pmd(x, y) set_pmd(x, y)
-+#define x_set_pud(x, y) set_pud(x, y)
-+#define x_set_pgd(x, y) set_pgd(x, y)
-+
-+#define x_pud_clear(x) pud_clear(x)
-+#define x_pgd_clear(x) pgd_clear(x)
-+
-+#define X__PAGE_KERNEL_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
-+#define X_KERNPG_TABLE _KERNPG_TABLE
-+
-+#endif /* CONFIG_XEN */
-+
- static void init_level2_page(pmd_t *level2p, unsigned long addr)
- {
- unsigned long end_addr;
-@@ -31,7 +129,7 @@
- addr &= PAGE_MASK;
- end_addr = addr + PUD_SIZE;
- while (addr < end_addr) {
-- set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
-+ x_set_pmd(level2p++, x__pmd(addr | X__PAGE_KERNEL_LARGE_EXEC));
- addr += PMD_SIZE;
- }
- }
-@@ -56,12 +154,12 @@
- }
- level2p = (pmd_t *)page_address(page);
- init_level2_page(level2p, addr);
-- set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
-+ x_set_pud(level3p++, x__pud(__pa(level2p) | X_KERNPG_TABLE));
- addr += PUD_SIZE;
- }
- /* clear the unused entries */
- while (addr < end_addr) {
-- pud_clear(level3p++);
-+ x_pud_clear(level3p++);
- addr += PUD_SIZE;
- }
- out:
-@@ -92,12 +190,12 @@
- if (result) {
- goto out;
- }
-- set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
-+ x_set_pgd(level4p++, x__pgd(__pa(level3p) | X_KERNPG_TABLE));
- addr += PGDIR_SIZE;
- }
- /* clear the unused entries */
- while (addr < end_addr) {
-- pgd_clear(level4p++);
-+ x_pgd_clear(level4p++);
- addr += PGDIR_SIZE;
- }
- out:
-@@ -108,49 +206,14 @@
- static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
- {
- pgd_t *level4p;
-- level4p = (pgd_t *)__va(start_pgtable);
-- return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
--}
--
--static void set_idt(void *newidt, u16 limit)
--{
-- struct desc_ptr curidt;
-+ unsigned long x_end_pfn = end_pfn;
-
-- /* x86-64 supports unaligned loads & stores */
-- curidt.size = limit;
-- curidt.address = (unsigned long)newidt;
-+#ifdef CONFIG_XEN
-+ x_end_pfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
-+#endif
-
-- __asm__ __volatile__ (
-- "lidtq %0\n"
-- : : "m" (curidt)
-- );
--};
--
--
--static void set_gdt(void *newgdt, u16 limit)
--{
-- struct desc_ptr curgdt;
--
-- /* x86-64 supports unaligned loads & stores */
-- curgdt.size = limit;
-- curgdt.address = (unsigned long)newgdt;
--
-- __asm__ __volatile__ (
-- "lgdtq %0\n"
-- : : "m" (curgdt)
-- );
--};
--
--static void load_segments(void)
--{
-- __asm__ __volatile__ (
-- "\tmovl %0,%%ds\n"
-- "\tmovl %0,%%es\n"
-- "\tmovl %0,%%ss\n"
-- "\tmovl %0,%%fs\n"
-- "\tmovl %0,%%gs\n"
-- : : "a" (__KERNEL_DS) : "memory"
-- );
-+ level4p = (pgd_t *)__va(start_pgtable);
-+ return init_level4_page(image, level4p, 0, x_end_pfn << PAGE_SHIFT);
- }
-
- int machine_kexec_prepare(struct kimage *image)
-@@ -174,6 +237,7 @@
- return;
- }
-
-+#ifndef CONFIG_XEN
- /*
- * Do not allocate memory (or fail in any way) in machine_kexec().
- * We are past the point of no return, committed to rebooting now.
-@@ -209,26 +273,10 @@
- page_list[PA_TABLE_PAGE] =
- (unsigned long)__pa(page_address(image->control_code_page));
-
-- /* The segment registers are funny things, they have both a
-- * visible and an invisible part. Whenever the visible part is
-- * set to a specific selector, the invisible part is loaded
-- * with from a table in memory. At no other time is the
-- * descriptor table in memory accessed.
-- *
-- * I take advantage of this here by force loading the
-- * segments, before I zap the gdt with an invalid value.
-- */
-- load_segments();
-- /* The gdt & idt are now invalid.
-- * If you want to load them you must set up your own idt & gdt.
-- */
-- set_gdt(phys_to_virt(0),0);
-- set_idt(phys_to_virt(0),0);
--
-- /* now call it */
- relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
- image->start);
- }
-+#endif
-
- /* crashkernel=size@addr specifies the location to reserve for
- * a crash kernel. By reserving this memory we guarantee
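
The Xen half of the machine_kexec changes above only swaps the page-table setters for x_*() wrappers that run the physical address through phys_to_machine() before writing the entry, since a paravirtual guest's pseudo-physical frame numbers differ from the real machine frames. Conceptually the translation is a table lookup; the sketch below uses a made-up p2m[] array, not the kernel's real phys_to_machine() implementation:

/* Toy illustration of the pfn -> mfn translation performed for the
 * x_set_pud()/x_set_pgd() wrappers above.  p2m[] is a made-up table. */
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SHIFT 12

static const uint64_t p2m[4] = { 0x1a0, 0x05c, 0x2f1, 0x007 }; /* pfn -> mfn */

static uint64_t toy_phys_to_machine(uint64_t phys)
{
    uint64_t pfn    = phys >> TOY_PAGE_SHIFT;
    uint64_t offset = phys & ((1u << TOY_PAGE_SHIFT) - 1);

    return (p2m[pfn] << TOY_PAGE_SHIFT) | offset;
}

int main(void)
{
    uint64_t phys = (2ull << TOY_PAGE_SHIFT) | 0x123;

    printf("pseudo-phys %#llx -> machine %#llx\n",
           (unsigned long long)phys,
           (unsigned long long)toy_phys_to_machine(phys));
    return 0;
}
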
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/Makefile ubuntu-gutsy-xen/arch/x86_64/kernel/Makefile
---- ubuntu-gutsy/arch/x86_64/kernel/Makefile 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -4,7 +4,7 @@
-
- extra-y := head.o head64.o init_task.o vmlinux.lds
- EXTRA_AFLAGS := -traditional
--obj-y := process.o signal.o entry.o traps.o irq.o \
-+obj-y := process.o signal.o entry.o traps.o irq.o nmi.o \
- ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \
- x8664_ksyms.o i387.o syscall.o vsyscall.o \
- setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \
-@@ -21,11 +21,13 @@
- obj-$(CONFIG_MICROCODE) += microcode.o
- obj-$(CONFIG_X86_CPUID) += cpuid.o
- obj-$(CONFIG_SMP) += smp.o smpboot.o trampoline.o tsc_sync.o
--obj-y += apic.o nmi.o
--obj-y += io_apic.o mpparse.o genapic.o genapic_flat.o
-+obj-$(CONFIG_X86_LOCAL_APIC) += apic.o
-+obj-$(CONFIG_X86_XEN_GENAPIC) += genapic.o genapic_xen.o
-+obj-$(CONFIG_X86_IO_APIC) += io_apic.o mpparse.o genapic.o genapic_flat.o
- obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
- obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
--obj-$(CONFIG_PM) += suspend.o
-+obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend.o
-+obj-$(CONFIG_ACPI_SLEEP) += suspend.o
- obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
- obj-$(CONFIG_CPU_FREQ) += cpufreq/
- obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-@@ -61,3 +63,21 @@
- alternative-y += ../../i386/kernel/alternative.o
- pcspeaker-y += ../../i386/kernel/pcspeaker.o
- perfctr-watchdog-y += ../../i386/kernel/cpu/perfctr-watchdog.o
-+
-+ifdef CONFIG_XEN
-+apic-y += ../../i386/kernel/apic-xen.o
-+time-y += ../../i386/kernel/time-xen.o
-+pci-dma-y += ../../i386/kernel/pci-dma-xen.o
-+microcode-$(subst m,y,$(CONFIG_MICROCODE)) := ../../i386/kernel/microcode-xen.o
-+quirks-y := ../../i386/kernel/quirks-xen.o
-+
-+n-obj-xen := early-quirks.o genapic_flat.o i8237.o i8259.o perfctr-watchdog.o \
-+ reboot.o smpboot.o trampoline.o tsc.o tsc_sync.o
-+
-+include $(srctree)/scripts/Makefile.xen
-+
-+obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
-+obj-y := $(call cherrypickxen, $(obj-y))
-+extra-y := $(call cherrypickxen, $(extra-y))
-+%/head-xen.o %/head-xen.s: EXTRA_AFLAGS :=
-+endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/mpparse-xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/mpparse-xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/mpparse-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/mpparse-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,852 @@
-+/*
-+ * Intel Multiprocessor Specification 1.1 and 1.4
-+ * compliant MP-table parsing routines.
-+ *
-+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
-+ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
-+ *
-+ * Fixes
-+ * Erich Boleyn : MP v1.4 and additional changes.
-+ * Alan Cox : Added EBDA scanning
-+ * Ingo Molnar : various cleanups and rewrites
-+ * Maciej W. Rozycki: Bits for default MP configurations
-+ * Paul Diefenbaugh: Added full ACPI support
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/bootmem.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/acpi.h>
-+#include <linux/module.h>
-+
-+#include <asm/smp.h>
-+#include <asm/mtrr.h>
-+#include <asm/mpspec.h>
-+#include <asm/pgalloc.h>
-+#include <asm/io_apic.h>
-+#include <asm/proto.h>
-+#include <asm/acpi.h>
-+
-+/* Have we found an MP table */
-+int smp_found_config;
-+unsigned int __initdata maxcpus = NR_CPUS;
-+
-+/*
-+ * Various Linux-internal data structures created from the
-+ * MP-table.
-+ */
-+DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
-+int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-+
-+static int mp_current_pci_id = 0;
-+/* I/O APIC entries */
-+struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
-+
-+/* # of MP IRQ source entries */
-+struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
-+
-+/* MP IRQ source entries */
-+int mp_irq_entries;
-+
-+int nr_ioapics;
-+#ifndef CONFIG_XEN
-+unsigned long mp_lapic_addr = 0;
-+#endif
-+
-+
-+/* Processor that is doing the boot up */
-+unsigned int boot_cpu_id = -1U;
-+/* Internal processor count */
-+unsigned int num_processors __cpuinitdata = 0;
-+
-+unsigned disabled_cpus __cpuinitdata;
-+
-+/* Bitmask of physically existing CPUs */
-+physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
-+
-+u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-+
-+
-+/*
-+ * Intel MP BIOS table parsing routines:
-+ */
-+
-+/*
-+ * Checksum an MP configuration block.
-+ */
-+
-+static int __init mpf_checksum(unsigned char *mp, int len)
-+{
-+ int sum = 0;
-+
-+ while (len--)
-+ sum += *mp++;
-+
-+ return sum & 0xFF;
-+}
-+
-+#ifndef CONFIG_XEN
-+static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
-+{
-+ int cpu;
-+ cpumask_t tmp_map;
-+ char *bootup_cpu = "";
-+
-+ if (!(m->mpc_cpuflag & CPU_ENABLED)) {
-+ disabled_cpus++;
-+ return;
-+ }
-+ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-+ bootup_cpu = " (Bootup-CPU)";
-+ boot_cpu_id = m->mpc_apicid;
-+ }
-+
-+ printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu);
-+
-+ if (num_processors >= NR_CPUS) {
-+ printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
-+ " Processor ignored.\n", NR_CPUS);
-+ return;
-+ }
-+
-+ num_processors++;
-+ cpus_complement(tmp_map, cpu_present_map);
-+ cpu = first_cpu(tmp_map);
-+
-+ physid_set(m->mpc_apicid, phys_cpu_present_map);
-+ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-+ /*
-+ * bios_cpu_apicid is required to have processors listed
-+ * in same order as logical cpu numbers. Hence the first
-+ * entry is BSP, and so on.
-+ */
-+ cpu = 0;
-+ }
-+ bios_cpu_apicid[cpu] = m->mpc_apicid;
-+ x86_cpu_to_apicid[cpu] = m->mpc_apicid;
-+
-+ cpu_set(cpu, cpu_possible_map);
-+ cpu_set(cpu, cpu_present_map);
-+}
-+#else
-+static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
-+{
-+ num_processors++;
-+}
-+#endif /* CONFIG_XEN */
-+
-+static void __init MP_bus_info (struct mpc_config_bus *m)
-+{
-+ char str[7];
-+
-+ memcpy(str, m->mpc_bustype, 6);
-+ str[6] = 0;
-+ Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
-+
-+ if (strncmp(str, "ISA", 3) == 0) {
-+ set_bit(m->mpc_busid, mp_bus_not_pci);
-+ } else if (strncmp(str, "PCI", 3) == 0) {
-+ clear_bit(m->mpc_busid, mp_bus_not_pci);
-+ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
-+ mp_current_pci_id++;
-+ } else {
-+ printk(KERN_ERR "Unknown bustype %s\n", str);
-+ }
-+}
-+
-+static int bad_ioapic(unsigned long address)
-+{
-+ if (nr_ioapics >= MAX_IO_APICS) {
-+ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
-+ "(found %d)\n", MAX_IO_APICS, nr_ioapics);
-+ panic("Recompile kernel with bigger MAX_IO_APICS!\n");
-+ }
-+ if (!address) {
-+ printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
-+ " found in table, skipping!\n");
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
-+{
-+ if (!(m->mpc_flags & MPC_APIC_USABLE))
-+ return;
-+
-+ printk("I/O APIC #%d at 0x%X.\n",
-+ m->mpc_apicid, m->mpc_apicaddr);
-+
-+ if (bad_ioapic(m->mpc_apicaddr))
-+ return;
-+
-+ mp_ioapics[nr_ioapics] = *m;
-+ nr_ioapics++;
-+}
-+
-+static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
-+{
-+ mp_irqs [mp_irq_entries] = *m;
-+ Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
-+ " IRQ %02x, APIC ID %x, APIC INT %02x\n",
-+ m->mpc_irqtype, m->mpc_irqflag & 3,
-+ (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
-+ m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
-+ if (++mp_irq_entries >= MAX_IRQ_SOURCES)
-+ panic("Max # of irq sources exceeded!!\n");
-+}
-+
-+static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
-+{
-+ Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
-+ " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
-+ m->mpc_irqtype, m->mpc_irqflag & 3,
-+ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
-+ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
-+}
-+
-+/*
-+ * Read/parse the MPC
-+ */
-+
-+static int __init smp_read_mpc(struct mp_config_table *mpc)
-+{
-+ char str[16];
-+ int count=sizeof(*mpc);
-+ unsigned char *mpt=((unsigned char *)mpc)+count;
-+
-+ if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
-+ printk("MPTABLE: bad signature [%c%c%c%c]!\n",
-+ mpc->mpc_signature[0],
-+ mpc->mpc_signature[1],
-+ mpc->mpc_signature[2],
-+ mpc->mpc_signature[3]);
-+ return 0;
-+ }
-+ if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
-+ printk("MPTABLE: checksum error!\n");
-+ return 0;
-+ }
-+ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
-+ printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n",
-+ mpc->mpc_spec);
-+ return 0;
-+ }
-+ if (!mpc->mpc_lapic) {
-+ printk(KERN_ERR "MPTABLE: null local APIC address!\n");
-+ return 0;
-+ }
-+ memcpy(str,mpc->mpc_oem,8);
-+ str[8] = 0;
-+ printk(KERN_INFO "MPTABLE: OEM ID: %s ",str);
-+
-+ memcpy(str,mpc->mpc_productid,12);
-+ str[12] = 0;
-+ printk("MPTABLE: Product ID: %s ",str);
-+
-+#ifndef CONFIG_XEN
-+ printk("MPTABLE: APIC at: 0x%X\n",mpc->mpc_lapic);
-+
-+ /* save the local APIC address, it might be non-default */
-+ if (!acpi_lapic)
-+ mp_lapic_addr = mpc->mpc_lapic;
-+#endif
-+
-+ /*
-+ * Now process the configuration blocks.
-+ */
-+ while (count < mpc->mpc_length) {
-+ switch(*mpt) {
-+ case MP_PROCESSOR:
-+ {
-+ struct mpc_config_processor *m=
-+ (struct mpc_config_processor *)mpt;
-+ if (!acpi_lapic)
-+ MP_processor_info(m);
-+ mpt += sizeof(*m);
-+ count += sizeof(*m);
-+ break;
-+ }
-+ case MP_BUS:
-+ {
-+ struct mpc_config_bus *m=
-+ (struct mpc_config_bus *)mpt;
-+ MP_bus_info(m);
-+ mpt += sizeof(*m);
-+ count += sizeof(*m);
-+ break;
-+ }
-+ case MP_IOAPIC:
-+ {
-+ struct mpc_config_ioapic *m=
-+ (struct mpc_config_ioapic *)mpt;
-+ MP_ioapic_info(m);
-+ mpt += sizeof(*m);
-+ count += sizeof(*m);
-+ break;
-+ }
-+ case MP_INTSRC:
-+ {
-+ struct mpc_config_intsrc *m=
-+ (struct mpc_config_intsrc *)mpt;
-+
-+ MP_intsrc_info(m);
-+ mpt += sizeof(*m);
-+ count += sizeof(*m);
-+ break;
-+ }
-+ case MP_LINTSRC:
-+ {
-+ struct mpc_config_lintsrc *m=
-+ (struct mpc_config_lintsrc *)mpt;
-+ MP_lintsrc_info(m);
-+ mpt += sizeof(*m);
-+ count += sizeof(*m);
-+ break;
-+ }
-+ }
-+ }
-+ setup_apic_routing();
-+ if (!num_processors)
-+ printk(KERN_ERR "MPTABLE: no processors registered!\n");
-+ return num_processors;
-+}
-+
-+static int __init ELCR_trigger(unsigned int irq)
-+{
-+ unsigned int port;
-+
-+ port = 0x4d0 + (irq >> 3);
-+ return (inb(port) >> (irq & 7)) & 1;
-+}
-+
-+static void __init construct_default_ioirq_mptable(int mpc_default_type)
-+{
-+ struct mpc_config_intsrc intsrc;
-+ int i;
-+ int ELCR_fallback = 0;
-+
-+ intsrc.mpc_type = MP_INTSRC;
-+ intsrc.mpc_irqflag = 0; /* conforming */
-+ intsrc.mpc_srcbus = 0;
-+ intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
-+
-+ intsrc.mpc_irqtype = mp_INT;
-+
-+ /*
-+ * If true, we have an ISA/PCI system with no IRQ entries
-+ * in the MP table. To prevent the PCI interrupts from being set up
-+ * incorrectly, we try to use the ELCR. The sanity check to see if
-+ * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
-+ * never be level sensitive, so we simply see if the ELCR agrees.
-+ * If it does, we assume it's valid.
-+ */
-+ if (mpc_default_type == 5) {
-+ printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
-+
-+ if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
-+ printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n");
-+ else {
-+ printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
-+ ELCR_fallback = 1;
-+ }
-+ }
-+
-+ for (i = 0; i < 16; i++) {
-+ switch (mpc_default_type) {
-+ case 2:
-+ if (i == 0 || i == 13)
-+ continue; /* IRQ0 & IRQ13 not connected */
-+ /* fall through */
-+ default:
-+ if (i == 2)
-+ continue; /* IRQ2 is never connected */
-+ }
-+
-+ if (ELCR_fallback) {
-+ /*
-+ * If the ELCR indicates a level-sensitive interrupt, we
-+ * copy that information over to the MP table in the
-+ * irqflag field (level sensitive, active high polarity).
-+ */
-+ if (ELCR_trigger(i))
-+ intsrc.mpc_irqflag = 13;
-+ else
-+ intsrc.mpc_irqflag = 0;
-+ }
-+
-+ intsrc.mpc_srcbusirq = i;
-+ intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
-+ MP_intsrc_info(&intsrc);
-+ }
-+
-+ intsrc.mpc_irqtype = mp_ExtINT;
-+ intsrc.mpc_srcbusirq = 0;
-+ intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
-+ MP_intsrc_info(&intsrc);
-+}
-+
-+static inline void __init construct_default_ISA_mptable(int mpc_default_type)
-+{
-+ struct mpc_config_processor processor;
-+ struct mpc_config_bus bus;
-+ struct mpc_config_ioapic ioapic;
-+ struct mpc_config_lintsrc lintsrc;
-+ int linttypes[2] = { mp_ExtINT, mp_NMI };
-+ int i;
-+
-+#ifndef CONFIG_XEN
-+ /*
-+ * local APIC has default address
-+ */
-+ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-+#endif
-+
-+ /*
-+ * 2 CPUs, numbered 0 & 1.
-+ */
-+ processor.mpc_type = MP_PROCESSOR;
-+ processor.mpc_apicver = 0;
-+ processor.mpc_cpuflag = CPU_ENABLED;
-+ processor.mpc_cpufeature = 0;
-+ processor.mpc_featureflag = 0;
-+ processor.mpc_reserved[0] = 0;
-+ processor.mpc_reserved[1] = 0;
-+ for (i = 0; i < 2; i++) {
-+ processor.mpc_apicid = i;
-+ MP_processor_info(&processor);
-+ }
-+
-+ bus.mpc_type = MP_BUS;
-+ bus.mpc_busid = 0;
-+ switch (mpc_default_type) {
-+ default:
-+ printk(KERN_ERR "???\nUnknown standard configuration %d\n",
-+ mpc_default_type);
-+ /* fall through */
-+ case 1:
-+ case 5:
-+ memcpy(bus.mpc_bustype, "ISA ", 6);
-+ break;
-+ }
-+ MP_bus_info(&bus);
-+ if (mpc_default_type > 4) {
-+ bus.mpc_busid = 1;
-+ memcpy(bus.mpc_bustype, "PCI ", 6);
-+ MP_bus_info(&bus);
-+ }
-+
-+ ioapic.mpc_type = MP_IOAPIC;
-+ ioapic.mpc_apicid = 2;
-+ ioapic.mpc_apicver = 0;
-+ ioapic.mpc_flags = MPC_APIC_USABLE;
-+ ioapic.mpc_apicaddr = 0xFEC00000;
-+ MP_ioapic_info(&ioapic);
-+
-+ /*
-+ * We set up most of the low 16 IO-APIC pins according to MPS rules.
-+ */
-+ construct_default_ioirq_mptable(mpc_default_type);
-+
-+ lintsrc.mpc_type = MP_LINTSRC;
-+ lintsrc.mpc_irqflag = 0; /* conforming */
-+ lintsrc.mpc_srcbusid = 0;
-+ lintsrc.mpc_srcbusirq = 0;
-+ lintsrc.mpc_destapic = MP_APIC_ALL;
-+ for (i = 0; i < 2; i++) {
-+ lintsrc.mpc_irqtype = linttypes[i];
-+ lintsrc.mpc_destapiclint = i;
-+ MP_lintsrc_info(&lintsrc);
-+ }
-+}
-+
-+static struct intel_mp_floating *mpf_found;
-+
-+/*
-+ * Scan the memory blocks for an SMP configuration block.
-+ */
-+void __init get_smp_config (void)
-+{
-+ struct intel_mp_floating *mpf = mpf_found;
-+
-+ /*
-+ * ACPI supports both logical (e.g. Hyper-Threading) and physical
-+ * processors, where MPS only supports physical.
-+ */
-+ if (acpi_lapic && acpi_ioapic) {
-+ printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
-+ return;
-+ }
-+ else if (acpi_lapic)
-+ printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
-+
-+ printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
-+
-+ /*
-+ * Now see if we need to read further.
-+ */
-+ if (mpf->mpf_feature1 != 0) {
-+
-+ printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
-+ construct_default_ISA_mptable(mpf->mpf_feature1);
-+
-+ } else if (mpf->mpf_physptr) {
-+
-+ /*
-+ * Read the physical hardware table. Anything here will
-+ * override the defaults.
-+ */
-+ if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
-+ smp_found_config = 0;
-+ printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
-+ printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
-+ return;
-+ }
-+ /*
-+ * If there are no explicit MP IRQ entries, then we are
-+ * broken. We set up most of the low 16 IO-APIC pins to
-+ * ISA defaults and hope it will work.
-+ */
-+ if (!mp_irq_entries) {
-+ struct mpc_config_bus bus;
-+
-+ printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
-+
-+ bus.mpc_type = MP_BUS;
-+ bus.mpc_busid = 0;
-+ memcpy(bus.mpc_bustype, "ISA ", 6);
-+ MP_bus_info(&bus);
-+
-+ construct_default_ioirq_mptable(0);
-+ }
-+
-+ } else
-+ BUG();
-+
-+ printk(KERN_INFO "Processors: %d\n", num_processors);
-+ /*
-+ * Only use the first configuration found.
-+ */
-+}
-+
-+static int __init smp_scan_config (unsigned long base, unsigned long length)
-+{
-+ extern void __bad_mpf_size(void);
-+ unsigned int *bp = isa_bus_to_virt(base);
-+ struct intel_mp_floating *mpf;
-+
-+ Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
-+ if (sizeof(*mpf) != 16)
-+ __bad_mpf_size();
-+
-+ while (length > 0) {
-+ mpf = (struct intel_mp_floating *)bp;
-+ if ((*bp == SMP_MAGIC_IDENT) &&
-+ (mpf->mpf_length == 1) &&
-+ !mpf_checksum((unsigned char *)bp, 16) &&
-+ ((mpf->mpf_specification == 1)
-+ || (mpf->mpf_specification == 4)) ) {
-+
-+ smp_found_config = 1;
-+ mpf_found = mpf;
-+ return 1;
-+ }
-+ bp += 4;
-+ length -= 16;
-+ }
-+ return 0;
-+}
-+
-+void __init find_smp_config(void)
-+{
-+ unsigned int address;
-+
-+ /*
-+ * FIXME: Linux assumes you have 640K of base ram..
-+ * this continues the error...
-+ *
-+ * 1) Scan the bottom 1K for a signature
-+ * 2) Scan the top 1K of base RAM
-+ * 3) Scan the 64K of bios
-+ */
-+ if (smp_scan_config(0x0,0x400) ||
-+ smp_scan_config(639*0x400,0x400) ||
-+ smp_scan_config(0xF0000,0x10000))
-+ return;
-+ /*
-+ * If it is an SMP machine we should know now.
-+ *
-+ * there is a real-mode segmented pointer pointing to the
-+ * 4K EBDA area at 0x40E, calculate and scan it here.
-+ *
-+ * NOTE! There are Linux loaders that will corrupt the EBDA
-+ * area, and as such this kind of SMP config may be less
-+ * trustworthy, simply because the SMP table may have been
-+ * stomped on during early boot. These loaders are buggy and
-+ * should be fixed.
-+ */
-+
-+ address = *(unsigned short *)phys_to_virt(0x40E);
-+ address <<= 4;
-+ if (smp_scan_config(address, 0x1000))
-+ return;
-+
-+ /* If we have come this far, we did not find an MP table */
-+ printk(KERN_INFO "No mptable found.\n");
-+}
-+
-+/* --------------------------------------------------------------------------
-+ ACPI-based MP Configuration
-+ -------------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_ACPI
-+
-+#ifndef CONFIG_XEN
-+void __init mp_register_lapic_address(u64 address)
-+{
-+ mp_lapic_addr = (unsigned long) address;
-+ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
-+ if (boot_cpu_id == -1U)
-+ boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
-+}
-+#endif
-+
-+void __cpuinit mp_register_lapic (u8 id, u8 enabled)
-+{
-+ struct mpc_config_processor processor;
-+ int boot_cpu = 0;
-+
-+ if (id == boot_cpu_id)
-+ boot_cpu = 1;
-+
-+#ifndef CONFIG_XEN
-+ processor.mpc_type = MP_PROCESSOR;
-+ processor.mpc_apicid = id;
-+ processor.mpc_apicver = 0;
-+ processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
-+ processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
-+ processor.mpc_cpufeature = 0;
-+ processor.mpc_featureflag = 0;
-+ processor.mpc_reserved[0] = 0;
-+ processor.mpc_reserved[1] = 0;
-+#endif
-+
-+ MP_processor_info(&processor);
-+}
-+
-+#define MP_ISA_BUS 0
-+#define MP_MAX_IOAPIC_PIN 127
-+
-+static struct mp_ioapic_routing {
-+ int apic_id;
-+ int gsi_start;
-+ int gsi_end;
-+ u32 pin_programmed[4];
-+} mp_ioapic_routing[MAX_IO_APICS];
-+
-+static int mp_find_ioapic(int gsi)
-+{
-+ int i = 0;
-+
-+ /* Find the IOAPIC that manages this GSI. */
-+ for (i = 0; i < nr_ioapics; i++) {
-+ if ((gsi >= mp_ioapic_routing[i].gsi_start)
-+ && (gsi <= mp_ioapic_routing[i].gsi_end))
-+ return i;
-+ }
-+
-+ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
-+ return -1;
-+}
-+
-+void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
-+{
-+ int idx = 0;
-+
-+ if (bad_ioapic(address))
-+ return;
-+
-+ idx = nr_ioapics++;
-+
-+ mp_ioapics[idx].mpc_type = MP_IOAPIC;
-+ mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
-+ mp_ioapics[idx].mpc_apicaddr = address;
-+
-+#ifndef CONFIG_XEN
-+ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-+#endif
-+ mp_ioapics[idx].mpc_apicid = id;
-+ mp_ioapics[idx].mpc_apicver = 0;
-+
-+ /*
-+ * Build basic IRQ lookup table to facilitate gsi->io_apic lookups
-+ * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
-+ */
-+ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
-+ mp_ioapic_routing[idx].gsi_start = gsi_base;
-+ mp_ioapic_routing[idx].gsi_end = gsi_base +
-+ io_apic_get_redir_entries(idx);
-+
-+ printk(KERN_INFO "IOAPIC[%d]: apic_id %d, address 0x%x, "
-+ "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
-+ mp_ioapics[idx].mpc_apicaddr,
-+ mp_ioapic_routing[idx].gsi_start,
-+ mp_ioapic_routing[idx].gsi_end);
-+}
-+
-+void __init
-+mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
-+{
-+ struct mpc_config_intsrc intsrc;
-+ int ioapic = -1;
-+ int pin = -1;
-+
-+ /*
-+ * Convert 'gsi' to 'ioapic.pin'.
-+ */
-+ ioapic = mp_find_ioapic(gsi);
-+ if (ioapic < 0)
-+ return;
-+ pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
-+
-+ /*
-+ * TBD: This check is for faulty timer entries, where the override
-+ * erroneously sets the trigger to level, resulting in a HUGE
-+ * increase of timer interrupts!
-+ */
-+ if ((bus_irq == 0) && (trigger == 3))
-+ trigger = 1;
-+
-+ intsrc.mpc_type = MP_INTSRC;
-+ intsrc.mpc_irqtype = mp_INT;
-+ intsrc.mpc_irqflag = (trigger << 2) | polarity;
-+ intsrc.mpc_srcbus = MP_ISA_BUS;
-+ intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
-+ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
-+ intsrc.mpc_dstirq = pin; /* INTIN# */
-+
-+ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
-+ intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
-+ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
-+ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
-+
-+ mp_irqs[mp_irq_entries] = intsrc;
-+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+ panic("Max # of irq sources exceeded!\n");
-+}
-+
-+void __init mp_config_acpi_legacy_irqs(void)
-+{
-+ struct mpc_config_intsrc intsrc;
-+ int i = 0;
-+ int ioapic = -1;
-+
-+ /*
-+ * Fabricate the legacy ISA bus (bus #31).
-+ */
-+ set_bit(MP_ISA_BUS, mp_bus_not_pci);
-+
-+ /*
-+ * Locate the IOAPIC that manages the ISA IRQs (0-15).
-+ */
-+ ioapic = mp_find_ioapic(0);
-+ if (ioapic < 0)
-+ return;
-+
-+ intsrc.mpc_type = MP_INTSRC;
-+ intsrc.mpc_irqflag = 0; /* Conforming */
-+ intsrc.mpc_srcbus = MP_ISA_BUS;
-+ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
-+
-+ /*
-+ * Use the default configuration for the IRQs 0-15. Unless
-+ * overridden by (MADT) interrupt source override entries.
-+ */
-+ for (i = 0; i < 16; i++) {
-+ int idx;
-+
-+ for (idx = 0; idx < mp_irq_entries; idx++) {
-+ struct mpc_config_intsrc *irq = mp_irqs + idx;
-+
-+ /* Do we already have a mapping for this ISA IRQ? */
-+ if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
-+ break;
-+
-+ /* Do we already have a mapping for this IOAPIC pin */
-+ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
-+ (irq->mpc_dstirq == i))
-+ break;
-+ }
-+
-+ if (idx != mp_irq_entries) {
-+ printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
-+ continue; /* IRQ already used */
-+ }
-+
-+ intsrc.mpc_irqtype = mp_INT;
-+ intsrc.mpc_srcbusirq = i; /* Identity mapped */
-+ intsrc.mpc_dstirq = i;
-+
-+ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
-+ "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
-+ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
-+ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
-+ intsrc.mpc_dstirq);
-+
-+ mp_irqs[mp_irq_entries] = intsrc;
-+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+ panic("Max # of irq sources exceeded!\n");
-+ }
-+}
-+
-+int mp_register_gsi(u32 gsi, int triggering, int polarity)
-+{
-+ int ioapic = -1;
-+ int ioapic_pin = 0;
-+ int idx, bit = 0;
-+
-+ if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
-+ return gsi;
-+
-+ /* Don't set up the ACPI SCI because it's already set up */
-+ if (acpi_gbl_FADT.sci_interrupt == gsi)
-+ return gsi;
-+
-+ ioapic = mp_find_ioapic(gsi);
-+ if (ioapic < 0) {
-+ printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
-+ return gsi;
-+ }
-+
-+ ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
-+
-+ /*
-+ * Avoid pin reprogramming. PRTs typically include entries
-+ * with redundant pin->gsi mappings (but unique PCI devices);
-+ * we only program the IOAPIC on the first.
-+ */
-+ bit = ioapic_pin % 32;
-+ idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
-+ if (idx > 3) {
-+ printk(KERN_ERR "Invalid reference to IOAPIC pin "
-+ "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
-+ ioapic_pin);
-+ return gsi;
-+ }
-+ if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
-+ Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
-+ mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
-+ return gsi;
-+ }
-+
-+ mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
-+
-+ io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-+ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
-+ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
-+ return gsi;
-+}
-+#endif /*CONFIG_ACPI*/
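
mp_register_gsi() above guards each IOAPIC pin with a small bitmap so that redundant PRT entries program a pin only once: idx selects one of four 32-bit words, bit selects the bit inside it, and a set bit means "already routed, skip". A stand-alone sketch of that program-once guard (hypothetical names, plain user-space C, not the kernel code itself):

#include <stdio.h>

#define MAX_PINS 128			/* mirrors MP_MAX_IOAPIC_PIN + 1 */

static unsigned int pin_programmed[4];	/* 4 x 32 bits = 128 pins */

/* Return 1 if the pin was already programmed, 0 if we just claimed it. */
static int claim_pin(int pin)
{
	int idx = pin / 32;
	int bit = pin % 32;

	if (pin < 0 || pin >= MAX_PINS)
		return 1;		/* treat out-of-range as "do nothing" */
	if (pin_programmed[idx] & (1u << bit))
		return 1;		/* already programmed, skip */
	pin_programmed[idx] |= 1u << bit;
	return 0;
}

int main(void)
{
	printf("pin 5 first time:  %s\n", claim_pin(5) ? "skip" : "program");
	printf("pin 5 second time: %s\n", claim_pin(5) ? "skip" : "program");
	return 0;
}
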
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/nmi.c ubuntu-gutsy-xen/arch/x86_64/kernel/nmi.c
---- ubuntu-gutsy/arch/x86_64/kernel/nmi.c 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/nmi.c 2007-08-18 12:38:02.000000000 -0400
-@@ -28,10 +28,17 @@
- #include <asm/proto.h>
- #include <asm/mce.h>
-
-+#ifdef CONFIG_SYSCTL
- int unknown_nmi_panic;
--int nmi_watchdog_enabled;
-+static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
-+#endif
-+
- int panic_on_unrecovered_nmi;
-
-+#ifndef CONFIG_XEN
-+
-+int nmi_watchdog_enabled;
-+
- static cpumask_t backtrace_mask = CPU_MASK_NONE;
-
- /* nmi_active:
-@@ -48,9 +55,6 @@
-
- static DEFINE_PER_CPU(short, wd_enabled);
-
--/* local prototypes */
--static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
--
- /* Run after command line and cpu_init init, but before all other checks */
- void nmi_watchdog_default(void)
- {
-@@ -382,6 +386,8 @@
- return rc;
- }
-
-+#endif
-+
- asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
- {
- nmi_enter();
-@@ -411,6 +417,7 @@
- return 0;
- }
-
-+#ifndef CONFIG_XEN
- /*
- * proc handler for /proc/sys/kernel/nmi
- */
-@@ -445,9 +452,11 @@
- }
- return 0;
- }
-+#endif
-
- #endif
-
-+#ifndef CONFIG_XEN
- void __trigger_all_cpu_backtrace(void)
- {
- int i;
-@@ -464,3 +473,4 @@
- EXPORT_SYMBOL(nmi_active);
- EXPORT_SYMBOL(nmi_watchdog);
- EXPORT_SYMBOL(touch_nmi_watchdog);
-+#endif /* CONFIG_XEN */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/pci-swiotlb-xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/pci-swiotlb-xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/pci-swiotlb-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/pci-swiotlb-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,58 @@
-+/* Glue code to lib/swiotlb.c */
-+
-+#include <linux/pci.h>
-+#include <linux/cache.h>
-+#include <linux/module.h>
-+#include <linux/dma-mapping.h>
-+
-+#include <asm/proto.h>
-+#include <asm/swiotlb.h>
-+#include <asm/dma.h>
-+
-+#if 0
-+int swiotlb __read_mostly;
-+EXPORT_SYMBOL(swiotlb);
-+#endif
-+
-+void swiotlb_init(void);
-+
-+const struct dma_mapping_ops swiotlb_dma_ops = {
-+#if 0
-+ .mapping_error = swiotlb_dma_mapping_error,
-+ .alloc_coherent = swiotlb_alloc_coherent,
-+ .free_coherent = swiotlb_free_coherent,
-+ .map_single = swiotlb_map_single,
-+ .unmap_single = swiotlb_unmap_single,
-+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-+ .sync_single_for_device = swiotlb_sync_single_for_device,
-+ .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
-+ .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
-+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
-+ .map_sg = swiotlb_map_sg,
-+ .unmap_sg = swiotlb_unmap_sg,
-+ .dma_supported = NULL,
-+#endif
-+};
-+
-+void __init pci_swiotlb_init(void)
-+{
-+#if 0
-+ /* don't initialize swiotlb if iommu=off (no_iommu=1) */
-+ if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
-+ swiotlb = 1;
-+ if (swiotlb_force)
-+ swiotlb = 1;
-+ if (swiotlb) {
-+ printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
-+ swiotlb_init();
-+ dma_ops = &swiotlb_dma_ops;
-+ }
-+#else
-+ swiotlb_init();
-+ if (swiotlb) {
-+ printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
-+ dma_ops = &swiotlb_dma_ops;
-+ }
-+#endif
-+}
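
The Xen glue above changes the pci-swiotlb policy: the native code (kept under #if 0) only enables bounce buffering when no IOMMU was detected and memory extends past the 32-bit DMA limit, or when explicitly forced, whereas the Xen variant always runs swiotlb_init() and installs the ops if swiotlb ends up enabled. A stand-alone sketch contrasting the two decisions (hypothetical helper names, not kernel API):

#include <stdio.h>

#define MAX_DMA32_PFN (0xffffffffUL >> 12)	/* pages below 4 GiB */

/* Native policy: bounce-buffer only if needed or explicitly forced. */
static int native_wants_swiotlb(int iommu_detected, int no_iommu,
				unsigned long end_pfn, int swiotlb_force)
{
	if (swiotlb_force)
		return 1;
	return !iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN;
}

/* Xen policy in the patch: always initialise, then use it if enabled. */
static int xen_wants_swiotlb(int swiotlb_enabled_after_init)
{
	return swiotlb_enabled_after_init;
}

int main(void)
{
	printf("native, 2 GiB box, no iommu: %d\n",
	       native_wants_swiotlb(0, 0, 2UL << (30 - 12), 0));
	printf("xen, init enabled swiotlb:   %d\n", xen_wants_swiotlb(1));
	return 0;
}
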
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/process-xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/process-xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/process-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/process-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,871 @@
-+/*
-+ * linux/arch/x86-64/kernel/process.c
-+ *
-+ * Copyright (C) 1995 Linus Torvalds
-+ *
-+ * Pentium III FXSR, SSE support
-+ * Gareth Hughes <gareth@valinux.com>, May 2000
-+ *
-+ * X86-64 port
-+ * Andi Kleen.
-+ *
-+ * CPU hotplug support - ashok.raj@intel.com
-+ *
-+ * Jun Nakajima <jun.nakajima@intel.com>
-+ * Modified for Xen
-+ */
-+
-+/*
-+ * This file handles the architecture-dependent parts of process handling..
-+ */
-+
-+#include <stdarg.h>
-+
-+#include <linux/cpu.h>
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/elfcore.h>
-+#include <linux/smp.h>
-+#include <linux/slab.h>
-+#include <linux/user.h>
-+#include <linux/module.h>
-+#include <linux/a.out.h>
-+#include <linux/interrupt.h>
-+#include <linux/delay.h>
-+#include <linux/ptrace.h>
-+#include <linux/utsname.h>
-+#include <linux/random.h>
-+#include <linux/notifier.h>
-+#include <linux/kprobes.h>
-+#include <linux/kdebug.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/system.h>
-+#include <asm/io.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/mmu_context.h>
-+#include <asm/pda.h>
-+#include <asm/prctl.h>
-+#include <xen/interface/platform.h>
-+#include <xen/interface/physdev.h>
-+#include <xen/interface/vcpu.h>
-+#include <asm/desc.h>
-+#include <asm/proto.h>
-+#include <asm/hardirq.h>
-+#include <asm/ia32.h>
-+#include <asm/idle.h>
-+
-+#include <xen/cpu_hotplug.h>
-+
-+asmlinkage extern void ret_from_fork(void);
-+
-+unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
-+
-+unsigned long boot_option_idle_override = 0;
-+EXPORT_SYMBOL(boot_option_idle_override);
-+
-+/*
-+ * Powermanagement idle function, if any..
-+ */
-+void (*pm_idle)(void);
-+EXPORT_SYMBOL(pm_idle);
-+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
-+
-+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-+
-+void idle_notifier_register(struct notifier_block *n)
-+{
-+ atomic_notifier_chain_register(&idle_notifier, n);
-+}
-+EXPORT_SYMBOL_GPL(idle_notifier_register);
-+
-+void idle_notifier_unregister(struct notifier_block *n)
-+{
-+ atomic_notifier_chain_unregister(&idle_notifier, n);
-+}
-+EXPORT_SYMBOL(idle_notifier_unregister);
-+
-+void enter_idle(void)
-+{
-+ write_pda(isidle, 1);
-+ atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
-+}
-+
-+static void __exit_idle(void)
-+{
-+ if (test_and_clear_bit_pda(0, isidle) == 0)
-+ return;
-+ atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
-+}
-+
-+/* Called from interrupts to signify idle end */
-+void exit_idle(void)
-+{
-+ /* idle loop has pid 0 */
-+ if (current->pid)
-+ return;
-+ __exit_idle();
-+}
-+
-+/*
-+ * On SMP it's slightly faster (but much more power-consuming!)
-+ * to poll the ->need_resched flag instead of waiting for the
-+ * cross-CPU IPI to arrive. Use this option with caution.
-+ */
-+static void poll_idle (void)
-+{
-+ local_irq_enable();
-+ cpu_relax();
-+}
-+
-+static void xen_idle(void)
-+{
-+ current_thread_info()->status &= ~TS_POLLING;
-+ /*
-+ * TS_POLLING-cleared state must be visible before we
-+ * test NEED_RESCHED:
-+ */
-+ smp_mb();
-+ local_irq_disable();
-+ if (!need_resched())
-+ safe_halt();
-+ else
-+ local_irq_enable();
-+ current_thread_info()->status |= TS_POLLING;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+static inline void play_dead(void)
-+{
-+ idle_task_exit();
-+ local_irq_disable();
-+ cpu_clear(smp_processor_id(), cpu_initialized);
-+ preempt_enable_no_resched();
-+ HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
-+ cpu_bringup();
-+}
-+#else
-+static inline void play_dead(void)
-+{
-+ BUG();
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+/*
-+ * The idle thread. There's no useful work to be
-+ * done, so just try to conserve power and have a
-+ * low exit latency (ie sit in a loop waiting for
-+ * somebody to say that they'd like to reschedule)
-+ */
-+void cpu_idle (void)
-+{
-+ current_thread_info()->status |= TS_POLLING;
-+ /* endless idle loop with no priority at all */
-+ while (1) {
-+ while (!need_resched()) {
-+ void (*idle)(void);
-+
-+ if (__get_cpu_var(cpu_idle_state))
-+ __get_cpu_var(cpu_idle_state) = 0;
-+ rmb();
-+ idle = xen_idle; /* no alternatives */
-+ if (cpu_is_offline(smp_processor_id()))
-+ play_dead();
-+ /*
-+ * Idle routines should keep interrupts disabled
-+ * from here on, until they go to idle.
-+ * Otherwise, idle callbacks can misfire.
-+ */
-+ local_irq_disable();
-+ enter_idle();
-+ idle();
-+ /* In many cases the interrupt that ended idle
-+ has already called exit_idle. But some idle
-+ loops can be woken up without interrupt. */
-+ __exit_idle();
-+ }
-+
-+ preempt_enable_no_resched();
-+ schedule();
-+ preempt_disable();
-+ }
-+}
-+
-+void cpu_idle_wait(void)
-+{
-+ unsigned int cpu, this_cpu = get_cpu();
-+ cpumask_t map, tmp = current->cpus_allowed;
-+
-+ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-+ put_cpu();
-+
-+ cpus_clear(map);
-+ for_each_online_cpu(cpu) {
-+ per_cpu(cpu_idle_state, cpu) = 1;
-+ cpu_set(cpu, map);
-+ }
-+
-+ __get_cpu_var(cpu_idle_state) = 0;
-+
-+ wmb();
-+ do {
-+ ssleep(1);
-+ for_each_online_cpu(cpu) {
-+ if (cpu_isset(cpu, map) &&
-+ !per_cpu(cpu_idle_state, cpu))
-+ cpu_clear(cpu, map);
-+ }
-+ cpus_and(map, map, cpu_online_map);
-+ } while (!cpus_empty(map));
-+
-+ set_cpus_allowed(current, tmp);
-+}
-+EXPORT_SYMBOL_GPL(cpu_idle_wait);
-+
-+void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
-+{
-+}
-+
-+static int __init idle_setup (char *str)
-+{
-+ if (!strcmp(str, "poll")) {
-+ printk("using polling idle threads.\n");
-+ pm_idle = poll_idle;
-+ } else if (!strcmp(str, "mwait"))
-+ force_mwait = 1;
-+ else
-+ return -1;
-+
-+ boot_option_idle_override = 1;
-+ return 0;
-+}
-+early_param("idle", idle_setup);
-+
-+/* Prints also some state that isn't saved in the pt_regs */
-+void __show_regs(struct pt_regs * regs)
-+{
-+ unsigned long fs, gs, shadowgs;
-+ unsigned int fsindex,gsindex;
-+ unsigned int ds,cs,es;
-+
-+ printk("\n");
-+ print_modules();
-+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
-+ current->pid, current->comm, print_tainted(),
-+ init_utsname()->release,
-+ (int)strcspn(init_utsname()->version, " "),
-+ init_utsname()->version);
-+ printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
-+ printk_address(regs->rip);
-+ printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
-+ regs->eflags);
-+ printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
-+ regs->rax, regs->rbx, regs->rcx);
-+ printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
-+ regs->rdx, regs->rsi, regs->rdi);
-+ printk("RBP: %016lx R08: %016lx R09: %016lx\n",
-+ regs->rbp, regs->r8, regs->r9);
-+ printk("R10: %016lx R11: %016lx R12: %016lx\n",
-+ regs->r10, regs->r11, regs->r12);
-+ printk("R13: %016lx R14: %016lx R15: %016lx\n",
-+ regs->r13, regs->r14, regs->r15);
-+
-+ asm("mov %%ds,%0" : "=r" (ds));
-+ asm("mov %%cs,%0" : "=r" (cs));
-+ asm("mov %%es,%0" : "=r" (es));
-+ asm("mov %%fs,%0" : "=r" (fsindex));
-+ asm("mov %%gs,%0" : "=r" (gsindex));
-+
-+ rdmsrl(MSR_FS_BASE, fs);
-+ rdmsrl(MSR_GS_BASE, gs);
-+ rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
-+
-+ printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
-+ fs,fsindex,gs,gsindex,shadowgs);
-+ printk("CS: %04x DS: %04x ES: %04x\n", cs, ds, es);
-+
-+}
-+
-+void show_regs(struct pt_regs *regs)
-+{
-+ printk("CPU %d:", smp_processor_id());
-+ __show_regs(regs);
-+ show_trace(NULL, regs, (void *)(regs + 1));
-+}
-+
-+/*
-+ * Free current thread data structures etc..
-+ */
-+void exit_thread(void)
-+{
-+ struct task_struct *me = current;
-+ struct thread_struct *t = &me->thread;
-+
-+ if (me->thread.io_bitmap_ptr) {
-+#ifndef CONFIG_X86_NO_TSS
-+ struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
-+#endif
-+#ifdef CONFIG_XEN
-+ struct physdev_set_iobitmap iobmp_op;
-+ memset(&iobmp_op, 0, sizeof(iobmp_op));
-+#endif
-+
-+ kfree(t->io_bitmap_ptr);
-+ t->io_bitmap_ptr = NULL;
-+ clear_thread_flag(TIF_IO_BITMAP);
-+ /*
-+ * Careful, clear this in the TSS too:
-+ */
-+#ifndef CONFIG_X86_NO_TSS
-+ memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
-+ put_cpu();
-+#endif
-+#ifdef CONFIG_XEN
-+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobmp_op);
-+#endif
-+ t->io_bitmap_max = 0;
-+ }
-+}
-+
-+void load_gs_index(unsigned gs)
-+{
-+ HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs);
-+}
-+
-+void flush_thread(void)
-+{
-+ struct task_struct *tsk = current;
-+
-+ if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
-+ clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
-+ if (test_tsk_thread_flag(tsk, TIF_IA32)) {
-+ clear_tsk_thread_flag(tsk, TIF_IA32);
-+ } else {
-+ set_tsk_thread_flag(tsk, TIF_IA32);
-+ current_thread_info()->status |= TS_COMPAT;
-+ }
-+ }
-+ clear_tsk_thread_flag(tsk, TIF_DEBUG);
-+
-+ tsk->thread.debugreg0 = 0;
-+ tsk->thread.debugreg1 = 0;
-+ tsk->thread.debugreg2 = 0;
-+ tsk->thread.debugreg3 = 0;
-+ tsk->thread.debugreg6 = 0;
-+ tsk->thread.debugreg7 = 0;
-+ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
-+ /*
-+ * Forget coprocessor state..
-+ */
-+ clear_fpu(tsk);
-+ clear_used_math();
-+}
-+
-+void release_thread(struct task_struct *dead_task)
-+{
-+ if (dead_task->mm) {
-+ if (dead_task->mm->context.size) {
-+ printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
-+ dead_task->comm,
-+ dead_task->mm->context.ldt,
-+ dead_task->mm->context.size);
-+ BUG();
-+ }
-+ }
-+}
-+
-+static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
-+{
-+ struct user_desc ud = {
-+ .base_addr = addr,
-+ .limit = 0xfffff,
-+ .seg_32bit = 1,
-+ .limit_in_pages = 1,
-+ .useable = 1,
-+ };
-+ struct n_desc_struct *desc = (void *)t->thread.tls_array;
-+ desc += tls;
-+ desc->a = LDT_entry_a(&ud);
-+ desc->b = LDT_entry_b(&ud);
-+}
-+
-+static inline u32 read_32bit_tls(struct task_struct *t, int tls)
-+{
-+ struct desc_struct *desc = (void *)t->thread.tls_array;
-+ desc += tls;
-+ return desc->base0 |
-+ (((u32)desc->base1) << 16) |
-+ (((u32)desc->base2) << 24);
-+}
-+
-+/*
-+ * This gets called before we allocate a new thread and copy
-+ * the current task into it.
-+ */
-+void prepare_to_copy(struct task_struct *tsk)
-+{
-+ unlazy_fpu(tsk);
-+}
-+
-+int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
-+ unsigned long unused,
-+ struct task_struct * p, struct pt_regs * regs)
-+{
-+ int err;
-+ struct pt_regs * childregs;
-+ struct task_struct *me = current;
-+
-+ childregs = ((struct pt_regs *)
-+ (THREAD_SIZE + task_stack_page(p))) - 1;
-+ *childregs = *regs;
-+
-+ childregs->rax = 0;
-+ childregs->rsp = rsp;
-+ if (rsp == ~0UL)
-+ childregs->rsp = (unsigned long)childregs;
-+
-+ p->thread.rsp = (unsigned long) childregs;
-+ p->thread.rsp0 = (unsigned long) (childregs+1);
-+ p->thread.userrsp = me->thread.userrsp;
-+
-+ set_tsk_thread_flag(p, TIF_FORK);
-+
-+ p->thread.fs = me->thread.fs;
-+ p->thread.gs = me->thread.gs;
-+
-+ asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
-+ asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
-+ asm("mov %%es,%0" : "=m" (p->thread.es));
-+ asm("mov %%ds,%0" : "=m" (p->thread.ds));
-+
-+ if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
-+ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-+ if (!p->thread.io_bitmap_ptr) {
-+ p->thread.io_bitmap_max = 0;
-+ return -ENOMEM;
-+ }
-+ memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
-+ IO_BITMAP_BYTES);
-+ set_tsk_thread_flag(p, TIF_IO_BITMAP);
-+ }
-+
-+ /*
-+ * Set a new TLS for the child thread?
-+ */
-+ if (clone_flags & CLONE_SETTLS) {
-+#ifdef CONFIG_IA32_EMULATION
-+ if (test_thread_flag(TIF_IA32))
-+ err = ia32_child_tls(p, childregs);
-+ else
-+#endif
-+ err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
-+ if (err)
-+ goto out;
-+ }
-+ p->thread.iopl = current->thread.iopl;
-+
-+ err = 0;
-+out:
-+ if (err && p->thread.io_bitmap_ptr) {
-+ kfree(p->thread.io_bitmap_ptr);
-+ p->thread.io_bitmap_max = 0;
-+ }
-+ return err;
-+}
-+
-+/*
-+ * This special macro can be used to load a debugging register
-+ */
-+#define loaddebug(thread,r) set_debugreg(thread->debugreg ## r, r)
-+
-+static inline void __switch_to_xtra(struct task_struct *prev_p,
-+ struct task_struct *next_p)
-+{
-+ struct thread_struct *prev, *next;
-+
-+ prev = &prev_p->thread,
-+ next = &next_p->thread;
-+
-+ if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
-+ loaddebug(next, 0);
-+ loaddebug(next, 1);
-+ loaddebug(next, 2);
-+ loaddebug(next, 3);
-+ /* no 4 and 5 */
-+ loaddebug(next, 6);
-+ loaddebug(next, 7);
-+ }
-+}
-+
-+/*
-+ * switch_to(x,y) should switch tasks from x to y.
-+ *
-+ * This could still be optimized:
-+ * - fold all the options into a flag word and test it with a single test.
-+ * - could test fs/gs bitsliced
-+ *
-+ * Kprobes not supported here. Set the probe on schedule instead.
-+ */
-+__kprobes struct task_struct *
-+__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+ struct thread_struct *prev = &prev_p->thread,
-+ *next = &next_p->thread;
-+ int cpu = smp_processor_id();
-+#ifndef CONFIG_X86_NO_TSS
-+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
-+#endif
-+ struct physdev_set_iopl iopl_op;
-+ struct physdev_set_iobitmap iobmp_op;
-+ multicall_entry_t _mcl[8], *mcl = _mcl;
-+
-+ /* we're going to use this soon, after a few expensive things */
-+ if (next_p->fpu_counter>5)
-+ prefetch(&next->i387.fxsave);
-+
-+ /*
-+ * This is basically '__unlazy_fpu', except that we queue a
-+ * multicall to indicate FPU task switch, rather than
-+ * synchronously trapping to Xen.
-+ * The AMD workaround requires it to be after DS reload, or
-+ * after DS has been cleared, which we do in __prepare_arch_switch.
-+ */
-+ if (task_thread_info(prev_p)->status & TS_USEDFPU) {
-+ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
-+ mcl->op = __HYPERVISOR_fpu_taskswitch;
-+ mcl->args[0] = 1;
-+ mcl++;
-+ } else
-+ prev_p->fpu_counter = 0;
-+
-+ /*
-+ * Reload esp0, LDT and the page table pointer:
-+ */
-+ mcl->op = __HYPERVISOR_stack_switch;
-+ mcl->args[0] = __KERNEL_DS;
-+ mcl->args[1] = next->rsp0;
-+ mcl++;
-+
-+ /*
-+ * Load the per-thread Thread-Local Storage descriptor.
-+ * This is load_TLS(next, cpu) with multicalls.
-+ */
-+#define C(i) do { \
-+ if (unlikely(next->tls_array[i] != prev->tls_array[i])) { \
-+ mcl->op = __HYPERVISOR_update_descriptor; \
-+ mcl->args[0] = virt_to_machine( \
-+ &cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]); \
-+ mcl->args[1] = next->tls_array[i]; \
-+ mcl++; \
-+ } \
-+} while (0)
-+ C(0); C(1); C(2);
-+#undef C
-+
-+ if (unlikely(prev->iopl != next->iopl)) {
-+ iopl_op.iopl = (next->iopl == 0) ? 1 : next->iopl;
-+ mcl->op = __HYPERVISOR_physdev_op;
-+ mcl->args[0] = PHYSDEVOP_set_iopl;
-+ mcl->args[1] = (unsigned long)&iopl_op;
-+ mcl++;
-+ }
-+
-+ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
-+ set_xen_guest_handle(iobmp_op.bitmap,
-+ (char *)next->io_bitmap_ptr);
-+ iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
-+ mcl->op = __HYPERVISOR_physdev_op;
-+ mcl->args[0] = PHYSDEVOP_set_iobitmap;
-+ mcl->args[1] = (unsigned long)&iobmp_op;
-+ mcl++;
-+ }
-+
-+ (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
-+ /*
-+ * Switch DS and ES.
-+ * This won't pick up thread selector changes, but I guess that is ok.
-+ */
-+ if (unlikely(next->es))
-+ loadsegment(es, next->es);
-+
-+ if (unlikely(next->ds))
-+ loadsegment(ds, next->ds);
-+
-+ /*
-+ * Switch FS and GS.
-+ */
-+ if (unlikely(next->fsindex))
-+ loadsegment(fs, next->fsindex);
-+
-+ if (next->fs)
-+ HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs);
-+
-+ if (unlikely(next->gsindex))
-+ load_gs_index(next->gsindex);
-+
-+ if (next->gs)
-+ HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs);
-+
-+ /*
-+ * Switch the PDA context.
-+ */
-+ prev->userrsp = read_pda(oldrsp);
-+ write_pda(oldrsp, next->userrsp);
-+ write_pda(pcurrent, next_p);
-+ write_pda(kernelstack,
-+ (unsigned long)task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
-+#ifdef CONFIG_CC_STACKPROTECTOR
-+ write_pda(stack_canary, next_p->stack_canary);
-+
-+ /*
-+ * Build time only check to make sure the stack_canary is at
-+ * offset 40 in the pda; this is a gcc ABI requirement
-+ */
-+ BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
-+#endif
-+
-+ /*
-+ * Now maybe reload the debug registers
-+ */
-+ if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
-+ __switch_to_xtra(prev_p, next_p);
-+
-+ /* If the task has used fpu the last 5 timeslices, just do a full
-+ * restore of the math state immediately to avoid the trap; the
-+ * chances of needing FPU soon are obviously high now
-+ */
-+ if (next_p->fpu_counter>5)
-+ math_state_restore();
-+ return prev_p;
-+}
-+
-+/*
-+ * sys_execve() executes a new program.
-+ */
-+asmlinkage
-+long sys_execve(char __user *name, char __user * __user *argv,
-+ char __user * __user *envp, struct pt_regs regs)
-+{
-+ long error;
-+ char * filename;
-+
-+ filename = getname(name);
-+ error = PTR_ERR(filename);
-+ if (IS_ERR(filename))
-+ return error;
-+ error = do_execve(filename, argv, envp, &regs);
-+ if (error == 0) {
-+ task_lock(current);
-+ current->ptrace &= ~PT_DTRACE;
-+ task_unlock(current);
-+ }
-+ putname(filename);
-+ return error;
-+}
-+
-+void set_personality_64bit(void)
-+{
-+ /* inherit personality from parent */
-+
-+ /* Make sure to be in 64bit mode */
-+ clear_thread_flag(TIF_IA32);
-+
-+ /* TBD: overwrites user setup. Should have two bits.
-+ But 64bit processes have always behaved this way,
-+ so it's not too bad. The main problem is just that
-+	   32bit children are affected again. */
-+ current->personality &= ~READ_IMPLIES_EXEC;
-+}
-+
-+asmlinkage long sys_fork(struct pt_regs *regs)
-+{
-+ return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
-+}
-+
-+asmlinkage long
-+sys_clone(unsigned long clone_flags, unsigned long newsp,
-+ void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
-+{
-+ if (!newsp)
-+ newsp = regs->rsp;
-+ return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
-+}
-+
-+/*
-+ * This is trivial, and on the face of it looks like it
-+ * could equally well be done in user mode.
-+ *
-+ * Not so, for quite unobvious reasons - register pressure.
-+ * In user mode vfork() cannot have a stack frame, and if
-+ * done by calling the "clone()" system call directly, you
-+ * do not have enough call-clobbered registers to hold all
-+ * the information you need.
-+ */
-+asmlinkage long sys_vfork(struct pt_regs *regs)
-+{
-+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
-+ NULL, NULL);
-+}
-+
-+unsigned long get_wchan(struct task_struct *p)
-+{
-+ unsigned long stack;
-+ u64 fp,rip;
-+ int count = 0;
-+
-+ if (!p || p == current || p->state==TASK_RUNNING)
-+ return 0;
-+ stack = (unsigned long)task_stack_page(p);
-+ if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
-+ return 0;
-+ fp = *(u64 *)(p->thread.rsp);
-+ do {
-+ if (fp < (unsigned long)stack ||
-+ fp > (unsigned long)stack+THREAD_SIZE)
-+ return 0;
-+ rip = *(u64 *)(fp+8);
-+ if (!in_sched_functions(rip))
-+ return rip;
-+ fp = *(u64 *)fp;
-+ } while (count++ < 16);
-+ return 0;
-+}
-+
-+long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
-+{
-+ int ret = 0;
-+ int doit = task == current;
-+ int cpu;
-+
-+ switch (code) {
-+ case ARCH_SET_GS:
-+ if (addr >= TASK_SIZE_OF(task))
-+ return -EPERM;
-+ cpu = get_cpu();
-+ /* handle small bases via the GDT because that's faster to
-+ switch. */
-+ if (addr <= 0xffffffff) {
-+ set_32bit_tls(task, GS_TLS, addr);
-+ if (doit) {
-+ load_TLS(&task->thread, cpu);
-+ load_gs_index(GS_TLS_SEL);
-+ }
-+ task->thread.gsindex = GS_TLS_SEL;
-+ task->thread.gs = 0;
-+ } else {
-+ task->thread.gsindex = 0;
-+ task->thread.gs = addr;
-+ if (doit) {
-+ load_gs_index(0);
-+ ret = HYPERVISOR_set_segment_base(
-+ SEGBASE_GS_USER, addr);
-+ }
-+ }
-+ put_cpu();
-+ break;
-+ case ARCH_SET_FS:
-+ /* Not strictly needed for fs, but do it for symmetry
-+ with gs */
-+ if (addr >= TASK_SIZE_OF(task))
-+ return -EPERM;
-+ cpu = get_cpu();
-+ /* handle small bases via the GDT because that's faster to
-+ switch. */
-+ if (addr <= 0xffffffff) {
-+ set_32bit_tls(task, FS_TLS, addr);
-+ if (doit) {
-+ load_TLS(&task->thread, cpu);
-+ asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
-+ }
-+ task->thread.fsindex = FS_TLS_SEL;
-+ task->thread.fs = 0;
-+ } else {
-+ task->thread.fsindex = 0;
-+ task->thread.fs = addr;
-+ if (doit) {
-+ /* set the selector to 0 to not confuse
-+ __switch_to */
-+ asm volatile("movl %0,%%fs" :: "r" (0));
-+ ret = HYPERVISOR_set_segment_base(SEGBASE_FS,
-+ addr);
-+ }
-+ }
-+ put_cpu();
-+ break;
-+ case ARCH_GET_FS: {
-+ unsigned long base;
-+ if (task->thread.fsindex == FS_TLS_SEL)
-+ base = read_32bit_tls(task, FS_TLS);
-+ else if (doit)
-+ rdmsrl(MSR_FS_BASE, base);
-+ else
-+ base = task->thread.fs;
-+ ret = put_user(base, (unsigned long __user *)addr);
-+ break;
-+ }
-+ case ARCH_GET_GS: {
-+ unsigned long base;
-+ unsigned gsindex;
-+ if (task->thread.gsindex == GS_TLS_SEL)
-+ base = read_32bit_tls(task, GS_TLS);
-+ else if (doit) {
-+ asm("movl %%gs,%0" : "=r" (gsindex));
-+ if (gsindex)
-+ rdmsrl(MSR_KERNEL_GS_BASE, base);
-+ else
-+ base = task->thread.gs;
-+ }
-+ else
-+ base = task->thread.gs;
-+ ret = put_user(base, (unsigned long __user *)addr);
-+ break;
-+ }
-+
-+ default:
-+ ret = -EINVAL;
-+ break;
-+ }
-+
-+ return ret;
-+}
-+
-+long sys_arch_prctl(int code, unsigned long addr)
-+{
-+ return do_arch_prctl(current, code, addr);
-+}
-+
-+/*
-+ * Capture the user space registers if the task is not running (in user space)
-+ */
-+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-+{
-+ struct pt_regs *pp, ptregs;
-+
-+ pp = task_pt_regs(tsk);
-+
-+ ptregs = *pp;
-+ ptregs.cs &= 0xffff;
-+ ptregs.ss &= 0xffff;
-+
-+ elf_core_copy_regs(regs, &ptregs);
-+
-+ boot_option_idle_override = 1;
-+ return 1;
-+}
-+
-+unsigned long arch_align_stack(unsigned long sp)
-+{
-+ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-+ sp -= get_random_int() % 8192;
-+ return sp & ~0xf;
-+}
-+
-+#ifndef CONFIG_SMP
-+void _restore_vcpu(void)
-+{
-+}
-+#endif
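
The context-switch path above explains its central trick in its comments: rather than trapping into Xen once per privileged operation, __switch_to() queues the FPU switch, stack switch, TLS descriptor updates, iopl and I/O-bitmap changes into a multicall list and issues a single HYPERVISOR_multicall. A stand-alone sketch of that batch-then-flush pattern (hypothetical op codes and names; the real multicall_entry_t and hypercall come from the Xen interface headers):

#include <stdio.h>

/* Simplified stand-in for multicall_entry_t: an op code plus arguments. */
struct mc_entry {
	int op;
	unsigned long args[2];
};

enum { OP_STACK_SWITCH, OP_UPDATE_DESCRIPTOR, OP_SET_IOPL };

/* Stand-in for HYPERVISOR_multicall(): one "trap" handles the whole batch. */
static void flush_multicalls(const struct mc_entry *mcl, int n)
{
	int i;

	printf("one hypercall covering %d queued operations:\n", n);
	for (i = 0; i < n; i++)
		printf("  op=%d args=%lu,%lu\n",
		       mcl[i].op, mcl[i].args[0], mcl[i].args[1]);
}

int main(void)
{
	struct mc_entry batch[8], *mcl = batch;

	/* Queue operations the way the context switch does ... */
	mcl->op = OP_STACK_SWITCH;
	mcl->args[0] = 0x10;		/* hypothetical __KERNEL_DS */
	mcl->args[1] = 0xdeadb000UL;	/* hypothetical rsp0 */
	mcl++;

	mcl->op = OP_SET_IOPL;
	mcl->args[0] = 1;
	mcl->args[1] = 0;
	mcl++;

	/* ... then pay the guest/hypervisor transition cost only once. */
	flush_multicalls(batch, (int)(mcl - batch));
	return 0;
}
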
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/relocate_kernel.S ubuntu-gutsy-xen/arch/x86_64/kernel/relocate_kernel.S
---- ubuntu-gutsy/arch/x86_64/kernel/relocate_kernel.S 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/relocate_kernel.S 2007-08-18 12:38:02.000000000 -0400
-@@ -159,13 +159,39 @@
- movq PTR(PA_PGD)(%rsi), %r9
- movq %r9, %cr3
-
-+ /* setup idt */
-+ movq %r8, %rax
-+ addq $(idt_80 - relocate_kernel), %rax
-+ lidtq (%rax)
-+
-+ /* setup gdt */
-+ movq %r8, %rax
-+ addq $(gdt - relocate_kernel), %rax
-+ movq %r8, %r9
-+ addq $((gdt_80 - relocate_kernel) + 2), %r9
-+ movq %rax, (%r9)
-+
-+ movq %r8, %rax
-+ addq $(gdt_80 - relocate_kernel), %rax
-+ lgdtq (%rax)
-+
-+ /* setup data segment registers */
-+ xorl %eax, %eax
-+ movl %eax, %ds
-+ movl %eax, %es
-+ movl %eax, %fs
-+ movl %eax, %gs
-+ movl %eax, %ss
-+
- /* setup a new stack at the end of the physical control page */
- lea 4096(%r8), %rsp
-
-- /* jump to identity mapped page */
-- addq $(identity_mapped - relocate_kernel), %r8
-- pushq %r8
-- ret
-+ /* load new code segment and jump to identity mapped page */
-+ movq %r8, %rax
-+ addq $(identity_mapped - relocate_kernel), %rax
-+ pushq $(gdt_cs - gdt)
-+ pushq %rax
-+ lretq
-
- identity_mapped:
- /* store the start address on the stack */
-@@ -272,5 +298,19 @@
- xorq %r13, %r13
- xorq %r14, %r14
- xorq %r15, %r15
--
- ret
-+
-+ .align 16
-+gdt:
-+ .quad 0x0000000000000000 /* NULL descriptor */
-+gdt_cs:
-+ .quad 0x00af9a000000ffff
-+gdt_end:
-+
-+gdt_80:
-+ .word gdt_end - gdt - 1 /* limit */
-+ .quad 0 /* base - filled in by code above */
-+
-+idt_80:
-+ .word 0 /* limit */
-+ .quad 0 /* base */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/setup64-xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/setup64-xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/setup64-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/setup64-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,358 @@
-+/*
-+ * X86-64 specific CPU setup.
-+ * Copyright (C) 1995 Linus Torvalds
-+ * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
-+ * See setup.c for older changelog.
-+ *
-+ * Jun Nakajima <jun.nakajima@intel.com>
-+ * Modified for Xen
-+ *
-+ */
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/string.h>
-+#include <linux/bootmem.h>
-+#include <linux/bitops.h>
-+#include <linux/module.h>
-+#include <asm/bootsetup.h>
-+#include <asm/pda.h>
-+#include <asm/pgtable.h>
-+#include <asm/processor.h>
-+#include <asm/desc.h>
-+#include <asm/atomic.h>
-+#include <asm/mmu_context.h>
-+#include <asm/smp.h>
-+#include <asm/i387.h>
-+#include <asm/percpu.h>
-+#include <asm/proto.h>
-+#include <asm/sections.h>
-+#ifdef CONFIG_XEN
-+#include <asm/hypervisor.h>
-+#endif
-+
-+char x86_boot_params[BOOT_PARAM_SIZE] __initdata;
-+
-+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
-+
-+struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
-+EXPORT_SYMBOL(_cpu_pda);
-+struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
-+
-+#ifndef CONFIG_X86_NO_IDT
-+struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
-+#endif
-+
-+char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
-+
-+unsigned long __supported_pte_mask __read_mostly = ~0UL;
-+EXPORT_SYMBOL(__supported_pte_mask);
-+static int do_not_nx __cpuinitdata = 0;
-+
-+/* noexec=on|off
-+Control non executable mappings for 64bit processes.
-+
-+on Enable(default)
-+off Disable
-+*/
-+static int __init nonx_setup(char *str)
-+{
-+ if (!str)
-+ return -EINVAL;
-+ if (!strncmp(str, "on", 2)) {
-+ __supported_pte_mask |= _PAGE_NX;
-+ do_not_nx = 0;
-+ } else if (!strncmp(str, "off", 3)) {
-+ do_not_nx = 1;
-+ __supported_pte_mask &= ~_PAGE_NX;
-+ }
-+ return 0;
-+}
-+early_param("noexec", nonx_setup);
-+
-+int force_personality32 = 0;
-+
-+/* noexec32=on|off
-+Control non executable heap for 32bit processes.
-+To control the stack too use noexec=off
-+
-+on PROT_READ does not imply PROT_EXEC for 32bit processes
-+off PROT_READ implies PROT_EXEC (default)
-+*/
-+static int __init nonx32_setup(char *str)
-+{
-+ if (!strcmp(str, "on"))
-+ force_personality32 &= ~READ_IMPLIES_EXEC;
-+ else if (!strcmp(str, "off"))
-+ force_personality32 |= READ_IMPLIES_EXEC;
-+ return 1;
-+}
-+__setup("noexec32=", nonx32_setup);
-+
-+/*
-+ * Great future plan:
-+ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
-+ * Always point %gs to its beginning
-+ */
-+void __init setup_per_cpu_areas(void)
-+{
-+ int i;
-+ unsigned long size;
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+ prefill_possible_map();
-+#endif
-+
-+ /* Copy section for each CPU (we discard the original) */
-+ size = PERCPU_ENOUGH_ROOM;
-+
-+ printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
-+ for_each_cpu_mask (i, cpu_possible_map) {
-+ char *ptr;
-+
-+ if (!NODE_DATA(cpu_to_node(i))) {
-+ printk("cpu with no node %d, num_online_nodes %d\n",
-+ i, num_online_nodes());
-+ ptr = alloc_bootmem_pages(size);
-+ } else {
-+ ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size);
-+ }
-+ if (!ptr)
-+ panic("Cannot allocate cpu data for CPU %d\n", i);
-+ cpu_pda(i)->data_offset = ptr - __per_cpu_start;
-+ memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-+ }
-+}
-+
-+#ifdef CONFIG_XEN
-+static void switch_pt(void)
-+{
-+ xen_pt_switch(__pa_symbol(init_level4_pgt));
-+ xen_new_user_pt(__pa_symbol(__user_pgd(init_level4_pgt)));
-+}
-+
-+void __cpuinit cpu_gdt_init(struct desc_ptr *gdt_descr)
-+{
-+ unsigned long frames[16];
-+ unsigned long va;
-+ int f;
-+
-+ for (va = gdt_descr->address, f = 0;
-+ va < gdt_descr->address + gdt_descr->size;
-+ va += PAGE_SIZE, f++) {
-+ frames[f] = virt_to_mfn(va);
-+ make_page_readonly(
-+ (void *)va, XENFEAT_writable_descriptor_tables);
-+ }
-+ if (HYPERVISOR_set_gdt(frames, gdt_descr->size /
-+ sizeof (struct desc_struct)))
-+ BUG();
-+}
-+#else
-+static void switch_pt(void)
-+{
-+ asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
-+}
-+
-+void __init cpu_gdt_init(struct desc_ptr *gdt_descr)
-+{
-+ asm volatile("lgdt %0" :: "m" (*gdt_descr));
-+ asm volatile("lidt %0" :: "m" (idt_descr));
-+}
-+#endif
-+
-+void pda_init(int cpu)
-+{
-+ struct x8664_pda *pda = cpu_pda(cpu);
-+
-+ /* Setup up data that may be needed in __get_free_pages early */
-+ asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
-+#ifndef CONFIG_XEN
-+ /* Memory clobbers used to order PDA accessed */
-+ mb();
-+ wrmsrl(MSR_GS_BASE, pda);
-+ mb();
-+#else
-+ HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL, (unsigned long)pda);
-+#endif
-+ pda->cpunumber = cpu;
-+ pda->irqcount = -1;
-+ pda->kernelstack =
-+ (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
-+ pda->active_mm = &init_mm;
-+ pda->mmu_state = 0;
-+
-+ if (cpu == 0) {
-+#ifdef CONFIG_XEN
-+ xen_init_pt();
-+#endif
-+ /* others are initialized in smpboot.c */
-+ pda->pcurrent = &init_task;
-+ pda->irqstackptr = boot_cpu_stack;
-+ } else {
-+ pda->irqstackptr = (char *)
-+ __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-+ if (!pda->irqstackptr)
-+ panic("cannot allocate irqstack for cpu %d", cpu);
-+ }
-+
-+ switch_pt();
-+
-+ pda->irqstackptr += IRQSTACKSIZE-64;
-+}
-+
-+#ifndef CONFIG_X86_NO_TSS
-+char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
-+__attribute__((section(".bss.page_aligned")));
-+#endif
-+
-+extern asmlinkage void ignore_sysret(void);
-+
-+/* May not be marked __init: used by software suspend */
-+void syscall_init(void)
-+{
-+#ifndef CONFIG_XEN
-+ /*
-+ * LSTAR and STAR live in a bit strange symbiosis.
-+ * They both write to the same internal register. STAR allows to set CS/DS
-+ * but only a 32bit target. LSTAR sets the 64bit rip.
-+ */
-+ wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
-+ wrmsrl(MSR_LSTAR, system_call);
-+ wrmsrl(MSR_CSTAR, ignore_sysret);
-+
-+ /* Flags to clear on syscall */
-+ wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
-+#endif
-+#ifdef CONFIG_IA32_EMULATION
-+ syscall32_cpu_init ();
-+#endif
-+}
-+
-+void __cpuinit check_efer(void)
-+{
-+ unsigned long efer;
-+
-+ rdmsrl(MSR_EFER, efer);
-+ if (!(efer & EFER_NX) || do_not_nx) {
-+ __supported_pte_mask &= ~_PAGE_NX;
-+ }
-+}
-+
-+unsigned long kernel_eflags;
-+
-+/*
-+ * cpu_init() initializes state that is per-CPU. Some data is already
-+ * initialized (naturally) in the bootstrap process, such as the GDT
-+ * and IDT. We reload them nevertheless, this function acts as a
-+ * 'CPU state barrier', nothing should get across.
-+ * A lot of state is already set up in PDA init.
-+ */
-+void __cpuinit cpu_init (void)
-+{
-+ int cpu = stack_smp_processor_id();
-+#ifndef CONFIG_X86_NO_TSS
-+ struct tss_struct *t = &per_cpu(init_tss, cpu);
-+ struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
-+ unsigned long v;
-+ char *estacks = NULL;
-+ unsigned i;
-+#endif
-+ struct task_struct *me;
-+
-+ /* CPU 0 is initialised in head64.c */
-+ if (cpu != 0) {
-+ pda_init(cpu);
-+ }
-+#ifndef CONFIG_X86_NO_TSS
-+ else
-+ estacks = boot_exception_stacks;
-+#endif
-+
-+ me = current;
-+
-+ if (cpu_test_and_set(cpu, cpu_initialized))
-+ panic("CPU#%d already initialized!\n", cpu);
-+
-+ printk("Initializing CPU#%d\n", cpu);
-+
-+ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-+
-+ /*
-+ * Initialize the per-CPU GDT with the boot GDT,
-+ * and set up the GDT descriptor:
-+ */
-+#ifndef CONFIG_XEN
-+ if (cpu)
-+ memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
-+#endif
-+
-+ cpu_gdt_descr[cpu].size = GDT_SIZE;
-+ cpu_gdt_init(&cpu_gdt_descr[cpu]);
-+
-+ memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
-+ syscall_init();
-+
-+ wrmsrl(MSR_FS_BASE, 0);
-+ wrmsrl(MSR_KERNEL_GS_BASE, 0);
-+ barrier();
-+
-+ check_efer();
-+
-+#ifndef CONFIG_X86_NO_TSS
-+ /*
-+ * set up and load the per-CPU TSS
-+ */
-+ for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-+ static const unsigned int order[N_EXCEPTION_STACKS] = {
-+ [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
-+ [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
-+ };
-+ if (cpu) {
-+ estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-+ if (!estacks)
-+ panic("Cannot allocate exception stack %ld %d\n",
-+ v, cpu);
-+ }
-+ estacks += PAGE_SIZE << order[v];
-+ orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
-+ }
-+
-+ t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
-+ /*
-+ * <= is required because the CPU will access up to
-+ * 8 bits beyond the end of the IO permission bitmap.
-+ */
-+ for (i = 0; i <= IO_BITMAP_LONGS; i++)
-+ t->io_bitmap[i] = ~0UL;
-+#endif
-+
-+ atomic_inc(&init_mm.mm_count);
-+ me->active_mm = &init_mm;
-+ if (me->mm)
-+ BUG();
-+ enter_lazy_tlb(&init_mm, me);
-+
-+#ifndef CONFIG_X86_NO_TSS
-+ set_tss_desc(cpu, t);
-+#endif
-+#ifndef CONFIG_XEN
-+ load_TR_desc();
-+#endif
-+ load_LDT(&init_mm.context);
-+
-+ /*
-+ * Clear all 6 debug registers:
-+ */
-+
-+ set_debugreg(0UL, 0);
-+ set_debugreg(0UL, 1);
-+ set_debugreg(0UL, 2);
-+ set_debugreg(0UL, 3);
-+ set_debugreg(0UL, 6);
-+ set_debugreg(0UL, 7);
-+
-+ fpu_init();
-+
-+ raw_local_save_flags(kernel_eflags);
-+}
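
setup_per_cpu_areas() above gives every possible CPU its own copy of the per-CPU section and records the distance from the original section as data_offset; per-CPU accesses then just add that CPU's offset to the variable's link-time address. A stand-alone sketch of the offset scheme (hypothetical names, ordinary malloc instead of the bootmem allocator; the cross-allocation pointer subtraction is tolerated here purely for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 4

/* Stand-in for the per-CPU "section": a template every CPU gets a copy of. */
static struct {
	long counter;
	char name[16];
} percpu_template = { 0, "template" };

static long cpu_data_offset[NR_CPUS];

/* per_cpu access: original address plus that CPU's recorded offset. */
#define per_cpu_ptr(ptr, cpu) \
	((void *)((char *)(ptr) + cpu_data_offset[(cpu)]))

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		char *copy = malloc(sizeof(percpu_template));

		memcpy(copy, &percpu_template, sizeof(percpu_template));
		cpu_data_offset[cpu] = copy - (char *)&percpu_template;
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		long *ctr = per_cpu_ptr(&percpu_template.counter, cpu);
		*ctr = cpu * 10;	/* each CPU touches only its own copy */
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d counter = %ld\n", cpu,
		       *(long *)per_cpu_ptr(&percpu_template.counter, cpu));
	return 0;
}
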
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/setup-xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/setup-xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/setup-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/setup-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,1346 @@
-+/*
-+ * linux/arch/x86-64/kernel/setup.c
-+ *
-+ * Copyright (C) 1995 Linus Torvalds
-+ *
-+ * Nov 2001 Dave Jones <davej@suse.de>
-+ * Forked from i386 setup code.
-+ */
-+
-+/*
-+ * This file handles the architecture-dependent parts of initialization
-+ */
-+
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/stddef.h>
-+#include <linux/unistd.h>
-+#include <linux/ptrace.h>
-+#include <linux/slab.h>
-+#include <linux/user.h>
-+#include <linux/a.out.h>
-+#include <linux/screen_info.h>
-+#include <linux/ioport.h>
-+#include <linux/delay.h>
-+#include <linux/init.h>
-+#include <linux/initrd.h>
-+#include <linux/highmem.h>
-+#include <linux/bootmem.h>
-+#include <linux/module.h>
-+#include <asm/processor.h>
-+#include <linux/console.h>
-+#include <linux/seq_file.h>
-+#include <linux/crash_dump.h>
-+#include <linux/root_dev.h>
-+#include <linux/pci.h>
-+#include <linux/acpi.h>
-+#include <linux/kallsyms.h>
-+#include <linux/edd.h>
-+#include <linux/mmzone.h>
-+#include <linux/kexec.h>
-+#include <linux/cpufreq.h>
-+#include <linux/dmi.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/ctype.h>
-+
-+#include <asm/mtrr.h>
-+#include <asm/uaccess.h>
-+#include <asm/system.h>
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/msr.h>
-+#include <asm/desc.h>
-+#include <video/edid.h>
-+#include <asm/e820.h>
-+#include <asm/dma.h>
-+#include <asm/mpspec.h>
-+#include <asm/mmu_context.h>
-+#include <asm/bootsetup.h>
-+#include <asm/proto.h>
-+#include <asm/setup.h>
-+#include <asm/mach_apic.h>
-+#include <asm/numa.h>
-+#include <asm/sections.h>
-+#include <asm/dmi.h>
-+#ifdef CONFIG_XEN
-+#include <linux/percpu.h>
-+#include <xen/interface/physdev.h>
-+#include "setup_arch_pre.h"
-+#include <asm/hypervisor.h>
-+#include <xen/interface/nmi.h>
-+#include <xen/features.h>
-+#include <xen/firmware.h>
-+#include <xen/xencons.h>
-+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
-+#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
-+#include <asm/mach-xen/setup_arch_post.h>
-+#include <xen/interface/memory.h>
-+
-+#ifdef CONFIG_XEN
-+#include <xen/interface/kexec.h>
-+#endif
-+
-+extern unsigned long start_pfn;
-+extern struct edid_info edid_info;
-+
-+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-+EXPORT_SYMBOL(HYPERVISOR_shared_info);
-+
-+extern char hypercall_page[PAGE_SIZE];
-+EXPORT_SYMBOL(hypercall_page);
-+
-+static int xen_panic_event(struct notifier_block *, unsigned long, void *);
-+static struct notifier_block xen_panic_block = {
-+ xen_panic_event, NULL, 0 /* try to go last */
-+};
-+
-+unsigned long *phys_to_machine_mapping;
-+unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
-+
-+EXPORT_SYMBOL(phys_to_machine_mapping);
-+
-+DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
-+DEFINE_PER_CPU(int, nr_multicall_ents);
-+
-+/* Raw start-of-day parameters from the hypervisor. */
-+start_info_t *xen_start_info;
-+EXPORT_SYMBOL(xen_start_info);
-+#endif
-+
-+/*
-+ * Machine setup..
-+ */
-+
-+struct cpuinfo_x86 boot_cpu_data __read_mostly;
-+EXPORT_SYMBOL(boot_cpu_data);
-+
-+unsigned long mmu_cr4_features;
-+
-+/* Boot loader ID as an integer, for the benefit of proc_dointvec */
-+int bootloader_type;
-+
-+unsigned long saved_video_mode;
-+
-+int force_mwait __cpuinitdata;
-+
-+/*
-+ * Early DMI memory
-+ */
-+int dmi_alloc_index;
-+char dmi_alloc_data[DMI_MAX_DATA];
-+
-+/*
-+ * Setup options
-+ */
-+struct screen_info screen_info;
-+EXPORT_SYMBOL(screen_info);
-+struct sys_desc_table_struct {
-+ unsigned short length;
-+ unsigned char table[0];
-+};
-+
-+struct edid_info edid_info;
-+EXPORT_SYMBOL_GPL(edid_info);
-+
-+extern int root_mountflags;
-+
-+char __initdata command_line[COMMAND_LINE_SIZE];
-+
-+struct resource standard_io_resources[] = {
-+ { .name = "dma1", .start = 0x00, .end = 0x1f,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+ { .name = "pic1", .start = 0x20, .end = 0x21,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+ { .name = "timer0", .start = 0x40, .end = 0x43,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+ { .name = "timer1", .start = 0x50, .end = 0x53,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+ { .name = "keyboard", .start = 0x60, .end = 0x6f,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+ { .name = "dma page reg", .start = 0x80, .end = 0x8f,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+ { .name = "pic2", .start = 0xa0, .end = 0xa1,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+ { .name = "dma2", .start = 0xc0, .end = 0xdf,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+ { .name = "fpu", .start = 0xf0, .end = 0xff,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO }
-+};
-+
-+#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
-+
-+struct resource data_resource = {
-+ .name = "Kernel data",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_RAM,
-+};
-+struct resource code_resource = {
-+ .name = "Kernel code",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_RAM,
-+};
-+
-+#ifdef CONFIG_PROC_VMCORE
-+/* elfcorehdr= specifies the location of elf core header
-+ * stored by the crashed kernel. This option will be passed
-+ * by kexec loader to the capture kernel.
-+ */
-+static int __init setup_elfcorehdr(char *arg)
-+{
-+ char *end;
-+ if (!arg)
-+ return -EINVAL;
-+ elfcorehdr_addr = memparse(arg, &end);
-+ return end > arg ? 0 : -EINVAL;
-+}
-+early_param("elfcorehdr", setup_elfcorehdr);
-+#endif
-+
-+#ifndef CONFIG_NUMA
-+static void __init
-+contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
-+{
-+ unsigned long bootmap_size, bootmap;
-+
-+ bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
-+ bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
-+ if (bootmap == -1L)
-+ panic("Cannot find bootmem map of size %ld\n",bootmap_size);
-+ bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
-+ e820_register_active_regions(0, start_pfn, end_pfn);
-+#ifdef CONFIG_XEN
-+ free_bootmem_with_active_regions(0, xen_start_info->nr_pages);
-+#else
-+ free_bootmem_with_active_regions(0, end_pfn);
-+#endif
-+ reserve_bootmem(bootmap, bootmap_size);
-+}
-+#endif
-+
-+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
-+struct edd edd;
-+#ifdef CONFIG_EDD_MODULE
-+EXPORT_SYMBOL(edd);
-+#endif
-+#ifndef CONFIG_XEN
-+/**
-+ * copy_edd() - Copy the BIOS EDD information
-+ * from boot_params into a safe place.
-+ *
-+ */
-+static inline void copy_edd(void)
-+{
-+ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
-+ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
-+ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
-+ edd.edd_info_nr = EDD_NR;
-+}
-+#endif
-+#else
-+static inline void copy_edd(void)
-+{
-+}
-+#endif
-+
-+#ifndef CONFIG_XEN
-+#define EBDA_ADDR_POINTER 0x40E
-+
-+unsigned __initdata ebda_addr;
-+unsigned __initdata ebda_size;
-+
-+static void discover_ebda(void)
-+{
-+ /*
-+ * there is a real-mode segmented pointer pointing to the
-+ * 4K EBDA area at 0x40E
-+ */
-+ ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
-+ ebda_addr <<= 4;
-+
-+ ebda_size = *(unsigned short *)__va(ebda_addr);
-+
-+ /* Round EBDA up to pages */
-+ if (ebda_size == 0)
-+ ebda_size = 1;
-+ ebda_size <<= 10;
-+ ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
-+ if (ebda_size > 64*1024)
-+ ebda_size = 64*1024;
-+}
-+#else
-+#define discover_ebda() ((void)0)
-+#endif
-+
-+void __init setup_arch(char **cmdline_p)
-+{
-+#ifdef CONFIG_XEN
-+ extern struct e820map machine_e820;
-+
-+ printk(KERN_INFO "Command line: %s\n", boot_command_line);
-+
-+ ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
-+ screen_info = SCREEN_INFO;
-+
-+ if (is_initial_xendomain()) {
-+ /* This is drawn from a dump from vgacon:startup in
-+ * standard Linux. */
-+ screen_info.orig_video_mode = 3;
-+ screen_info.orig_video_isVGA = 1;
-+ screen_info.orig_video_lines = 25;
-+ screen_info.orig_video_cols = 80;
-+ screen_info.orig_video_ega_bx = 3;
-+ screen_info.orig_video_points = 16;
-+ screen_info.orig_y = screen_info.orig_video_lines - 1;
-+ if (xen_start_info->console.dom0.info_size >=
-+ sizeof(struct dom0_vga_console_info)) {
-+ const struct dom0_vga_console_info *info =
-+ (struct dom0_vga_console_info *)(
-+ (char *)xen_start_info +
-+ xen_start_info->console.dom0.info_off);
-+ dom0_init_screen_info(info);
-+ }
-+ xen_start_info->console.domU.mfn = 0;
-+ xen_start_info->console.domU.evtchn = 0;
-+ } else {
-+ screen_info.orig_video_isVGA = 0;
-+
-+ /* Register a call for panic conditions. */
-+ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
-+ }
-+
-+ copy_edid();
-+ saved_video_mode = SAVED_VIDEO_MODE;
-+ bootloader_type = LOADER_TYPE;
-+
-+#ifdef CONFIG_BLK_DEV_RAM
-+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
-+ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
-+ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
-+
-+
-+#endif
-+
-+ HYPERVISOR_vm_assist(VMASST_CMD_enable,
-+ VMASST_TYPE_writable_pagetables);
-+
-+ ARCH_SETUP
-+#else
-+ printk(KERN_INFO "Command line: %s\n", boot_command_line);
-+
-+ ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
-+ screen_info = SCREEN_INFO;
-+ edid_info = EDID_INFO;
-+ saved_video_mode = SAVED_VIDEO_MODE;
-+ bootloader_type = LOADER_TYPE;
-+
-+#ifdef CONFIG_BLK_DEV_RAM
-+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
-+ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
-+ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
-+#endif
-+#endif /* !CONFIG_XEN */
-+ setup_memory_region();
-+ copy_edd();
-+
-+ if (!MOUNT_ROOT_RDONLY)
-+ root_mountflags &= ~MS_RDONLY;
-+ init_mm.start_code = (unsigned long) &_text;
-+ init_mm.end_code = (unsigned long) &_etext;
-+ init_mm.end_data = (unsigned long) &_edata;
-+ init_mm.brk = (unsigned long) &_end;
-+
-+ code_resource.start = virt_to_phys(&_text);
-+ code_resource.end = virt_to_phys(&_etext)-1;
-+ data_resource.start = virt_to_phys(&_etext);
-+ data_resource.end = virt_to_phys(&_edata)-1;
-+
-+ early_identify_cpu(&boot_cpu_data);
-+
-+ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
-+ *cmdline_p = command_line;
-+
-+ parse_early_param();
-+
-+ finish_e820_parsing();
-+
-+ e820_register_active_regions(0, 0, -1UL);
-+ /*
-+ * partially used pages are not usable - thus
-+ * we are rounding upwards:
-+ */
-+ end_pfn = e820_end_of_ram();
-+ num_physpages = end_pfn;
-+
-+ check_efer();
-+
-+ discover_ebda();
-+
-+ init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
-+
-+ if (is_initial_xendomain())
-+ dmi_scan_machine();
-+
-+ /* How many end-of-memory variables you have, grandma! */
-+ max_low_pfn = end_pfn;
-+ max_pfn = end_pfn;
-+ high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;
-+
-+ /* Remove active ranges so rediscovery with NUMA-awareness happens */
-+ remove_all_active_ranges();
-+
-+#ifdef CONFIG_ACPI_NUMA
-+ /*
-+ * Parse SRAT to discover nodes.
-+ */
-+ acpi_numa_init();
-+#endif
-+
-+#ifdef CONFIG_NUMA
-+ numa_initmem_init(0, end_pfn);
-+#else
-+ contig_initmem_init(0, end_pfn);
-+#endif
-+
-+#ifdef CONFIG_XEN
-+ /*
-+ * Reserve kernel, physmap, start info, initial page tables, and
-+ * direct mapping.
-+ */
-+ reserve_bootmem_generic(__pa_symbol(&_text),
-+ (table_end << PAGE_SHIFT) - __pa_symbol(&_text));
-+#else
-+ /* Reserve direct mapping */
-+ reserve_bootmem_generic(table_start << PAGE_SHIFT,
-+ (table_end - table_start) << PAGE_SHIFT);
-+
-+ /* reserve kernel */
-+ reserve_bootmem_generic(__pa_symbol(&_text),
-+ __pa_symbol(&_end) - __pa_symbol(&_text));
-+
-+ /*
-+ * reserve physical page 0 - it's a special BIOS page on many boxes,
-+ * enabling clean reboots, SMP operation, laptop functions.
-+ */
-+ reserve_bootmem_generic(0, PAGE_SIZE);
-+
-+ /* reserve ebda region */
-+ if (ebda_addr)
-+ reserve_bootmem_generic(ebda_addr, ebda_size);
-+#ifdef CONFIG_NUMA
-+ /* reserve nodemap region */
-+ if (nodemap_addr)
-+ reserve_bootmem_generic(nodemap_addr, nodemap_size);
-+#endif
-+
-+#ifdef CONFIG_SMP
-+ /* Reserve SMP trampoline */
-+ reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
-+#endif
-+#endif
-+
-+#ifdef CONFIG_ACPI_SLEEP
-+ /*
-+ * Reserve low memory region for sleep support.
-+ */
-+ acpi_reserve_bootmem();
-+#endif
-+#ifdef CONFIG_BLK_DEV_INITRD
-+#ifndef CONFIG_XEN
-+ if (LOADER_TYPE && INITRD_START) {
-+#else
-+ if (xen_start_info->mod_start) {
-+#endif
-+ if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
-+#ifndef CONFIG_XEN
-+ reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
-+#else
-+ initrd_below_start_ok = 1;
-+#endif
-+ initrd_start = INITRD_START + PAGE_OFFSET;
-+ initrd_end = initrd_start+INITRD_SIZE;
-+ }
-+ else {
-+ printk(KERN_ERR "initrd extends beyond end of memory "
-+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-+ (unsigned long)(INITRD_START + INITRD_SIZE),
-+ (unsigned long)(end_pfn << PAGE_SHIFT));
-+ initrd_start = 0;
-+ }
-+ }
-+#endif
-+#ifdef CONFIG_KEXEC
-+#ifdef CONFIG_XEN
-+ xen_machine_kexec_setup_resources();
-+#else
-+ if (crashk_res.start != crashk_res.end) {
-+ reserve_bootmem_generic(crashk_res.start,
-+ crashk_res.end - crashk_res.start + 1);
-+ }
-+#endif
-+#endif
-+
-+ paging_init();
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ /*
-+ * Find and reserve possible boot-time SMP configuration:
-+ */
-+ find_smp_config();
-+#endif
-+#ifdef CONFIG_XEN
-+ {
-+ int i, j, k, fpp;
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ /* Make sure we have a large enough P->M table. */
-+ phys_to_machine_mapping = alloc_bootmem_pages(
-+ end_pfn * sizeof(unsigned long));
-+ memset(phys_to_machine_mapping, ~0,
-+ end_pfn * sizeof(unsigned long));
-+ memcpy(phys_to_machine_mapping,
-+ (unsigned long *)xen_start_info->mfn_list,
-+ xen_start_info->nr_pages * sizeof(unsigned long));
-+ free_bootmem(
-+ __pa(xen_start_info->mfn_list),
-+ PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
-+ sizeof(unsigned long))));
-+
-+ /*
-+ * Initialise the list of the frames that specify the
-+ * list of frames that make up the p2m table. Used by
-+ * save/restore.
-+ */
-+ pfn_to_mfn_frame_list_list = alloc_bootmem_pages(PAGE_SIZE);
-+
-+ fpp = PAGE_SIZE/sizeof(unsigned long);
-+ for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {
-+ if ((j % fpp) == 0) {
-+ k++;
-+ BUG_ON(k>=fpp);
-+ pfn_to_mfn_frame_list[k] =
-+ alloc_bootmem_pages(PAGE_SIZE);
-+ pfn_to_mfn_frame_list_list[k] =
-+ virt_to_mfn(pfn_to_mfn_frame_list[k]);
-+ j=0;
-+ }
-+ pfn_to_mfn_frame_list[k][j] =
-+ virt_to_mfn(&phys_to_machine_mapping[i]);
-+ }
-+ HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
-+ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-+ virt_to_mfn(pfn_to_mfn_frame_list_list);
-+ }
-+
-+ }
-+
-+#ifdef CONFIG_ACPI
-+ if (!is_initial_xendomain()) {
-+ acpi_disabled = 1;
-+ acpi_ht = 0;
-+ }
-+#endif
-+#endif
-+
-+#if defined(CONFIG_PCI) && !defined(CONFIG_XEN)
-+ early_quirks();
-+#endif
-+
-+ /*
-+	 * set this early, so we don't allocate cpu0
-+	 * if MADT list doesn't list BSP first
-+ * mpparse.c/MP_processor_info() allocates logical cpu numbers.
-+ */
-+ cpu_set(0, cpu_present_map);
-+#ifdef CONFIG_ACPI
-+ /*
-+ * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
-+ * Call this early for SRAT node setup.
-+ */
-+ acpi_boot_table_init();
-+
-+ /*
-+ * Read APIC and some other early information from ACPI tables.
-+ */
-+ acpi_boot_init();
-+#endif
-+
-+ init_cpu_to_node();
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ /*
-+ * get boot-time SMP configuration:
-+ */
-+ if (smp_found_config)
-+ get_smp_config();
-+#ifndef CONFIG_XEN
-+ init_apic_mappings();
-+#endif
-+#endif
-+#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
-+ prefill_possible_map();
-+#endif
-+
-+ /*
-+ * We trust e820 completely. No explicit ROM probing in memory.
-+ */
-+#ifdef CONFIG_XEN
-+ if (is_initial_xendomain()) {
-+ struct xen_memory_map memmap;
-+
-+ memmap.nr_entries = E820MAX;
-+ set_xen_guest_handle(memmap.buffer, machine_e820.map);
-+
-+ if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
-+ BUG();
-+ machine_e820.nr_map = memmap.nr_entries;
-+
-+ e820_reserve_resources(machine_e820.map, machine_e820.nr_map);
-+ }
-+#else
-+ e820_reserve_resources(e820.map, e820.nr_map);
-+ e820_mark_nosave_regions();
-+#endif
-+
-+ {
-+ unsigned i;
-+ /* request I/O space for devices used on all i[345]86 PCs */
-+ for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
-+ request_resource(&ioport_resource, &standard_io_resources[i]);
-+ }
-+
-+#ifdef CONFIG_XEN
-+ if (is_initial_xendomain())
-+ e820_setup_gap(machine_e820.map, machine_e820.nr_map);
-+#else
-+ e820_setup_gap(e820.map, e820.nr_map);
-+#endif
-+
-+#ifdef CONFIG_XEN
-+ {
-+ struct physdev_set_iopl set_iopl;
-+
-+ set_iopl.iopl = 1;
-+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
-+
-+ if (is_initial_xendomain()) {
-+#ifdef CONFIG_VT
-+#if defined(CONFIG_VGA_CONSOLE)
-+ conswitchp = &vga_con;
-+#elif defined(CONFIG_DUMMY_CONSOLE)
-+ conswitchp = &dummy_con;
-+#endif
-+#endif
-+ } else {
-+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
-+ conswitchp = &dummy_con;
-+#endif
-+ }
-+ }
-+ xencons_early_setup();
-+#else /* CONFIG_XEN */
-+
-+#ifdef CONFIG_VT
-+#if defined(CONFIG_VGA_CONSOLE)
-+ conswitchp = &vga_con;
-+#elif defined(CONFIG_DUMMY_CONSOLE)
-+ conswitchp = &dummy_con;
-+#endif
-+#endif
-+
-+#endif /* !CONFIG_XEN */
-+}
-+
-+#ifdef CONFIG_XEN
-+static int
-+xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
-+{
-+ HYPERVISOR_shutdown(SHUTDOWN_crash);
-+ /* we're never actually going to get here... */
-+ return NOTIFY_DONE;
-+}
-+#endif /* CONFIG_XEN */
-+
-+
-+static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
-+{
-+ unsigned int *v;
-+
-+ if (c->extended_cpuid_level < 0x80000004)
-+ return 0;
-+
-+ v = (unsigned int *) c->x86_model_id;
-+ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
-+ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
-+ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
-+ c->x86_model_id[48] = 0;
-+ return 1;
-+}
-+
-+
-+static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
-+{
-+ unsigned int n, dummy, eax, ebx, ecx, edx;
-+
-+ n = c->extended_cpuid_level;
-+
-+ if (n >= 0x80000005) {
-+ cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
-+ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-+ c->x86_cache_size=(ecx>>24)+(edx>>24);
-+ /* On K8 L1 TLB is inclusive, so don't count it */
-+ c->x86_tlbsize = 0;
-+ }
-+
-+ if (n >= 0x80000006) {
-+ cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
-+ ecx = cpuid_ecx(0x80000006);
-+ c->x86_cache_size = ecx >> 16;
-+ c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
-+
-+ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-+ c->x86_cache_size, ecx & 0xFF);
-+ }
-+
-+ if (n >= 0x80000007)
-+ cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
-+ if (n >= 0x80000008) {
-+ cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
-+ c->x86_virt_bits = (eax >> 8) & 0xff;
-+ c->x86_phys_bits = eax & 0xff;
-+ }
-+}
-+
-+#ifdef CONFIG_NUMA
-+static int nearby_node(int apicid)
-+{
-+ int i;
-+ for (i = apicid - 1; i >= 0; i--) {
-+ int node = apicid_to_node[i];
-+ if (node != NUMA_NO_NODE && node_online(node))
-+ return node;
-+ }
-+ for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
-+ int node = apicid_to_node[i];
-+ if (node != NUMA_NO_NODE && node_online(node))
-+ return node;
-+ }
-+ return first_node(node_online_map); /* Shouldn't happen */
-+}
-+#endif
-+
-+/*
-+ * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
-+ * Assumes number of cores is a power of two.
-+ */
-+static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
-+{
-+#ifdef CONFIG_SMP
-+ unsigned bits;
-+#ifdef CONFIG_NUMA
-+ int cpu = smp_processor_id();
-+ int node = 0;
-+ unsigned apicid = hard_smp_processor_id();
-+#endif
-+ unsigned ecx = cpuid_ecx(0x80000008);
-+
-+ c->x86_max_cores = (ecx & 0xff) + 1;
-+
-+ /* CPU telling us the core id bits shift? */
-+ bits = (ecx >> 12) & 0xF;
-+
-+ /* Otherwise recompute */
-+ if (bits == 0) {
-+ while ((1 << bits) < c->x86_max_cores)
-+ bits++;
-+ }
-+
-+ /* Low order bits define the core id (index of core in socket) */
-+ c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
-+ /* Convert the APIC ID into the socket ID */
-+ c->phys_proc_id = phys_pkg_id(bits);
-+
-+#ifdef CONFIG_NUMA
-+ node = c->phys_proc_id;
-+ if (apicid_to_node[apicid] != NUMA_NO_NODE)
-+ node = apicid_to_node[apicid];
-+ if (!node_online(node)) {
-+ /* Two possibilities here:
-+ - The CPU is missing memory and no node was created.
-+ In that case try picking one from a nearby CPU
-+ - The APIC IDs differ from the HyperTransport node IDs
-+ which the K8 northbridge parsing fills in.
-+ Assume they are all increased by a constant offset,
-+ but in the same order as the HT nodeids.
-+ If that doesn't result in a usable node fall back to the
-+ path for the previous case. */
-+ int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
-+ if (ht_nodeid >= 0 &&
-+ apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
-+ node = apicid_to_node[ht_nodeid];
-+ /* Pick a nearby node */
-+ if (!node_online(node))
-+ node = nearby_node(apicid);
-+ }
-+ numa_set_node(cpu, node);
-+
-+ printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
-+#endif
-+#endif
-+}
-+
-+static void __cpuinit init_amd(struct cpuinfo_x86 *c)
-+{
-+ unsigned level;
-+
-+#ifdef CONFIG_SMP
-+ unsigned long value;
-+
-+ /*
-+ * Disable TLB flush filter by setting HWCR.FFDIS on K8
-+ * bit 6 of msr C001_0015
-+ *
-+ * Errata 63 for SH-B3 steppings
-+ * Errata 122 for all steppings (F+ have it disabled by default)
-+ */
-+ if (c->x86 == 15) {
-+ rdmsrl(MSR_K8_HWCR, value);
-+ value |= 1 << 6;
-+ wrmsrl(MSR_K8_HWCR, value);
-+ }
-+#endif
-+
-+ /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-+ 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-+ clear_bit(0*32+31, &c->x86_capability);
-+
-+ /* On C+ stepping K8 rep microcode works well for copy/memset */
-+ level = cpuid_eax(1);
-+ if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
-+ set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
-+
-+ /* Enable workaround for FXSAVE leak */
-+ if (c->x86 >= 6)
-+ set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
-+
-+ level = get_model_name(c);
-+ if (!level) {
-+ switch (c->x86) {
-+ case 15:
-+ /* Should distinguish Models here, but this is only
-+ a fallback anyways. */
-+ strcpy(c->x86_model_id, "Hammer");
-+ break;
-+ }
-+ }
-+ display_cacheinfo(c);
-+
-+ /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
-+ if (c->x86_power & (1<<8))
-+ set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
-+
-+ /* Multi core CPU? */
-+ if (c->extended_cpuid_level >= 0x80000008)
-+ amd_detect_cmp(c);
-+
-+ /* Fix cpuid4 emulation for more */
-+ num_cache_leaves = 3;
-+
-+ /* RDTSC can be speculated around */
-+ clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
-+
-+ /* Family 10 doesn't support C states in MWAIT so don't use it */
-+ if (c->x86 == 0x10 && !force_mwait)
-+ clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);
-+}
-+
-+static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
-+{
-+#ifdef CONFIG_SMP
-+ u32 eax, ebx, ecx, edx;
-+ int index_msb, core_bits;
-+
-+ cpuid(1, &eax, &ebx, &ecx, &edx);
-+
-+
-+ if (!cpu_has(c, X86_FEATURE_HT))
-+ return;
-+ if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
-+ goto out;
-+
-+ smp_num_siblings = (ebx & 0xff0000) >> 16;
-+
-+ if (smp_num_siblings == 1) {
-+ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-+ } else if (smp_num_siblings > 1 ) {
-+
-+ if (smp_num_siblings > NR_CPUS) {
-+			printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
-+ smp_num_siblings = 1;
-+ return;
-+ }
-+
-+ index_msb = get_count_order(smp_num_siblings);
-+ c->phys_proc_id = phys_pkg_id(index_msb);
-+
-+ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-+
-+		index_msb = get_count_order(smp_num_siblings);
-+
-+ core_bits = get_count_order(c->x86_max_cores);
-+
-+ c->cpu_core_id = phys_pkg_id(index_msb) &
-+ ((1 << core_bits) - 1);
-+ }
-+out:
-+ if ((c->x86_max_cores * smp_num_siblings) > 1) {
-+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
-+ printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
-+ }
-+
-+#endif
-+}
-+
-+/*
-+ * find out the number of processor cores on the die
-+ */
-+static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
-+{
-+ unsigned int eax, t;
-+
-+ if (c->cpuid_level < 4)
-+ return 1;
-+
-+ cpuid_count(4, 0, &eax, &t, &t, &t);
-+
-+ if (eax & 0x1f)
-+ return ((eax >> 26) + 1);
-+ else
-+ return 1;
-+}
-+
-+static void srat_detect_node(void)
-+{
-+#ifdef CONFIG_NUMA
-+ unsigned node;
-+ int cpu = smp_processor_id();
-+ int apicid = hard_smp_processor_id();
-+
-+ /* Don't do the funky fallback heuristics the AMD version employs
-+ for now. */
-+ node = apicid_to_node[apicid];
-+ if (node == NUMA_NO_NODE)
-+ node = first_node(node_online_map);
-+ numa_set_node(cpu, node);
-+
-+ printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
-+#endif
-+}
-+
-+static void __cpuinit init_intel(struct cpuinfo_x86 *c)
-+{
-+ /* Cache sizes */
-+ unsigned n;
-+
-+ init_intel_cacheinfo(c);
-+ if (c->cpuid_level > 9 ) {
-+ unsigned eax = cpuid_eax(10);
-+ /* Check for version and the number of counters */
-+ if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
-+ set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
-+ }
-+
-+ if (cpu_has_ds) {
-+ unsigned int l1, l2;
-+ rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
-+ if (!(l1 & (1<<11)))
-+ set_bit(X86_FEATURE_BTS, c->x86_capability);
-+ if (!(l1 & (1<<12)))
-+ set_bit(X86_FEATURE_PEBS, c->x86_capability);
-+ }
-+
-+ n = c->extended_cpuid_level;
-+ if (n >= 0x80000008) {
-+ unsigned eax = cpuid_eax(0x80000008);
-+ c->x86_virt_bits = (eax >> 8) & 0xff;
-+ c->x86_phys_bits = eax & 0xff;
-+ /* CPUID workaround for Intel 0F34 CPU */
-+ if (c->x86_vendor == X86_VENDOR_INTEL &&
-+ c->x86 == 0xF && c->x86_model == 0x3 &&
-+ c->x86_mask == 0x4)
-+ c->x86_phys_bits = 36;
-+ }
-+
-+ if (c->x86 == 15)
-+ c->x86_cache_alignment = c->x86_clflush_size * 2;
-+ if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
-+ (c->x86 == 0x6 && c->x86_model >= 0x0e))
-+ set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
-+ if (c->x86 == 6)
-+ set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
-+ if (c->x86 == 15)
-+ set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
-+ else
-+ clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
-+ c->x86_max_cores = intel_num_cpu_cores(c);
-+
-+ srat_detect_node();
-+}
-+
-+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
-+{
-+ char *v = c->x86_vendor_id;
-+
-+ if (!strcmp(v, "AuthenticAMD"))
-+ c->x86_vendor = X86_VENDOR_AMD;
-+ else if (!strcmp(v, "GenuineIntel"))
-+ c->x86_vendor = X86_VENDOR_INTEL;
-+ else
-+ c->x86_vendor = X86_VENDOR_UNKNOWN;
-+}
-+
-+struct cpu_model_info {
-+ int vendor;
-+ int family;
-+ char *model_names[16];
-+};
-+
-+/* Do some early cpuid on the boot CPU to get some parameters that are
-+ needed before check_bugs. Everything advanced is in identify_cpu
-+ below. */
-+void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
-+{
-+ u32 tfms;
-+
-+ c->loops_per_jiffy = loops_per_jiffy;
-+ c->x86_cache_size = -1;
-+ c->x86_vendor = X86_VENDOR_UNKNOWN;
-+ c->x86_model = c->x86_mask = 0; /* So far unknown... */
-+ c->x86_vendor_id[0] = '\0'; /* Unset */
-+ c->x86_model_id[0] = '\0'; /* Unset */
-+ c->x86_clflush_size = 64;
-+ c->x86_cache_alignment = c->x86_clflush_size;
-+ c->x86_max_cores = 1;
-+ c->extended_cpuid_level = 0;
-+ memset(&c->x86_capability, 0, sizeof c->x86_capability);
-+
-+ /* Get vendor name */
-+ cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-+ (unsigned int *)&c->x86_vendor_id[0],
-+ (unsigned int *)&c->x86_vendor_id[8],
-+ (unsigned int *)&c->x86_vendor_id[4]);
-+
-+ get_cpu_vendor(c);
-+
-+ /* Initialize the standard set of capabilities */
-+ /* Note that the vendor-specific code below might override */
-+
-+ /* Intel-defined flags: level 0x00000001 */
-+ if (c->cpuid_level >= 0x00000001) {
-+ __u32 misc;
-+ cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
-+ &c->x86_capability[0]);
-+ c->x86 = (tfms >> 8) & 0xf;
-+ c->x86_model = (tfms >> 4) & 0xf;
-+ c->x86_mask = tfms & 0xf;
-+ if (c->x86 == 0xf)
-+ c->x86 += (tfms >> 20) & 0xff;
-+ if (c->x86 >= 0x6)
-+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
-+ if (c->x86_capability[0] & (1<<19))
-+ c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
-+ } else {
-+ /* Have CPUID level 0 only - unheard of */
-+ c->x86 = 4;
-+ }
-+
-+#ifdef CONFIG_SMP
-+ c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
-+#endif
-+}
-+
-+/*
-+ * This does the hard work of actually picking apart the CPU stuff...
-+ */
-+void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
-+{
-+ int i;
-+ u32 xlvl;
-+
-+ early_identify_cpu(c);
-+
-+ /* AMD-defined flags: level 0x80000001 */
-+ xlvl = cpuid_eax(0x80000000);
-+ c->extended_cpuid_level = xlvl;
-+ if ((xlvl & 0xffff0000) == 0x80000000) {
-+ if (xlvl >= 0x80000001) {
-+ c->x86_capability[1] = cpuid_edx(0x80000001);
-+ c->x86_capability[6] = cpuid_ecx(0x80000001);
-+ }
-+ if (xlvl >= 0x80000004)
-+ get_model_name(c); /* Default name */
-+ }
-+
-+ /* Transmeta-defined flags: level 0x80860001 */
-+ xlvl = cpuid_eax(0x80860000);
-+ if ((xlvl & 0xffff0000) == 0x80860000) {
-+ /* Don't set x86_cpuid_level here for now to not confuse. */
-+ if (xlvl >= 0x80860001)
-+ c->x86_capability[2] = cpuid_edx(0x80860001);
-+ }
-+
-+ c->apicid = phys_pkg_id(0);
-+
-+ /*
-+ * Vendor-specific initialization. In this section we
-+ * canonicalize the feature flags, meaning if there are
-+ * features a certain CPU supports which CPUID doesn't
-+ * tell us, CPUID claiming incorrect flags, or other bugs,
-+ * we handle them here.
-+ *
-+ * At the end of this section, c->x86_capability better
-+ * indicate the features this CPU genuinely supports!
-+ */
-+ switch (c->x86_vendor) {
-+ case X86_VENDOR_AMD:
-+ init_amd(c);
-+ break;
-+
-+ case X86_VENDOR_INTEL:
-+ init_intel(c);
-+ break;
-+
-+ case X86_VENDOR_UNKNOWN:
-+ default:
-+ display_cacheinfo(c);
-+ break;
-+ }
-+
-+ select_idle_routine(c);
-+ detect_ht(c);
-+
-+ /*
-+ * On SMP, boot_cpu_data holds the common feature set between
-+ * all CPUs; so make sure that we indicate which features are
-+ * common between the CPUs. The first time this routine gets
-+ * executed, c == &boot_cpu_data.
-+ */
-+ if (c != &boot_cpu_data) {
-+ /* AND the already accumulated flags with these */
-+ for (i = 0 ; i < NCAPINTS ; i++)
-+ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
-+ }
-+
-+#ifdef CONFIG_X86_MCE
-+ mcheck_init(c);
-+#endif
-+ if (c != &boot_cpu_data)
-+ mtrr_ap_init();
-+#ifdef CONFIG_NUMA
-+ numa_add_cpu(smp_processor_id());
-+#endif
-+}
-+
-+
-+void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
-+{
-+ if (c->x86_model_id[0])
-+ printk("%s", c->x86_model_id);
-+
-+ if (c->x86_mask || c->cpuid_level >= 0)
-+ printk(" stepping %02x\n", c->x86_mask);
-+ else
-+ printk("\n");
-+}
-+
-+/*
-+ * Get CPU information for use by the procfs.
-+ */
-+
-+static int show_cpuinfo(struct seq_file *m, void *v)
-+{
-+ struct cpuinfo_x86 *c = v;
-+
-+ /*
-+ * These flag bits must match the definitions in <asm/cpufeature.h>.
-+ * NULL means this bit is undefined or reserved; either way it doesn't
-+ * have meaning as far as Linux is concerned. Note that it's important
-+ * to realize there is a difference between this table and CPUID -- if
-+ * applications want to get the raw CPUID data, they should access
-+ * /dev/cpu/<cpu_nr>/cpuid instead.
-+ */
-+ static char *x86_cap_flags[] = {
-+ /* Intel-defined */
-+ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
-+ "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
-+ "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
-+ "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
-+
-+ /* AMD-defined */
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
-+ NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
-+ "3dnowext", "3dnow",
-+
-+ /* Transmeta-defined */
-+ "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+
-+ /* Other (Linux-defined) */
-+ "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
-+ "constant_tsc", NULL, NULL,
-+ "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+
-+ /* Intel-defined (#2) */
-+ "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
-+ "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
-+ NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+
-+ /* VIA/Cyrix/Centaur-defined */
-+ NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+
-+ /* AMD-defined (#2) */
-+ "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
-+ "altmovcr8", "abm", "sse4a",
-+ "misalignsse", "3dnowprefetch",
-+ "osvw", "ibs", NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ };
-+ static char *x86_power_flags[] = {
-+ "ts", /* temperature sensor */
-+ "fid", /* frequency id control */
-+ "vid", /* voltage id control */
-+ "ttp", /* thermal trip */
-+ "tm",
-+ "stc",
-+ "100mhzsteps",
-+ "hwpstate",
-+ "", /* tsc invariant mapped to constant_tsc */
-+ /* nothing */
-+ };
-+
-+
-+#ifdef CONFIG_SMP
-+ if (!cpu_online(c-cpu_data))
-+ return 0;
-+#endif
-+
-+ seq_printf(m,"processor\t: %u\n"
-+ "vendor_id\t: %s\n"
-+ "cpu family\t: %d\n"
-+ "model\t\t: %d\n"
-+ "model name\t: %s\n",
-+ (unsigned)(c-cpu_data),
-+ c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
-+ c->x86,
-+ (int)c->x86_model,
-+ c->x86_model_id[0] ? c->x86_model_id : "unknown");
-+
-+ if (c->x86_mask || c->cpuid_level >= 0)
-+ seq_printf(m, "stepping\t: %d\n", c->x86_mask);
-+ else
-+ seq_printf(m, "stepping\t: unknown\n");
-+
-+ if (cpu_has(c,X86_FEATURE_TSC)) {
-+ unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
-+ if (!freq)
-+ freq = cpu_khz;
-+ seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
-+ freq / 1000, (freq % 1000));
-+ }
-+
-+ /* Cache size */
-+ if (c->x86_cache_size >= 0)
-+ seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
-+
-+#ifdef CONFIG_SMP
-+ if (smp_num_siblings * c->x86_max_cores > 1) {
-+ int cpu = c - cpu_data;
-+ seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-+ seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
-+ seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
-+ seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
-+ }
-+#endif
-+
-+ seq_printf(m,
-+ "fpu\t\t: yes\n"
-+ "fpu_exception\t: yes\n"
-+ "cpuid level\t: %d\n"
-+ "wp\t\t: yes\n"
-+ "flags\t\t:",
-+ c->cpuid_level);
-+
-+ {
-+ int i;
-+ for ( i = 0 ; i < 32*NCAPINTS ; i++ )
-+ if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
-+ seq_printf(m, " %s", x86_cap_flags[i]);
-+ }
-+
-+ seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
-+ c->loops_per_jiffy/(500000/HZ),
-+ (c->loops_per_jiffy/(5000/HZ)) % 100);
-+
-+ if (c->x86_tlbsize > 0)
-+ seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
-+ seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
-+ seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
-+
-+ seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
-+ c->x86_phys_bits, c->x86_virt_bits);
-+
-+ seq_printf(m, "power management:");
-+ {
-+ unsigned i;
-+ for (i = 0; i < 32; i++)
-+ if (c->x86_power & (1 << i)) {
-+ if (i < ARRAY_SIZE(x86_power_flags) &&
-+ x86_power_flags[i])
-+ seq_printf(m, "%s%s",
-+ x86_power_flags[i][0]?" ":"",
-+ x86_power_flags[i]);
-+ else
-+ seq_printf(m, " [%d]", i);
-+ }
-+ }
-+
-+ seq_printf(m, "\n\n");
-+
-+ return 0;
-+}
-+
-+static void *c_start(struct seq_file *m, loff_t *pos)
-+{
-+ return *pos < NR_CPUS ? cpu_data + *pos : NULL;
-+}
-+
-+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-+{
-+ ++*pos;
-+ return c_start(m, pos);
-+}
-+
-+static void c_stop(struct seq_file *m, void *v)
-+{
-+}
-+
-+struct seq_operations cpuinfo_op = {
-+ .start =c_start,
-+ .next = c_next,
-+ .stop = c_stop,
-+ .show = show_cpuinfo,
-+};
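
The CONFIG_XEN block above builds a three-level phys-to-machine lookup that save/restore can walk: phys_to_machine_mapping holds one machine frame number per pfn, each pfn_to_mfn_frame_list[k] page records the machine frames backing that table, and pfn_to_mfn_frame_list_list records the machine frames of the frame-list pages themselves, with fpp = PAGE_SIZE/sizeof(unsigned long) entries per page. A minimal standalone sketch of the index arithmetic only (not code from the patch; the struct and helper names below are invented for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define FPP (PAGE_SIZE / sizeof(unsigned long))	/* entries per page; 512 on x86_64 */

struct p2m_index {
	unsigned long list_list_slot;	/* slot in pfn_to_mfn_frame_list_list */
	unsigned long frame_list_slot;	/* slot in pfn_to_mfn_frame_list[list_list_slot] */
	unsigned long entry_slot;	/* slot within the p2m page covering this pfn */
};

/* Hypothetical helper: where the p2m entry for 'pfn' lives in the structure above. */
static struct p2m_index p2m_locate(unsigned long pfn)
{
	unsigned long p2m_page = pfn / FPP;	/* which page of phys_to_machine_mapping */
	struct p2m_index ix = {
		.list_list_slot  = p2m_page / FPP,
		.frame_list_slot = p2m_page % FPP,
		.entry_slot      = pfn % FPP,
	};
	return ix;
}

int main(void)
{
	struct p2m_index ix = p2m_locate(1234567UL);
	printf("frame_list_list[%lu] -> frame_list[%lu] -> entry %lu\n",
	       ix.list_list_slot, ix.frame_list_slot, ix.entry_slot);
	return 0;
}

With 4 KiB pages fpp is 512, so a single frame-list-list page can describe 512 * 512 * 512 pfns, i.e. up to 512 GiB of guest memory.
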
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/smp-xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/smp-xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/smp-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/smp-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,563 @@
-+/*
-+ * Intel SMP support routines.
-+ *
-+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
-+ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
-+ * (c) 2002,2003 Andi Kleen, SuSE Labs.
-+ *
-+ * This code is released under the GNU General Public License version 2 or
-+ * later.
-+ */
-+
-+#include <linux/init.h>
-+
-+#include <linux/mm.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/smp.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/interrupt.h>
-+
-+#include <asm/mtrr.h>
-+#include <asm/pgalloc.h>
-+#include <asm/tlbflush.h>
-+#include <asm/mach_apic.h>
-+#include <asm/mmu_context.h>
-+#include <asm/proto.h>
-+#include <asm/apicdef.h>
-+#include <asm/idle.h>
-+#ifdef CONFIG_XEN
-+#include <xen/evtchn.h>
-+#endif
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Smarter SMP flushing macros.
-+ * c/o Linus Torvalds.
-+ *
-+ * These mean you can really definitely utterly forget about
-+ * writing to user space from interrupts. (It's not allowed anyway).
-+ *
-+ * Optimizations Manfred Spraul <manfred@colorfullife.com>
-+ *
-+ * More scalable flush, from Andi Kleen
-+ *
-+ * To avoid global state use 8 different call vectors.
-+ * Each CPU uses a specific vector to trigger flushes on other
-+ * CPUs. Depending on the received vector the target CPUs look into
-+ * the right per cpu variable for the flush data.
-+ *
-+ * With more than 8 CPUs they are hashed to the 8 available
-+ * vectors. The limited global vector space forces us to this right now.
-+ * In future when interrupts are split into per CPU domains this could be
-+ * fixed, at the cost of triggering multiple IPIs in some cases.
-+ */
-+
-+union smp_flush_state {
-+ struct {
-+ cpumask_t flush_cpumask;
-+ struct mm_struct *flush_mm;
-+ unsigned long flush_va;
-+#define FLUSH_ALL -1ULL
-+ spinlock_t tlbstate_lock;
-+ };
-+ char pad[SMP_CACHE_BYTES];
-+} ____cacheline_aligned;
-+
-+/* State is put into the per CPU data section, but padded
-+ to a full cache line because other CPUs can access it and we don't
-+ want false sharing in the per cpu data segment. */
-+static DEFINE_PER_CPU(union smp_flush_state, flush_state);
-+#endif
-+
-+/*
-+ * We cannot call mmdrop() because we are in interrupt context,
-+ * instead update mm->cpu_vm_mask.
-+ */
-+static inline void leave_mm(unsigned long cpu)
-+{
-+ if (read_pda(mmu_state) == TLBSTATE_OK)
-+ BUG();
-+ cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
-+ load_cr3(swapper_pg_dir);
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ *
-+ * The flush IPI assumes that a thread switch happens in this order:
-+ * [cpu0: the cpu that switches]
-+ * 1) switch_mm() either 1a) or 1b)
-+ * 1a) thread switch to a different mm
-+ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
-+ * Stop ipi delivery for the old mm. This is not synchronized with
-+ * the other cpus, but smp_invalidate_interrupt ignore flush ipis
-+ * for the wrong mm, and in the worst case we perform a superfluous
-+ * tlb flush.
-+ * 1a2) set cpu mmu_state to TLBSTATE_OK
-+ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
-+ * was in lazy tlb mode.
-+ * 1a3) update cpu active_mm
-+ * Now cpu0 accepts tlb flushes for the new mm.
-+ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
-+ * Now the other cpus will send tlb flush ipis.
-+ * 1a5) change cr3.
-+ * 1b) thread switch without mm change
-+ * cpu active_mm is correct, cpu0 already handles
-+ * flush ipis.
-+ * 1b1) set cpu mmu_state to TLBSTATE_OK
-+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
-+ * Atomically set the bit [other cpus will start sending flush ipis],
-+ * and test the bit.
-+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
-+ * 2) switch %%esp, ie current
-+ *
-+ * The interrupt must handle 2 special cases:
-+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
-+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
-+ * runs in kernel space, the cpu could load tlb entries for user space
-+ * pages.
-+ *
-+ * The good news is that cpu mmu_state is local to each cpu, no
-+ * write/read ordering problems.
-+ */
-+
-+/*
-+ * TLB flush IPI:
-+ *
-+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
-+ * 2) Leave the mm if we are in the lazy tlb mode.
-+ *
-+ * Interrupts are disabled.
-+ */
-+
-+asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
-+{
-+ int cpu;
-+ int sender;
-+ union smp_flush_state *f;
-+
-+ cpu = smp_processor_id();
-+ /*
-+ * orig_rax contains the negated interrupt vector.
-+ * Use that to determine where the sender put the data.
-+ */
-+ sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
-+ f = &per_cpu(flush_state, sender);
-+
-+ if (!cpu_isset(cpu, f->flush_cpumask))
-+ goto out;
-+ /*
-+	 * This was a BUG() but until someone can quote me the
-+	 * line from the Intel manual that guarantees an IPI to
-+	 * multiple CPUs is retried _only_ on the erroring CPUs
-+	 * it's staying as a return
-+ *
-+ * BUG();
-+ */
-+
-+ if (f->flush_mm == read_pda(active_mm)) {
-+ if (read_pda(mmu_state) == TLBSTATE_OK) {
-+ if (f->flush_va == FLUSH_ALL)
-+ local_flush_tlb();
-+ else
-+ __flush_tlb_one(f->flush_va);
-+ } else
-+ leave_mm(cpu);
-+ }
-+out:
-+ ack_APIC_irq();
-+ cpu_clear(cpu, f->flush_cpumask);
-+}
-+
-+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
-+ unsigned long va)
-+{
-+ int sender;
-+ union smp_flush_state *f;
-+
-+ /* Caller has disabled preemption */
-+ sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
-+ f = &per_cpu(flush_state, sender);
-+
-+ /* Could avoid this lock when
-+ num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
-+ probably not worth checking this for a cache-hot lock. */
-+ spin_lock(&f->tlbstate_lock);
-+
-+ f->flush_mm = mm;
-+ f->flush_va = va;
-+ cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
-+
-+ /*
-+ * We have to send the IPI only to
-+ * CPUs affected.
-+ */
-+ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
-+
-+ while (!cpus_empty(f->flush_cpumask))
-+ cpu_relax();
-+
-+ f->flush_mm = NULL;
-+ f->flush_va = 0;
-+ spin_unlock(&f->tlbstate_lock);
-+}
-+
-+int __cpuinit init_smp_flush(void)
-+{
-+ int i;
-+ for_each_cpu_mask(i, cpu_possible_map) {
-+ spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
-+ }
-+ return 0;
-+}
-+
-+core_initcall(init_smp_flush);
-+
-+void flush_tlb_current_task(void)
-+{
-+ struct mm_struct *mm = current->mm;
-+ cpumask_t cpu_mask;
-+
-+ preempt_disable();
-+ cpu_mask = mm->cpu_vm_mask;
-+ cpu_clear(smp_processor_id(), cpu_mask);
-+
-+ local_flush_tlb();
-+ if (!cpus_empty(cpu_mask))
-+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+ preempt_enable();
-+}
-+EXPORT_SYMBOL(flush_tlb_current_task);
-+
-+void flush_tlb_mm (struct mm_struct * mm)
-+{
-+ cpumask_t cpu_mask;
-+
-+ preempt_disable();
-+ cpu_mask = mm->cpu_vm_mask;
-+ cpu_clear(smp_processor_id(), cpu_mask);
-+
-+ if (current->active_mm == mm) {
-+ if (current->mm)
-+ local_flush_tlb();
-+ else
-+ leave_mm(smp_processor_id());
-+ }
-+ if (!cpus_empty(cpu_mask))
-+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+
-+ preempt_enable();
-+}
-+EXPORT_SYMBOL(flush_tlb_mm);
-+
-+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
-+{
-+ struct mm_struct *mm = vma->vm_mm;
-+ cpumask_t cpu_mask;
-+
-+ preempt_disable();
-+ cpu_mask = mm->cpu_vm_mask;
-+ cpu_clear(smp_processor_id(), cpu_mask);
-+
-+ if (current->active_mm == mm) {
-+ if(current->mm)
-+ __flush_tlb_one(va);
-+ else
-+ leave_mm(smp_processor_id());
-+ }
-+
-+ if (!cpus_empty(cpu_mask))
-+ flush_tlb_others(cpu_mask, mm, va);
-+
-+ preempt_enable();
-+}
-+EXPORT_SYMBOL(flush_tlb_page);
-+
-+static void do_flush_tlb_all(void* info)
-+{
-+ unsigned long cpu = smp_processor_id();
-+
-+ __flush_tlb_all();
-+ if (read_pda(mmu_state) == TLBSTATE_LAZY)
-+ leave_mm(cpu);
-+}
-+
-+void flush_tlb_all(void)
-+{
-+ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
-+}
-+#else
-+asmlinkage void smp_invalidate_interrupt (void)
-+{ return; }
-+void flush_tlb_current_task(void)
-+{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
-+void flush_tlb_mm (struct mm_struct * mm)
-+{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
-+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
-+{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
-+void flush_tlb_all(void)
-+{ xen_tlb_flush_all(); }
-+#endif /* Xen */
-+
-+/*
-+ * this function sends a 'reschedule' IPI to another CPU.
-+ * it goes straight through and wastes no time serializing
-+ * anything. Worst case is that we lose a reschedule ...
-+ */
-+
-+void smp_send_reschedule(int cpu)
-+{
-+ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
-+}
-+
-+/*
-+ * Structure and data for smp_call_function(). This is designed to minimise
-+ * static memory requirements. It also looks cleaner.
-+ */
-+static DEFINE_SPINLOCK(call_lock);
-+
-+struct call_data_struct {
-+ void (*func) (void *info);
-+ void *info;
-+ atomic_t started;
-+ atomic_t finished;
-+ int wait;
-+};
-+
-+static struct call_data_struct * call_data;
-+
-+void lock_ipi_call_lock(void)
-+{
-+ spin_lock_irq(&call_lock);
-+}
-+
-+void unlock_ipi_call_lock(void)
-+{
-+ spin_unlock_irq(&call_lock);
-+}
-+
-+/*
-+ * this function sends a 'generic call function' IPI to one other CPU
-+ * in the system.
-+ *
-+ * cpu is a standard Linux logical CPU number.
-+ */
-+static void
-+__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-+ int nonatomic, int wait)
-+{
-+ struct call_data_struct data;
-+ int cpus = 1;
-+
-+ data.func = func;
-+ data.info = info;
-+ atomic_set(&data.started, 0);
-+ data.wait = wait;
-+ if (wait)
-+ atomic_set(&data.finished, 0);
-+
-+ call_data = &data;
-+ wmb();
-+ /* Send a message to all other CPUs and wait for them to respond */
-+ send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
-+
-+ /* Wait for response */
-+ while (atomic_read(&data.started) != cpus)
-+ cpu_relax();
-+
-+ if (!wait)
-+ return;
-+
-+ while (atomic_read(&data.finished) != cpus)
-+ cpu_relax();
-+}
-+
-+/*
-+ * smp_call_function_single - Run a function on another CPU
-+ * @func: The function to run. This must be fast and non-blocking.
-+ * @info: An arbitrary pointer to pass to the function.
-+ * @nonatomic: Currently unused.
-+ * @wait: If true, wait until function has completed on other CPUs.
-+ *
-+ * Returns 0 on success, else a negative status code.
-+ *
-+ * Does not return until the remote CPU is nearly ready to execute <func>,
-+ * or is executing or has executed it.
-+ */
-+
-+int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
-+ int nonatomic, int wait)
-+{
-+ /* prevent preemption and reschedule on another processor */
-+ int me = get_cpu();
-+ if (cpu == me) {
-+ put_cpu();
-+ return 0;
-+ }
-+
-+ /* Can deadlock when called with interrupts disabled */
-+ WARN_ON(irqs_disabled());
-+
-+ spin_lock_bh(&call_lock);
-+ __smp_call_function_single(cpu, func, info, nonatomic, wait);
-+ spin_unlock_bh(&call_lock);
-+ put_cpu();
-+ return 0;
-+}
-+EXPORT_SYMBOL(smp_call_function_single);
-+
-+/*
-+ * this function sends a 'generic call function' IPI to all other CPUs
-+ * in the system.
-+ */
-+static void __smp_call_function (void (*func) (void *info), void *info,
-+ int nonatomic, int wait)
-+{
-+ struct call_data_struct data;
-+ int cpus = num_online_cpus()-1;
-+
-+ if (!cpus)
-+ return;
-+
-+ data.func = func;
-+ data.info = info;
-+ atomic_set(&data.started, 0);
-+ data.wait = wait;
-+ if (wait)
-+ atomic_set(&data.finished, 0);
-+
-+ call_data = &data;
-+ wmb();
-+ /* Send a message to all other CPUs and wait for them to respond */
-+ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-+
-+ /* Wait for response */
-+ while (atomic_read(&data.started) != cpus)
-+#ifndef CONFIG_XEN
-+ cpu_relax();
-+#else
-+ barrier();
-+#endif
-+
-+ if (!wait)
-+ return;
-+
-+ while (atomic_read(&data.finished) != cpus)
-+#ifndef CONFIG_XEN
-+ cpu_relax();
-+#else
-+ barrier();
-+#endif
-+}
-+
-+/*
-+ * smp_call_function - run a function on all other CPUs.
-+ * @func: The function to run. This must be fast and non-blocking.
-+ * @info: An arbitrary pointer to pass to the function.
-+ * @nonatomic: currently unused.
-+ * @wait: If true, wait (atomically) until function has completed on other
-+ * CPUs.
-+ *
-+ * Returns 0 on success, else a negative status code. Does not return until
-+ * remote CPUs are nearly ready to execute func, or are executing or have executed it.
-+ *
-+ * You must not call this function with disabled interrupts or from a
-+ * hardware interrupt handler or from a bottom half handler.
-+ * Actually there are a few legal cases, like panic.
-+ */
-+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-+ int wait)
-+{
-+ spin_lock(&call_lock);
-+ __smp_call_function(func,info,nonatomic,wait);
-+ spin_unlock(&call_lock);
-+ return 0;
-+}
-+EXPORT_SYMBOL(smp_call_function);
-+
-+static void stop_this_cpu(void *dummy)
-+{
-+ local_irq_disable();
-+ /*
-+ * Remove this CPU:
-+ */
-+ cpu_clear(smp_processor_id(), cpu_online_map);
-+ mask_evtchn_local();
-+ for (;;)
-+ halt();
-+}
-+
-+void smp_send_stop(void)
-+{
-+ int nolock;
-+ unsigned long flags;
-+
-+#ifndef CONFIG_XEN
-+ if (reboot_force)
-+ return;
-+#endif
-+
-+ /* Don't deadlock on the call lock in panic */
-+ nolock = !spin_trylock(&call_lock);
-+ local_irq_save(flags);
-+ __smp_call_function(stop_this_cpu, NULL, 0, 0);
-+ if (!nolock)
-+ spin_unlock(&call_lock);
-+ mask_evtchn_local();
-+ local_irq_restore(flags);
-+}
-+
-+/*
-+ * Reschedule call back. Nothing to do,
-+ * all the work is done automatically when
-+ * we return from the interrupt.
-+ */
-+#ifndef CONFIG_XEN
-+asmlinkage void smp_reschedule_interrupt(void)
-+#else
-+asmlinkage irqreturn_t smp_reschedule_interrupt(int irq, void *ctx)
-+#endif
-+{
-+#ifndef CONFIG_XEN
-+ ack_APIC_irq();
-+#else
-+ return IRQ_HANDLED;
-+#endif
-+}
-+
-+#ifndef CONFIG_XEN
-+asmlinkage void smp_call_function_interrupt(void)
-+#else
-+asmlinkage irqreturn_t smp_call_function_interrupt(int irq, void *ctx)
-+#endif
-+{
-+ void (*func) (void *info) = call_data->func;
-+ void *info = call_data->info;
-+ int wait = call_data->wait;
-+
-+#ifndef CONFIG_XEN
-+ ack_APIC_irq();
-+#endif
-+ /*
-+ * Notify initiating CPU that I've grabbed the data and am
-+ * about to execute the function
-+ */
-+ mb();
-+ atomic_inc(&call_data->started);
-+ /*
-+ * At this point the info structure may be out of scope unless wait==1
-+ */
-+ exit_idle();
-+ irq_enter();
-+ (*func)(info);
-+ irq_exit();
-+ if (wait) {
-+ mb();
-+ atomic_inc(&call_data->finished);
-+ }
-+#ifdef CONFIG_XEN
-+ return IRQ_HANDLED;
-+#endif
-+}
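
The __smp_call_function()/smp_call_function_interrupt() pair in smp-xen.c above rendezvous through one shared call_data pointer and two atomic counters: started means a handler has copied func/info (so the initiator's on-stack descriptor may go out of scope once every handler has started, unless wait is set), and finished means a handler has returned from func; the Xen build merely spins with barrier() instead of cpu_relax(). A small userspace analogue of that handshake, with pthreads standing in for the other CPUs (all names here are illustrative, not from the patch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Mirrors the roles of the kernel's call_data_struct fields. */
struct call_data {
	void (*func)(void *info);
	void *info;
	atomic_int started;	/* handlers that have copied func/info */
	atomic_int finished;	/* handlers that have returned from func */
	int wait;
};

static struct call_data *call_data;	/* published pointer, as in the patch */

static void say_hello(void *info)
{
	printf("hello from a remote \"cpu\" (%s)\n", (const char *)info);
}

/* Stand-in for smp_call_function_interrupt() running on another CPU. */
static void *ipi_handler(void *unused)
{
	void (*func)(void *) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	(void)unused;
	atomic_fetch_add(&call_data->started, 1);	/* "I've grabbed the data" */
	func(info);
	if (wait)
		atomic_fetch_add(&call_data->finished, 1);
	return NULL;
}

int main(void)
{
	enum { NCPUS = 3 };
	pthread_t t[NCPUS];
	struct call_data data;
	int i;

	data.func = say_hello;
	data.info = "illustration";
	atomic_init(&data.started, 0);
	atomic_init(&data.finished, 0);
	data.wait = 1;

	call_data = &data;	/* publish, like the wmb() + send_IPI_allbutself() pair */
	for (i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, ipi_handler, NULL);

	while (atomic_load(&data.started) != NCPUS)
		;	/* the kernel spins here with cpu_relax()/barrier() */
	while (data.wait && atomic_load(&data.finished) != NCPUS)
		;
	for (i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

The started counter is what lets the initiator reuse its on-stack descriptor safely when wait is 0; finished is only meaningful when wait is 1, as the comment in smp_call_function_interrupt() notes.
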
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/traps-xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/traps-xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/traps-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/traps-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,1159 @@
-+/*
-+ * linux/arch/x86-64/traps.c
-+ *
-+ * Copyright (C) 1991, 1992 Linus Torvalds
-+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
-+ *
-+ * Pentium III FXSR, SSE support
-+ * Gareth Hughes <gareth@valinux.com>, May 2000
-+ */
-+
-+/*
-+ * 'Traps.c' handles hardware traps and faults after we have saved some
-+ * state in 'entry.S'.
-+ */
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/ptrace.h>
-+#include <linux/timer.h>
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/interrupt.h>
-+#include <linux/kallsyms.h>
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/nmi.h>
-+#include <linux/kprobes.h>
-+#include <linux/kexec.h>
-+#include <linux/unwind.h>
-+#include <linux/uaccess.h>
-+#include <linux/bug.h>
-+#include <linux/kdebug.h>
-+
-+#include <asm/system.h>
-+#include <asm/io.h>
-+#include <asm/atomic.h>
-+#include <asm/debugreg.h>
-+#include <asm/desc.h>
-+#include <asm/i387.h>
-+#include <asm/processor.h>
-+#include <asm/unwind.h>
-+#include <asm/smp.h>
-+#include <asm/pgalloc.h>
-+#include <asm/pda.h>
-+#include <asm/proto.h>
-+#include <asm/nmi.h>
-+#include <asm/stacktrace.h>
-+
-+asmlinkage void divide_error(void);
-+asmlinkage void debug(void);
-+asmlinkage void nmi(void);
-+asmlinkage void int3(void);
-+asmlinkage void overflow(void);
-+asmlinkage void bounds(void);
-+asmlinkage void invalid_op(void);
-+asmlinkage void device_not_available(void);
-+asmlinkage void double_fault(void);
-+asmlinkage void coprocessor_segment_overrun(void);
-+asmlinkage void invalid_TSS(void);
-+asmlinkage void segment_not_present(void);
-+asmlinkage void stack_segment(void);
-+asmlinkage void general_protection(void);
-+asmlinkage void page_fault(void);
-+asmlinkage void coprocessor_error(void);
-+asmlinkage void simd_coprocessor_error(void);
-+asmlinkage void reserved(void);
-+asmlinkage void alignment_check(void);
-+asmlinkage void machine_check(void);
-+asmlinkage void spurious_interrupt_bug(void);
-+
-+static inline void conditional_sti(struct pt_regs *regs)
-+{
-+ if (regs->eflags & X86_EFLAGS_IF)
-+ local_irq_enable();
-+}
-+
-+static inline void preempt_conditional_sti(struct pt_regs *regs)
-+{
-+ preempt_disable();
-+ if (regs->eflags & X86_EFLAGS_IF)
-+ local_irq_enable();
-+}
-+
-+static inline void preempt_conditional_cli(struct pt_regs *regs)
-+{
-+ if (regs->eflags & X86_EFLAGS_IF)
-+ local_irq_disable();
-+ /* Make sure to not schedule here because we could be running
-+ on an exception stack. */
-+ preempt_enable_no_resched();
-+}
-+
-+int kstack_depth_to_print = 12;
-+
-+#ifdef CONFIG_KALLSYMS
-+void printk_address(unsigned long address)
-+{
-+ unsigned long offset = 0, symsize;
-+ const char *symname;
-+ char *modname;
-+ char *delim = ":";
-+ char namebuf[128];
-+
-+ symname = kallsyms_lookup(address, &symsize, &offset,
-+ &modname, namebuf);
-+ if (!symname) {
-+ printk(" [<%016lx>]\n", address);
-+ return;
-+ }
-+ if (!modname)
-+ modname = delim = "";
-+ printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
-+ address, delim, modname, delim, symname, offset, symsize);
-+}
-+#else
-+void printk_address(unsigned long address)
-+{
-+ printk(" [<%016lx>]\n", address);
-+}
-+#endif
-+
-+static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
-+ unsigned *usedp, char **idp)
-+{
-+#ifndef CONFIG_X86_NO_TSS
-+ static char ids[][8] = {
-+ [DEBUG_STACK - 1] = "#DB",
-+ [NMI_STACK - 1] = "NMI",
-+ [DOUBLEFAULT_STACK - 1] = "#DF",
-+ [STACKFAULT_STACK - 1] = "#SS",
-+ [MCE_STACK - 1] = "#MC",
-+#if DEBUG_STKSZ > EXCEPTION_STKSZ
-+ [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
-+#endif
-+ };
-+ unsigned k;
-+
-+ /*
-+ * Iterate over all exception stacks, and figure out whether
-+ * 'stack' is in one of them:
-+ */
-+ for (k = 0; k < N_EXCEPTION_STACKS; k++) {
-+ unsigned long end = per_cpu(orig_ist, cpu).ist[k];
-+ /*
-+ * Is 'stack' above this exception frame's end?
-+ * If yes then skip to the next frame.
-+ */
-+ if (stack >= end)
-+ continue;
-+ /*
-+ * Is 'stack' above this exception frame's start address?
-+ * If yes then we found the right frame.
-+ */
-+ if (stack >= end - EXCEPTION_STKSZ) {
-+ /*
-+ * Make sure we only iterate through an exception
-+ * stack once. If it comes up for the second time
-+ * then there's something wrong going on - just
-+ * break out and return NULL:
-+ */
-+ if (*usedp & (1U << k))
-+ break;
-+ *usedp |= 1U << k;
-+ *idp = ids[k];
-+ return (unsigned long *)end;
-+ }
-+ /*
-+ * If this is a debug stack, and if it has a larger size than
-+ * the usual exception stacks, then 'stack' might still
-+ * be within the lower portion of the debug stack:
-+ */
-+#if DEBUG_STKSZ > EXCEPTION_STKSZ
-+ if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
-+ unsigned j = N_EXCEPTION_STACKS - 1;
-+
-+ /*
-+ * Black magic. A large debug stack is composed of
-+ * multiple exception stack entries, which we
-+			 * iterate through now. Don't look:
-+ */
-+ do {
-+ ++j;
-+ end -= EXCEPTION_STKSZ;
-+ ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
-+ } while (stack < end - EXCEPTION_STKSZ);
-+ if (*usedp & (1U << j))
-+ break;
-+ *usedp |= 1U << j;
-+ *idp = ids[j];
-+ return (unsigned long *)end;
-+ }
-+#endif
-+ }
-+#endif
-+ return NULL;
-+}
-+
-+#define MSG(txt) ops->warning(data, txt)
-+
-+/*
-+ * x86-64 can have up to three kernel stacks:
-+ * process stack
-+ * interrupt stack
-+ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
-+ */
-+
-+static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
-+{
-+ void *t = (void *)tinfo;
-+ return p > t && p < t + THREAD_SIZE - 3;
-+}
-+
-+void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
-+ unsigned long *stack,
-+ struct stacktrace_ops *ops, void *data)
-+{
-+ const unsigned cpu = get_cpu();
-+ unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr;
-+ unsigned used = 0;
-+ struct thread_info *tinfo;
-+
-+ if (!tsk)
-+ tsk = current;
-+
-+ if (!stack) {
-+ unsigned long dummy;
-+ stack = &dummy;
-+ if (tsk && tsk != current)
-+ stack = (unsigned long *)tsk->thread.rsp;
-+ }
-+
-+ /*
-+ * Print function call entries within a stack. 'cond' is the
-+ * "end of stackframe" condition, that the 'stack++'
-+ * iteration will eventually trigger.
-+ */
-+#define HANDLE_STACK(cond) \
-+ do while (cond) { \
-+ unsigned long addr = *stack++; \
-+ /* Use unlocked access here because except for NMIs \
-+ we should be already protected against module unloads */ \
-+ if (__kernel_text_address(addr)) { \
-+ /* \
-+ * If the address is either in the text segment of the \
-+ * kernel, or in the region which contains vmalloc'ed \
-+ * memory, it *may* be the address of a calling \
-+ * routine; if so, print it so that someone tracing \
-+ * down the cause of the crash will be able to figure \
-+ * out the call path that was taken. \
-+ */ \
-+ ops->address(data, addr); \
-+ } \
-+ } while (0)
-+
-+ /*
-+ * Print function call entries in all stacks, starting at the
-+	 * current stack address. If the stacks consist of nested
-+	 * exceptions, follow the saved link pointers from one stack to the next.
-+ */
-+ for (;;) {
-+ char *id;
-+ unsigned long *estack_end;
-+ estack_end = in_exception_stack(cpu, (unsigned long)stack,
-+ &used, &id);
-+
-+ if (estack_end) {
-+ if (ops->stack(data, id) < 0)
-+ break;
-+ HANDLE_STACK (stack < estack_end);
-+ ops->stack(data, "<EOE>");
-+ /*
-+ * We link to the next stack via the
-+ * second-to-last pointer (index -2 to end) in the
-+ * exception stack:
-+ */
-+ stack = (unsigned long *) estack_end[-2];
-+ continue;
-+ }
-+ if (irqstack_end) {
-+ unsigned long *irqstack;
-+ irqstack = irqstack_end -
-+ (IRQSTACKSIZE - 64) / sizeof(*irqstack);
-+
-+ if (stack >= irqstack && stack < irqstack_end) {
-+ if (ops->stack(data, "IRQ") < 0)
-+ break;
-+ HANDLE_STACK (stack < irqstack_end);
-+ /*
-+ * We link to the next stack (which would be
-+ * the process stack normally) the last
-+ * pointer (index -1 to end) in the IRQ stack:
-+ */
-+ stack = (unsigned long *) (irqstack_end[-1]);
-+ irqstack_end = NULL;
-+ ops->stack(data, "EOI");
-+ continue;
-+ }
-+ }
-+ break;
-+ }
-+
-+ /*
-+ * This handles the process stack:
-+ */
-+ tinfo = task_thread_info(tsk);
-+ HANDLE_STACK (valid_stack_ptr(tinfo, stack));
-+#undef HANDLE_STACK
-+ put_cpu();
-+}
-+EXPORT_SYMBOL(dump_trace);
-+
-+static void
-+print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
-+{
-+ print_symbol(msg, symbol);
-+ printk("\n");
-+}
-+
-+static void print_trace_warning(void *data, char *msg)
-+{
-+ printk("%s\n", msg);
-+}
-+
-+static int print_trace_stack(void *data, char *name)
-+{
-+ printk(" <%s> ", name);
-+ return 0;
-+}
-+
-+static void print_trace_address(void *data, unsigned long addr)
-+{
-+ printk_address(addr);
-+}
-+
-+static struct stacktrace_ops print_trace_ops = {
-+ .warning = print_trace_warning,
-+ .warning_symbol = print_trace_warning_symbol,
-+ .stack = print_trace_stack,
-+ .address = print_trace_address,
-+};
-+
-+void
-+show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack)
-+{
-+ printk("\nCall Trace:\n");
-+ dump_trace(tsk, regs, stack, &print_trace_ops, NULL);
-+ printk("\n");
-+}
-+
-+static void
-+_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
-+{
-+ unsigned long *stack;
-+ int i;
-+ const int cpu = smp_processor_id();
-+ unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
-+ unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
-+
-+ // debugging aid: "show_stack(NULL, NULL);" prints the
-+ // back trace for this cpu.
-+
-+ if (rsp == NULL) {
-+ if (tsk)
-+ rsp = (unsigned long *)tsk->thread.rsp;
-+ else
-+ rsp = (unsigned long *)&rsp;
-+ }
-+
-+ stack = rsp;
-+ for(i=0; i < kstack_depth_to_print; i++) {
-+ if (stack >= irqstack && stack <= irqstack_end) {
-+ if (stack == irqstack_end) {
-+ stack = (unsigned long *) (irqstack_end[-1]);
-+ printk(" <EOI> ");
-+ }
-+ } else {
-+ if (((long) stack & (THREAD_SIZE-1)) == 0)
-+ break;
-+ }
-+ if (i && ((i % 4) == 0))
-+ printk("\n");
-+ printk(" %016lx", *stack++);
-+ touch_nmi_watchdog();
-+ }
-+ show_trace(tsk, regs, rsp);
-+}
-+
-+void show_stack(struct task_struct *tsk, unsigned long * rsp)
-+{
-+ _show_stack(tsk, NULL, rsp);
-+}
-+
-+/*
-+ * The architecture-independent dump_stack generator
-+ */
-+void dump_stack(void)
-+{
-+ unsigned long dummy;
-+ show_trace(NULL, NULL, &dummy);
-+}
-+
-+EXPORT_SYMBOL(dump_stack);
-+
-+void show_registers(struct pt_regs *regs)
-+{
-+ int i;
-+ int in_kernel = !user_mode(regs);
-+ unsigned long rsp;
-+ const int cpu = smp_processor_id();
-+ struct task_struct *cur = cpu_pda(cpu)->pcurrent;
-+
-+ rsp = regs->rsp;
-+ printk("CPU %d ", cpu);
-+ __show_regs(regs);
-+ printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
-+ cur->comm, cur->pid, task_thread_info(cur), cur);
-+
-+ /*
-+ * When in-kernel, we also print out the stack and code at the
-+ * time of the fault..
-+ */
-+ if (in_kernel) {
-+ printk("Stack: ");
-+ _show_stack(NULL, regs, (unsigned long*)rsp);
-+
-+ printk("\nCode: ");
-+ if (regs->rip < PAGE_OFFSET)
-+ goto bad;
-+
-+ for (i=0; i<20; i++) {
-+ unsigned char c;
-+ if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
-+bad:
-+ printk(" Bad RIP value.");
-+ break;
-+ }
-+ printk("%02x ", c);
-+ }
-+ }
-+ printk("\n");
-+}
-+
-+int is_valid_bugaddr(unsigned long rip)
-+{
-+ unsigned short ud2;
-+
-+ if (__copy_from_user(&ud2, (const void __user *) rip, sizeof(ud2)))
-+ return 0;
-+
-+ return ud2 == 0x0b0f;
-+}
-+
-+#ifdef CONFIG_BUG
-+void out_of_line_bug(void)
-+{
-+ BUG();
-+}
-+EXPORT_SYMBOL(out_of_line_bug);
-+#endif
-+
-+static DEFINE_SPINLOCK(die_lock);
-+static int die_owner = -1;
-+static unsigned int die_nest_count;
-+
-+unsigned __kprobes long oops_begin(void)
-+{
-+ int cpu;
-+ unsigned long flags;
-+
-+ oops_enter();
-+
-+ /* racy, but better than risking deadlock. */
-+ local_irq_save(flags);
-+ cpu = smp_processor_id();
-+ if (!spin_trylock(&die_lock)) {
-+ if (cpu == die_owner)
-+ /* nested oops. should stop eventually */;
-+ else
-+ spin_lock(&die_lock);
-+ }
-+ die_nest_count++;
-+ die_owner = cpu;
-+ console_verbose();
-+ bust_spinlocks(1);
-+ return flags;
-+}
-+
-+void __kprobes oops_end(unsigned long flags)
-+{
-+ die_owner = -1;
-+ bust_spinlocks(0);
-+ die_nest_count--;
-+ if (die_nest_count)
-+ /* We still own the lock */
-+ local_irq_restore(flags);
-+ else
-+ /* Nest count reaches zero, release the lock. */
-+ spin_unlock_irqrestore(&die_lock, flags);
-+ if (panic_on_oops)
-+ panic("Fatal exception");
-+ oops_exit();
-+}
-+
-+void __kprobes __die(const char * str, struct pt_regs * regs, long err)
-+{
-+ static int die_counter;
-+ printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
-+#ifdef CONFIG_PREEMPT
-+ printk("PREEMPT ");
-+#endif
-+#ifdef CONFIG_SMP
-+ printk("SMP ");
-+#endif
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+ printk("DEBUG_PAGEALLOC");
-+#endif
-+ printk("\n");
-+ notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
-+ show_registers(regs);
-+ /* Executive summary in case the oops scrolled away */
-+ printk(KERN_ALERT "RIP ");
-+ printk_address(regs->rip);
-+ printk(" RSP <%016lx>\n", regs->rsp);
-+ if (kexec_should_crash(current))
-+ crash_kexec(regs);
-+}
-+
-+void die(const char * str, struct pt_regs * regs, long err)
-+{
-+ unsigned long flags = oops_begin();
-+
-+ if (!user_mode(regs))
-+ report_bug(regs->rip);
-+
-+ __die(str, regs, err);
-+ oops_end(flags);
-+ do_exit(SIGSEGV);
-+}
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
-+{
-+ unsigned long flags = oops_begin();
-+
-+ /*
-+	 * We are in trouble anyway, let's at least try
-+ * to get a message out.
-+ */
-+ printk(str, smp_processor_id());
-+ show_registers(regs);
-+ if (kexec_should_crash(current))
-+ crash_kexec(regs);
-+ if (do_panic || panic_on_oops)
-+ panic("Non maskable interrupt");
-+ oops_end(flags);
-+ nmi_exit();
-+ local_irq_enable();
-+ do_exit(SIGSEGV);
-+}
-+#endif
-+
-+static void __kprobes do_trap(int trapnr, int signr, char *str,
-+ struct pt_regs * regs, long error_code,
-+ siginfo_t *info)
-+{
-+ struct task_struct *tsk = current;
-+
-+ if (user_mode(regs)) {
-+ /*
-+ * We want error_code and trap_no set for userspace
-+ * faults and kernelspace faults which result in
-+ * die(), but not kernelspace faults which are fixed
-+ * up. die() gives the process no chance to handle
-+ * the signal and notice the kernel fault information,
-+ * so that won't result in polluting the information
-+ * about previously queued, but not yet delivered,
-+ * faults. See also do_general_protection below.
-+ */
-+ tsk->thread.error_code = error_code;
-+ tsk->thread.trap_no = trapnr;
-+
-+ if (exception_trace && unhandled_signal(tsk, signr))
-+ printk(KERN_INFO
-+ "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
-+ tsk->comm, tsk->pid, str,
-+ regs->rip, regs->rsp, error_code);
-+
-+ if (info)
-+ force_sig_info(signr, info, tsk);
-+ else
-+ force_sig(signr, tsk);
-+ return;
-+ }
-+
-+
-+ /* kernel trap */
-+ {
-+ const struct exception_table_entry *fixup;
-+ fixup = search_exception_tables(regs->rip);
-+ if (fixup)
-+ regs->rip = fixup->fixup;
-+ else {
-+ tsk->thread.error_code = error_code;
-+ tsk->thread.trap_no = trapnr;
-+ die(str, regs, error_code);
-+ }
-+ return;
-+ }
-+}
-+
-+#define DO_ERROR(trapnr, signr, str, name) \
-+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+ == NOTIFY_STOP) \
-+ return; \
-+ conditional_sti(regs); \
-+ do_trap(trapnr, signr, str, regs, error_code, NULL); \
-+}
-+
-+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+ siginfo_t info; \
-+ info.si_signo = signr; \
-+ info.si_errno = 0; \
-+ info.si_code = sicode; \
-+ info.si_addr = (void __user *)siaddr; \
-+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+ == NOTIFY_STOP) \
-+ return; \
-+ conditional_sti(regs); \
-+ do_trap(trapnr, signr, str, regs, error_code, &info); \
-+}
-+
-+DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->rip)
-+DO_ERROR( 4, SIGSEGV, "overflow", overflow)
-+DO_ERROR( 5, SIGSEGV, "bounds", bounds)
-+DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
-+DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
-+DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
-+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-+DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
-+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
-+DO_ERROR(18, SIGSEGV, "reserved", reserved)
-+
-+/* Runs on IST stack */
-+asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
-+{
-+ if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
-+ 12, SIGBUS) == NOTIFY_STOP)
-+ return;
-+ preempt_conditional_sti(regs);
-+ do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
-+ preempt_conditional_cli(regs);
-+}
-+
-+asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
-+{
-+ static const char str[] = "double fault";
-+ struct task_struct *tsk = current;
-+
-+	/* Return not checked because a double fault cannot be ignored */
-+ notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
-+
-+ tsk->thread.error_code = error_code;
-+ tsk->thread.trap_no = 8;
-+
-+ /* This is always a kernel trap and never fixable (and thus must
-+ never return). */
-+ for (;;)
-+ die(str, regs, error_code);
-+}
-+
-+asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
-+ long error_code)
-+{
-+ struct task_struct *tsk = current;
-+
-+ conditional_sti(regs);
-+
-+ if (user_mode(regs)) {
-+ tsk->thread.error_code = error_code;
-+ tsk->thread.trap_no = 13;
-+
-+ if (exception_trace && unhandled_signal(tsk, SIGSEGV))
-+ printk(KERN_INFO
-+ "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
-+ tsk->comm, tsk->pid,
-+ regs->rip, regs->rsp, error_code);
-+
-+ force_sig(SIGSEGV, tsk);
-+ return;
-+ }
-+
-+ /* kernel gp */
-+ {
-+ const struct exception_table_entry *fixup;
-+ fixup = search_exception_tables(regs->rip);
-+ if (fixup) {
-+ regs->rip = fixup->fixup;
-+ return;
-+ }
-+
-+ tsk->thread.error_code = error_code;
-+ tsk->thread.trap_no = 13;
-+ if (notify_die(DIE_GPF, "general protection fault", regs,
-+ error_code, 13, SIGSEGV) == NOTIFY_STOP)
-+ return;
-+ die("general protection fault", regs, error_code);
-+ }
-+}
-+
-+static __kprobes void
-+mem_parity_error(unsigned char reason, struct pt_regs * regs)
-+{
-+ printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
-+ reason);
-+ printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
-+
-+ if (panic_on_unrecovered_nmi)
-+ panic("NMI: Not continuing");
-+
-+ printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
-+
-+#if 0 /* XEN */
-+ /* Clear and disable the memory parity error line. */
-+ reason = (reason & 0xf) | 4;
-+ outb(reason, 0x61);
-+#endif /* XEN */
-+}
-+
-+static __kprobes void
-+io_check_error(unsigned char reason, struct pt_regs * regs)
-+{
-+ printk("NMI: IOCK error (debug interrupt?)\n");
-+ show_registers(regs);
-+
-+#if 0 /* XEN */
-+ /* Re-enable the IOCK line, wait for a few seconds */
-+ reason = (reason & 0xf) | 8;
-+ outb(reason, 0x61);
-+ mdelay(2000);
-+ reason &= ~8;
-+ outb(reason, 0x61);
-+#endif /* XEN */
-+}
-+
-+static __kprobes void
-+unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
-+{
-+ printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
-+ reason);
-+ printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
-+
-+ if (panic_on_unrecovered_nmi)
-+ panic("NMI: Not continuing");
-+
-+ printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
-+}
-+
-+/* Runs on IST stack. This code must keep interrupts off all the time.
-+ Nested NMIs are prevented by the CPU. */
-+asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
-+{
-+ unsigned char reason = 0;
-+ int cpu;
-+
-+ cpu = smp_processor_id();
-+
-+ /* Only the BSP gets external NMIs from the system. */
-+ if (!cpu)
-+ reason = get_nmi_reason();
-+
-+ if (!(reason & 0xc0)) {
-+ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
-+ == NOTIFY_STOP)
-+ return;
-+#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
-+ /*
-+ * Ok, so this is none of the documented NMI sources,
-+ * so it must be the NMI watchdog.
-+ */
-+ if (nmi_watchdog_tick(regs,reason))
-+ return;
-+#endif
-+ if (!do_nmi_callback(regs,cpu))
-+ unknown_nmi_error(reason, regs);
-+
-+ return;
-+ }
-+ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
-+ return;
-+
-+ /* AK: following checks seem to be broken on modern chipsets. FIXME */
-+
-+ if (reason & 0x80)
-+ mem_parity_error(reason, regs);
-+ if (reason & 0x40)
-+ io_check_error(reason, regs);
-+}
-+
-+/* runs on IST stack. */
-+asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
-+{
-+ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
-+ return;
-+ }
-+ preempt_conditional_sti(regs);
-+ do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
-+ preempt_conditional_cli(regs);
-+}
-+
-+/* Help handler running on IST stack to switch back to user stack
-+ for scheduling or signal handling. The actual stack switch is done in
-+ entry.S */
-+asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
-+{
-+ struct pt_regs *regs = eregs;
-+ /* Did already sync */
-+ if (eregs == (struct pt_regs *)eregs->rsp)
-+ ;
-+ /* Exception from user space */
-+ else if (user_mode(eregs))
-+ regs = task_pt_regs(current);
-+ /* Exception from kernel and interrupts are enabled. Move to
-+ kernel process stack. */
-+ else if (eregs->eflags & X86_EFLAGS_IF)
-+ regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
-+ if (eregs != regs)
-+ *regs = *eregs;
-+ return regs;
-+}
-+
-+/* runs on IST stack. */
-+asmlinkage void __kprobes do_debug(struct pt_regs * regs,
-+ unsigned long error_code)
-+{
-+ unsigned long condition;
-+ struct task_struct *tsk = current;
-+ siginfo_t info;
-+
-+ get_debugreg(condition, 6);
-+
-+ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
-+ SIGTRAP) == NOTIFY_STOP)
-+ return;
-+
-+ preempt_conditional_sti(regs);
-+
-+ /* Mask out spurious debug traps due to lazy DR7 setting */
-+ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
-+ if (!tsk->thread.debugreg7) {
-+ goto clear_dr7;
-+ }
-+ }
-+
-+ tsk->thread.debugreg6 = condition;
-+
-+ /* Mask out spurious TF errors due to lazy TF clearing */
-+ if (condition & DR_STEP) {
-+ /*
-+ * The TF error should be masked out only if the current
-+ * process is not traced and if the TRAP flag has been set
-+ * previously by a tracing process (condition detected by
-+ * the PT_DTRACE flag); remember that the i386 TRAP flag
-+ * can be modified by the process itself in user mode,
-+ * allowing programs to debug themselves without the ptrace()
-+ * interface.
-+ */
-+ if (!user_mode(regs))
-+ goto clear_TF_reenable;
-+ /*
-+ * Was the TF flag set by a debugger? If so, clear it now,
-+ * so that register information is correct.
-+ */
-+ if (tsk->ptrace & PT_DTRACE) {
-+ regs->eflags &= ~TF_MASK;
-+ tsk->ptrace &= ~PT_DTRACE;
-+ }
-+ }
-+
-+ /* Ok, finally something we can handle */
-+ tsk->thread.trap_no = 1;
-+ tsk->thread.error_code = error_code;
-+ info.si_signo = SIGTRAP;
-+ info.si_errno = 0;
-+ info.si_code = TRAP_BRKPT;
-+ info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
-+ force_sig_info(SIGTRAP, &info, tsk);
-+
-+clear_dr7:
-+ set_debugreg(0UL, 7);
-+ preempt_conditional_cli(regs);
-+ return;
-+
-+clear_TF_reenable:
-+ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
-+ regs->eflags &= ~TF_MASK;
-+ preempt_conditional_cli(regs);
-+}
-+
-+static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
-+{
-+ const struct exception_table_entry *fixup;
-+ fixup = search_exception_tables(regs->rip);
-+ if (fixup) {
-+ regs->rip = fixup->fixup;
-+ return 1;
-+ }
-+ notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
-+ /* Illegal floating point operation in the kernel */
-+ current->thread.trap_no = trapnr;
-+ die(str, regs, 0);
-+ return 0;
-+}
-+
-+/*
-+ * Note that we play around with the 'TS' bit in an attempt to get
-+ * the correct behaviour even in the presence of the asynchronous
-+ * IRQ13 behaviour
-+ */
-+asmlinkage void do_coprocessor_error(struct pt_regs *regs)
-+{
-+ void __user *rip = (void __user *)(regs->rip);
-+ struct task_struct * task;
-+ siginfo_t info;
-+ unsigned short cwd, swd;
-+
-+ conditional_sti(regs);
-+ if (!user_mode(regs) &&
-+ kernel_math_error(regs, "kernel x87 math error", 16))
-+ return;
-+
-+ /*
-+ * Save the info for the exception handler and clear the error.
-+ */
-+ task = current;
-+ save_init_fpu(task);
-+ task->thread.trap_no = 16;
-+ task->thread.error_code = 0;
-+ info.si_signo = SIGFPE;
-+ info.si_errno = 0;
-+ info.si_code = __SI_FAULT;
-+ info.si_addr = rip;
-+ /*
-+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
-+ * status. 0x3f is the exception bits in these regs, 0x200 is the
-+ * C1 reg you need in case of a stack fault, 0x040 is the stack
-+ * fault bit. We should only be taking one exception at a time,
-+ * so if this combination doesn't produce any single exception,
-+ * then we have a bad program that isn't synchronizing its FPU usage
-+ * and it will suffer the consequences since we won't be able to
-+ * fully reproduce the context of the exception
-+ */
-+ cwd = get_fpu_cwd(task);
-+ swd = get_fpu_swd(task);
-+ switch (swd & ~cwd & 0x3f) {
-+ case 0x000:
-+ default:
-+ break;
-+ case 0x001: /* Invalid Op */
-+ /*
-+ * swd & 0x240 == 0x040: Stack Underflow
-+ * swd & 0x240 == 0x240: Stack Overflow
-+ * User must clear the SF bit (0x40) if set
-+ */
-+ info.si_code = FPE_FLTINV;
-+ break;
-+ case 0x002: /* Denormalize */
-+ case 0x010: /* Underflow */
-+ info.si_code = FPE_FLTUND;
-+ break;
-+ case 0x004: /* Zero Divide */
-+ info.si_code = FPE_FLTDIV;
-+ break;
-+ case 0x008: /* Overflow */
-+ info.si_code = FPE_FLTOVF;
-+ break;
-+ case 0x020: /* Precision */
-+ info.si_code = FPE_FLTRES;
-+ break;
-+ }
-+ force_sig_info(SIGFPE, &info, task);
-+}
-+
-+asmlinkage void bad_intr(void)
-+{
-+ printk("bad interrupt");
-+}
-+
-+asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
-+{
-+ void __user *rip = (void __user *)(regs->rip);
-+ struct task_struct * task;
-+ siginfo_t info;
-+ unsigned short mxcsr;
-+
-+ conditional_sti(regs);
-+ if (!user_mode(regs) &&
-+ kernel_math_error(regs, "kernel simd math error", 19))
-+ return;
-+
-+ /*
-+ * Save the info for the exception handler and clear the error.
-+ */
-+ task = current;
-+ save_init_fpu(task);
-+ task->thread.trap_no = 19;
-+ task->thread.error_code = 0;
-+ info.si_signo = SIGFPE;
-+ info.si_errno = 0;
-+ info.si_code = __SI_FAULT;
-+ info.si_addr = rip;
-+ /*
-+ * The SIMD FPU exceptions are handled a little differently, as there
-+ * is only a single status/control register. Thus, to determine which
-+ * unmasked exception was caught we must mask the exception mask bits
-+ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
-+ */
-+ mxcsr = get_fpu_mxcsr(task);
-+ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
-+ case 0x000:
-+ default:
-+ break;
-+ case 0x001: /* Invalid Op */
-+ info.si_code = FPE_FLTINV;
-+ break;
-+ case 0x002: /* Denormalize */
-+ case 0x010: /* Underflow */
-+ info.si_code = FPE_FLTUND;
-+ break;
-+ case 0x004: /* Zero Divide */
-+ info.si_code = FPE_FLTDIV;
-+ break;
-+ case 0x008: /* Overflow */
-+ info.si_code = FPE_FLTOVF;
-+ break;
-+ case 0x020: /* Precision */
-+ info.si_code = FPE_FLTRES;
-+ break;
-+ }
-+ force_sig_info(SIGFPE, &info, task);
-+}
-+
-+asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
-+{
-+}
-+
-+#ifndef CONFIG_XEN
-+asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
-+{
-+}
-+
-+asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
-+{
-+}
-+#endif
-+
-+/*
-+ * 'math_state_restore()' saves the current math information in the
-+ * old math state array, and gets the new ones from the current task
-+ *
-+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
-+ * Don't touch unless you *really* know how it works.
-+ */
-+asmlinkage void math_state_restore(void)
-+{
-+ struct task_struct *me = current;
-+ /* clts(); */ /* 'clts' is done for us by Xen during virtual trap. */
-+
-+ if (!used_math())
-+ init_fpu(me);
-+ restore_fpu_checking(&me->thread.i387.fxsave);
-+ task_thread_info(me)->status |= TS_USEDFPU;
-+ me->fpu_counter++;
-+}
-+
-+
-+/*
-+ * NB. All these are "interrupt gates" (i.e. events_mask is set) because we
-+ * specify <dpl>|4 in the second field.
-+ */
-+static trap_info_t trap_table[] = {
-+ { 0, 0|4, __KERNEL_CS, (unsigned long)divide_error },
-+ { 1, 0|4, __KERNEL_CS, (unsigned long)debug },
-+ { 3, 3|4, __KERNEL_CS, (unsigned long)int3 },
-+ { 4, 3|4, __KERNEL_CS, (unsigned long)overflow },
-+ { 5, 0|4, __KERNEL_CS, (unsigned long)bounds },
-+ { 6, 0|4, __KERNEL_CS, (unsigned long)invalid_op },
-+ { 7, 0|4, __KERNEL_CS, (unsigned long)device_not_available },
-+ { 9, 0|4, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun},
-+ { 10, 0|4, __KERNEL_CS, (unsigned long)invalid_TSS },
-+ { 11, 0|4, __KERNEL_CS, (unsigned long)segment_not_present },
-+ { 12, 0|4, __KERNEL_CS, (unsigned long)stack_segment },
-+ { 13, 0|4, __KERNEL_CS, (unsigned long)general_protection },
-+ { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault },
-+ { 15, 0|4, __KERNEL_CS, (unsigned long)spurious_interrupt_bug },
-+ { 16, 0|4, __KERNEL_CS, (unsigned long)coprocessor_error },
-+ { 17, 0|4, __KERNEL_CS, (unsigned long)alignment_check },
-+#ifdef CONFIG_X86_MCE
-+ { 18, 0|4, __KERNEL_CS, (unsigned long)machine_check },
-+#endif
-+ { 19, 0|4, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
-+#ifdef CONFIG_IA32_EMULATION
-+ { IA32_SYSCALL_VECTOR, 3|4, __KERNEL_CS, (unsigned long)ia32_syscall},
-+#endif
-+ { 0, 0, 0, 0 }
-+};
-+
-+void __init trap_init(void)
-+{
-+ int ret;
-+
-+ ret = HYPERVISOR_set_trap_table(trap_table);
-+
-+ if (ret)
-+		printk("HYPERVISOR_set_trap_table failed: error %d\n",
-+ ret);
-+
-+ /*
-+ * Should be a barrier for any external CPU state.
-+ */
-+ cpu_init();
-+}
-+
-+void smp_trap_init(trap_info_t *trap_ctxt)
-+{
-+ trap_info_t *t = trap_table;
-+
-+ for (t = trap_table; t->address; t++) {
-+ trap_ctxt[t->vector].flags = t->flags;
-+ trap_ctxt[t->vector].cs = t->cs;
-+ trap_ctxt[t->vector].address = t->address;
-+ }
-+}
-+
-+
-+static int __init oops_setup(char *s)
-+{
-+ if (!s)
-+ return -EINVAL;
-+ if (!strcmp(s, "panic"))
-+ panic_on_oops = 1;
-+ return 0;
-+}
-+early_param("oops", oops_setup);
-+
-+static int __init kstack_setup(char *s)
-+{
-+ if (!s)
-+ return -EINVAL;
-+ kstack_depth_to_print = simple_strtoul(s,NULL,0);
-+ return 0;
-+}
-+early_param("kstack", kstack_setup);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/vsyscall-xen.c ubuntu-gutsy-xen/arch/x86_64/kernel/vsyscall-xen.c
---- ubuntu-gutsy/arch/x86_64/kernel/vsyscall-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/vsyscall-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,370 @@
-+/*
-+ * linux/arch/x86_64/kernel/vsyscall.c
-+ *
-+ * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
-+ * Copyright 2003 Andi Kleen, SuSE Labs.
-+ *
-+ * Thanks to hpa@transmeta.com for some useful hint.
-+ * Special thanks to Ingo Molnar for his early experience with
-+ * a different vsyscall implementation for Linux/IA32 and for the name.
-+ *
-+ * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
-+ * at virtual address -10Mbyte+1024bytes etc... There are at max 4
-+ * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
-+ * jumping out of line if necessary. We cannot add more with this
-+ * mechanism because older kernels won't return -ENOSYS.
-+ * If we want more than four we need a vDSO.
-+ *
-+ * Note: the concept clashes with user mode linux. If you use UML and
-+ * want per guest time just set the kernel.vsyscall64 sysctl to 0.
-+ */
-+
-+#include <linux/time.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/timer.h>
-+#include <linux/seqlock.h>
-+#include <linux/jiffies.h>
-+#include <linux/sysctl.h>
-+#include <linux/clocksource.h>
-+#include <linux/getcpu.h>
-+#include <linux/cpu.h>
-+#include <linux/smp.h>
-+#include <linux/notifier.h>
-+
-+#include <asm/vsyscall.h>
-+#include <asm/pgtable.h>
-+#include <asm/page.h>
-+#include <asm/unistd.h>
-+#include <asm/fixmap.h>
-+#include <asm/errno.h>
-+#include <asm/io.h>
-+#include <asm/segment.h>
-+#include <asm/desc.h>
-+#include <asm/topology.h>
-+
-+#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
-+#define __syscall_clobber "r11","rcx","memory"
-+#define __pa_vsymbol(x) \
-+ ({unsigned long v; \
-+ extern char __vsyscall_0; \
-+ asm("" : "=r" (v) : "0" (x)); \
-+ ((v - VSYSCALL_FIRST_PAGE) + __pa_symbol(&__vsyscall_0)); })
-+
-+/*
-+ * vsyscall_gtod_data contains data that is :
-+ * - readonly from vsyscalls
-+ * - written by timer interrupt or sysctl (/proc/sys/kernel/vsyscall64)
-+ * Try to keep this structure as small as possible to avoid cache line ping pongs
-+ */
-+struct vsyscall_gtod_data_t {
-+ seqlock_t lock;
-+
-+ /* open coded 'struct timespec' */
-+ time_t wall_time_sec;
-+ u32 wall_time_nsec;
-+
-+ int sysctl_enabled;
-+ struct timezone sys_tz;
-+ struct { /* extract of a clocksource struct */
-+ cycle_t (*vread)(void);
-+ cycle_t cycle_last;
-+ cycle_t mask;
-+ u32 mult;
-+ u32 shift;
-+ } clock;
-+};
-+int __vgetcpu_mode __section_vgetcpu_mode;
-+
-+struct vsyscall_gtod_data_t __vsyscall_gtod_data __section_vsyscall_gtod_data =
-+{
-+ .lock = SEQLOCK_UNLOCKED,
-+ .sysctl_enabled = 1,
-+};
-+
-+void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
-+{
-+ unsigned long flags;
-+
-+ write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
-+ /* copy vsyscall data */
-+ vsyscall_gtod_data.clock.vread = clock->vread;
-+ vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
-+ vsyscall_gtod_data.clock.mask = clock->mask;
-+ vsyscall_gtod_data.clock.mult = clock->mult;
-+ vsyscall_gtod_data.clock.shift = clock->shift;
-+ vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
-+ vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
-+ vsyscall_gtod_data.sys_tz = sys_tz;
-+ write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
-+}
-+
-+/* RED-PEN may want to readd seq locking, but then the variable should be
-+ * write-once.
-+ */
-+static __always_inline void do_get_tz(struct timezone * tz)
-+{
-+ *tz = __vsyscall_gtod_data.sys_tz;
-+}
-+
-+static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
-+{
-+ int ret;
-+ asm volatile("vsysc2: syscall"
-+ : "=a" (ret)
-+ : "0" (__NR_gettimeofday),"D" (tv),"S" (tz)
-+ : __syscall_clobber );
-+ return ret;
-+}
-+
-+static __always_inline long time_syscall(long *t)
-+{
-+ long secs;
-+ asm volatile("vsysc1: syscall"
-+ : "=a" (secs)
-+ : "0" (__NR_time),"D" (t) : __syscall_clobber);
-+ return secs;
-+}
-+
-+static __always_inline void do_vgettimeofday(struct timeval * tv)
-+{
-+ cycle_t now, base, mask, cycle_delta;
-+ unsigned seq;
-+ unsigned long mult, shift, nsec;
-+ cycle_t (*vread)(void);
-+ do {
-+ seq = read_seqbegin(&__vsyscall_gtod_data.lock);
-+
-+ vread = __vsyscall_gtod_data.clock.vread;
-+ if (unlikely(!__vsyscall_gtod_data.sysctl_enabled || !vread)) {
-+ gettimeofday(tv,NULL);
-+ return;
-+ }
-+ now = vread();
-+ base = __vsyscall_gtod_data.clock.cycle_last;
-+ mask = __vsyscall_gtod_data.clock.mask;
-+ mult = __vsyscall_gtod_data.clock.mult;
-+ shift = __vsyscall_gtod_data.clock.shift;
-+
-+ tv->tv_sec = __vsyscall_gtod_data.wall_time_sec;
-+ nsec = __vsyscall_gtod_data.wall_time_nsec;
-+ } while (read_seqretry(&__vsyscall_gtod_data.lock, seq));
-+
-+ /* calculate interval: */
-+ cycle_delta = (now - base) & mask;
-+ /* convert to nsecs: */
-+ nsec += (cycle_delta * mult) >> shift;
-+
-+ while (nsec >= NSEC_PER_SEC) {
-+ tv->tv_sec += 1;
-+ nsec -= NSEC_PER_SEC;
-+ }
-+ tv->tv_usec = nsec / NSEC_PER_USEC;
-+}
-+
-+int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
-+{
-+ if (tv)
-+ do_vgettimeofday(tv);
-+ if (tz)
-+ do_get_tz(tz);
-+ return 0;
-+}
-+
-+/* This will break when the xtime seconds get inaccurate, but that is
-+ * unlikely */
-+time_t __vsyscall(1) vtime(time_t *t)
-+{
-+ struct timeval tv;
-+ time_t result;
-+ if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
-+ return time_syscall(t);
-+
-+ vgettimeofday(&tv, 0);
-+ result = tv.tv_sec;
-+ if (t)
-+ *t = result;
-+ return result;
-+}
-+
-+/* Fast way to get current CPU and node.
-+ This helps to do per node and per CPU caches in user space.
-+ The result is not guaranteed without CPU affinity, but usually
-+ works out because the scheduler tries to keep a thread on the same
-+ CPU.
-+
-+ tcache must point to a two element sized long array.
-+ All arguments can be NULL. */
-+long __vsyscall(2)
-+vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
-+{
-+ unsigned int dummy, p;
-+ unsigned long j = 0;
-+
-+ /* Fast cache - only recompute value once per jiffies and avoid
-+ relatively costly rdtscp/cpuid otherwise.
-+ This works because the scheduler usually keeps the process
-+ on the same CPU and this syscall doesn't guarantee its
-+ results anyways.
-+ We do this here because otherwise user space would do it on
-+ its own in a likely inferior way (no access to jiffies).
-+ If you don't like it pass NULL. */
-+ if (tcache && tcache->blob[0] == (j = __jiffies)) {
-+ p = tcache->blob[1];
-+ } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
-+ /* Load per CPU data from RDTSCP */
-+ rdtscp(dummy, dummy, p);
-+ } else {
-+ /* Load per CPU data from GDT */
-+ asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
-+ }
-+ if (tcache) {
-+ tcache->blob[0] = j;
-+ tcache->blob[1] = p;
-+ }
-+ if (cpu)
-+ *cpu = p & 0xfff;
-+ if (node)
-+ *node = p >> 12;
-+ return 0;
-+}
-+
-+long __vsyscall(3) venosys_1(void)
-+{
-+ return -ENOSYS;
-+}
-+
-+#ifdef CONFIG_SYSCTL
-+
-+#define SYSCALL 0x050f
-+#define NOP2 0x9090
-+
-+/*
-+ * NOP out syscall in vsyscall page when not needed.
-+ */
-+static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
-+ void __user *buffer, size_t *lenp, loff_t *ppos)
-+{
-+ extern u16 vsysc1, vsysc2;
-+ u16 __iomem *map1;
-+ u16 __iomem *map2;
-+ int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
-+ if (!write)
-+ return ret;
-+ /* gcc has some trouble with __va(__pa()), so just do it this
-+ way. */
-+ map1 = ioremap(__pa_vsymbol(&vsysc1), 2);
-+ if (!map1)
-+ return -ENOMEM;
-+ map2 = ioremap(__pa_vsymbol(&vsysc2), 2);
-+ if (!map2) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+ if (!vsyscall_gtod_data.sysctl_enabled) {
-+ writew(SYSCALL, map1);
-+ writew(SYSCALL, map2);
-+ } else {
-+ writew(NOP2, map1);
-+ writew(NOP2, map2);
-+ }
-+ iounmap(map2);
-+out:
-+ iounmap(map1);
-+ return ret;
-+}
-+
-+static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
-+ void __user *oldval, size_t __user *oldlenp,
-+ void __user *newval, size_t newlen)
-+{
-+ return -ENOSYS;
-+}
-+
-+static ctl_table kernel_table2[] = {
-+ { .ctl_name = 99, .procname = "vsyscall64",
-+ .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .strategy = vsyscall_sysctl_nostrat,
-+ .proc_handler = vsyscall_sysctl_change },
-+ {}
-+};
-+
-+static ctl_table kernel_root_table2[] = {
-+ { .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
-+ .child = kernel_table2 },
-+ {}
-+};
-+
-+#endif
-+
-+/* Assume __initcall executes before all user space. Hopefully kmod
-+ doesn't violate that. We'll find out if it does. */
-+static void __cpuinit vsyscall_set_cpu(int cpu)
-+{
-+ unsigned long d;
-+ unsigned long node = 0;
-+#ifdef CONFIG_NUMA
-+ node = cpu_to_node[cpu];
-+#endif
-+ if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
-+ write_rdtscp_aux((node << 12) | cpu);
-+
-+ /* Store cpu number in limit so that it can be loaded quickly
-+ in user space in vgetcpu.
-+ 12 bits for the CPU and 8 bits for the node. */
-+ d = 0x0f40000000000ULL;
-+ d |= cpu;
-+ d |= (node & 0xf) << 12;
-+ d |= (node >> 4) << 48;
-+ HYPERVISOR_update_descriptor(virt_to_machine(&cpu_gdt(cpu)[GDT_ENTRY_PER_CPU]), d);
-+}
-+
-+static void __cpuinit cpu_vsyscall_init(void *arg)
-+{
-+ /* preemption should be already off */
-+ vsyscall_set_cpu(raw_smp_processor_id());
-+}
-+
-+static int __cpuinit
-+cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
-+{
-+ long cpu = (long)arg;
-+ if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-+ smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
-+ return NOTIFY_DONE;
-+}
-+
-+static void __init map_vsyscall(void)
-+{
-+ extern char __vsyscall_0;
-+ unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
-+
-+ /* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
-+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
-+}
-+
-+static int __init vsyscall_init(void)
-+{
-+ BUG_ON(((unsigned long) &vgettimeofday !=
-+ VSYSCALL_ADDR(__NR_vgettimeofday)));
-+ BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
-+ BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
-+ BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
-+ map_vsyscall();
-+#ifdef CONFIG_XEN
-+	vsyscall_gtod_data.sysctl_enabled = 0; /* disable vgettimeofday() */
-+ if (boot_cpu_has(X86_FEATURE_RDTSCP))
-+ vgetcpu_mode = VGETCPU_RDTSCP;
-+ else
-+ vgetcpu_mode = VGETCPU_LSL;
-+#endif
-+#ifdef CONFIG_SYSCTL
-+ register_sysctl_table(kernel_root_table2);
-+#endif
-+ on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
-+ hotcpu_notifier(cpu_vsyscall_notifier, 0);
-+ return 0;
-+}
-+
-+__initcall(vsyscall_init);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/kernel/xen_entry.S ubuntu-gutsy-xen/arch/x86_64/kernel/xen_entry.S
---- ubuntu-gutsy/arch/x86_64/kernel/xen_entry.S 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/kernel/xen_entry.S 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,40 @@
-+/*
-+ * Copied from arch/xen/i386/kernel/entry.S
-+ */
-+/* Offsets into shared_info_t. */
-+#define evtchn_upcall_pending /* 0 */
-+#define evtchn_upcall_mask 1
-+
-+#define sizeof_vcpu_shift 6
-+
-+#ifdef CONFIG_SMP
-+//#define preempt_disable(reg) incl threadinfo_preempt_count(reg)
-+//#define preempt_enable(reg) decl threadinfo_preempt_count(reg)
-+#define preempt_disable(reg)
-+#define preempt_enable(reg)
-+#define XEN_GET_VCPU_INFO(reg) preempt_disable(%rbp) ; \
-+ movq %gs:pda_cpunumber,reg ; \
-+ shl $32, reg ; \
-+ shr $32-sizeof_vcpu_shift,reg ; \
-+ addq HYPERVISOR_shared_info,reg
-+#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%rbp) ; \
-+#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
-+#else
-+#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
-+#define XEN_PUT_VCPU_INFO(reg)
-+#define XEN_PUT_VCPU_INFO_fixup
-+#endif
-+
-+#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
-+#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
-+#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
-+ XEN_LOCKED_BLOCK_EVENTS(reg) ; \
-+ XEN_PUT_VCPU_INFO(reg)
-+#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
-+ XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
-+ XEN_PUT_VCPU_INFO(reg)
-+#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
-+
-+VGCF_IN_SYSCALL = (1<<8)
-+
-+
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/Makefile ubuntu-gutsy-xen/arch/x86_64/Makefile
---- ubuntu-gutsy/arch/x86_64/Makefile 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/x86_64/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -36,6 +36,10 @@
- $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
- cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
-
-+cppflags-$(CONFIG_XEN) += \
-+ -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION)
-+CPPFLAGS += $(cppflags-y)
-+
- cflags-y += -m64
- cflags-y += -mno-red-zone
- cflags-y += -mcmodel=kernel
-@@ -86,6 +90,21 @@
- PHONY += bzImage bzlilo install archmrproper \
- fdimage fdimage144 fdimage288 isoimage archclean
-
-+ifdef CONFIG_XEN
-+CPPFLAGS := -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
-+head-y := arch/x86_64/kernel/head-xen.o arch/x86_64/kernel/head64-xen.o arch/x86_64/kernel/init_task.o
-+LDFLAGS_vmlinux := -e startup_64
-+boot := arch/i386/boot-xen
-+.PHONY: vmlinuz
-+#Default target when executing "make"
-+all: vmlinuz
-+
-+vmlinuz: vmlinux
-+ $(Q)$(MAKE) $(build)=$(boot) $@
-+
-+install:
-+ $(Q)$(MAKE) $(build)=$(boot) XENGUEST=$(XENGUEST) $@
-+else
- #Default target when executing "make"
- all: bzImage
-
-@@ -106,6 +125,7 @@
-
- install:
- $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
-+endif
-
- archclean:
- $(Q)$(MAKE) $(clean)=$(boot)
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/mm/fault-xen.c ubuntu-gutsy-xen/arch/x86_64/mm/fault-xen.c
---- ubuntu-gutsy/arch/x86_64/mm/fault-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/mm/fault-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,706 @@
-+/*
-+ * linux/arch/x86-64/mm/fault.c
-+ *
-+ * Copyright (C) 1995 Linus Torvalds
-+ * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
-+ */
-+
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/tty.h>
-+#include <linux/vt_kern.h> /* For unblank_screen() */
-+#include <linux/compiler.h>
-+#include <linux/vmalloc.h>
-+#include <linux/module.h>
-+#include <linux/kprobes.h>
-+#include <linux/uaccess.h>
-+#include <linux/kdebug.h>
-+
-+#include <asm/system.h>
-+#include <asm/pgalloc.h>
-+#include <asm/smp.h>
-+#include <asm/tlbflush.h>
-+#include <asm/proto.h>
-+#include <asm-generic/sections.h>
-+
-+/* Page fault error code bits */
-+#define PF_PROT (1<<0) /* or no page found */
-+#define PF_WRITE (1<<1)
-+#define PF_USER (1<<2)
-+#define PF_RSVD (1<<3)
-+#define PF_INSTR (1<<4)
-+
-+static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-+
-+/* Hook to register for page fault notifications */
-+int register_page_fault_notifier(struct notifier_block *nb)
-+{
-+ vmalloc_sync_all();
-+ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-+}
-+EXPORT_SYMBOL_GPL(register_page_fault_notifier);
-+
-+int unregister_page_fault_notifier(struct notifier_block *nb)
-+{
-+ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
-+}
-+EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
-+
-+static inline int notify_page_fault(struct pt_regs *regs, long err)
-+{
-+ struct die_args args = {
-+ .regs = regs,
-+ .str = "page fault",
-+ .err = err,
-+ .trapnr = 14,
-+ .signr = SIGSEGV
-+ };
-+ return atomic_notifier_call_chain(&notify_page_fault_chain,
-+ DIE_PAGE_FAULT, &args);
-+}
-+
-+/* Sometimes the CPU reports invalid exceptions on prefetch.
-+ Check that here and ignore.
-+ Opcode checker based on code by Richard Brunner */
-+static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
-+ unsigned long error_code)
-+{
-+ unsigned char *instr;
-+ int scan_more = 1;
-+ int prefetch = 0;
-+ unsigned char *max_instr;
-+
-+	/* If it was an exec fault, ignore */
-+ if (error_code & PF_INSTR)
-+ return 0;
-+
-+ instr = (unsigned char __user *)convert_rip_to_linear(current, regs);
-+ max_instr = instr + 15;
-+
-+ if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
-+ return 0;
-+
-+ while (scan_more && instr < max_instr) {
-+ unsigned char opcode;
-+ unsigned char instr_hi;
-+ unsigned char instr_lo;
-+
-+ if (probe_kernel_address(instr, opcode))
-+ break;
-+
-+ instr_hi = opcode & 0xf0;
-+ instr_lo = opcode & 0x0f;
-+ instr++;
-+
-+ switch (instr_hi) {
-+ case 0x20:
-+ case 0x30:
-+ /* Values 0x26,0x2E,0x36,0x3E are valid x86
-+ prefixes. In long mode, the CPU will signal
-+ invalid opcode if some of these prefixes are
-+ present so we will never get here anyway */
-+ scan_more = ((instr_lo & 7) == 0x6);
-+ break;
-+
-+ case 0x40:
-+ /* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes
-+ Need to figure out under what instruction mode the
-+ instruction was issued ... */
-+ /* Could check the LDT for lm, but for now it's good
-+ enough to assume that long mode only uses well known
-+ segments or kernel. */
-+ scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
-+ break;
-+
-+ case 0x60:
-+ /* 0x64 thru 0x67 are valid prefixes in all modes. */
-+ scan_more = (instr_lo & 0xC) == 0x4;
-+ break;
-+ case 0xF0:
-+ /* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
-+ scan_more = !instr_lo || (instr_lo>>1) == 1;
-+ break;
-+ case 0x00:
-+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
-+ scan_more = 0;
-+ if (probe_kernel_address(instr, opcode))
-+ break;
-+ prefetch = (instr_lo == 0xF) &&
-+ (opcode == 0x0D || opcode == 0x18);
-+ break;
-+ default:
-+ scan_more = 0;
-+ break;
-+ }
-+ }
-+ return prefetch;
-+}
-+
-+static int bad_address(void *p)
-+{
-+ unsigned long dummy;
-+ return probe_kernel_address((unsigned long *)p, dummy);
-+}
-+
-+void dump_pagetable(unsigned long address)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+
-+ pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
-+ pgd += pgd_index(address);
-+ if (bad_address(pgd)) goto bad;
-+ printk("PGD %lx ", pgd_val(*pgd));
-+ if (!pgd_present(*pgd)) goto ret;
-+
-+ pud = pud_offset(pgd, address);
-+ if (bad_address(pud)) goto bad;
-+ printk("PUD %lx ", pud_val(*pud));
-+ if (!pud_present(*pud)) goto ret;
-+
-+ pmd = pmd_offset(pud, address);
-+ if (bad_address(pmd)) goto bad;
-+ printk("PMD %lx ", pmd_val(*pmd));
-+ if (!pmd_present(*pmd)) goto ret;
-+
-+ pte = pte_offset_kernel(pmd, address);
-+ if (bad_address(pte)) goto bad;
-+ printk("PTE %lx", pte_val(*pte));
-+ret:
-+ printk("\n");
-+ return;
-+bad:
-+ printk("BAD\n");
-+}
-+
-+static const char errata93_warning[] =
-+KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
-+KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
-+KERN_ERR "******* Please consider a BIOS update.\n"
-+KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
-+
-+/* Workaround for K8 erratum #93 & buggy BIOS.
-+ BIOS SMM functions are required to use a specific workaround
-+ to avoid corruption of the 64bit RIP register on C stepping K8.
-+ A lot of BIOS that didn't get tested properly miss this.
-+ The OS sees this as a page fault with the upper 32bits of RIP cleared.
-+ Try to work around it here.
-+ Note we only handle faults in kernel here. */
-+
-+static int is_errata93(struct pt_regs *regs, unsigned long address)
-+{
-+ static int warned;
-+ if (address != regs->rip)
-+ return 0;
-+ if ((address >> 32) != 0)
-+ return 0;
-+ address |= 0xffffffffUL << 32;
-+ if ((address >= (u64)_stext && address <= (u64)_etext) ||
-+ (address >= MODULES_VADDR && address <= MODULES_END)) {
-+ if (!warned) {
-+ printk(errata93_warning);
-+ warned = 1;
-+ }
-+ regs->rip = address;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+int unhandled_signal(struct task_struct *tsk, int sig)
-+{
-+ if (is_init(tsk))
-+ return 1;
-+ if (tsk->ptrace & PT_PTRACED)
-+ return 0;
-+ return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
-+ (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
-+}
-+
-+static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
-+ unsigned long error_code)
-+{
-+ unsigned long flags = oops_begin();
-+ struct task_struct *tsk;
-+
-+ printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
-+ current->comm, address);
-+ dump_pagetable(address);
-+ tsk = current;
-+ tsk->thread.cr2 = address;
-+ tsk->thread.trap_no = 14;
-+ tsk->thread.error_code = error_code;
-+ __die("Bad pagetable", regs, error_code);
-+ oops_end(flags);
-+ do_exit(SIGKILL);
-+}
-+
-+/*
-+ * Handle a fault on the vmalloc area
-+ *
-+ * This assumes no large pages in there.
-+ */
-+static int vmalloc_fault(unsigned long address)
-+{
-+ pgd_t *pgd, *pgd_ref;
-+ pud_t *pud, *pud_ref;
-+ pmd_t *pmd, *pmd_ref;
-+ pte_t *pte, *pte_ref;
-+
-+ /* Copy kernel mappings over when needed. This can also
-+ happen within a race in page table update. In the later
-+	   happen within a race in page table update. In the latter
-+
-+ /* On Xen the line below does not always work. Needs investigating! */
-+ /*pgd = pgd_offset(current->mm ?: &init_mm, address);*/
-+ pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
-+ pgd += pgd_index(address);
-+ pgd_ref = pgd_offset_k(address);
-+ if (pgd_none(*pgd_ref))
-+ return -1;
-+ if (pgd_none(*pgd))
-+ set_pgd(pgd, *pgd_ref);
-+ else
-+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-+
-+ /* Below here mismatches are bugs because these lower tables
-+ are shared */
-+
-+ pud = pud_offset(pgd, address);
-+ pud_ref = pud_offset(pgd_ref, address);
-+ if (pud_none(*pud_ref))
-+ return -1;
-+ if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
-+ BUG();
-+ pmd = pmd_offset(pud, address);
-+ pmd_ref = pmd_offset(pud_ref, address);
-+ if (pmd_none(*pmd_ref))
-+ return -1;
-+ if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
-+ BUG();
-+ pte_ref = pte_offset_kernel(pmd_ref, address);
-+ if (!pte_present(*pte_ref))
-+ return -1;
-+ pte = pte_offset_kernel(pmd, address);
-+ /* Don't use pte_page here, because the mappings can point
-+ outside mem_map, and the NUMA hash lookup cannot handle
-+ that. */
-+ if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
-+ BUG();
-+ return 0;
-+}
-+
-+int page_fault_trace = 0;
-+int exception_trace = 1;
-+
-+
-+#define MEM_VERBOSE 1
-+
-+#ifdef MEM_VERBOSE
-+#define MEM_LOG(_f, _a...) \
-+ printk("fault.c:[%d]-> " _f "\n", \
-+ __LINE__ , ## _a )
-+#else
-+#define MEM_LOG(_f, _a...) ((void)0)
-+#endif
-+
-+static int spurious_fault(struct pt_regs *regs,
-+ unsigned long address,
-+ unsigned long error_code)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+
-+#ifdef CONFIG_XEN
-+ /* Faults in hypervisor area are never spurious. */
-+ if ((address >= HYPERVISOR_VIRT_START) &&
-+ (address < HYPERVISOR_VIRT_END))
-+ return 0;
-+#endif
-+
-+ /* Reserved-bit violation or user access to kernel space? */
-+ if (error_code & (PF_RSVD|PF_USER))
-+ return 0;
-+
-+ pgd = init_mm.pgd + pgd_index(address);
-+ if (!pgd_present(*pgd))
-+ return 0;
-+
-+ pud = pud_offset(pgd, address);
-+ if (!pud_present(*pud))
-+ return 0;
-+
-+ pmd = pmd_offset(pud, address);
-+ if (!pmd_present(*pmd))
-+ return 0;
-+
-+ pte = pte_offset_kernel(pmd, address);
-+ if (!pte_present(*pte))
-+ return 0;
-+ if ((error_code & PF_WRITE) && !pte_write(*pte))
-+ return 0;
-+ if ((error_code & PF_INSTR) && (pte_val(*pte) & _PAGE_NX))
-+ return 0;
-+
-+ return 1;
-+}
-+
-+/*
-+ * This routine handles page faults. It determines the address,
-+ * and the problem, and then passes it off to one of the appropriate
-+ * routines.
-+ */
-+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
-+ unsigned long error_code)
-+{
-+ struct task_struct *tsk;
-+ struct mm_struct *mm;
-+ struct vm_area_struct * vma, * prev_vma;
-+ unsigned long address;
-+ const struct exception_table_entry *fixup;
-+ int write;
-+ unsigned long flags;
-+ siginfo_t info;
-+
-+ if (!user_mode(regs))
-+ error_code &= ~PF_USER; /* means kernel */
-+
-+ tsk = current;
-+ mm = tsk->mm;
-+ prefetchw(&mm->mmap_sem);
-+
-+ /* get the address */
-+ address = current_vcpu_info()->arch.cr2;
-+
-+ info.si_code = SEGV_MAPERR;
-+
-+
-+ /*
-+ * We fault-in kernel-space virtual memory on-demand. The
-+ * 'reference' page table is init_mm.pgd.
-+ *
-+ * NOTE! We MUST NOT take any locks for this case. We may
-+ * be in an interrupt or a critical region, and should
-+ * only copy the information from the master page table,
-+ * nothing more.
-+ *
-+ * This verifies that the fault happens in kernel space
-+ * (error_code & 4) == 0, and that the fault was not a
-+ * protection error (error_code & 9) == 0.
-+ */
-+ if (unlikely(address >= TASK_SIZE64)) {
-+ /*
-+ * Don't check for the module range here: its PML4
-+ * is always initialized because it's shared with the main
-+ * kernel text. Only vmalloc may need PML4 syncups.
-+ */
-+ if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
-+ ((address >= VMALLOC_START && address < VMALLOC_END))) {
-+ if (vmalloc_fault(address) >= 0)
-+ return;
-+ }
-+ /* Can take a spurious fault if mapping changes R/O -> R/W. */
-+ if (spurious_fault(regs, address, error_code))
-+ return;
-+ if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
-+ return;
-+ /*
-+ * Don't take the mm semaphore here. If we fixup a prefetch
-+ * fault we could otherwise deadlock.
-+ */
-+ goto bad_area_nosemaphore;
-+ }
-+
-+ if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
-+ return;
-+
-+ if (likely(regs->eflags & X86_EFLAGS_IF))
-+ local_irq_enable();
-+
-+ if (unlikely(page_fault_trace))
-+ printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
-+ regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
-+
-+ if (unlikely(error_code & PF_RSVD))
-+ pgtable_bad(address, regs, error_code);
-+
-+ /*
-+ * If we're in an interrupt or have no user
-+ * context, we must not take the fault..
-+ */
-+ if (unlikely(in_atomic() || !mm))
-+ goto bad_area_nosemaphore;
-+
-+ again:
-+ /* When running in the kernel we expect faults to occur only to
-+ * addresses in user space. All other faults represent errors in the
-+ * kernel and should generate an OOPS. Unfortunately, in the case of an
-+ * erroneous fault occurring in a code path which already holds mmap_sem
-+ * we will deadlock attempting to validate the fault against the
-+ * address space. Luckily the kernel only validly references user
-+ * space from well defined areas of code, which are listed in the
-+ * exceptions table.
-+ *
-+ * As the vast majority of faults will be valid we will only perform
-+ * the source reference check when there is a possibility of a deadlock.
-+ * Attempt to lock the address space, if we cannot we then validate the
-+ * source. If this is invalid we can skip the address space check,
-+ * thus avoiding the deadlock.
-+ */
-+ if (!down_read_trylock(&mm->mmap_sem)) {
-+ if ((error_code & PF_USER) == 0 &&
-+ !search_exception_tables(regs->rip))
-+ goto bad_area_nosemaphore;
-+ down_read(&mm->mmap_sem);
-+ }
-+
-+ vma = find_vma(mm, address);
-+ if (!vma)
-+ goto bad_area;
-+ if (likely(vma->vm_start <= address))
-+ goto good_area;
-+ if (!(vma->vm_flags & VM_GROWSDOWN))
-+ goto bad_area;
-+ if (error_code & 4) {
-+ /* Allow userspace just enough access below the stack pointer
-+ * to let the 'enter' instruction work.
-+ */
-+ if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
-+ goto bad_area;
-+ }
-+ /*
-+ * find_vma_prev is just a bit slower, because it cannot
-+ * use the mmap_cache, so we run it only in the growsdown
-+ * slow path and we leave find_vma in the fast path.
-+ */
-+ find_vma_prev(current->mm, address, &prev_vma);
-+ if (expand_stack(vma, address))
-+ goto bad_area;
-+/*
-+ * Ok, we have a good vm_area for this memory access, so
-+ * we can handle it..
-+ */
-+good_area:
-+ info.si_code = SEGV_ACCERR;
-+ write = 0;
-+ switch (error_code & (PF_PROT|PF_WRITE)) {
-+ default: /* 3: write, present */
-+ /* fall through */
-+ case PF_WRITE: /* write, not present */
-+ if (!(vma->vm_flags & VM_WRITE))
-+ goto bad_area;
-+ write++;
-+ break;
-+ case PF_PROT: /* read, present */
-+ goto bad_area;
-+ case 0: /* read, not present */
-+ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
-+ goto bad_area;
-+ }
-+
-+ /*
-+ * If for any reason at all we couldn't handle the fault,
-+ * make sure we exit gracefully rather than endlessly redo
-+ * the fault.
-+ */
-+ switch (handle_mm_fault(mm, vma, address, write)) {
-+ case VM_FAULT_MINOR:
-+ tsk->min_flt++;
-+ break;
-+ case VM_FAULT_MAJOR:
-+ tsk->maj_flt++;
-+ break;
-+ case VM_FAULT_SIGBUS:
-+ goto do_sigbus;
-+ default:
-+ goto out_of_memory;
-+ }
-+
-+ up_read(&mm->mmap_sem);
-+ return;
-+
-+/*
-+ * Something tried to access memory that isn't in our memory map..
-+ * Fix it, but check if it's kernel or user first..
-+ */
-+bad_area:
-+ up_read(&mm->mmap_sem);
-+
-+bad_area_nosemaphore:
-+ /* User mode accesses just cause a SIGSEGV */
-+ if (error_code & PF_USER) {
-+
-+ /*
-+ * It's possible to have interrupts off here.
-+ */
-+ local_irq_enable();
-+
-+ if (is_prefetch(regs, address, error_code))
-+ return;
-+
-+		/* Work around K8 erratum #100: K8 in compat mode
-+ occasionally jumps to illegal addresses >4GB. We
-+ catch this here in the page fault handler because
-+ these addresses are not reachable. Just detect this
-+ case and return. Any code segment in LDT is
-+ compatibility mode. */
-+ if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
-+ (address >> 32))
-+ return;
-+
-+ if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
-+ printk(
-+ "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
-+ tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
-+ tsk->comm, tsk->pid, address, regs->rip,
-+ regs->rsp, error_code);
-+ }
-+
-+ tsk->thread.cr2 = address;
-+ /* Kernel addresses are always protection faults */
-+ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
-+ tsk->thread.trap_no = 14;
-+ info.si_signo = SIGSEGV;
-+ info.si_errno = 0;
-+ /* info.si_code has been set above */
-+ info.si_addr = (void __user *)address;
-+ force_sig_info(SIGSEGV, &info, tsk);
-+ return;
-+ }
-+
-+no_context:
-+
-+ /* Are we prepared to handle this kernel fault? */
-+ fixup = search_exception_tables(regs->rip);
-+ if (fixup) {
-+ regs->rip = fixup->fixup;
-+ return;
-+ }
-+
-+ /*
-+ * Hall of shame of CPU/BIOS bugs.
-+ */
-+
-+ if (is_prefetch(regs, address, error_code))
-+ return;
-+
-+ if (is_errata93(regs, address))
-+ return;
-+
-+/*
-+ * Oops. The kernel tried to access some bad page. We'll have to
-+ * terminate things with extreme prejudice.
-+ */
-+
-+ flags = oops_begin();
-+
-+ if (address < PAGE_SIZE)
-+ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
-+ else
-+ printk(KERN_ALERT "Unable to handle kernel paging request");
-+ printk(" at %016lx RIP: \n" KERN_ALERT,address);
-+ printk_address(regs->rip);
-+ dump_pagetable(address);
-+ tsk->thread.cr2 = address;
-+ tsk->thread.trap_no = 14;
-+ tsk->thread.error_code = error_code;
-+ __die("Oops", regs, error_code);
-+ /* Executive summary in case the body of the oops scrolled away */
-+ printk(KERN_EMERG "CR2: %016lx\n", address);
-+ oops_end(flags);
-+ do_exit(SIGKILL);
-+
-+/*
-+ * We ran out of memory, or some other thing happened to us that made
-+ * us unable to handle the page fault gracefully.
-+ */
-+out_of_memory:
-+ up_read(&mm->mmap_sem);
-+ if (is_init(current)) {
-+ yield();
-+ goto again;
-+ }
-+ printk("VM: killing process %s\n", tsk->comm);
-+ if (error_code & 4)
-+ do_exit(SIGKILL);
-+ goto no_context;
-+
-+do_sigbus:
-+ up_read(&mm->mmap_sem);
-+
-+ /* Kernel mode? Handle exceptions or die */
-+ if (!(error_code & PF_USER))
-+ goto no_context;
-+
-+ tsk->thread.cr2 = address;
-+ tsk->thread.error_code = error_code;
-+ tsk->thread.trap_no = 14;
-+ info.si_signo = SIGBUS;
-+ info.si_errno = 0;
-+ info.si_code = BUS_ADRERR;
-+ info.si_addr = (void __user *)address;
-+ force_sig_info(SIGBUS, &info, tsk);
-+ return;
-+}
-+
-+DEFINE_SPINLOCK(pgd_lock);
-+LIST_HEAD(pgd_list);
-+
-+void vmalloc_sync_all(void)
-+{
-+ /* Note that races in the updates of insync and start aren't
-+ problematic:
-+ insync can only get set bits added, and updates to start are only
-+ improving performance (without affecting correctness if undone). */
-+ static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-+ static unsigned long start = VMALLOC_START & PGDIR_MASK;
-+ unsigned long address;
-+
-+ for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
-+ if (!test_bit(pgd_index(address), insync)) {
-+ const pgd_t *pgd_ref = pgd_offset_k(address);
-+ struct page *page;
-+
-+ if (pgd_none(*pgd_ref))
-+ continue;
-+ spin_lock(&pgd_lock);
-+ list_for_each_entry(page, &pgd_list, lru) {
-+ pgd_t *pgd;
-+ pgd = (pgd_t *)page_address(page) + pgd_index(address);
-+ if (pgd_none(*pgd))
-+ set_pgd(pgd, *pgd_ref);
-+ else
-+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-+ }
-+ spin_unlock(&pgd_lock);
-+ set_bit(pgd_index(address), insync);
-+ }
-+ if (address == start)
-+ start = address + PGDIR_SIZE;
-+ }
-+ /* Check that there is no need to do the same for the modules area. */
-+ BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
-+ BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
-+ (__START_KERNEL & PGDIR_MASK)));
-+}
-+
-+static int __init enable_pagefaulttrace(char *str)
-+{
-+ page_fault_trace = 1;
-+ return 1;
-+}
-+__setup("pagefaulttrace", enable_pagefaulttrace);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/mm/init-xen.c ubuntu-gutsy-xen/arch/x86_64/mm/init-xen.c
---- ubuntu-gutsy/arch/x86_64/mm/init-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/mm/init-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,1204 @@
-+/*
-+ * linux/arch/x86_64/mm/init.c
-+ *
-+ * Copyright (C) 1995 Linus Torvalds
-+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
-+ * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
-+ *
-+ * Jun Nakajima <jun.nakajima@intel.com>
-+ * Modified for Xen.
-+ */
-+
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/swap.h>
-+#include <linux/smp.h>
-+#include <linux/init.h>
-+#include <linux/pagemap.h>
-+#include <linux/bootmem.h>
-+#include <linux/proc_fs.h>
-+#include <linux/pci.h>
-+#include <linux/pfn.h>
-+#include <linux/poison.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/module.h>
-+#include <linux/memory_hotplug.h>
-+#include <linux/nmi.h>
-+
-+#include <asm/processor.h>
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/pgalloc.h>
-+#include <asm/dma.h>
-+#include <asm/fixmap.h>
-+#include <asm/e820.h>
-+#include <asm/apic.h>
-+#include <asm/tlb.h>
-+#include <asm/mmu_context.h>
-+#include <asm/proto.h>
-+#include <asm/smp.h>
-+#include <asm/sections.h>
-+
-+#include <xen/features.h>
-+
-+#ifndef Dprintk
-+#define Dprintk(x...)
-+#endif
-+
-+const struct dma_mapping_ops* dma_ops;
-+EXPORT_SYMBOL(dma_ops);
-+
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+unsigned int __kernel_page_user;
-+EXPORT_SYMBOL(__kernel_page_user);
-+#endif
-+
-+int after_bootmem;
-+
-+extern unsigned long *contiguous_bitmap;
-+
-+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-+extern unsigned long start_pfn;
-+
-+/*
-+ * Use this until direct mapping is established, i.e. before __va() is
-+ * available in init_memory_mapping().
-+ */
-+
-+#define addr_to_page(addr, page) \
-+ (addr) &= PHYSICAL_PAGE_MASK; \
-+ (page) = ((unsigned long *) ((unsigned long) \
-+ (((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) + \
-+ __START_KERNEL_map)))
-+
-+static void __meminit early_make_page_readonly(void *va, unsigned int feature)
-+{
-+ unsigned long addr, _va = (unsigned long)va;
-+ pte_t pte, *ptep;
-+ unsigned long *page = (unsigned long *) init_level4_pgt;
-+
-+ if (xen_feature(feature))
-+ return;
-+
-+ if (after_bootmem) {
-+ make_page_readonly(va, feature);
-+ return;
-+ }
-+
-+ addr = (unsigned long) page[pgd_index(_va)];
-+ addr_to_page(addr, page);
-+
-+ addr = page[pud_index(_va)];
-+ addr_to_page(addr, page);
-+
-+ addr = page[pmd_index(_va)];
-+ addr_to_page(addr, page);
-+
-+ ptep = (pte_t *) &page[pte_index(_va)];
-+
-+ pte.pte = ptep->pte & ~_PAGE_RW;
-+ if (HYPERVISOR_update_va_mapping(_va, pte, 0))
-+ BUG();
-+}
-+
-+static void __make_page_readonly(void *va)
-+{
-+ pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
-+ unsigned long addr = (unsigned long) va;
-+
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ ptep = pte_offset_kernel(pmd, addr);
-+
-+ pte.pte = ptep->pte & ~_PAGE_RW;
-+ if (HYPERVISOR_update_va_mapping(addr, pte, 0))
-+ xen_l1_entry_update(ptep, pte); /* fallback */
-+
-+ if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
-+ __make_page_readonly(__va(pte_pfn(pte) << PAGE_SHIFT));
-+}
-+
-+static void __make_page_writable(void *va)
-+{
-+ pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
-+ unsigned long addr = (unsigned long) va;
-+
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ ptep = pte_offset_kernel(pmd, addr);
-+
-+ pte.pte = ptep->pte | _PAGE_RW;
-+ if (HYPERVISOR_update_va_mapping(addr, pte, 0))
-+ xen_l1_entry_update(ptep, pte); /* fallback */
-+
-+ if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
-+ __make_page_writable(__va(pte_pfn(pte) << PAGE_SHIFT));
-+}
-+
-+void make_page_readonly(void *va, unsigned int feature)
-+{
-+ if (!xen_feature(feature))
-+ __make_page_readonly(va);
-+}
-+
-+void make_page_writable(void *va, unsigned int feature)
-+{
-+ if (!xen_feature(feature))
-+ __make_page_writable(va);
-+}
-+
-+void make_pages_readonly(void *va, unsigned nr, unsigned int feature)
-+{
-+ if (xen_feature(feature))
-+ return;
-+
-+ while (nr-- != 0) {
-+ __make_page_readonly(va);
-+ va = (void*)((unsigned long)va + PAGE_SIZE);
-+ }
-+}
-+
-+void make_pages_writable(void *va, unsigned nr, unsigned int feature)
-+{
-+ if (xen_feature(feature))
-+ return;
-+
-+ while (nr-- != 0) {
-+ __make_page_writable(va);
-+ va = (void*)((unsigned long)va + PAGE_SIZE);
-+ }
-+}
-+
-+/*
-+ * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
-+ * physical space so we can cache the place of the first one and move
-+ * around without checking the pgd every time.
-+ */
-+
-+void show_mem(void)
-+{
-+ long i, total = 0, reserved = 0;
-+ long shared = 0, cached = 0;
-+ pg_data_t *pgdat;
-+ struct page *page;
-+
-+ printk(KERN_INFO "Mem-info:\n");
-+ show_free_areas();
-+ printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-+
-+ for_each_online_pgdat(pgdat) {
-+ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-+ /* this loop can take a while with 256 GB and 4k pages
-+ so update the NMI watchdog */
-+ if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) {
-+ touch_nmi_watchdog();
-+ }
-+ if (!pfn_valid(pgdat->node_start_pfn + i))
-+ continue;
-+ page = pfn_to_page(pgdat->node_start_pfn + i);
-+ total++;
-+ if (PageReserved(page))
-+ reserved++;
-+ else if (PageSwapCache(page))
-+ cached++;
-+ else if (page_count(page))
-+ shared += page_count(page) - 1;
-+ }
-+ }
-+ printk(KERN_INFO "%lu pages of RAM\n", total);
-+ printk(KERN_INFO "%lu reserved pages\n",reserved);
-+ printk(KERN_INFO "%lu pages shared\n",shared);
-+ printk(KERN_INFO "%lu pages swap cached\n",cached);
-+}
-+
-+static __init void *spp_getpage(void)
-+{
-+ void *ptr;
-+ if (after_bootmem)
-+ ptr = (void *) get_zeroed_page(GFP_ATOMIC);
-+ else if (start_pfn < table_end) {
-+ ptr = __va(start_pfn << PAGE_SHIFT);
-+ start_pfn++;
-+ memset(ptr, 0, PAGE_SIZE);
-+ } else
-+ ptr = alloc_bootmem_pages(PAGE_SIZE);
-+ if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
-+ panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
-+
-+ Dprintk("spp_getpage %p\n", ptr);
-+ return ptr;
-+}
-+
-+#define pgd_offset_u(address) (__user_pgd(init_level4_pgt) + pgd_index(address))
-+#define pud_offset_u(address) (level3_user_pgt + pud_index(address))
-+
-+static __init void set_pte_phys(unsigned long vaddr,
-+ unsigned long phys, pgprot_t prot, int user_mode)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte, new_pte;
-+
-+ Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
-+
-+ pgd = (user_mode ? pgd_offset_u(vaddr) : pgd_offset_k(vaddr));
-+ if (pgd_none(*pgd)) {
-+ printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
-+ return;
-+ }
-+ pud = (user_mode ? pud_offset_u(vaddr) : pud_offset(pgd, vaddr));
-+ if (pud_none(*pud)) {
-+ pmd = (pmd_t *) spp_getpage();
-+ make_page_readonly(pmd, XENFEAT_writable_page_tables);
-+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
-+ if (pmd != pmd_offset(pud, 0)) {
-+ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
-+ return;
-+ }
-+ }
-+ pmd = pmd_offset(pud, vaddr);
-+ if (pmd_none(*pmd)) {
-+ pte = (pte_t *) spp_getpage();
-+ make_page_readonly(pte, XENFEAT_writable_page_tables);
-+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-+ if (pte != pte_offset_kernel(pmd, 0)) {
-+ printk("PAGETABLE BUG #02!\n");
-+ return;
-+ }
-+ }
-+ if (pgprot_val(prot))
-+ new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
-+ else
-+ new_pte = __pte(0);
-+
-+ pte = pte_offset_kernel(pmd, vaddr);
-+ if (!pte_none(*pte) &&
-+ pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
-+ pte_ERROR(*pte);
-+ set_pte(pte, new_pte);
-+
-+ /*
-+ * It's enough to flush this one mapping.
-+ * (PGE mappings get flushed as well)
-+ */
-+ __flush_tlb_one(vaddr);
-+}
-+
-+static __init void set_pte_phys_ma(unsigned long vaddr,
-+ unsigned long phys, pgprot_t prot)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte, new_pte;
-+
-+ Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
-+
-+ pgd = pgd_offset_k(vaddr);
-+ if (pgd_none(*pgd)) {
-+ printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
-+ return;
-+ }
-+ pud = pud_offset(pgd, vaddr);
-+ if (pud_none(*pud)) {
-+
-+ pmd = (pmd_t *) spp_getpage();
-+ make_page_readonly(pmd, XENFEAT_writable_page_tables);
-+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
-+ if (pmd != pmd_offset(pud, 0)) {
-+ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
-+ return;
-+ }
-+ }
-+ pmd = pmd_offset(pud, vaddr);
-+ if (pmd_none(*pmd)) {
-+ pte = (pte_t *) spp_getpage();
-+ make_page_readonly(pte, XENFEAT_writable_page_tables);
-+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-+ if (pte != pte_offset_kernel(pmd, 0)) {
-+ printk("PAGETABLE BUG #02!\n");
-+ return;
-+ }
-+ }
-+ new_pte = pfn_pte_ma(phys >> PAGE_SHIFT, prot);
-+
-+ pte = pte_offset_kernel(pmd, vaddr);
-+ set_pte(pte, new_pte);
-+
-+ /*
-+ * It's enough to flush this one mapping.
-+ * (PGE mappings get flushed as well)
-+ */
-+ __flush_tlb_one(vaddr);
-+}
-+
-+/* NOTE: this is meant to be run only at boot */
-+void __init
-+__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
-+{
-+ unsigned long address = __fix_to_virt(idx);
-+
-+ if (idx >= __end_of_fixed_addresses) {
-+ printk("Invalid __set_fixmap\n");
-+ return;
-+ }
-+ switch (idx) {
-+ case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
-+ set_pte_phys(address, phys, prot, 0);
-+ set_pte_phys(address, phys, prot, 1);
-+ break;
-+ default:
-+ set_pte_phys_ma(address, phys, prot);
-+ break;
-+ }
-+}
-+
-+unsigned long __meminitdata table_start, table_end;
-+
-+static __meminit void *alloc_static_page(unsigned long *phys)
-+{
-+ unsigned long va = (start_pfn << PAGE_SHIFT) + __START_KERNEL_map;
-+
-+ if (after_bootmem) {
-+ void *adr = (void *)get_zeroed_page(GFP_ATOMIC);
-+
-+ *phys = __pa(adr);
-+ return adr;
-+ }
-+
-+ *phys = start_pfn << PAGE_SHIFT;
-+ start_pfn++;
-+ memset((void *)va, 0, PAGE_SIZE);
-+ return (void *)va;
-+}
-+
-+#define PTE_SIZE PAGE_SIZE
-+
-+static inline void __set_pte(pte_t *dst, pte_t val)
-+{
-+ *dst = val;
-+}
-+
-+static inline int make_readonly(unsigned long paddr)
-+{
-+ extern char __vsyscall_0;
-+ int readonly = 0;
-+
-+ /* Make new page tables read-only. */
-+ if (!xen_feature(XENFEAT_writable_page_tables)
-+ && (paddr >= (table_start << PAGE_SHIFT))
-+ && (paddr < (table_end << PAGE_SHIFT)))
-+ readonly = 1;
-+ /* Make old page tables read-only. */
-+ if (!xen_feature(XENFEAT_writable_page_tables)
-+ && (paddr >= (xen_start_info->pt_base - __START_KERNEL_map))
-+ && (paddr < (start_pfn << PAGE_SHIFT)))
-+ readonly = 1;
-+
-+ /*
-+ * No need for writable mapping of kernel image. This also ensures that
-+ * page and descriptor tables embedded inside don't have writable
-+ * mappings. Exclude the vsyscall area here, allowing alternative
-+ * instruction patching to work.
-+ */
-+ if ((paddr >= __pa_symbol(&_text)) && (paddr < __pa_symbol(&_end))
-+ && !(paddr >= __pa_symbol(&__vsyscall_0)
-+ && paddr < __pa_symbol(&__vsyscall_0) + PAGE_SIZE))
-+ readonly = 1;
-+
-+ return readonly;
-+}
-+
-+#ifndef CONFIG_XEN
-+/* Must run before zap_low_mappings */
-+__meminit void *early_ioremap(unsigned long addr, unsigned long size)
-+{
-+ unsigned long vaddr;
-+ pmd_t *pmd, *last_pmd;
-+ int i, pmds;
-+
-+ pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
-+ vaddr = __START_KERNEL_map;
-+ pmd = level2_kernel_pgt;
-+ last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
-+ for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
-+ for (i = 0; i < pmds; i++) {
-+ if (pmd_present(pmd[i]))
-+ goto next;
-+ }
-+ vaddr += addr & ~PMD_MASK;
-+ addr &= PMD_MASK;
-+ for (i = 0; i < pmds; i++, addr += PMD_SIZE)
-+ set_pmd(pmd + i,__pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
-+ __flush_tlb();
-+ return (void *)vaddr;
-+ next:
-+ ;
-+ }
-+ printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
-+ return NULL;
-+}
-+
-+/* To avoid virtual aliases later */
-+__meminit void early_iounmap(void *addr, unsigned long size)
-+{
-+ unsigned long vaddr;
-+ pmd_t *pmd;
-+ int i, pmds;
-+
-+ vaddr = (unsigned long)addr;
-+ pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
-+ pmd = level2_kernel_pgt + pmd_index(vaddr);
-+ for (i = 0; i < pmds; i++)
-+ pmd_clear(pmd + i);
-+ __flush_tlb();
-+}
-+#endif
-+
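-+/*
-+ * Fill the PMD entries covering [address, end) of the direct mapping with
-+ * freshly allocated 4k PTE pages. PTEs beyond the mapped range are left
-+ * clear, and page-table/kernel-image frames are mapped read-only as
-+ * decided by make_readonly().
-+ */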
-+static void __meminit
-+phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
-+{
-+ int i = pmd_index(address);
-+
-+ for (; i < PTRS_PER_PMD; i++) {
-+ unsigned long pte_phys;
-+ pmd_t *pmd = pmd_page + pmd_index(address);
-+ pte_t *pte, *pte_save;
-+ int k;
-+
-+ if (address >= end) {
-+ if (!after_bootmem)
-+ for (; i < PTRS_PER_PMD; i++, pmd++)
-+ set_pmd(pmd, __pmd(0));
-+ break;
-+ }
-+
-+ if (pmd_val(*pmd))
-+ continue;
-+
-+ pte = alloc_static_page(&pte_phys);
-+ pte_save = pte;
-+ for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
-+ unsigned long pteval = address | _PAGE_NX | _KERNPG_TABLE;
-+
-+ if (address >= (after_bootmem
-+ ? end
-+ : xen_start_info->nr_pages << PAGE_SHIFT))
-+ pteval = 0;
-+ else if (make_readonly(address))
-+ pteval &= ~_PAGE_RW;
-+ __set_pte(pte, __pte(pteval & __supported_pte_mask));
-+ }
-+ pte = pte_save;
-+ early_make_page_readonly(pte, XENFEAT_writable_page_tables);
-+ set_pmd(pmd, __pmd(pte_phys | _KERNPG_TABLE));
-+ }
-+}
-+
-+static void __meminit
-+phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
-+{
-+ pmd_t *pmd = pmd_offset(pud,0);
-+ spin_lock(&init_mm.page_table_lock);
-+ phys_pmd_init(pmd, address, end);
-+ spin_unlock(&init_mm.page_table_lock);
-+ __flush_tlb_all();
-+}
-+
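-+/*
-+ * Populate the PUD entries covering [addr, end): allocate a PMD page for
-+ * each empty entry (updating existing ones in place) and let
-+ * phys_pmd_init() fill it.
-+ */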
-+static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
-+{
-+ int i = pud_index(addr);
-+
-+ for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
-+ unsigned long pmd_phys;
-+ pud_t *pud = pud_page + pud_index(addr);
-+ pmd_t *pmd;
-+
-+ if (addr >= end)
-+ break;
-+
-+ if (pud_val(*pud)) {
-+ phys_pmd_update(pud, addr, end);
-+ continue;
-+ }
-+
-+ pmd = alloc_static_page(&pmd_phys);
-+ early_make_page_readonly(pmd, XENFEAT_writable_page_tables);
-+ spin_lock(&init_mm.page_table_lock);
-+ set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
-+ phys_pmd_init(pmd, addr, end);
-+ spin_unlock(&init_mm.page_table_lock);
-+ }
-+ __flush_tlb();
-+}
-+
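-+/*
-+ * Build the kernel's own initial page tables from the pte page Xen
-+ * constructed for us, make every level read-only, and pin the kernel and
-+ * user PGDs when the hypervisor does not provide writable page tables.
-+ */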
-+void __init xen_init_pt(void)
-+{
-+ unsigned long addr, *page;
-+
-+ /* Find the initial pte page that was built for us. */
-+ page = (unsigned long *)xen_start_info->pt_base;
-+ addr = page[pgd_index(__START_KERNEL_map)];
-+ addr_to_page(addr, page);
-+ addr = page[pud_index(__START_KERNEL_map)];
-+ addr_to_page(addr, page);
-+
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ /* On Xen 3.0.2 and older we may need to explicitly specify _PAGE_USER
-+ in kernel PTEs. We check that here. */
-+ if (HYPERVISOR_xen_version(XENVER_version, NULL) <= 0x30000) {
-+ unsigned long *pg;
-+ pte_t pte;
-+
-+ /* Mess with the initial mapping of page 0. It's not needed. */
-+ BUILD_BUG_ON(__START_KERNEL <= __START_KERNEL_map);
-+ addr = page[pmd_index(__START_KERNEL_map)];
-+ addr_to_page(addr, pg);
-+ pte.pte = pg[pte_index(__START_KERNEL_map)];
-+ BUG_ON(!(pte.pte & _PAGE_PRESENT));
-+
-+ /* If _PAGE_USER isn't set, we obviously do not need it. */
-+ if (pte.pte & _PAGE_USER) {
-+ /* _PAGE_USER is needed, but is it set implicitly? */
-+ pte.pte &= ~_PAGE_USER;
-+ if ((HYPERVISOR_update_va_mapping(__START_KERNEL_map,
-+ pte, 0) != 0) ||
-+ !(pg[pte_index(__START_KERNEL_map)] & _PAGE_USER))
-+ /* We need to explicitly specify _PAGE_USER. */
-+ __kernel_page_user = _PAGE_USER;
-+ }
-+ }
-+#endif
-+
-+ /* Construct mapping of initial pte page in our own directories. */
-+ init_level4_pgt[pgd_index(__START_KERNEL_map)] =
-+ __pgd(__pa_symbol(level3_kernel_pgt) | _PAGE_TABLE);
-+ level3_kernel_pgt[pud_index(__START_KERNEL_map)] =
-+ __pud(__pa_symbol(level2_kernel_pgt) | _PAGE_TABLE);
-+ memcpy(level2_kernel_pgt, page, PAGE_SIZE);
-+
-+ __user_pgd(init_level4_pgt)[pgd_index(VSYSCALL_START)] =
-+ __pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE);
-+
-+ early_make_page_readonly(init_level4_pgt,
-+ XENFEAT_writable_page_tables);
-+ early_make_page_readonly(__user_pgd(init_level4_pgt),
-+ XENFEAT_writable_page_tables);
-+ early_make_page_readonly(level3_kernel_pgt,
-+ XENFEAT_writable_page_tables);
-+ early_make_page_readonly(level3_user_pgt,
-+ XENFEAT_writable_page_tables);
-+ early_make_page_readonly(level2_kernel_pgt,
-+ XENFEAT_writable_page_tables);
-+
-+ if (!xen_feature(XENFEAT_writable_page_tables)) {
-+ xen_pgd_pin(__pa_symbol(init_level4_pgt));
-+ xen_pgd_pin(__pa_symbol(__user_pgd(init_level4_pgt)));
-+ }
-+}
-+
-+static void __init extend_init_mapping(unsigned long tables_space)
-+{
-+ unsigned long va = __START_KERNEL_map;
-+ unsigned long phys, addr, *pte_page;
-+ pmd_t *pmd;
-+ pte_t *pte, new_pte;
-+ unsigned long *page = (unsigned long *)init_level4_pgt;
-+
-+ addr = page[pgd_index(va)];
-+ addr_to_page(addr, page);
-+ addr = page[pud_index(va)];
-+ addr_to_page(addr, page);
-+
-+ /* Kill mapping of low 1MB. */
-+ while (va < (unsigned long)&_text) {
-+ HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
-+ va += PAGE_SIZE;
-+ }
-+
-+ /* Ensure init mappings cover kernel text/data and initial tables. */
-+ while (va < (__START_KERNEL_map
-+ + (start_pfn << PAGE_SHIFT)
-+ + tables_space)) {
-+ pmd = (pmd_t *)&page[pmd_index(va)];
-+ if (pmd_none(*pmd)) {
-+ pte_page = alloc_static_page(&phys);
-+ early_make_page_readonly(
-+ pte_page, XENFEAT_writable_page_tables);
-+ set_pmd(pmd, __pmd(phys | _KERNPG_TABLE));
-+ } else {
-+ addr = page[pmd_index(va)];
-+ addr_to_page(addr, pte_page);
-+ }
-+ pte = (pte_t *)&pte_page[pte_index(va)];
-+ if (pte_none(*pte)) {
-+ new_pte = pfn_pte(
-+ (va - __START_KERNEL_map) >> PAGE_SHIFT,
-+ __pgprot(_KERNPG_TABLE));
-+ xen_l1_entry_update(pte, new_pte);
-+ }
-+ va += PAGE_SIZE;
-+ }
-+
-+ /* Finally, blow away any spurious initial mappings. */
-+ while (1) {
-+ pmd = (pmd_t *)&page[pmd_index(va)];
-+ if (pmd_none(*pmd))
-+ break;
-+ HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
-+ va += PAGE_SIZE;
-+ }
-+}
-+
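-+/*
-+ * Estimate how many PUD/PMD/PTE pages the direct mapping up to 'end' will
-+ * need, extend the initial mapping to cover them, and record the reserved
-+ * range in table_start/table_end.
-+ */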
-+static void __init find_early_table_space(unsigned long end)
-+{
-+ unsigned long puds, pmds, ptes, tables;
-+
-+ puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-+ pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-+ ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
-+
-+ tables = round_up(puds * 8, PAGE_SIZE) +
-+ round_up(pmds * 8, PAGE_SIZE) +
-+ round_up(ptes * 8, PAGE_SIZE);
-+
-+ extend_init_mapping(tables);
-+
-+ table_start = start_pfn;
-+ table_end = table_start + (tables>>PAGE_SHIFT);
-+
-+ early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
-+ end, table_start << PAGE_SHIFT,
-+ (table_start << PAGE_SHIFT) + tables);
-+}
-+
-+static void xen_finish_init_mapping(void)
-+{
-+ unsigned long i, start, end;
-+
-+ /* Re-vector virtual addresses pointing into the initial
-+ mapping to the just-established permanent ones. */
-+ xen_start_info = __va(__pa(xen_start_info));
-+ xen_start_info->pt_base = (unsigned long)
-+ __va(__pa(xen_start_info->pt_base));
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ phys_to_machine_mapping =
-+ __va(__pa(xen_start_info->mfn_list));
-+ xen_start_info->mfn_list = (unsigned long)
-+ phys_to_machine_mapping;
-+ }
-+ if (xen_start_info->mod_start)
-+ xen_start_info->mod_start = (unsigned long)
-+ __va(__pa(xen_start_info->mod_start));
-+
-+ /* Destroy the Xen-created mappings beyond the kernel image as
-+ * well as the temporary mappings created above. Prevents
-+ * overlap with modules area (if init mapping is very big).
-+ */
-+ start = PAGE_ALIGN((unsigned long)_end);
-+ end = __START_KERNEL_map + (table_end << PAGE_SHIFT);
-+ for (; start < end; start += PAGE_SIZE)
-+ WARN_ON(HYPERVISOR_update_va_mapping(
-+ start, __pte_ma(0), 0));
-+
-+ /* Allocate pte's for initial fixmaps from 'start_pfn' allocator. */
-+ table_end = ~0UL;
-+
-+ /*
-+ * Prefetch pte's for the bt_ioremap() area. It gets used before the
-+ * boot-time allocator is online, so allocate-on-demand would fail.
-+ */
-+ for (i = FIX_BTMAP_END; i <= FIX_BTMAP_BEGIN; i++)
-+ __set_fixmap(i, 0, __pgprot(0));
-+
-+ /* Switch to the real shared_info page, and clear the dummy page. */
-+ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
-+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-+ memset(empty_zero_page, 0, sizeof(empty_zero_page));
-+
-+ /* Set up mapping of lowest 1MB of physical memory. */
-+ for (i = 0; i < NR_FIX_ISAMAPS; i++)
-+ if (is_initial_xendomain())
-+ set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
-+ else
-+ __set_fixmap(FIX_ISAMAP_BEGIN - i,
-+ virt_to_mfn(empty_zero_page)
-+ << PAGE_SHIFT,
-+ PAGE_KERNEL_RO);
-+
-+ /* Disable the 'start_pfn' allocator. */
-+ table_end = start_pfn;
-+}
-+
-+/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
-+ This runs before bootmem is initialized and gets pages directly from the
-+ physical memory. To access them they are temporarily mapped. */
-+void __meminit init_memory_mapping(unsigned long start, unsigned long end)
-+{
-+ unsigned long next;
-+
-+ Dprintk("init_memory_mapping\n");
-+
-+ /*
-+ * Find space for the kernel direct mapping tables.
-+ * Later we should allocate these tables in the local node of the memory
-+ * mapped. Unfortunately this is done currently before the nodes are
-+ * discovered.
-+ */
-+ if (!after_bootmem)
-+ find_early_table_space(end);
-+
-+ start = (unsigned long)__va(start);
-+ end = (unsigned long)__va(end);
-+
-+ for (; start < end; start = next) {
-+ unsigned long pud_phys;
-+ pgd_t *pgd = pgd_offset_k(start);
-+ pud_t *pud;
-+
-+ if (after_bootmem) {
-+ pud = pud_offset(pgd, start & PGDIR_MASK);
-+ make_page_readonly(pud, XENFEAT_writable_page_tables);
-+ pud_phys = __pa(pud);
-+ } else {
-+ pud = alloc_static_page(&pud_phys);
-+ early_make_page_readonly(pud, XENFEAT_writable_page_tables);
-+ }
-+ next = start + PGDIR_SIZE;
-+ if (next > end)
-+ next = end;
-+ phys_pud_init(pud, __pa(start), __pa(next));
-+ if (!after_bootmem)
-+ set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
-+ }
-+
-+ if (!after_bootmem) {
-+ BUG_ON(start_pfn != table_end);
-+ xen_finish_init_mapping();
-+ }
-+
-+ __flush_tlb_all();
-+}
-+
-+#ifndef CONFIG_NUMA
-+void __init paging_init(void)
-+{
-+ unsigned long max_zone_pfns[MAX_NR_ZONES];
-+
-+ memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-+ max_zone_pfns[ZONE_DMA] = end_pfn;
-+ max_zone_pfns[ZONE_DMA32] = end_pfn;
-+ max_zone_pfns[ZONE_NORMAL] = end_pfn;
-+
-+ memory_present(0, 0, end_pfn);
-+ sparse_init();
-+ free_area_init_nodes(max_zone_pfns);
-+
-+ init_mm.context.pinned = 1;
-+}
-+#endif
-+
-+/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
-+ from the CPU leading to inconsistent cache lines. address and size
-+ must be aligned to 2MB boundaries.
-+ Does nothing when the mapping doesn't exist. */
-+void __init clear_kernel_mapping(unsigned long address, unsigned long size)
-+{
-+ unsigned long end = address + size;
-+
-+ BUG_ON(address & ~LARGE_PAGE_MASK);
-+ BUG_ON(size & ~LARGE_PAGE_MASK);
-+
-+ for (; address < end; address += LARGE_PAGE_SIZE) {
-+ pgd_t *pgd = pgd_offset_k(address);
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ if (pgd_none(*pgd))
-+ continue;
-+ pud = pud_offset(pgd, address);
-+ if (pud_none(*pud))
-+ continue;
-+ pmd = pmd_offset(pud, address);
-+ if (!pmd || pmd_none(*pmd))
-+ continue;
-+ if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
-+ /* Could handle this, but it should not happen currently. */
-+ printk(KERN_ERR
-+ "clear_kernel_mapping: mapping has been split. will leak memory\n");
-+ pmd_ERROR(*pmd);
-+ }
-+ set_pmd(pmd, __pmd(0));
-+ }
-+ __flush_tlb_all();
-+}
-+
-+/*
-+ * Memory hotplug specific functions
-+ */
-+void online_page(struct page *page)
-+{
-+ ClearPageReserved(page);
-+ init_page_count(page);
-+ __free_page(page);
-+ totalram_pages++;
-+ num_physpages++;
-+}
-+
-+#ifdef CONFIG_MEMORY_HOTPLUG
-+/*
-+ * Memory is always added to the NORMAL zone. This means you will never get
-+ * additional DMA/DMA32 memory.
-+ */
-+int arch_add_memory(int nid, u64 start, u64 size)
-+{
-+ struct pglist_data *pgdat = NODE_DATA(nid);
-+ struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
-+ unsigned long start_pfn = start >> PAGE_SHIFT;
-+ unsigned long nr_pages = size >> PAGE_SHIFT;
-+ int ret;
-+
-+ init_memory_mapping(start, (start + size -1));
-+
-+ ret = __add_pages(zone, start_pfn, nr_pages);
-+ if (ret)
-+ goto error;
-+
-+ return ret;
-+error:
-+ printk("%s: Problem encountered in __add_pages!\n", __func__);
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(arch_add_memory);
-+
-+int remove_memory(u64 start, u64 size)
-+{
-+ return -EINVAL;
-+}
-+EXPORT_SYMBOL_GPL(remove_memory);
-+
-+#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
-+int memory_add_physaddr_to_nid(u64 start)
-+{
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
-+#endif
-+
-+#endif /* CONFIG_MEMORY_HOTPLUG */
-+
-+#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
-+/*
-+ * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
-+ * just online the pages.
-+ */
-+int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
-+{
-+ int err = -EIO;
-+ unsigned long pfn;
-+ unsigned long total = 0, mem = 0;
-+ for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
-+ if (pfn_valid(pfn)) {
-+ online_page(pfn_to_page(pfn));
-+ err = 0;
-+ mem++;
-+ }
-+ total++;
-+ }
-+ if (!err) {
-+ z->spanned_pages += total;
-+ z->present_pages += mem;
-+ z->zone_pgdat->node_spanned_pages += total;
-+ z->zone_pgdat->node_present_pages += mem;
-+ }
-+ return err;
-+}
-+#endif
-+
-+static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
-+ kcore_vsyscall;
-+
-+void __init mem_init(void)
-+{
-+ long codesize, reservedpages, datasize, initsize;
-+ unsigned long pfn;
-+
-+ contiguous_bitmap = alloc_bootmem_low_pages(
-+ (end_pfn + 2*BITS_PER_LONG) >> 3);
-+ BUG_ON(!contiguous_bitmap);
-+ memset(contiguous_bitmap, 0, (end_pfn + 2*BITS_PER_LONG) >> 3);
-+
-+ pci_iommu_alloc();
-+
-+ /* clear the zero-page */
-+ memset(empty_zero_page, 0, PAGE_SIZE);
-+
-+ reservedpages = 0;
-+
-+ /* this will put all low memory onto the freelists */
-+#ifdef CONFIG_NUMA
-+ totalram_pages = numa_free_all_bootmem();
-+#else
-+ totalram_pages = free_all_bootmem();
-+#endif
-+ /* XEN: init and count pages outside initial allocation. */
-+ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
-+ ClearPageReserved(pfn_to_page(pfn));
-+ init_page_count(pfn_to_page(pfn));
-+ totalram_pages++;
-+ }
-+ reservedpages = end_pfn - totalram_pages -
-+ absent_pages_in_range(0, end_pfn);
-+
-+ after_bootmem = 1;
-+
-+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
-+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
-+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
-+
-+ /* Register memory areas for /proc/kcore */
-+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
-+ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
-+ VMALLOC_END-VMALLOC_START);
-+ kclist_add(&kcore_kernel, &_stext, _end - _stext);
-+ kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
-+ kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
-+ VSYSCALL_END - VSYSCALL_START);
-+
-+ printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
-+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-+ end_pfn << (PAGE_SHIFT-10),
-+ codesize >> 10,
-+ reservedpages << (PAGE_SHIFT-10),
-+ datasize >> 10,
-+ initsize >> 10);
-+}
-+
-+void free_init_pages(char *what, unsigned long begin, unsigned long end)
-+{
-+ unsigned long addr;
-+
-+ if (begin >= end)
-+ return;
-+
-+ printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
-+ ClearPageReserved(virt_to_page(addr));
-+ init_page_count(virt_to_page(addr));
-+ memset((void *)(addr & ~(PAGE_SIZE-1)),
-+ POISON_FREE_INITMEM, PAGE_SIZE);
-+ if (addr >= __START_KERNEL_map) {
-+ /* make_readonly() reports all kernel addresses. */
-+ __make_page_writable(__va(__pa(addr)));
-+ change_page_attr_addr(addr, 1, __pgprot(0));
-+ }
-+ free_page(addr);
-+ totalram_pages++;
-+ }
-+ if (addr > __START_KERNEL_map)
-+ global_flush_tlb();
-+}
-+
-+void free_initmem(void)
-+{
-+ free_init_pages("unused kernel memory",
-+ (unsigned long)(&__init_begin),
-+ (unsigned long)(&__init_end));
-+}
-+
-+#ifdef CONFIG_DEBUG_RODATA
-+
-+void mark_rodata_ro(void)
-+{
-+ unsigned long start = (unsigned long)_stext, end;
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+ /* It must still be possible to apply SMP alternatives. */
-+ if (num_possible_cpus() > 1)
-+ start = (unsigned long)_etext;
-+#endif
-+
-+#ifdef CONFIG_KPROBES
-+ start = (unsigned long)__start_rodata;
-+#endif
-+
-+ end = (unsigned long)__end_rodata;
-+ start = (start + PAGE_SIZE - 1) & PAGE_MASK;
-+ end &= PAGE_MASK;
-+ if (end <= start)
-+ return;
-+
-+ change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO);
-+
-+ printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
-+ (end - start) >> 10);
-+
-+ /*
-+ * change_page_attr_addr() requires a global_flush_tlb() call after it.
-+ * We do this after the printk so that if something went wrong in the
-+ * change, the printk gets out at least to give a better debug hint
-+ * of who is the culprit.
-+ */
-+ global_flush_tlb();
-+}
-+#endif
-+
-+#ifdef CONFIG_BLK_DEV_INITRD
-+void free_initrd_mem(unsigned long start, unsigned long end)
-+{
-+ free_init_pages("initrd memory", start, end);
-+}
-+#endif
-+
-+void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
-+{
-+#ifdef CONFIG_NUMA
-+ int nid = phys_to_nid(phys);
-+#endif
-+ unsigned long pfn = phys >> PAGE_SHIFT;
-+ if (pfn >= end_pfn) {
-+ /* This can happen with kdump kernels when accessing firmware
-+ tables. */
-+ if (pfn < end_pfn_map)
-+ return;
-+ printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
-+ phys, len);
-+ return;
-+ }
-+
-+ /* Should check here against the e820 map to avoid double free */
-+#ifdef CONFIG_NUMA
-+ reserve_bootmem_node(NODE_DATA(nid), phys, len);
-+#else
-+ reserve_bootmem(phys, len);
-+#endif
-+#ifndef CONFIG_XEN
-+ if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
-+ static unsigned long dma_reserve __initdata;
-+
-+ dma_reserve += len / PAGE_SIZE;
-+ set_dma_reserve(dma_reserve);
-+ }
-+#endif
-+}
-+
-+int kern_addr_valid(unsigned long addr)
-+{
-+ unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+
-+ if (above != 0 && above != -1UL)
-+ return 0;
-+
-+ pgd = pgd_offset_k(addr);
-+ if (pgd_none(*pgd))
-+ return 0;
-+
-+ pud = pud_offset(pgd, addr);
-+ if (pud_none(*pud))
-+ return 0;
-+
-+ pmd = pmd_offset(pud, addr);
-+ if (pmd_none(*pmd))
-+ return 0;
-+ if (pmd_large(*pmd))
-+ return pfn_valid(pmd_pfn(*pmd));
-+
-+ pte = pte_offset_kernel(pmd, addr);
-+ if (pte_none(*pte))
-+ return 0;
-+ return pfn_valid(pte_pfn(*pte));
-+}
-+
-+#ifdef CONFIG_SYSCTL
-+#include <linux/sysctl.h>
-+
-+extern int exception_trace, page_fault_trace;
-+
-+static ctl_table debug_table2[] = {
-+ {
-+ .ctl_name = 99,
-+ .procname = "exception-trace",
-+ .data = &exception_trace,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec
-+ },
-+ {}
-+};
-+
-+static ctl_table debug_root_table2[] = {
-+ {
-+ .ctl_name = CTL_DEBUG,
-+ .procname = "debug",
-+ .mode = 0555,
-+ .child = debug_table2
-+ },
-+ {}
-+};
-+
-+static __init int x8664_sysctl_init(void)
-+{
-+ register_sysctl_table(debug_root_table2);
-+ return 0;
-+}
-+__initcall(x8664_sysctl_init);
-+#endif
-+
-+/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
-+ covers the 64bit vsyscall page now. 32bit has a real VMA now and does
-+ not need special handling anymore. */
-+
-+static struct vm_area_struct gate_vma = {
-+ .vm_start = VSYSCALL_START,
-+ .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
-+ .vm_page_prot = PAGE_READONLY_EXEC,
-+ .vm_flags = VM_READ | VM_EXEC
-+};
-+
-+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
-+{
-+#ifdef CONFIG_IA32_EMULATION
-+ if (test_tsk_thread_flag(tsk, TIF_IA32))
-+ return NULL;
-+#endif
-+ return &gate_vma;
-+}
-+
-+int in_gate_area(struct task_struct *task, unsigned long addr)
-+{
-+ struct vm_area_struct *vma = get_gate_vma(task);
-+ if (!vma)
-+ return 0;
-+ return (addr >= vma->vm_start) && (addr < vma->vm_end);
-+}
-+
-+/* Use this when you have no reliable task/vma, typically from interrupt
-+ * context. It is less reliable than using the task's vma and may give
-+ * false positives.
-+ */
-+int in_gate_area_no_task(unsigned long addr)
-+{
-+ return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
-+}
-+
-+#ifndef CONFIG_XEN
-+void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
-+{
-+ return __alloc_bootmem_core(pgdat->bdata, size,
-+ SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0);
-+}
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/mm/Makefile ubuntu-gutsy-xen/arch/x86_64/mm/Makefile
---- ubuntu-gutsy/arch/x86_64/mm/Makefile 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/x86_64/mm/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -9,3 +9,13 @@
- obj-$(CONFIG_ACPI_NUMA) += srat.o
-
- hugetlbpage-y = ../../i386/mm/hugetlbpage.o
-+
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+
-+ioremap-y += ../../i386/mm/ioremap-xen.o
-+hypervisor-y += ../../i386/mm/hypervisor.o
-+obj-y += hypervisor.o
-+
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/mm/pageattr-xen.c ubuntu-gutsy-xen/arch/x86_64/mm/pageattr-xen.c
---- ubuntu-gutsy/arch/x86_64/mm/pageattr-xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/arch/x86_64/mm/pageattr-xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,506 @@
-+/*
-+ * Copyright 2002 Andi Kleen, SuSE Labs.
-+ * Thanks to Ben LaHaise for precious feedback.
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/sched.h>
-+#include <linux/highmem.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <asm/uaccess.h>
-+#include <asm/processor.h>
-+#include <asm/tlbflush.h>
-+#include <asm/io.h>
-+
-+#ifdef CONFIG_XEN
-+#include <asm/pgalloc.h>
-+#include <asm/mmu_context.h>
-+
-+LIST_HEAD(mm_unpinned);
-+DEFINE_SPINLOCK(mm_unpinned_lock);
-+
-+static void _pin_lock(struct mm_struct *mm, int lock) {
-+ if (lock)
-+ spin_lock(&mm->page_table_lock);
-+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
-+ /* While mm->page_table_lock protects us against insertions and
-+ * removals of higher level page table pages, it doesn't protect
-+ * against updates of pte-s. Such updates, however, require the
-+ * pte pages to be in consistent state (unpinned+writable or
-+	 * pte pages to be in a consistent state (unpinned+writable or
-+ * cannot be done atomically, which is why such updates must be
-+ * prevented from happening concurrently.
-+ * Note that no pte lock can ever elsewhere be acquired nesting
-+ * with an already acquired one in the same mm, or with the mm's
-+ * page_table_lock already acquired, as that would break in the
-+ * non-split case (where all these are actually resolving to the
-+ * one page_table_lock). Thus acquiring all of them here is not
-+	 * going to result in deadlocks, and the order of acquires
-+ * doesn't matter.
-+ */
-+ {
-+ pgd_t *pgd = mm->pgd;
-+ unsigned g;
-+
-+ for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
-+ pud_t *pud;
-+ unsigned u;
-+
-+ if (pgd_none(*pgd))
-+ continue;
-+ pud = pud_offset(pgd, 0);
-+ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
-+ pmd_t *pmd;
-+ unsigned m;
-+
-+ if (pud_none(*pud))
-+ continue;
-+ pmd = pmd_offset(pud, 0);
-+ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
-+ spinlock_t *ptl;
-+
-+ if (pmd_none(*pmd))
-+ continue;
-+ ptl = pte_lockptr(0, pmd);
-+ if (lock)
-+ spin_lock(ptl);
-+ else
-+ spin_unlock(ptl);
-+ }
-+ }
-+ }
-+ }
-+#endif
-+ if (!lock)
-+ spin_unlock(&mm->page_table_lock);
-+}
-+#define pin_lock(mm) _pin_lock(mm, 1)
-+#define pin_unlock(mm) _pin_lock(mm, 0)
-+
-+static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
-+{
-+ struct page *page = virt_to_page(pt);
-+ unsigned long pfn = page_to_pfn(page);
-+ int rc;
-+
-+ rc = HYPERVISOR_update_va_mapping(
-+ (unsigned long)__va(pfn << PAGE_SHIFT),
-+ pfn_pte(pfn, flags), 0);
-+ if (rc)
-+ BUG();
-+}
-+
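-+/*
-+ * Walk every page-table page of @mm and remap it with @flags
-+ * (PAGE_KERNEL_RO when pinning, PAGE_KERNEL when unpinning).
-+ */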
-+static void mm_walk(struct mm_struct *mm, pgprot_t flags)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+ int g,u,m;
-+
-+ pgd = mm->pgd;
-+ /*
-+ * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
-+ * be the 'current' task's pagetables (e.g., current may be 32-bit,
-+ * but the pagetables may be for a 64-bit task).
-+ * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
-+ * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
-+ */
-+ for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
-+ if (pgd_none(*pgd))
-+ continue;
-+ pud = pud_offset(pgd, 0);
-+ if (PTRS_PER_PUD > 1) /* not folded */
-+ mm_walk_set_prot(pud,flags);
-+ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
-+ if (pud_none(*pud))
-+ continue;
-+ pmd = pmd_offset(pud, 0);
-+ if (PTRS_PER_PMD > 1) /* not folded */
-+ mm_walk_set_prot(pmd,flags);
-+ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
-+ if (pmd_none(*pmd))
-+ continue;
-+ pte = pte_offset_kernel(pmd,0);
-+ mm_walk_set_prot(pte,flags);
-+ }
-+ }
-+ }
-+}
-+
-+void mm_pin(struct mm_struct *mm)
-+{
-+ if (xen_feature(XENFEAT_writable_page_tables))
-+ return;
-+
-+ pin_lock(mm);
-+
-+ mm_walk(mm, PAGE_KERNEL_RO);
-+ if (HYPERVISOR_update_va_mapping(
-+ (unsigned long)mm->pgd,
-+ pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO),
-+ UVMF_TLB_FLUSH))
-+ BUG();
-+ if (HYPERVISOR_update_va_mapping(
-+ (unsigned long)__user_pgd(mm->pgd),
-+ pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT,
-+ PAGE_KERNEL_RO),
-+ UVMF_TLB_FLUSH))
-+ BUG();
-+ xen_pgd_pin(__pa(mm->pgd)); /* kernel */
-+ xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
-+ mm->context.pinned = 1;
-+ spin_lock(&mm_unpinned_lock);
-+ list_del(&mm->context.unpinned);
-+ spin_unlock(&mm_unpinned_lock);
-+
-+ pin_unlock(mm);
-+}
-+
-+void mm_unpin(struct mm_struct *mm)
-+{
-+ if (xen_feature(XENFEAT_writable_page_tables))
-+ return;
-+
-+ pin_lock(mm);
-+
-+ xen_pgd_unpin(__pa(mm->pgd));
-+ xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
-+ if (HYPERVISOR_update_va_mapping(
-+ (unsigned long)mm->pgd,
-+ pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0))
-+ BUG();
-+ if (HYPERVISOR_update_va_mapping(
-+ (unsigned long)__user_pgd(mm->pgd),
-+ pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT,
-+ PAGE_KERNEL), 0))
-+ BUG();
-+ mm_walk(mm, PAGE_KERNEL);
-+ xen_tlb_flush();
-+ mm->context.pinned = 0;
-+ spin_lock(&mm_unpinned_lock);
-+ list_add(&mm->context.unpinned, &mm_unpinned);
-+ spin_unlock(&mm_unpinned_lock);
-+
-+ pin_unlock(mm);
-+}
-+
-+void mm_pin_all(void)
-+{
-+ if (xen_feature(XENFEAT_writable_page_tables))
-+ return;
-+
-+ /*
-+ * Allow uninterrupted access to the mm_unpinned list. We don't
-+ * actually take the mm_unpinned_lock as it is taken inside mm_pin().
-+ * All other CPUs must be at a safe point (e.g., in stop_machine
-+ * or offlined entirely).
-+ */
-+ preempt_disable();
-+ while (!list_empty(&mm_unpinned))
-+ mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
-+ context.unpinned));
-+ preempt_enable();
-+}
-+
-+void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
-+{
-+ if (!mm->context.pinned)
-+ mm_pin(mm);
-+}
-+
-+void arch_exit_mmap(struct mm_struct *mm)
-+{
-+ struct task_struct *tsk = current;
-+
-+ task_lock(tsk);
-+
-+ /*
-+ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
-+ * *much* faster this way, as no tlb flushes means bigger wrpt batches.
-+ */
-+ if (tsk->active_mm == mm) {
-+ tsk->active_mm = &init_mm;
-+ atomic_inc(&init_mm.mm_count);
-+
-+ switch_mm(mm, &init_mm, tsk);
-+
-+ atomic_dec(&mm->mm_count);
-+ BUG_ON(atomic_read(&mm->mm_count) == 0);
-+ }
-+
-+ task_unlock(tsk);
-+
-+ if ( mm->context.pinned && (atomic_read(&mm->mm_count) == 1) &&
-+ !mm->context.has_foreign_mappings )
-+ mm_unpin(mm);
-+}
-+
-+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
-+{
-+ struct page *pte;
-+
-+ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-+ if (pte) {
-+ SetPageForeign(pte, pte_free);
-+ init_page_count(pte);
-+ }
-+ return pte;
-+}
-+
-+void pte_free(struct page *pte)
-+{
-+ unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
-+
-+ if (!pte_write(*virt_to_ptep(va)))
-+ if (HYPERVISOR_update_va_mapping(
-+ va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0))
-+ BUG();
-+
-+ ClearPageForeign(pte);
-+ init_page_count(pte);
-+
-+ __free_page(pte);
-+}
-+#endif /* CONFIG_XEN */
-+
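-+/*
-+ * Resolve a kernel virtual address to its pte; for a 2MB mapping the pmd
-+ * entry itself is returned. Returns NULL if nothing is mapped there.
-+ */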
-+static inline pte_t *lookup_address(unsigned long address)
-+{
-+ pgd_t *pgd = pgd_offset_k(address);
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+ if (pgd_none(*pgd))
-+ return NULL;
-+ pud = pud_offset(pgd, address);
-+ if (!pud_present(*pud))
-+ return NULL;
-+ pmd = pmd_offset(pud, address);
-+ if (!pmd_present(*pmd))
-+ return NULL;
-+ if (pmd_large(*pmd))
-+ return (pte_t *)pmd;
-+ pte = pte_offset_kernel(pmd, address);
-+ if (pte && !pte_present(*pte))
-+ pte = NULL;
-+ return pte;
-+}
-+
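-+/*
-+ * Break a 2MB mapping up into a page of 4k PTEs: the slot covering
-+ * 'address' gets 'prot', all other slots keep 'ref_prot'.
-+ */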
-+static struct page *split_large_page(unsigned long address, pgprot_t prot,
-+ pgprot_t ref_prot)
-+{
-+ int i;
-+ unsigned long addr;
-+ struct page *base = alloc_pages(GFP_KERNEL, 0);
-+ pte_t *pbase;
-+ if (!base)
-+ return NULL;
-+ /*
-+ * page_private is used to track the number of entries in
-+	 * the page table page that have non-standard attributes.
-+ */
-+ SetPagePrivate(base);
-+ page_private(base) = 0;
-+
-+ address = __pa(address);
-+ addr = address & LARGE_PAGE_MASK;
-+ pbase = (pte_t *)page_address(base);
-+ for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
-+ pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
-+ addr == address ? prot : ref_prot);
-+ }
-+ return base;
-+}
-+
-+static void cache_flush_page(void *adr)
-+{
-+ int i;
-+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-+ asm volatile("clflush (%0)" :: "r" (adr + i));
-+}
-+
-+static void flush_kernel_map(void *arg)
-+{
-+ struct list_head *l = (struct list_head *)arg;
-+ struct page *pg;
-+
-+ /* When clflush is available always use it because it is
-+ much cheaper than WBINVD. Disable clflush for now because
-+ the high level code is not ready yet */
-+ if (1 || !cpu_has_clflush)
-+ asm volatile("wbinvd" ::: "memory");
-+ else list_for_each_entry(pg, l, lru) {
-+ void *adr = page_address(pg);
-+ if (cpu_has_clflush)
-+ cache_flush_page(adr);
-+ }
-+ __flush_tlb_all();
-+}
-+
-+static inline void flush_map(struct list_head *l)
-+{
-+ on_each_cpu(flush_kernel_map, l, 1, 1);
-+}
-+
-+static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
-+
-+static inline void save_page(struct page *fpage)
-+{
-+ list_add(&fpage->lru, &deferred_pages);
-+}
-+
-+/*
-+ * No more special protections in this 2/4MB area - revert to a
-+ * large page again.
-+ */
-+static void revert_page(unsigned long address, pgprot_t ref_prot)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t large_pte;
-+ unsigned long pfn;
-+
-+ pgd = pgd_offset_k(address);
-+ BUG_ON(pgd_none(*pgd));
-+ pud = pud_offset(pgd,address);
-+ BUG_ON(pud_none(*pud));
-+ pmd = pmd_offset(pud, address);
-+ BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
-+ pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
-+ large_pte = pfn_pte(pfn, ref_prot);
-+ large_pte = pte_mkhuge(large_pte);
-+ set_pte((pte_t *)pmd, large_pte);
-+}
-+
-+static int
-+__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
-+ pgprot_t ref_prot)
-+{
-+ pte_t *kpte;
-+ struct page *kpte_page;
-+ pgprot_t ref_prot2;
-+ kpte = lookup_address(address);
-+ if (!kpte) return 0;
-+ kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
-+ if (pgprot_val(prot) != pgprot_val(ref_prot)) {
-+ if (!pte_huge(*kpte)) {
-+ set_pte(kpte, pfn_pte(pfn, prot));
-+ } else {
-+ /*
-+ * split_large_page will take the reference for this
-+ * change_page_attr on the split page.
-+ */
-+ struct page *split;
-+ ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
-+ split = split_large_page(address, prot, ref_prot2);
-+ if (!split)
-+ return -ENOMEM;
-+ set_pte(kpte, mk_pte(split, ref_prot2));
-+ kpte_page = split;
-+ }
-+ page_private(kpte_page)++;
-+ } else if (!pte_huge(*kpte)) {
-+ set_pte(kpte, pfn_pte(pfn, ref_prot));
-+ BUG_ON(page_private(kpte_page) == 0);
-+ page_private(kpte_page)--;
-+ } else
-+ BUG();
-+
-+ /* on x86-64 the direct mapping set at boot is not using 4k pages */
-+ /*
-+ * ..., but the XEN guest kernels (currently) do:
-+ * If the pte was reserved, it means it was created at boot
-+ * time (not via split_large_page) and in turn we must not
-+ * replace it with a large page.
-+ */
-+#ifndef CONFIG_XEN
-+ BUG_ON(PageReserved(kpte_page));
-+#else
-+ if (PageReserved(kpte_page))
-+ return 0;
-+#endif
-+
-+ if (page_private(kpte_page) == 0) {
-+ save_page(kpte_page);
-+ revert_page(address, ref_prot);
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Change the page attributes of a page in the linear mapping.
-+ *
-+ * This should be used when a page is mapped with a different caching policy
-+ * than write-back somewhere - some CPUs do not like it when mappings with
-+ * different caching policies exist. This changes the page attributes of the
-+ * in kernel linear mapping too.
-+ *
-+ * The caller needs to ensure that there are no conflicting mappings elsewhere.
-+ * This function only deals with the kernel linear map.
-+ *
-+ * Caller must call global_flush_tlb() after this.
-+ */
-+int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
-+{
-+ int err = 0, kernel_map = 0;
-+ int i;
-+
-+ if (address >= __START_KERNEL_map
-+ && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
-+ address = (unsigned long)__va(__pa(address));
-+ kernel_map = 1;
-+ }
-+
-+ down_write(&init_mm.mmap_sem);
-+ for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
-+ unsigned long pfn = __pa(address) >> PAGE_SHIFT;
-+
-+ if (!kernel_map || pte_present(pfn_pte(0, prot))) {
-+ err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
-+ if (err)
-+ break;
-+ }
-+ /* Handle kernel mapping too which aliases part of the
-+ * lowmem */
-+ if (__pa(address) < KERNEL_TEXT_SIZE) {
-+ unsigned long addr2;
-+ pgprot_t prot2;
-+ addr2 = __START_KERNEL_map + __pa(address);
-+ /* Make sure the kernel mappings stay executable */
-+ prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
-+ err = __change_page_attr(addr2, pfn, prot2,
-+ PAGE_KERNEL_EXEC);
-+ }
-+ }
-+ up_write(&init_mm.mmap_sem);
-+ return err;
-+}
-+
-+/* Don't call this for MMIO areas that may not have a mem_map entry */
-+int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-+{
-+ unsigned long addr = (unsigned long)page_address(page);
-+ return change_page_attr_addr(addr, numpages, prot);
-+}
-+
-+void global_flush_tlb(void)
-+{
-+ struct page *pg, *next;
-+ struct list_head l;
-+
-+ down_read(&init_mm.mmap_sem);
-+ list_replace_init(&deferred_pages, &l);
-+ up_read(&init_mm.mmap_sem);
-+
-+ flush_map(&l);
-+
-+ list_for_each_entry_safe(pg, next, &l, lru) {
-+ ClearPagePrivate(pg);
-+ __free_page(pg);
-+ }
-+}
-+
-+EXPORT_SYMBOL(change_page_attr);
-+EXPORT_SYMBOL(global_flush_tlb);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/oprofile/Makefile ubuntu-gutsy-xen/arch/x86_64/oprofile/Makefile
---- ubuntu-gutsy/arch/x86_64/oprofile/Makefile 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/x86_64/oprofile/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -11,9 +11,15 @@
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-+ifdef CONFIG_XEN
-+XENOPROF_COMMON_OBJS = $(addprefix ../../../drivers/xen/xenoprof/, \
-+ xenoprofile.o)
-+OPROFILE-y := xenoprof.o
-+else
- OPROFILE-y := init.o backtrace.o
- OPROFILE-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_athlon.o op_model_p4.o \
- op_model_ppro.o
- OPROFILE-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o
--
--oprofile-y = $(DRIVER_OBJS) $(addprefix ../../i386/oprofile/, $(OPROFILE-y))
-+endif
-+oprofile-y = $(DRIVER_OBJS) $(XENOPROF_COMMON_OBJS) \
-+ $(addprefix ../../i386/oprofile/, $(OPROFILE-y))
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/arch/x86_64/pci/Makefile ubuntu-gutsy-xen/arch/x86_64/pci/Makefile
---- ubuntu-gutsy/arch/x86_64/pci/Makefile 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/arch/x86_64/pci/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -15,8 +15,13 @@
-
- obj-$(CONFIG_NUMA) += k8-bus.o
-
-+# pcifront should be after mmconfig.o and direct.o as it should only
-+# take over if direct access to the PCI bus is unavailable
-+obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront.o
-+
- direct-y += ../../i386/pci/direct.o
- acpi-y += ../../i386/pci/acpi.o
-+pcifront-y += ../../i386/pci/pcifront.o
- legacy-y += ../../i386/pci/legacy.o
- irq-y += ../../i386/pci/irq.o
- common-y += ../../i386/pci/common.o
-@@ -25,3 +30,10 @@
- init-y += ../../i386/pci/init.o
- early-y += ../../i386/pci/early.o
- mmconfig-shared-y += ../../i386/pci/mmconfig-shared.o
-+
-+ifdef CONFIG_XEN
-+irq-y := ../../i386/pci/irq-xen.o
-+include $(srctree)/scripts/Makefile.xen
-+
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/acpi/Kconfig ubuntu-gutsy-xen/drivers/acpi/Kconfig
---- ubuntu-gutsy/drivers/acpi/Kconfig 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/acpi/Kconfig 2007-08-18 12:38:02.000000000 -0400
-@@ -48,7 +48,7 @@
-
- config ACPI_SLEEP
- bool "Sleep States"
-- depends on X86 && (!SMP || SUSPEND_SMP)
-+ depends on X86 && (!SMP || SUSPEND_SMP) && !XEN
- depends on PM
- default y
- ---help---
-@@ -319,6 +319,7 @@
- config X86_PM_TIMER
- bool "Power Management Timer Support" if EMBEDDED
- depends on X86
-+ depends on !XEN
- default y
- help
- The Power Management Timer is available on all ACPI-capable,
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/char/agp/intel-agp.c ubuntu-gutsy-xen/drivers/char/agp/intel-agp.c
---- ubuntu-gutsy/drivers/char/agp/intel-agp.c 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/char/agp/intel-agp.c 2007-08-18 12:38:02.000000000 -0400
-@@ -208,6 +208,13 @@
- if (page == NULL)
- return NULL;
-
-+#ifdef CONFIG_XEN
-+ if (xen_create_contiguous_region((unsigned long)page_address(page), 2, 32)) {
-+ __free_pages(page, 2);
-+ return NULL;
-+ }
-+#endif
-+
- if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) {
- change_page_attr(page, 4, PAGE_KERNEL);
- global_flush_tlb();
-@@ -231,6 +238,9 @@
- page = virt_to_page(addr);
- change_page_attr(page, 4, PAGE_KERNEL);
- global_flush_tlb();
-+#ifdef CONFIG_XEN
-+ xen_destroy_contiguous_region((unsigned long)page_address(page), 2);
-+#endif
- put_page(page);
- unlock_page(page);
- __free_pages(page, 2);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/char/mem.c ubuntu-gutsy-xen/drivers/char/mem.c
---- ubuntu-gutsy/drivers/char/mem.c 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/char/mem.c 2007-08-18 12:38:02.000000000 -0400
-@@ -101,6 +101,7 @@
- }
- #endif
-
-+#ifndef ARCH_HAS_DEV_MEM
- /*
- * This function reads the *physical* memory. The f_pos points directly to the
- * memory location.
-@@ -223,6 +224,7 @@
- *ppos += written;
- return written;
- }
-+#endif
-
- #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
- static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-@@ -809,6 +811,7 @@
- #define open_kmem open_mem
- #define open_oldmem open_mem
-
-+#ifndef ARCH_HAS_DEV_MEM
- static const struct file_operations mem_fops = {
- .llseek = memory_lseek,
- .read = read_mem,
-@@ -817,6 +820,9 @@
- .open = open_mem,
- .get_unmapped_area = get_unmapped_area_mem,
- };
-+#else
-+extern const struct file_operations mem_fops;
-+#endif
-
- static const struct file_operations kmem_fops = {
- .llseek = memory_lseek,
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/char/tpm/Kconfig ubuntu-gutsy-xen/drivers/char/tpm/Kconfig
---- ubuntu-gutsy/drivers/char/tpm/Kconfig 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/char/tpm/Kconfig 2007-08-18 12:38:02.000000000 -0400
-@@ -59,5 +59,13 @@
- Further information on this driver and the supported hardware
- can be found at http://www.prosec.rub.de/tpm
-
--endmenu
-+config TCG_XEN
-+ tristate "XEN TPM Interface"
-+ depends on TCG_TPM && XEN
-+ ---help---
-+ If you want to make TPM support available to a Xen user domain,
-+ say Yes and it will be accessible from within Linux.
-+ To compile this driver as a module, choose M here; the module
-+ will be called tpm_xenu.
-
-+endmenu
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/char/tpm/Makefile ubuntu-gutsy-xen/drivers/char/tpm/Makefile
---- ubuntu-gutsy/drivers/char/tpm/Makefile 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/char/tpm/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -9,3 +9,5 @@
- obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
- obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
- obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
-+obj-$(CONFIG_TCG_XEN) += tpm_xenu.o
-+tpm_xenu-y = tpm_xen.o tpm_vtpm.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/char/tpm/tpm.h ubuntu-gutsy-xen/drivers/char/tpm/tpm.h
---- ubuntu-gutsy/drivers/char/tpm/tpm.h 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/char/tpm/tpm.h 2007-08-18 12:38:02.000000000 -0400
-@@ -106,6 +106,9 @@
- struct dentry **bios_dir;
-
- struct list_head list;
-+#ifdef CONFIG_XEN
-+ void *priv;
-+#endif
- };
-
- #define to_tpm_chip(n) container_of(n, struct tpm_chip, vendor)
-@@ -122,6 +125,18 @@
- outb(value & 0xFF, base+1);
- }
-
-+#ifdef CONFIG_XEN
-+static inline void *chip_get_private(const struct tpm_chip *chip)
-+{
-+ return chip->priv;
-+}
-+
-+static inline void chip_set_private(struct tpm_chip *chip, void *priv)
-+{
-+ chip->priv = priv;
-+}
-+#endif
-+
- extern void tpm_get_timeouts(struct tpm_chip *);
- extern void tpm_gen_interrupt(struct tpm_chip *);
- extern void tpm_continue_selftest(struct tpm_chip *);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/char/tpm/tpm_vtpm.c ubuntu-gutsy-xen/drivers/char/tpm/tpm_vtpm.c
---- ubuntu-gutsy/drivers/char/tpm/tpm_vtpm.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/char/tpm/tpm_vtpm.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,542 @@
-+/*
-+ * Copyright (C) 2006 IBM Corporation
-+ *
-+ * Authors:
-+ * Stefan Berger <stefanb@us.ibm.com>
-+ *
-+ * Generic device driver part for device drivers in a virtualized
-+ * environment.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation, version 2 of the
-+ * License.
-+ *
-+ */
-+
-+#include <asm/uaccess.h>
-+#include <linux/list.h>
-+#include <linux/device.h>
-+#include <linux/interrupt.h>
-+#include <linux/platform_device.h>
-+#include "tpm.h"
-+#include "tpm_vtpm.h"
-+
-+/* read status bits */
-+enum {
-+ STATUS_BUSY = 0x01,
-+ STATUS_DATA_AVAIL = 0x02,
-+ STATUS_READY = 0x04
-+};
-+
-+struct transmission {
-+ struct list_head next;
-+
-+ unsigned char *request;
-+ size_t request_len;
-+ size_t request_buflen;
-+
-+ unsigned char *response;
-+ size_t response_len;
-+ size_t response_buflen;
-+
-+ unsigned int flags;
-+};
-+
-+enum {
-+ TRANSMISSION_FLAG_WAS_QUEUED = 0x1
-+};
-+
-+
-+enum {
-+ DATAEX_FLAG_QUEUED_ONLY = 0x1
-+};
-+
-+
-+/* local variables */
-+
-+/* local function prototypes */
-+static int _vtpm_send_queued(struct tpm_chip *chip);
-+
-+
-+/* =============================================================
-+ * Some utility functions
-+ * =============================================================
-+ */
-+static void vtpm_state_init(struct vtpm_state *vtpms)
-+{
-+ vtpms->current_request = NULL;
-+ spin_lock_init(&vtpms->req_list_lock);
-+ init_waitqueue_head(&vtpms->req_wait_queue);
-+ INIT_LIST_HEAD(&vtpms->queued_requests);
-+
-+ vtpms->current_response = NULL;
-+ spin_lock_init(&vtpms->resp_list_lock);
-+ init_waitqueue_head(&vtpms->resp_wait_queue);
-+
-+ vtpms->disconnect_time = jiffies;
-+}
-+
-+
-+static inline struct transmission *transmission_alloc(void)
-+{
-+ return kzalloc(sizeof(struct transmission), GFP_ATOMIC);
-+}
-+
-+static unsigned char *
-+transmission_set_req_buffer(struct transmission *t,
-+ unsigned char *buffer, size_t len)
-+{
-+ if (t->request_buflen < len) {
-+ kfree(t->request);
-+ t->request = kmalloc(len, GFP_KERNEL);
-+ if (!t->request) {
-+ t->request_buflen = 0;
-+ return NULL;
-+ }
-+ t->request_buflen = len;
-+ }
-+
-+ memcpy(t->request, buffer, len);
-+ t->request_len = len;
-+
-+ return t->request;
-+}
-+
-+static unsigned char *
-+transmission_set_res_buffer(struct transmission *t,
-+ const unsigned char *buffer, size_t len)
-+{
-+ if (t->response_buflen < len) {
-+ kfree(t->response);
-+ t->response = kmalloc(len, GFP_ATOMIC);
-+ if (!t->response) {
-+ t->response_buflen = 0;
-+ return NULL;
-+ }
-+ t->response_buflen = len;
-+ }
-+
-+ memcpy(t->response, buffer, len);
-+ t->response_len = len;
-+
-+ return t->response;
-+}
-+
-+static inline void transmission_free(struct transmission *t)
-+{
-+ kfree(t->request);
-+ kfree(t->response);
-+ kfree(t);
-+}
-+
-+/* =============================================================
-+ * Interface with the lower layer driver
-+ * =============================================================
-+ */
-+/*
-+ * Lower layer uses this function to make a response available.
-+ */
-+int vtpm_vd_recv(const struct tpm_chip *chip,
-+ const unsigned char *buffer, size_t count,
-+ void *ptr)
-+{
-+ unsigned long flags;
-+ int ret_size = 0;
-+ struct transmission *t;
-+ struct vtpm_state *vtpms;
-+
-+ vtpms = (struct vtpm_state *)chip_get_private(chip);
-+
-+ /*
-+ * The list with requests must contain one request
-+ * only and the element there must be the one that
-+ * was passed to me from the front-end.
-+ */
-+ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
-+ if (vtpms->current_request != ptr) {
-+ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
-+ return 0;
-+ }
-+
-+ if ((t = vtpms->current_request)) {
-+ transmission_free(t);
-+ vtpms->current_request = NULL;
-+ }
-+
-+ t = transmission_alloc();
-+ if (t) {
-+ if (!transmission_set_res_buffer(t, buffer, count)) {
-+ transmission_free(t);
-+ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
-+ return -ENOMEM;
-+ }
-+ ret_size = count;
-+ vtpms->current_response = t;
-+ wake_up_interruptible(&vtpms->resp_wait_queue);
-+ }
-+ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
-+
-+ return ret_size;
-+}
-+
-+
-+/*
-+ * Lower layer indicates its status (connected/disconnected)
-+ */
-+void vtpm_vd_status(const struct tpm_chip *chip, u8 vd_status)
-+{
-+ struct vtpm_state *vtpms;
-+
-+ vtpms = (struct vtpm_state *)chip_get_private(chip);
-+
-+ vtpms->vd_status = vd_status;
-+ if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
-+ vtpms->disconnect_time = jiffies;
-+ }
-+}
-+
-+/* =============================================================
-+ * Interface with the generic TPM driver
-+ * =============================================================
-+ */
-+static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
-+{
-+ int rc = 0;
-+ unsigned long flags;
-+ struct vtpm_state *vtpms;
-+
-+ vtpms = (struct vtpm_state *)chip_get_private(chip);
-+
-+ /*
-+ * Check if the previous operation only queued the command
-+ * In this case there won't be a response, so I just
-+ * return from here and reset that flag. In any other
-+ * case I should receive a response from the back-end.
-+ */
-+ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
-+ if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
-+ vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY;
-+ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
-+ /*
-+ * The first few commands (measurements) must be
-+ * queued since it might not be possible to talk to the
-+ * TPM, yet.
-+ * Return a response of up to 30 '0's.
-+ */
-+
-+ count = min_t(size_t, count, 30);
-+ memset(buf, 0x0, count);
-+ return count;
-+ }
-+ /*
-+ * Check whether something is in the responselist and if
-+ * there's nothing in the list wait for something to appear.
-+ */
-+
-+ if (!vtpms->current_response) {
-+ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
-+ interruptible_sleep_on_timeout(&vtpms->resp_wait_queue,
-+ 1000);
-+ spin_lock_irqsave(&vtpms->resp_list_lock ,flags);
-+ }
-+
-+ if (vtpms->current_response) {
-+ struct transmission *t = vtpms->current_response;
-+ vtpms->current_response = NULL;
-+ rc = min(count, t->response_len);
-+ memcpy(buf, t->response, rc);
-+ transmission_free(t);
-+ }
-+
-+ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
-+ return rc;
-+}
-+
-+static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
-+{
-+ int rc = 0;
-+ unsigned long flags;
-+ struct transmission *t = transmission_alloc();
-+ struct vtpm_state *vtpms;
-+
-+ vtpms = (struct vtpm_state *)chip_get_private(chip);
-+
-+ if (!t)
-+ return -ENOMEM;
-+ /*
-+ * If there's a current request, it must be the
-+ * previous request that has timed out.
-+ */
-+ spin_lock_irqsave(&vtpms->req_list_lock, flags);
-+ if (vtpms->current_request != NULL) {
-+ printk("WARNING: Sending although there is a request outstanding.\n"
-+ " Previous request must have timed out.\n");
-+ transmission_free(vtpms->current_request);
-+ vtpms->current_request = NULL;
-+ }
-+ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
-+
-+ /*
-+ * Queue the packet if the driver below is not
-+	 * ready yet, or there is any packet already
-+ * in the queue.
-+ * If the driver below is ready, unqueue all
-+ * packets first before sending our current
-+ * packet.
-+ * For each unqueued packet, except for the
-+ * last (=current) packet, call the function
-+ * tpm_xen_recv to wait for the response to come
-+ * back.
-+ */
-+ if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
-+ if (time_after(jiffies,
-+ vtpms->disconnect_time + HZ * 10)) {
-+ rc = -ENOENT;
-+ } else {
-+ goto queue_it;
-+ }
-+ } else {
-+ /*
-+ * Send all queued packets.
-+ */
-+ if (_vtpm_send_queued(chip) == 0) {
-+
-+ vtpms->current_request = t;
-+
-+ rc = vtpm_vd_send(vtpms->tpm_private,
-+ buf,
-+ count,
-+ t);
-+ /*
-+ * The generic TPM driver will call
-+ * the function to receive the response.
-+ */
-+ if (rc < 0) {
-+ vtpms->current_request = NULL;
-+ goto queue_it;
-+ }
-+ } else {
-+queue_it:
-+ if (!transmission_set_req_buffer(t, buf, count)) {
-+ transmission_free(t);
-+ rc = -ENOMEM;
-+ goto exit;
-+ }
-+ /*
-+			 * An error occurred. Don't even try
-+ * to send the current request. Just
-+ * queue it.
-+ */
-+ spin_lock_irqsave(&vtpms->req_list_lock, flags);
-+ vtpms->flags |= DATAEX_FLAG_QUEUED_ONLY;
-+ list_add_tail(&t->next, &vtpms->queued_requests);
-+ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
-+ }
-+ }
-+
-+exit:
-+ return rc;
-+}
-+
-+
-+/*
-+ * Send all queued requests.
-+ */
-+static int _vtpm_send_queued(struct tpm_chip *chip)
-+{
-+ int rc;
-+ int error = 0;
-+ long flags;
-+ unsigned char buffer[1];
-+ struct vtpm_state *vtpms;
-+ vtpms = (struct vtpm_state *)chip_get_private(chip);
-+
-+ spin_lock_irqsave(&vtpms->req_list_lock, flags);
-+
-+ while (!list_empty(&vtpms->queued_requests)) {
-+ /*
-+ * Need to dequeue them.
-+ * Read the result into a dummy buffer.
-+ */
-+ struct transmission *qt = (struct transmission *)
-+ vtpms->queued_requests.next;
-+ list_del(&qt->next);
-+ vtpms->current_request = qt;
-+ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
-+
-+ rc = vtpm_vd_send(vtpms->tpm_private,
-+ qt->request,
-+ qt->request_len,
-+ qt);
-+
-+ if (rc < 0) {
-+ spin_lock_irqsave(&vtpms->req_list_lock, flags);
-+ if ((qt = vtpms->current_request) != NULL) {
-+ /*
-+ * requeue it at the beginning
-+ * of the list
-+ */
-+ list_add(&qt->next,
-+ &vtpms->queued_requests);
-+ }
-+ vtpms->current_request = NULL;
-+ error = 1;
-+ break;
-+ }
-+ /*
-+ * After this point qt is not valid anymore!
-+		 * It is freed when the front-end delivers
-+		 * the data by calling tpm_recv.
-+ */
-+ /*
-+ * Receive response into provided dummy buffer
-+ */
-+ rc = vtpm_recv(chip, buffer, sizeof(buffer));
-+ spin_lock_irqsave(&vtpms->req_list_lock, flags);
-+ }
-+
-+ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
-+
-+ return error;
-+}
-+
-+static void vtpm_cancel(struct tpm_chip *chip)
-+{
-+ unsigned long flags;
-+ struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip);
-+
-+ spin_lock_irqsave(&vtpms->resp_list_lock,flags);
-+
-+ if (!vtpms->current_response && vtpms->current_request) {
-+ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
-+ interruptible_sleep_on(&vtpms->resp_wait_queue);
-+ spin_lock_irqsave(&vtpms->resp_list_lock,flags);
-+ }
-+
-+ if (vtpms->current_response) {
-+ struct transmission *t = vtpms->current_response;
-+ vtpms->current_response = NULL;
-+ transmission_free(t);
-+ }
-+
-+ spin_unlock_irqrestore(&vtpms->resp_list_lock,flags);
-+}
-+
-+static u8 vtpm_status(struct tpm_chip *chip)
-+{
-+ u8 rc = 0;
-+ unsigned long flags;
-+ struct vtpm_state *vtpms;
-+
-+ vtpms = (struct vtpm_state *)chip_get_private(chip);
-+
-+ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
-+ /*
-+ * Data are available if:
-+ * - there's a current response
-+ * - the last packet was queued only (this is fake, but necessary to
-+ * get the generic TPM layer to call the receive function.)
-+ */
-+ if (vtpms->current_response ||
-+ 0 != (vtpms->flags & DATAEX_FLAG_QUEUED_ONLY)) {
-+ rc = STATUS_DATA_AVAIL;
-+ } else if (!vtpms->current_response && !vtpms->current_request) {
-+ rc = STATUS_READY;
-+ }
-+
-+ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
-+ return rc;
-+}
-+
-+static struct file_operations vtpm_ops = {
-+ .owner = THIS_MODULE,
-+ .llseek = no_llseek,
-+ .open = tpm_open,
-+ .read = tpm_read,
-+ .write = tpm_write,
-+ .release = tpm_release,
-+};
-+
-+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
-+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
-+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
-+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
-+ NULL);
-+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-+static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
-+
-+static struct attribute *vtpm_attrs[] = {
-+ &dev_attr_pubek.attr,
-+ &dev_attr_pcrs.attr,
-+ &dev_attr_enabled.attr,
-+ &dev_attr_active.attr,
-+ &dev_attr_owned.attr,
-+ &dev_attr_temp_deactivated.attr,
-+ &dev_attr_caps.attr,
-+ &dev_attr_cancel.attr,
-+ NULL,
-+};
-+
-+static struct attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs };
-+
-+#define TPM_LONG_TIMEOUT (10 * 60 * HZ)
-+
-+static struct tpm_vendor_specific tpm_vtpm = {
-+ .recv = vtpm_recv,
-+ .send = vtpm_send,
-+ .cancel = vtpm_cancel,
-+ .status = vtpm_status,
-+ .req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
-+ .req_complete_val = STATUS_DATA_AVAIL,
-+ .req_canceled = STATUS_READY,
-+ .attr_group = &vtpm_attr_grp,
-+ .miscdev = {
-+ .fops = &vtpm_ops,
-+ },
-+ .duration = {
-+ TPM_LONG_TIMEOUT,
-+ TPM_LONG_TIMEOUT,
-+ TPM_LONG_TIMEOUT,
-+ },
-+};
-+
-+struct tpm_chip *init_vtpm(struct device *dev,
-+ struct tpm_private *tp)
-+{
-+ long rc;
-+ struct tpm_chip *chip;
-+ struct vtpm_state *vtpms;
-+
-+ vtpms = kzalloc(sizeof(struct vtpm_state), GFP_KERNEL);
-+ if (!vtpms)
-+ return ERR_PTR(-ENOMEM);
-+
-+ vtpm_state_init(vtpms);
-+ vtpms->tpm_private = tp;
-+
-+ chip = tpm_register_hardware(dev, &tpm_vtpm);
-+ if (!chip) {
-+ rc = -ENODEV;
-+ goto err_free_mem;
-+ }
-+
-+ chip_set_private(chip, vtpms);
-+
-+ return chip;
-+
-+err_free_mem:
-+ kfree(vtpms);
-+
-+ return ERR_PTR(rc);
-+}
-+
-+void cleanup_vtpm(struct device *dev)
-+{
-+ struct tpm_chip *chip = dev_get_drvdata(dev);
-+ struct vtpm_state *vtpms = (struct vtpm_state*)chip_get_private(chip);
-+ tpm_remove_hardware(dev);
-+ kfree(vtpms);
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/char/tpm/tpm_vtpm.h ubuntu-gutsy-xen/drivers/char/tpm/tpm_vtpm.h
---- ubuntu-gutsy/drivers/char/tpm/tpm_vtpm.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/char/tpm/tpm_vtpm.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,55 @@
-+#ifndef TPM_VTPM_H
-+#define TPM_VTPM_H
-+
-+struct tpm_chip;
-+struct tpm_private;
-+
-+struct vtpm_state {
-+ struct transmission *current_request;
-+ spinlock_t req_list_lock;
-+ wait_queue_head_t req_wait_queue;
-+
-+ struct list_head queued_requests;
-+
-+ struct transmission *current_response;
-+ spinlock_t resp_list_lock;
-+ wait_queue_head_t resp_wait_queue; // processes waiting for responses
-+
-+ u8 vd_status;
-+ u8 flags;
-+
-+ unsigned long disconnect_time;
-+
-+ /*
-+ * The following is a private structure of the underlying
-+ * driver. It is passed as a parameter to the send function.
-+ */
-+ struct tpm_private *tpm_private;
-+};
-+
-+
-+enum vdev_status {
-+ TPM_VD_STATUS_DISCONNECTED = 0x0,
-+ TPM_VD_STATUS_CONNECTED = 0x1
-+};
-+
-+/* this function is called from tpm_vtpm.c */
-+int vtpm_vd_send(struct tpm_private * tp,
-+ const u8 * buf, size_t count, void *ptr);
-+
-+/* these functions are offered by tpm_vtpm.c */
-+struct tpm_chip *init_vtpm(struct device *,
-+ struct tpm_private *);
-+void cleanup_vtpm(struct device *);
-+int vtpm_vd_recv(const struct tpm_chip* chip,
-+ const unsigned char *buffer, size_t count, void *ptr);
-+void vtpm_vd_status(const struct tpm_chip *, u8 status);
-+
-+static inline struct tpm_private *tpm_private_from_dev(struct device *dev)
-+{
-+ struct tpm_chip *chip = dev_get_drvdata(dev);
-+ struct vtpm_state *vtpms = chip_get_private(chip);
-+ return vtpms->tpm_private;
-+}
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/char/tpm/tpm_xen.c ubuntu-gutsy-xen/drivers/char/tpm/tpm_xen.c
---- ubuntu-gutsy/drivers/char/tpm/tpm_xen.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/char/tpm/tpm_xen.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,719 @@
-+/*
-+ * Copyright (c) 2005, IBM Corporation
-+ *
-+ * Author: Stefan Berger, stefanb@us.ibm.com
-+ * Grant table support: Mahadevan Gomathisankaran
-+ *
-+ * This code has been derived from drivers/xen/netfront/netfront.c
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/errno.h>
-+#include <linux/err.h>
-+#include <linux/interrupt.h>
-+#include <linux/mutex.h>
-+#include <asm/uaccess.h>
-+#include <xen/evtchn.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/interface/io/tpmif.h>
-+#include <xen/gnttab.h>
-+#include <xen/xenbus.h>
-+#include "tpm.h"
-+#include "tpm_vtpm.h"
-+
-+#undef DEBUG
-+
-+/* local structures */
-+struct tpm_private {
-+ struct tpm_chip *chip;
-+
-+ tpmif_tx_interface_t *tx;
-+ atomic_t refcnt;
-+ unsigned int irq;
-+ u8 is_connected;
-+ u8 is_suspended;
-+
-+ spinlock_t tx_lock;
-+
-+ struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
-+
-+ atomic_t tx_busy;
-+ void *tx_remember;
-+
-+ domid_t backend_id;
-+ wait_queue_head_t wait_q;
-+
-+ struct xenbus_device *dev;
-+ int ring_ref;
-+};
-+
-+struct tx_buffer {
-+ unsigned int size; // available space in data
-+ unsigned int len; // used space in data
-+ unsigned char *data; // pointer to a page
-+};
-+
-+
-+/* locally visible variables */
-+static grant_ref_t gref_head;
-+static struct tpm_private *my_priv;
-+
-+/* local function prototypes */
-+static irqreturn_t tpmif_int(int irq,
-+ void *tpm_priv);
-+static void tpmif_rx_action(unsigned long unused);
-+static int tpmif_connect(struct xenbus_device *dev,
-+ struct tpm_private *tp,
-+ domid_t domid);
-+static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
-+static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
-+static void tpmif_free_tx_buffers(struct tpm_private *tp);
-+static void tpmif_set_connected_state(struct tpm_private *tp,
-+ u8 newstate);
-+static int tpm_xmit(struct tpm_private *tp,
-+ const u8 * buf, size_t count, int userbuffer,
-+ void *remember);
-+static void destroy_tpmring(struct tpm_private *tp);
-+void __exit tpmif_exit(void);
-+
-+#define DPRINTK(fmt, args...) \
-+ pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
-+#define IPRINTK(fmt, args...) \
-+ printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
-+#define WPRINTK(fmt, args...) \
-+ printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
-+
-+#define GRANT_INVALID_REF 0
-+
-+
-+static inline int
-+tx_buffer_copy(struct tx_buffer *txb, const u8 *src, int len,
-+ int isuserbuffer)
-+{
-+ int copied = len;
-+
-+ if (len > txb->size)
-+ copied = txb->size;
-+ if (isuserbuffer) {
-+ if (copy_from_user(txb->data, src, copied))
-+ return -EFAULT;
-+ } else {
-+ memcpy(txb->data, src, copied);
-+ }
-+ txb->len = len;
-+ return copied;
-+}
-+
-+static inline struct tx_buffer *tx_buffer_alloc(void)
-+{
-+ struct tx_buffer *txb;
-+
-+ txb = kzalloc(sizeof(struct tx_buffer), GFP_KERNEL);
-+ if (!txb)
-+ return NULL;
-+
-+ txb->len = 0;
-+ txb->size = PAGE_SIZE;
-+ txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
-+ if (txb->data == NULL) {
-+ kfree(txb);
-+ txb = NULL;
-+ }
-+
-+ return txb;
-+}
-+
-+
-+static inline void tx_buffer_free(struct tx_buffer *txb)
-+{
-+ if (txb) {
-+ free_page((long)txb->data);
-+ kfree(txb);
-+ }
-+}
-+
-+/**************************************************************
-+ Utility function for the tpm_private structure
-+**************************************************************/
-+static void tpm_private_init(struct tpm_private *tp)
-+{
-+ spin_lock_init(&tp->tx_lock);
-+ init_waitqueue_head(&tp->wait_q);
-+ atomic_set(&tp->refcnt, 1);
-+}
-+
-+static void tpm_private_put(void)
-+{
-+ if (!atomic_dec_and_test(&my_priv->refcnt))
-+ return;
-+
-+ tpmif_free_tx_buffers(my_priv);
-+ kfree(my_priv);
-+ my_priv = NULL;
-+}
-+
-+static struct tpm_private *tpm_private_get(void)
-+{
-+ int err;
-+
-+ if (my_priv) {
-+ atomic_inc(&my_priv->refcnt);
-+ return my_priv;
-+ }
-+
-+ my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
-+ if (!my_priv)
-+ return NULL;
-+
-+ tpm_private_init(my_priv);
-+ err = tpmif_allocate_tx_buffers(my_priv);
-+ if (err < 0)
-+ tpm_private_put();
-+
-+ return my_priv;
-+}
-+
-+/**************************************************************
-+
-+ The interface to let the tpm plugin register its callback
-+ function and send data to another partition using this module
-+
-+**************************************************************/
-+
-+static DEFINE_MUTEX(suspend_lock);
-+/*
-+ * Send data via this module by calling this function
-+ */
-+int vtpm_vd_send(struct tpm_private *tp,
-+ const u8 * buf, size_t count, void *ptr)
-+{
-+ int sent;
-+
-+ mutex_lock(&suspend_lock);
-+ sent = tpm_xmit(tp, buf, count, 0, ptr);
-+ mutex_unlock(&suspend_lock);
-+
-+ return sent;
-+}
-+
-+/**************************************************************
-+ XENBUS support code
-+**************************************************************/
-+
-+static int setup_tpmring(struct xenbus_device *dev,
-+ struct tpm_private *tp)
-+{
-+ tpmif_tx_interface_t *sring;
-+ int err;
-+
-+ tp->ring_ref = GRANT_INVALID_REF;
-+
-+ sring = (void *)__get_free_page(GFP_KERNEL);
-+ if (!sring) {
-+ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
-+ return -ENOMEM;
-+ }
-+ tp->tx = sring;
-+
-+ err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
-+ if (err < 0) {
-+ free_page((unsigned long)sring);
-+ tp->tx = NULL;
-+ xenbus_dev_fatal(dev, err, "allocating grant reference");
-+ goto fail;
-+ }
-+ tp->ring_ref = err;
-+
-+ err = tpmif_connect(dev, tp, dev->otherend_id);
-+ if (err)
-+ goto fail;
-+
-+ return 0;
-+fail:
-+ destroy_tpmring(tp);
-+ return err;
-+}
-+
-+
-+static void destroy_tpmring(struct tpm_private *tp)
-+{
-+ tpmif_set_connected_state(tp, 0);
-+
-+ if (tp->ring_ref != GRANT_INVALID_REF) {
-+ gnttab_end_foreign_access(tp->ring_ref, 0,
-+ (unsigned long)tp->tx);
-+ tp->ring_ref = GRANT_INVALID_REF;
-+ tp->tx = NULL;
-+ }
-+
-+ if (tp->irq)
-+ unbind_from_irqhandler(tp->irq, tp);
-+
-+ tp->irq = 0;
-+}
-+
-+
-+static int talk_to_backend(struct xenbus_device *dev,
-+ struct tpm_private *tp)
-+{
-+ const char *message = NULL;
-+ int err;
-+ struct xenbus_transaction xbt;
-+
-+ err = setup_tpmring(dev, tp);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "setting up ring");
-+ goto out;
-+ }
-+
-+again:
-+ err = xenbus_transaction_start(&xbt);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "starting transaction");
-+ goto destroy_tpmring;
-+ }
-+
-+ err = xenbus_printf(xbt, dev->nodename,
-+ "ring-ref","%u", tp->ring_ref);
-+ if (err) {
-+ message = "writing ring-ref";
-+ goto abort_transaction;
-+ }
-+
-+ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
-+ irq_to_evtchn_port(tp->irq));
-+ if (err) {
-+ message = "writing event-channel";
-+ goto abort_transaction;
-+ }
-+
-+ err = xenbus_transaction_end(xbt, 0);
-+ if (err == -EAGAIN)
-+ goto again;
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "completing transaction");
-+ goto destroy_tpmring;
-+ }
-+
-+ xenbus_switch_state(dev, XenbusStateConnected);
-+
-+ return 0;
-+
-+abort_transaction:
-+ xenbus_transaction_end(xbt, 1);
-+ if (message)
-+ xenbus_dev_error(dev, err, "%s", message);
-+destroy_tpmring:
-+ destroy_tpmring(tp);
-+out:
-+ return err;
-+}
-+
-+/**
-+ * Callback received when the backend's state changes.
-+ */
-+static void backend_changed(struct xenbus_device *dev,
-+ enum xenbus_state backend_state)
-+{
-+ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
-+ DPRINTK("\n");
-+
-+ switch (backend_state) {
-+ case XenbusStateInitialising:
-+ case XenbusStateInitWait:
-+ case XenbusStateInitialised:
-+ case XenbusStateUnknown:
-+ break;
-+
-+ case XenbusStateConnected:
-+ tpmif_set_connected_state(tp, 1);
-+ break;
-+
-+ case XenbusStateClosing:
-+ tpmif_set_connected_state(tp, 0);
-+ xenbus_frontend_closed(dev);
-+ break;
-+
-+ case XenbusStateClosed:
-+ tpmif_set_connected_state(tp, 0);
-+ if (tp->is_suspended == 0)
-+ device_unregister(&dev->dev);
-+ xenbus_frontend_closed(dev);
-+ break;
-+ }
-+}
-+
-+static int tpmfront_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err;
-+ int handle;
-+ struct tpm_private *tp = tpm_private_get();
-+
-+ if (!tp)
-+ return -ENOMEM;
-+
-+ tp->chip = init_vtpm(&dev->dev, tp);
-+ if (IS_ERR(tp->chip))
-+ return PTR_ERR(tp->chip);
-+
-+ err = xenbus_scanf(XBT_NIL, dev->nodename,
-+ "handle", "%i", &handle);
-+ if (XENBUS_EXIST_ERR(err))
-+ return err;
-+
-+ if (err < 0) {
-+ xenbus_dev_fatal(dev,err,"reading virtual-device");
-+ return err;
-+ }
-+
-+ tp->dev = dev;
-+
-+ err = talk_to_backend(dev, tp);
-+ if (err) {
-+ tpm_private_put();
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+
-+static int tpmfront_remove(struct xenbus_device *dev)
-+{
-+ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
-+ destroy_tpmring(tp);
-+ cleanup_vtpm(&dev->dev);
-+ return 0;
-+}
-+
-+static int tpmfront_suspend(struct xenbus_device *dev)
-+{
-+ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
-+ u32 ctr;
-+
-+ /* Take the lock, preventing any application from sending. */
-+ mutex_lock(&suspend_lock);
-+ tp->is_suspended = 1;
-+
-+ for (ctr = 0; atomic_read(&tp->tx_busy); ctr++) {
-+ if ((ctr % 10) == 0)
-+ printk("TPM-FE [INFO]: Waiting for outstanding "
-+ "request.\n");
-+ /* Wait for a request to be responded to. */
-+ interruptible_sleep_on_timeout(&tp->wait_q, 100);
-+ }
-+
-+ return 0;
-+}
-+
-+static int tpmfront_suspend_finish(struct tpm_private *tp)
-+{
-+ tp->is_suspended = 0;
-+ /* Allow applications to send again. */
-+ mutex_unlock(&suspend_lock);
-+ return 0;
-+}
-+
-+static int tpmfront_suspend_cancel(struct xenbus_device *dev)
-+{
-+ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
-+ return tpmfront_suspend_finish(tp);
-+}
-+
-+static int tpmfront_resume(struct xenbus_device *dev)
-+{
-+ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
-+ destroy_tpmring(tp);
-+ return talk_to_backend(dev, tp);
-+}
-+
-+static int tpmif_connect(struct xenbus_device *dev,
-+ struct tpm_private *tp,
-+ domid_t domid)
-+{
-+ int err;
-+
-+ tp->backend_id = domid;
-+
-+ err = bind_listening_port_to_irqhandler(
-+ domid, tpmif_int, IRQF_SAMPLE_RANDOM, "tpmif", tp);
-+ if (err <= 0) {
-+ WPRINTK("bind_listening_port_to_irqhandler failed "
-+ "(err=%d)\n", err);
-+ return err;
-+ }
-+ tp->irq = err;
-+
-+ return 0;
-+}
-+
-+static struct xenbus_device_id tpmfront_ids[] = {
-+ { "vtpm" },
-+ { "" }
-+};
-+
-+static struct xenbus_driver tpmfront = {
-+ .name = "vtpm",
-+ .owner = THIS_MODULE,
-+ .ids = tpmfront_ids,
-+ .probe = tpmfront_probe,
-+ .remove = tpmfront_remove,
-+ .resume = tpmfront_resume,
-+ .otherend_changed = backend_changed,
-+ .suspend = tpmfront_suspend,
-+ .suspend_cancel = tpmfront_suspend_cancel,
-+};
-+
-+static void __init init_tpm_xenbus(void)
-+{
-+ xenbus_register_frontend(&tpmfront);
-+}
-+
-+static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
-+ tp->tx_buffers[i] = tx_buffer_alloc();
-+ if (!tp->tx_buffers[i]) {
-+ tpmif_free_tx_buffers(tp);
-+ return -ENOMEM;
-+ }
-+ }
-+ return 0;
-+}
-+
-+static void tpmif_free_tx_buffers(struct tpm_private *tp)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; i < TPMIF_TX_RING_SIZE; i++)
-+ tx_buffer_free(tp->tx_buffers[i]);
-+}
-+
-+static void tpmif_rx_action(unsigned long priv)
-+{
-+ struct tpm_private *tp = (struct tpm_private *)priv;
-+ int i = 0;
-+ unsigned int received;
-+ unsigned int offset = 0;
-+ u8 *buffer;
-+ tpmif_tx_request_t *tx = &tp->tx->ring[i].req;
-+
-+ atomic_set(&tp->tx_busy, 0);
-+ wake_up_interruptible(&tp->wait_q);
-+
-+ received = tx->size;
-+
-+ buffer = kmalloc(received, GFP_ATOMIC);
-+ if (!buffer)
-+ return;
-+
-+ for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
-+ struct tx_buffer *txb = tp->tx_buffers[i];
-+ tpmif_tx_request_t *tx;
-+ unsigned int tocopy;
-+
-+ tx = &tp->tx->ring[i].req;
-+ tocopy = tx->size;
-+ if (tocopy > PAGE_SIZE)
-+ tocopy = PAGE_SIZE;
-+
-+ memcpy(&buffer[offset], txb->data, tocopy);
-+
-+ gnttab_release_grant_reference(&gref_head, tx->ref);
-+
-+ offset += tocopy;
-+ }
-+
-+ vtpm_vd_recv(tp->chip, buffer, received, tp->tx_remember);
-+ kfree(buffer);
-+}
-+
-+
-+static irqreturn_t tpmif_int(int irq, void *tpm_priv)
-+{
-+ struct tpm_private *tp = tpm_priv;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&tp->tx_lock, flags);
-+ tpmif_rx_tasklet.data = (unsigned long)tp;
-+ tasklet_schedule(&tpmif_rx_tasklet);
-+ spin_unlock_irqrestore(&tp->tx_lock, flags);
-+
-+ return IRQ_HANDLED;
-+}
-+
-+
-+static int tpm_xmit(struct tpm_private *tp,
-+ const u8 * buf, size_t count, int isuserbuffer,
-+ void *remember)
-+{
-+ tpmif_tx_request_t *tx;
-+ TPMIF_RING_IDX i;
-+ unsigned int offset = 0;
-+
-+ spin_lock_irq(&tp->tx_lock);
-+
-+ if (unlikely(atomic_read(&tp->tx_busy))) {
-+ printk("tpm_xmit: There's an outstanding request/response "
-+ "on the way!\n");
-+ spin_unlock_irq(&tp->tx_lock);
-+ return -EBUSY;
-+ }
-+
-+ if (tp->is_connected != 1) {
-+ spin_unlock_irq(&tp->tx_lock);
-+ return -EIO;
-+ }
-+
-+ for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
-+ struct tx_buffer *txb = tp->tx_buffers[i];
-+ int copied;
-+
-+ if (!txb) {
-+			DPRINTK("txb (i=%d) is NULL. buffers initialized?\n"
-+ "Not transmitting anything!\n", i);
-+ spin_unlock_irq(&tp->tx_lock);
-+ return -EFAULT;
-+ }
-+
-+ copied = tx_buffer_copy(txb, &buf[offset], count,
-+ isuserbuffer);
-+ if (copied < 0) {
-+ /* An error occurred */
-+ spin_unlock_irq(&tp->tx_lock);
-+ return copied;
-+ }
-+ count -= copied;
-+ offset += copied;
-+
-+ tx = &tp->tx->ring[i].req;
-+ tx->addr = virt_to_machine(txb->data);
-+ tx->size = txb->len;
-+
-+ DPRINTK("First 4 characters sent by TPM-FE are "
-+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
-+ txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
-+
-+		/* Get the grant table reference for this page. */
-+ tx->ref = gnttab_claim_grant_reference(&gref_head);
-+ if (tx->ref == -ENOSPC) {
-+ spin_unlock_irq(&tp->tx_lock);
-+ DPRINTK("Grant table claim reference failed in "
-+ "func:%s line:%d file:%s\n",
-+ __FUNCTION__, __LINE__, __FILE__);
-+ return -ENOSPC;
-+ }
-+ gnttab_grant_foreign_access_ref(tx->ref,
-+ tp->backend_id,
-+ virt_to_mfn(txb->data),
-+ 0 /*RW*/);
-+ wmb();
-+ }
-+
-+ atomic_set(&tp->tx_busy, 1);
-+ tp->tx_remember = remember;
-+
-+ mb();
-+
-+ notify_remote_via_irq(tp->irq);
-+
-+ spin_unlock_irq(&tp->tx_lock);
-+ return offset;
-+}
-+
-+
-+static void tpmif_notify_upperlayer(struct tpm_private *tp)
-+{
-+ /* Notify upper layer about the state of the connection to the BE. */
-+ vtpm_vd_status(tp->chip, (tp->is_connected
-+ ? TPM_VD_STATUS_CONNECTED
-+ : TPM_VD_STATUS_DISCONNECTED));
-+}
-+
-+
-+static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
-+{
-+ /*
-+ * Don't notify upper layer if we are in suspend mode and
-+	 * should disconnect - the assumption is that we will resume.
-+ * The mutex keeps apps from sending.
-+ */
-+ if (is_connected == 0 && tp->is_suspended == 1)
-+ return;
-+
-+ /*
-+ * Unlock the mutex if we are connected again
-+ * after being suspended - now resuming.
-+ * This also removes the suspend state.
-+ */
-+ if (is_connected == 1 && tp->is_suspended == 1)
-+ tpmfront_suspend_finish(tp);
-+
-+ if (is_connected != tp->is_connected) {
-+ tp->is_connected = is_connected;
-+ tpmif_notify_upperlayer(tp);
-+ }
-+}
-+
-+
-+
-+/* =================================================================
-+ * Initialization function.
-+ * =================================================================
-+ */
-+
-+
-+static int __init tpmif_init(void)
-+{
-+ struct tpm_private *tp;
-+
-+ if (is_initial_xendomain())
-+ return -EPERM;
-+
-+ tp = tpm_private_get();
-+ if (!tp)
-+ return -ENOMEM;
-+
-+ IPRINTK("Initialising the vTPM driver.\n");
-+ if (gnttab_alloc_grant_references(TPMIF_TX_RING_SIZE,
-+ &gref_head) < 0) {
-+ tpm_private_put();
-+ return -EFAULT;
-+ }
-+
-+ init_tpm_xenbus();
-+ return 0;
-+}
-+
-+
-+module_init(tpmif_init);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/char/tty_io.c ubuntu-gutsy-xen/drivers/char/tty_io.c
---- ubuntu-gutsy/drivers/char/tty_io.c 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/char/tty_io.c 2007-08-18 12:38:02.000000000 -0400
-@@ -133,6 +133,8 @@
- DEFINE_MUTEX(tty_mutex);
- EXPORT_SYMBOL(tty_mutex);
-
-+int console_use_vt = 1;
-+
- #ifdef CONFIG_UNIX98_PTYS
- extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */
- extern int pty_limit; /* Config limit on Unix98 ptys */
-@@ -2581,7 +2583,7 @@
- goto got_driver;
- }
- #ifdef CONFIG_VT
-- if (device == MKDEV(TTY_MAJOR,0)) {
-+ if (console_use_vt && device == MKDEV(TTY_MAJOR,0)) {
- extern struct tty_driver *console_driver;
- driver = console_driver;
- index = fg_console;
-@@ -4033,6 +4035,8 @@
- #endif
-
- #ifdef CONFIG_VT
-+ if (!console_use_vt)
-+ goto out_vt;
- cdev_init(&vc0_cdev, &console_fops);
- if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
- register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
-@@ -4040,6 +4044,7 @@
- device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), "tty0");
-
- vty_init();
-+ out_vt:
- #endif
- return 0;
- }
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/ide/ide-lib.c ubuntu-gutsy-xen/drivers/ide/ide-lib.c
---- ubuntu-gutsy/drivers/ide/ide-lib.c 2007-08-18 09:40:30.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/ide/ide-lib.c 2007-08-18 12:38:02.000000000 -0400
-@@ -341,12 +341,21 @@
- {
- u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */
-
-+#ifndef CONFIG_XEN
- if (!PCI_DMA_BUS_IS_PHYS) {
- addr = BLK_BOUNCE_ANY;
- } else if (on && drive->media == ide_disk) {
- if (HWIF(drive)->pci_dev)
- addr = HWIF(drive)->pci_dev->dma_mask;
- }
-+#else
-+ if (on && drive->media == ide_disk) {
-+ if (!PCI_DMA_BUS_IS_PHYS)
-+ addr = BLK_BOUNCE_ANY;
-+ else if (HWIF(drive)->pci_dev)
-+ addr = HWIF(drive)->pci_dev->dma_mask;
-+ }
-+#endif
-
- if (drive->queue)
- blk_queue_bounce_limit(drive->queue, addr);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/kvm/Kconfig ubuntu-gutsy-xen/drivers/kvm/Kconfig
---- ubuntu-gutsy/drivers/kvm/Kconfig 2007-08-18 09:40:30.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/kvm/Kconfig 2007-08-18 12:38:02.000000000 -0400
-@@ -6,7 +6,7 @@
-
- config KVM
- tristate "Kernel-based Virtual Machine (KVM) support"
-- depends on X86 && EXPERIMENTAL
-+ depends on X86 && EXPERIMENTAL && !XEN
- ---help---
- Support hosting fully virtualized guest machines using hardware
- virtualization extensions. You will need a fairly recent
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/Makefile ubuntu-gutsy-xen/drivers/Makefile
---- ubuntu-gutsy/drivers/Makefile 2007-08-18 09:40:29.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -31,6 +31,7 @@
- obj-$(CONFIG_NUBUS) += nubus/
- obj-$(CONFIG_ATM) += atm/
- obj-y += macintosh/
-+obj-$(CONFIG_XEN) += xen/
- obj-$(CONFIG_IDE) += ide/
- obj-$(CONFIG_FC4) += fc4/
- obj-$(CONFIG_SCSI) += scsi/
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/oprofile/buffer_sync.c ubuntu-gutsy-xen/drivers/oprofile/buffer_sync.c
---- ubuntu-gutsy/drivers/oprofile/buffer_sync.c 2007-08-18 09:40:31.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/oprofile/buffer_sync.c 2007-08-18 12:38:02.000000000 -0400
-@@ -6,6 +6,10 @@
- *
- * @author John Levon <levon@movementarian.org>
- *
-+ * Modified by Aravind Menon for Xen
-+ * These modifications are:
-+ * Copyright (C) 2005 Hewlett-Packard Co.
-+ *
- * This is the core of the buffer management. Each
- * CPU buffer is processed and entered into the
- * global event buffer. Such processing is necessary
-@@ -39,6 +43,9 @@
- static DEFINE_SPINLOCK(task_mortuary);
- static void process_task_mortuary(void);
-
-+#ifdef CONFIG_XEN
-+static int cpu_current_domain[NR_CPUS];
-+#endif
-
- /* Take ownership of the task struct and place it on the
- * list for processing. Only after two full buffer syncs
-@@ -147,6 +154,13 @@
- int sync_start(void)
- {
- int err;
-+#ifdef CONFIG_XEN
-+ int i;
-+
-+ for (i = 0; i < NR_CPUS; i++) {
-+ cpu_current_domain[i] = COORDINATOR_DOMAIN;
-+ }
-+#endif
-
- start_cpu_work();
-
-@@ -276,15 +290,33 @@
- last_cookie = INVALID_COOKIE;
- }
-
--static void add_kernel_ctx_switch(unsigned int in_kernel)
-+static void add_cpu_mode_switch(unsigned int cpu_mode)
- {
- add_event_entry(ESCAPE_CODE);
-- if (in_kernel)
-- add_event_entry(KERNEL_ENTER_SWITCH_CODE);
-- else
-- add_event_entry(KERNEL_EXIT_SWITCH_CODE);
-+ switch (cpu_mode) {
-+ case CPU_MODE_USER:
-+ add_event_entry(USER_ENTER_SWITCH_CODE);
-+ break;
-+ case CPU_MODE_KERNEL:
-+ add_event_entry(KERNEL_ENTER_SWITCH_CODE);
-+ break;
-+ case CPU_MODE_XEN:
-+ add_event_entry(XEN_ENTER_SWITCH_CODE);
-+ break;
-+ default:
-+ break;
-+ }
- }
--
-+
-+#ifdef CONFIG_XEN
-+static void add_domain_switch(unsigned long domain_id)
-+{
-+ add_event_entry(ESCAPE_CODE);
-+ add_event_entry(DOMAIN_SWITCH_CODE);
-+ add_event_entry(domain_id);
-+}
-+#endif
-+
- static void
- add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
- {
-@@ -349,9 +381,9 @@
- * for later lookup from userspace.
- */
- static int
--add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
-+add_sample(struct mm_struct * mm, struct op_sample * s, int cpu_mode)
- {
-- if (in_kernel) {
-+ if (cpu_mode >= CPU_MODE_KERNEL) {
- add_sample_entry(s->eip, s->event);
- return 1;
- } else if (mm) {
-@@ -497,15 +529,24 @@
- struct mm_struct *mm = NULL;
- struct task_struct * new;
- unsigned long cookie = 0;
-- int in_kernel = 1;
-+ int cpu_mode = 1;
- unsigned int i;
- sync_buffer_state state = sb_buffer_start;
- unsigned long available;
-+ int domain_switch = 0;
-
- mutex_lock(&buffer_mutex);
-
- add_cpu_switch(cpu);
-
-+#ifdef CONFIG_XEN
-+ /* We need to assign the first samples in this CPU buffer to the
-+ same domain that we were processing at the last sync_buffer */
-+ if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
-+ add_domain_switch(cpu_current_domain[cpu]);
-+ }
-+#endif
-+
- /* Remember, only we can modify tail_pos */
-
- available = get_slots(cpu_buf);
-@@ -513,16 +554,20 @@
- for (i = 0; i < available; ++i) {
- struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
-
-- if (is_code(s->eip)) {
-- if (s->event <= CPU_IS_KERNEL) {
-- /* kernel/userspace switch */
-- in_kernel = s->event;
-+ if (is_code(s->eip) && !domain_switch) {
-+ if (s->event <= CPU_MODE_XEN) {
-+ /* xen/kernel/userspace switch */
-+ cpu_mode = s->event;
- if (state == sb_buffer_start)
- state = sb_sample_start;
-- add_kernel_ctx_switch(s->event);
-+ add_cpu_mode_switch(s->event);
- } else if (s->event == CPU_TRACE_BEGIN) {
- state = sb_bt_start;
- add_trace_begin();
-+#ifdef CONFIG_XEN
-+ } else if (s->event == CPU_DOMAIN_SWITCH) {
-+ domain_switch = 1;
-+#endif
- } else {
- struct mm_struct * oldmm = mm;
-
-@@ -536,8 +581,18 @@
- add_user_ctx_switch(new, cookie);
- }
- } else {
-+#ifdef CONFIG_XEN
-+ if (domain_switch) {
-+ cpu_current_domain[cpu] = s->eip;
-+ add_domain_switch(s->eip);
-+ domain_switch = 0;
-+ } else if (cpu_current_domain[cpu] !=
-+ COORDINATOR_DOMAIN) {
-+ add_sample_entry(s->eip, s->event);
-+ } else
-+#endif
- if (state >= sb_bt_start &&
-- !add_sample(mm, s, in_kernel)) {
-+ !add_sample(mm, s, cpu_mode)) {
- if (state == sb_bt_start) {
- state = sb_bt_ignore;
- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
-@@ -549,6 +604,13 @@
- }
- release_mm(mm);
-
-+#ifdef CONFIG_XEN
-+ /* We reset domain to COORDINATOR at each CPU switch */
-+ if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
-+ add_domain_switch(COORDINATOR_DOMAIN);
-+ }
-+#endif
-+
- mark_done(cpu);
-
- mutex_unlock(&buffer_mutex);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/oprofile/cpu_buffer.c ubuntu-gutsy-xen/drivers/oprofile/cpu_buffer.c
---- ubuntu-gutsy/drivers/oprofile/cpu_buffer.c 2007-08-18 09:40:31.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/oprofile/cpu_buffer.c 2007-08-18 12:38:02.000000000 -0400
-@@ -6,6 +6,10 @@
- *
- * @author John Levon <levon@movementarian.org>
- *
-+ * Modified by Aravind Menon for Xen
-+ * These modifications are:
-+ * Copyright (C) 2005 Hewlett-Packard Co.
-+ *
- * Each CPU has a local buffer that stores PC value/event
- * pairs. We also log context switches when we notice them.
- * Eventually each CPU's buffer is processed into the global
-@@ -34,6 +38,12 @@
- #define DEFAULT_TIMER_EXPIRE (HZ / 10)
- static int work_enabled;
-
-+#ifndef CONFIG_XEN
-+#define current_domain COORDINATOR_DOMAIN
-+#else
-+static int32_t current_domain = COORDINATOR_DOMAIN;
-+#endif
-+
- void free_cpu_buffers(void)
- {
- int i;
-@@ -57,7 +67,7 @@
- goto fail;
-
- b->last_task = NULL;
-- b->last_is_kernel = -1;
-+ b->last_cpu_mode = -1;
- b->tracing = 0;
- b->buffer_size = buffer_size;
- b->tail_pos = 0;
-@@ -113,7 +123,7 @@
- * collected will populate the buffer with proper
- * values to initialize the buffer
- */
-- cpu_buf->last_is_kernel = -1;
-+ cpu_buf->last_cpu_mode = -1;
- cpu_buf->last_task = NULL;
- }
-
-@@ -163,13 +173,13 @@
- * because of the head/tail separation of the writer and reader
- * of the CPU buffer.
- *
-- * is_kernel is needed because on some architectures you cannot
-+ * cpu_mode is needed because on some architectures you cannot
- * tell if you are in kernel or user space simply by looking at
-- * pc. We tag this in the buffer by generating kernel enter/exit
-- * events whenever is_kernel changes
-+ * pc. We tag this in the buffer by generating kernel/user (and xen)
-+ * enter events whenever cpu_mode changes
- */
- static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
-- int is_kernel, unsigned long event)
-+ int cpu_mode, unsigned long event)
- {
- struct task_struct * task;
-
-@@ -180,18 +190,18 @@
- return 0;
- }
-
-- is_kernel = !!is_kernel;
--
- task = current;
-
- /* notice a switch from user->kernel or vice versa */
-- if (cpu_buf->last_is_kernel != is_kernel) {
-- cpu_buf->last_is_kernel = is_kernel;
-- add_code(cpu_buf, is_kernel);
-+ if (cpu_buf->last_cpu_mode != cpu_mode) {
-+ cpu_buf->last_cpu_mode = cpu_mode;
-+ add_code(cpu_buf, cpu_mode);
- }
--
-+
- /* notice a task switch */
-- if (cpu_buf->last_task != task) {
-+ /* if not processing other domain samples */
-+ if ((cpu_buf->last_task != task) &&
-+ (current_domain == COORDINATOR_DOMAIN)) {
- cpu_buf->last_task = task;
- add_code(cpu_buf, (unsigned long)task);
- }
-@@ -275,6 +285,27 @@
- add_sample(cpu_buf, pc, 0);
- }
-
-+#ifdef CONFIG_XEN
-+int oprofile_add_domain_switch(int32_t domain_id)
-+{
-+ struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
-+
-+ /* should have space for switching into and out of domain
-+ (2 slots each) plus one sample and one cpu mode switch */
-+ if (((nr_available_slots(cpu_buf) < 6) &&
-+ (domain_id != COORDINATOR_DOMAIN)) ||
-+ (nr_available_slots(cpu_buf) < 2))
-+ return 0;
-+
-+ add_code(cpu_buf, CPU_DOMAIN_SWITCH);
-+ add_sample(cpu_buf, domain_id, 0);
-+
-+ current_domain = domain_id;
-+
-+ return 1;
-+}
-+#endif
-+
- /*
- * This serves to avoid cpu buffer overflow, and makes sure
- * the task mortuary progresses
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/oprofile/cpu_buffer.h ubuntu-gutsy-xen/drivers/oprofile/cpu_buffer.h
---- ubuntu-gutsy/drivers/oprofile/cpu_buffer.h 2007-08-18 09:40:31.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/oprofile/cpu_buffer.h 2007-08-18 12:38:02.000000000 -0400
-@@ -36,7 +36,7 @@
- volatile unsigned long tail_pos;
- unsigned long buffer_size;
- struct task_struct * last_task;
-- int last_is_kernel;
-+ int last_cpu_mode;
- int tracing;
- struct op_sample * buffer;
- unsigned long sample_received;
-@@ -51,7 +51,10 @@
- void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
-
- /* transient events for the CPU buffer -> event buffer */
--#define CPU_IS_KERNEL 1
--#define CPU_TRACE_BEGIN 2
-+#define CPU_MODE_USER 0
-+#define CPU_MODE_KERNEL 1
-+#define CPU_MODE_XEN 2
-+#define CPU_TRACE_BEGIN 3
-+#define CPU_DOMAIN_SWITCH 4
-
- #endif /* OPROFILE_CPU_BUFFER_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/oprofile/event_buffer.h ubuntu-gutsy-xen/drivers/oprofile/event_buffer.h
---- ubuntu-gutsy/drivers/oprofile/event_buffer.h 2007-08-18 09:40:31.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/oprofile/event_buffer.h 2007-08-18 12:38:02.000000000 -0400
-@@ -29,15 +29,20 @@
- #define CPU_SWITCH_CODE 2
- #define COOKIE_SWITCH_CODE 3
- #define KERNEL_ENTER_SWITCH_CODE 4
--#define KERNEL_EXIT_SWITCH_CODE 5
-+#define USER_ENTER_SWITCH_CODE 5
- #define MODULE_LOADED_CODE 6
- #define CTX_TGID_CODE 7
- #define TRACE_BEGIN_CODE 8
- #define TRACE_END_CODE 9
-+#define XEN_ENTER_SWITCH_CODE 10
-+#define DOMAIN_SWITCH_CODE 11
-
- #define INVALID_COOKIE ~0UL
- #define NO_COOKIE 0UL
-
-+/* Constant used to refer to coordinator domain (Xen) */
-+#define COORDINATOR_DOMAIN -1
-+
- /* add data to the event buffer */
- void add_event_entry(unsigned long data);
-
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/oprofile/oprof.c ubuntu-gutsy-xen/drivers/oprofile/oprof.c
---- ubuntu-gutsy/drivers/oprofile/oprof.c 2007-08-18 09:40:31.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/oprofile/oprof.c 2007-08-18 12:38:02.000000000 -0400
-@@ -5,6 +5,10 @@
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
-+ *
-+ * Modified by Aravind Menon for Xen
-+ * These modifications are:
-+ * Copyright (C) 2005 Hewlett-Packard Co.
- */
-
- #include <linux/kernel.h>
-@@ -19,7 +23,7 @@
- #include "cpu_buffer.h"
- #include "buffer_sync.h"
- #include "oprofile_stats.h"
--
-+
- struct oprofile_operations oprofile_ops;
-
- unsigned long oprofile_started;
-@@ -33,6 +37,34 @@
- */
- static int timer = 0;
-
-+#ifdef CONFIG_XEN
-+int oprofile_set_active(int active_domains[], unsigned int adomains)
-+{
-+ int err;
-+
-+ if (!oprofile_ops.set_active)
-+ return -EINVAL;
-+
-+ mutex_lock(&start_mutex);
-+ err = oprofile_ops.set_active(active_domains, adomains);
-+ mutex_unlock(&start_mutex);
-+ return err;
-+}
-+
-+int oprofile_set_passive(int passive_domains[], unsigned int pdomains)
-+{
-+ int err;
-+
-+ if (!oprofile_ops.set_passive)
-+ return -EINVAL;
-+
-+ mutex_lock(&start_mutex);
-+ err = oprofile_ops.set_passive(passive_domains, pdomains);
-+ mutex_unlock(&start_mutex);
-+ return err;
-+}
-+#endif
-+
- int oprofile_setup(void)
- {
- int err;
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/oprofile/oprof.h ubuntu-gutsy-xen/drivers/oprofile/oprof.h
---- ubuntu-gutsy/drivers/oprofile/oprof.h 2007-08-18 09:40:31.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/oprofile/oprof.h 2007-08-18 12:38:02.000000000 -0400
-@@ -35,5 +35,8 @@
- void oprofile_timer_init(struct oprofile_operations * ops);
-
- int oprofile_set_backtrace(unsigned long depth);
-+
-+int oprofile_set_active(int active_domains[], unsigned int adomains);
-+int oprofile_set_passive(int passive_domains[], unsigned int pdomains);
-
- #endif /* OPROF_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/oprofile/oprofile_files.c ubuntu-gutsy-xen/drivers/oprofile/oprofile_files.c
---- ubuntu-gutsy/drivers/oprofile/oprofile_files.c 2007-08-18 09:40:31.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/oprofile/oprofile_files.c 2007-08-18 12:38:02.000000000 -0400
-@@ -5,15 +5,21 @@
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
-+ *
-+ * Modified by Aravind Menon for Xen
-+ * These modifications are:
-+ * Copyright (C) 2005 Hewlett-Packard Co.
- */
-
- #include <linux/fs.h>
- #include <linux/oprofile.h>
-+#include <asm/uaccess.h>
-+#include <linux/ctype.h>
-
- #include "event_buffer.h"
- #include "oprofile_stats.h"
- #include "oprof.h"
--
-+
- unsigned long fs_buffer_size = 131072;
- unsigned long fs_cpu_buffer_size = 8192;
- unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
-@@ -117,11 +123,208 @@
- static const struct file_operations dump_fops = {
- .write = dump_write,
- };
--
-+
-+#ifdef CONFIG_XEN
-+
-+#define TMPBUFSIZE 512
-+
-+static unsigned int adomains = 0;
-+static int active_domains[MAX_OPROF_DOMAINS + 1];
-+static DEFINE_MUTEX(adom_mutex);
-+
-+static ssize_t adomain_write(struct file * file, char const __user * buf,
-+ size_t count, loff_t * offset)
-+{
-+ char *tmpbuf;
-+ char *startp, *endp;
-+ int i;
-+ unsigned long val;
-+ ssize_t retval = count;
-+
-+ if (*offset)
-+ return -EINVAL;
-+ if (count > TMPBUFSIZE - 1)
-+ return -EINVAL;
-+
-+ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
-+ return -ENOMEM;
-+
-+ if (copy_from_user(tmpbuf, buf, count)) {
-+ kfree(tmpbuf);
-+ return -EFAULT;
-+ }
-+ tmpbuf[count] = 0;
-+
-+ mutex_lock(&adom_mutex);
-+
-+ startp = tmpbuf;
-+ /* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
-+ for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
-+ val = simple_strtoul(startp, &endp, 0);
-+ if (endp == startp)
-+ break;
-+ while (ispunct(*endp) || isspace(*endp))
-+ endp++;
-+ active_domains[i] = val;
-+ if (active_domains[i] != val)
-+ /* Overflow, force error below */
-+ i = MAX_OPROF_DOMAINS + 1;
-+ startp = endp;
-+ }
-+ /* Force error on trailing junk */
-+ adomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
-+
-+ kfree(tmpbuf);
-+
-+ if (adomains > MAX_OPROF_DOMAINS
-+ || oprofile_set_active(active_domains, adomains)) {
-+ adomains = 0;
-+ retval = -EINVAL;
-+ }
-+
-+ mutex_unlock(&adom_mutex);
-+ return retval;
-+}
-+
-+static ssize_t adomain_read(struct file * file, char __user * buf,
-+ size_t count, loff_t * offset)
-+{
-+ char * tmpbuf;
-+ size_t len;
-+ int i;
-+ ssize_t retval;
-+
-+ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
-+ return -ENOMEM;
-+
-+ mutex_lock(&adom_mutex);
-+
-+ len = 0;
-+ for (i = 0; i < adomains; i++)
-+ len += snprintf(tmpbuf + len,
-+ len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
-+ "%u ", active_domains[i]);
-+ WARN_ON(len > TMPBUFSIZE);
-+ if (len != 0 && len <= TMPBUFSIZE)
-+ tmpbuf[len-1] = '\n';
-+
-+ mutex_unlock(&adom_mutex);
-+
-+ retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
-+
-+ kfree(tmpbuf);
-+ return retval;
-+}
-+
-+
-+static struct file_operations active_domain_ops = {
-+ .read = adomain_read,
-+ .write = adomain_write,
-+};
-+
-+static unsigned int pdomains = 0;
-+static int passive_domains[MAX_OPROF_DOMAINS + 1];
-+static DEFINE_MUTEX(pdom_mutex);
-+
-+static ssize_t pdomain_write(struct file * file, char const __user * buf,
-+ size_t count, loff_t * offset)
-+{
-+ char *tmpbuf;
-+ char *startp, *endp;
-+ int i;
-+ unsigned long val;
-+ ssize_t retval = count;
-+
-+ if (*offset)
-+ return -EINVAL;
-+ if (count > TMPBUFSIZE - 1)
-+ return -EINVAL;
-+
-+ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
-+ return -ENOMEM;
-+
-+ if (copy_from_user(tmpbuf, buf, count)) {
-+ kfree(tmpbuf);
-+ return -EFAULT;
-+ }
-+ tmpbuf[count] = 0;
-+
-+ mutex_lock(&pdom_mutex);
-+
-+ startp = tmpbuf;
-+ /* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
-+ for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
-+ val = simple_strtoul(startp, &endp, 0);
-+ if (endp == startp)
-+ break;
-+ while (ispunct(*endp) || isspace(*endp))
-+ endp++;
-+ passive_domains[i] = val;
-+ if (passive_domains[i] != val)
-+ /* Overflow, force error below */
-+ i = MAX_OPROF_DOMAINS + 1;
-+ startp = endp;
-+ }
-+ /* Force error on trailing junk */
-+ pdomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
-+
-+ kfree(tmpbuf);
-+
-+ if (pdomains > MAX_OPROF_DOMAINS
-+ || oprofile_set_passive(passive_domains, pdomains)) {
-+ pdomains = 0;
-+ retval = -EINVAL;
-+ }
-+
-+ mutex_unlock(&pdom_mutex);
-+ return retval;
-+}
-+
-+static ssize_t pdomain_read(struct file * file, char __user * buf,
-+ size_t count, loff_t * offset)
-+{
-+ char * tmpbuf;
-+ size_t len;
-+ int i;
-+ ssize_t retval;
-+
-+ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
-+ return -ENOMEM;
-+
-+ mutex_lock(&pdom_mutex);
-+
-+ len = 0;
-+ for (i = 0; i < pdomains; i++)
-+ len += snprintf(tmpbuf + len,
-+ len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
-+ "%u ", passive_domains[i]);
-+ WARN_ON(len > TMPBUFSIZE);
-+ if (len != 0 && len <= TMPBUFSIZE)
-+ tmpbuf[len-1] = '\n';
-+
-+ mutex_unlock(&pdom_mutex);
-+
-+ retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
-+
-+ kfree(tmpbuf);
-+ return retval;
-+}
-+
-+static struct file_operations passive_domain_ops = {
-+ .read = pdomain_read,
-+ .write = pdomain_write,
-+};
-+
-+#endif /* CONFIG_XEN */
-+
- void oprofile_create_files(struct super_block * sb, struct dentry * root)
- {
- oprofilefs_create_file(sb, root, "enable", &enable_fops);
- oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
-+#ifdef CONFIG_XEN
-+ oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
-+ oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
-+#endif
- oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
- oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
- oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/pci/Kconfig ubuntu-gutsy-xen/drivers/pci/Kconfig
---- ubuntu-gutsy/drivers/pci/Kconfig 2007-08-18 09:40:31.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/pci/Kconfig 2007-08-18 12:38:02.000000000 -0400
-@@ -34,7 +34,7 @@
- config HT_IRQ
- bool "Interrupts on hypertransport devices"
- default y
-- depends on PCI && X86_LOCAL_APIC && X86_IO_APIC
-+ depends on PCI && X86_LOCAL_APIC && X86_IO_APIC && !XEN
- help
- This allows native hypertransport devices to use interrupts.
-
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/serial/Kconfig ubuntu-gutsy-xen/drivers/serial/Kconfig
---- ubuntu-gutsy/drivers/serial/Kconfig 2007-08-18 09:40:32.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/serial/Kconfig 2007-08-18 12:38:02.000000000 -0400
-@@ -12,6 +12,7 @@
- config SERIAL_8250
- tristate "8250/16550 and compatible serial support"
- depends on (BROKEN || !SPARC)
-+ depends on !XEN_DISABLE_SERIAL
- select SERIAL_CORE
- ---help---
- This selects whether you want to include the driver for the standard
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/video/console/Kconfig ubuntu-gutsy-xen/drivers/video/console/Kconfig
---- ubuntu-gutsy/drivers/video/console/Kconfig 2007-08-18 09:40:32.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/video/console/Kconfig 2007-08-18 12:38:02.000000000 -0400
-@@ -46,6 +46,7 @@
- config VIDEO_SELECT
- bool "Video mode selection support"
- depends on X86 && VGA_CONSOLE
-+ depends on !XEN
- ---help---
- This enables support for text mode selection on kernel startup. If
- you want to take advantage of some high-resolution text mode your
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/video/Kconfig ubuntu-gutsy-xen/drivers/video/Kconfig
---- ubuntu-gutsy/drivers/video/Kconfig 2007-08-18 09:40:32.000000000 -0400
-+++ ubuntu-gutsy-xen/drivers/video/Kconfig 2007-08-18 12:38:02.000000000 -0400
-@@ -1477,7 +1477,7 @@
- tristate "Cyberblade/i1 support"
- depends on FB && PCI && X86_32 && !64BIT
- select FB_CFB_IMAGEBLIT
-- select VIDEO_SELECT
-+ select VIDEO_SELECT if !XEN
- ---help---
- This driver is supposed to support the Trident Cyberblade/i1
- graphics core integrated in the VIA VT8601A North Bridge,
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/balloon/balloon.c ubuntu-gutsy-xen/drivers/xen/balloon/balloon.c
---- ubuntu-gutsy/drivers/xen/balloon/balloon.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/balloon/balloon.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,734 @@
-+/******************************************************************************
-+ * balloon.c
-+ *
-+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
-+ *
-+ * Copyright (c) 2003, B Dragovic
-+ * Copyright (c) 2003-2004, M Williamson, K Fraser
-+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/mman.h>
-+#include <linux/smp_lock.h>
-+#include <linux/pagemap.h>
-+#include <linux/bootmem.h>
-+#include <linux/highmem.h>
-+#include <linux/vmalloc.h>
-+#include <linux/mutex.h>
-+#include <xen/xen_proc.h>
-+#include <asm/hypervisor.h>
-+#include <xen/balloon.h>
-+#include <xen/interface/memory.h>
-+#include <asm/maddr.h>
-+#include <asm/page.h>
-+#include <asm/pgalloc.h>
-+#include <asm/pgtable.h>
-+#include <asm/uaccess.h>
-+#include <asm/tlb.h>
-+#include <linux/highmem.h>
-+#include <linux/list.h>
-+#include <xen/xenbus.h>
-+#include "common.h"
-+
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
-+#ifdef CONFIG_PROC_FS
-+static struct proc_dir_entry *balloon_pde;
-+#endif
-+
-+static DEFINE_MUTEX(balloon_mutex);
-+
-+/*
-+ * Protects atomic reservation decrease/increase against concurrent increases.
-+ * Also protects non-atomic updates of current_pages and driver_pages, and
-+ * balloon lists.
-+ */
-+DEFINE_SPINLOCK(balloon_lock);
-+
-+struct balloon_stats balloon_stats;
-+
-+/* We increase/decrease in batches which fit in a page */
-+static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
-+
-+/* VM /proc information for memory */
-+extern unsigned long totalram_pages;
-+#ifdef CONFIG_HIGHMEM
-+extern unsigned long totalhigh_pages;
-+#define totalhigh_pages(op) (totalhigh_pages op)
-+#else
-+#undef totalhigh_pages
-+#define totalhigh_pages(op)
-+#endif
-+extern unsigned long num_physpages;
-+
-+/* List of ballooned pages, threaded through the mem_map array. */
-+static LIST_HEAD(ballooned_pages);
-+
-+/* Main work function, always executed in process context. */
-+static void balloon_process(struct work_struct *unused);
-+static DECLARE_WORK(balloon_worker, balloon_process);
-+static struct timer_list balloon_timer;
-+
-+/* When ballooning out (allocating memory to return to Xen) we don't really
-+ want the kernel to try too hard since that can trigger the oom killer. */
-+#define GFP_BALLOON \
-+ (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
-+
-+#define PAGE_TO_LIST(p) (&(p)->lru)
-+#define LIST_TO_PAGE(l) list_entry((l), struct page, lru)
-+#define UNLIST_PAGE(p) \
-+ do { \
-+ list_del(PAGE_TO_LIST(p)); \
-+ PAGE_TO_LIST(p)->next = NULL; \
-+ PAGE_TO_LIST(p)->prev = NULL; \
-+ } while(0)
-+
-+#define IPRINTK(fmt, args...) \
-+ printk(KERN_INFO "xen_mem: " fmt, ##args)
-+#define WPRINTK(fmt, args...) \
-+ printk(KERN_WARNING "xen_mem: " fmt, ##args)
-+
-+/* balloon_append: add the given page to the balloon. */
-+static void balloon_append(struct page *page)
-+{
-+ /* Lowmem is re-populated first, so highmem pages go at list tail. */
-+ if (PageHighMem(page)) {
-+ list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
-+ bs.balloon_high++;
-+ totalhigh_pages(--);
-+ } else {
-+ list_add(PAGE_TO_LIST(page), &ballooned_pages);
-+ bs.balloon_low++;
-+ }
-+}
-+
-+/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
-+static struct page *balloon_retrieve(void)
-+{
-+ struct page *page;
-+
-+ if (list_empty(&ballooned_pages))
-+ return NULL;
-+
-+ page = LIST_TO_PAGE(ballooned_pages.next);
-+ UNLIST_PAGE(page);
-+
-+ if (PageHighMem(page)) {
-+ bs.balloon_high--;
-+ totalhigh_pages(++);
-+ }
-+ else
-+ bs.balloon_low--;
-+
-+ return page;
-+}
-+
-+static struct page *balloon_first_page(void)
-+{
-+ if (list_empty(&ballooned_pages))
-+ return NULL;
-+ return LIST_TO_PAGE(ballooned_pages.next);
-+}
-+
-+static struct page *balloon_next_page(struct page *page)
-+{
-+ struct list_head *next = PAGE_TO_LIST(page)->next;
-+ if (next == &ballooned_pages)
-+ return NULL;
-+ return LIST_TO_PAGE(next);
-+}
-+
-+static void balloon_alarm(unsigned long unused)
-+{
-+ schedule_work(&balloon_worker);
-+}
-+
-+static unsigned long current_target(void)
-+{
-+ unsigned long target = min(bs.target_pages, bs.hard_limit);
-+ if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high))
-+ target = bs.current_pages + bs.balloon_low + bs.balloon_high;
-+ return target;
-+}
-+
-+static int increase_reservation(unsigned long nr_pages)
-+{
-+ unsigned long pfn, i, flags;
-+ struct page *page;
-+ long rc;
-+ struct xen_memory_reservation reservation = {
-+ .address_bits = 0,
-+ .extent_order = 0,
-+ .domid = DOMID_SELF
-+ };
-+
-+ if (nr_pages > ARRAY_SIZE(frame_list))
-+ nr_pages = ARRAY_SIZE(frame_list);
-+
-+ balloon_lock(flags);
-+
-+ page = balloon_first_page();
-+ for (i = 0; i < nr_pages; i++) {
-+ BUG_ON(page == NULL);
-+		frame_list[i] = page_to_pfn(page);
-+ page = balloon_next_page(page);
-+ }
-+
-+ set_xen_guest_handle(reservation.extent_start, frame_list);
-+ reservation.nr_extents = nr_pages;
-+ rc = HYPERVISOR_memory_op(
-+ XENMEM_populate_physmap, &reservation);
-+ if (rc < nr_pages) {
-+ if (rc > 0) {
-+ int ret;
-+
-+ /* We hit the Xen hard limit: reprobe. */
-+ reservation.nr_extents = rc;
-+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+ &reservation);
-+ BUG_ON(ret != rc);
-+ }
-+ if (rc >= 0)
-+ bs.hard_limit = (bs.current_pages + rc -
-+ bs.driver_pages);
-+ goto out;
-+ }
-+
-+ for (i = 0; i < nr_pages; i++) {
-+ page = balloon_retrieve();
-+ BUG_ON(page == NULL);
-+
-+ pfn = page_to_pfn(page);
-+ BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
-+ phys_to_machine_mapping_valid(pfn));
-+
-+ set_phys_to_machine(pfn, frame_list[i]);
-+
-+#ifdef CONFIG_XEN
-+ /* Link back into the page tables if not highmem. */
-+ if (pfn < max_low_pfn) {
-+ int ret;
-+ ret = HYPERVISOR_update_va_mapping(
-+ (unsigned long)__va(pfn << PAGE_SHIFT),
-+ pfn_pte_ma(frame_list[i], PAGE_KERNEL),
-+ 0);
-+ BUG_ON(ret);
-+ }
-+#endif
-+
-+ /* Relinquish the page back to the allocator. */
-+ ClearPageReserved(page);
-+ init_page_count(page);
-+ __free_page(page);
-+ }
-+
-+ bs.current_pages += nr_pages;
-+ totalram_pages = bs.current_pages;
-+
-+ out:
-+ balloon_unlock(flags);
-+
-+ return 0;
-+}
-+
-+static int decrease_reservation(unsigned long nr_pages)
-+{
-+ unsigned long pfn, i, flags;
-+ struct page *page;
-+ void *v;
-+ int need_sleep = 0;
-+ int ret;
-+ struct xen_memory_reservation reservation = {
-+ .address_bits = 0,
-+ .extent_order = 0,
-+ .domid = DOMID_SELF
-+ };
-+
-+ if (nr_pages > ARRAY_SIZE(frame_list))
-+ nr_pages = ARRAY_SIZE(frame_list);
-+
-+ for (i = 0; i < nr_pages; i++) {
-+ if ((page = alloc_page(GFP_BALLOON)) == NULL) {
-+ nr_pages = i;
-+ need_sleep = 1;
-+ break;
-+ }
-+
-+ pfn = page_to_pfn(page);
-+ frame_list[i] = pfn_to_mfn(pfn);
-+
-+ if (!PageHighMem(page)) {
-+ v = phys_to_virt(pfn << PAGE_SHIFT);
-+ scrub_pages(v, 1);
-+#ifdef CONFIG_XEN
-+ ret = HYPERVISOR_update_va_mapping(
-+ (unsigned long)v, __pte_ma(0), 0);
-+ BUG_ON(ret);
-+#endif
-+ }
-+#ifdef CONFIG_XEN_SCRUB_PAGES
-+ else {
-+ v = kmap(page);
-+ scrub_pages(v, 1);
-+ kunmap(page);
-+ }
-+#endif
-+ }
-+
-+#ifdef CONFIG_XEN
-+ /* Ensure that ballooned highmem pages don't have kmaps. */
-+ kmap_flush_unused();
-+ flush_tlb_all();
-+#endif
-+
-+ balloon_lock(flags);
-+
-+ /* No more mappings: invalidate P2M and add to balloon. */
-+ for (i = 0; i < nr_pages; i++) {
-+ pfn = mfn_to_pfn(frame_list[i]);
-+ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
-+ balloon_append(pfn_to_page(pfn));
-+ }
-+
-+ set_xen_guest_handle(reservation.extent_start, frame_list);
-+ reservation.nr_extents = nr_pages;
-+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
-+ BUG_ON(ret != nr_pages);
-+
-+ bs.current_pages -= nr_pages;
-+ totalram_pages = bs.current_pages;
-+
-+ balloon_unlock(flags);
-+
-+ return need_sleep;
-+}
-+
-+/*
-+ * We avoid multiple worker processes conflicting via the balloon mutex.
-+ * We may of course race updates of the target counts (which are protected
-+ * by the balloon lock), or with changes to the Xen hard limit, but we will
-+ * recover from these in time.
-+ */
-+static void balloon_process(struct work_struct *unused)
-+{
-+ int need_sleep = 0;
-+ long credit;
-+
-+ mutex_lock(&balloon_mutex);
-+
-+ do {
-+ credit = current_target() - bs.current_pages;
-+ if (credit > 0)
-+ need_sleep = (increase_reservation(credit) != 0);
-+ if (credit < 0)
-+ need_sleep = (decrease_reservation(-credit) != 0);
-+
-+#ifndef CONFIG_PREEMPT
-+ if (need_resched())
-+ schedule();
-+#endif
-+ } while ((credit != 0) && !need_sleep);
-+
-+ /* Schedule more work if there is some still to be done. */
-+ if (current_target() != bs.current_pages)
-+ mod_timer(&balloon_timer, jiffies + HZ);
-+
-+ mutex_unlock(&balloon_mutex);
-+}
-+
-+/* Resets the Xen limit, sets new target, and kicks off processing. */
-+void balloon_set_new_target(unsigned long target)
-+{
-+ /* No need for lock. Not read-modify-write updates. */
-+ bs.hard_limit = ~0UL;
-+ bs.target_pages = target;
-+ schedule_work(&balloon_worker);
-+}
-+
-+static struct xenbus_watch target_watch =
-+{
-+ .node = "memory/target"
-+};
-+
-+/*
-+ * Compute the minimum value this domain can be ballooned down to
-+ * (in kilobytes).
-+ */
-+static unsigned long min_target(void)
-+{
-+ unsigned long min_kib;
-+ unsigned long curr_kib = current_target() << (PAGE_SHIFT - 10);
-+ const unsigned long max_kib = max_pfn << (PAGE_SHIFT - 10);
-+
-+	/* Simple continuous piecewise linear function:
-+ * max MB -> min MB gradient
-+ * 0 0
-+ * 16 16
-+ * 32 24
-+ * 128 72 (1/2)
-+ * 512 168 (1/4)
-+ * 2048 360 (1/8)
-+ * 8192 552 (1/32)
-+ * 32768 1320
-+ * 65536 2344
-+ */
-+ if (max_kib < 131072UL)
-+ min_kib = 8192UL + (max_kib >> 1);
-+ else if (max_kib < 524288UL)
-+ min_kib = 40960UL + (max_kib >> 2);
-+ else if (max_kib < 2097152UL)
-+ min_kib = 106496UL + (max_kib >> 3);
-+ else
-+ min_kib = 303104UL + (max_kib >> 5);
-+
-+ if (min_kib > curr_kib)
-+ min_kib = curr_kib; /* Don't enforce growth */
-+
-+ return min_kib;
-+}
-+
-+/* React to a change in the target key */
-+static void watch_target(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ unsigned long long new_target;
-+ unsigned long min_value = min_target();
-+ int err;
-+
-+ err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
-+ if (err != 1) {
-+ /* This is ok (for domain0 at least) - so just return */
-+ return;
-+ }
-+
-+ /* The given memory/target value is in KiB, so it needs converting to
-+ * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
-+ * But first make sure that we are not lowering the value below the
-+ * "minimum".
-+ */
-+ if (new_target < min_value)
-+ new_target = min_value;
-+
-+ printk(KERN_INFO "Setting mem allocation to %llu kiB\n", new_target);
-+ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
-+}
-+
-+static int balloon_init_watcher(struct notifier_block *notifier,
-+ unsigned long event,
-+ void *data)
-+{
-+ int err;
-+
-+ err = register_xenbus_watch(&target_watch);
-+ if (err)
-+ printk(KERN_ERR "Failed to set balloon watcher\n");
-+
-+ return NOTIFY_DONE;
-+}
-+
-+#ifdef CONFIG_PROC_FS
-+static int balloon_write(struct file *file, const char __user *buffer,
-+ unsigned long count, void *data)
-+{
-+ char memstring[64], *endchar;
-+ unsigned long long target_bytes;
-+
-+ if (!capable(CAP_SYS_ADMIN))
-+ return -EPERM;
-+
-+ if (count <= 1)
-+ return -EBADMSG; /* runt */
-+ if (count > sizeof(memstring))
-+ return -EFBIG; /* too long */
-+
-+ if (copy_from_user(memstring, buffer, count))
-+ return -EFAULT;
-+ memstring[sizeof(memstring)-1] = '\0';
-+
-+ target_bytes = memparse(memstring, &endchar);
-+ balloon_set_new_target(target_bytes >> PAGE_SHIFT);
-+
-+ return count;
-+}
-+
-+static int balloon_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ len = sprintf(
-+ page,
-+ "Current allocation: %8lu kB\n"
-+ "Requested target: %8lu kB\n"
-+ "Minimum target: %8lu kB\n"
-+ "Maximum target: %8lu kB\n"
-+ "Low-mem balloon: %8lu kB\n"
-+ "High-mem balloon: %8lu kB\n"
-+ "Driver pages: %8lu kB\n"
-+ "Xen hard limit: ",
-+ PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages),
-+ min_target(), PAGES2KB(num_physpages),
-+ PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high),
-+ PAGES2KB(bs.driver_pages));
-+
-+ if (bs.hard_limit != ~0UL)
-+ len += sprintf(page + len, "%8lu kB\n",
-+ PAGES2KB(bs.hard_limit));
-+ else
-+ len += sprintf(page + len, " ??? kB\n");
-+
-+ *eof = 1;
-+ return len;
-+}
-+#endif
-+
-+static struct notifier_block xenstore_notifier;
-+
-+static int __init balloon_init(void)
-+{
-+#if defined(CONFIG_X86) && defined(CONFIG_XEN)
-+ unsigned long pfn;
-+ struct page *page;
-+#endif
-+
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ IPRINTK("Initialising balloon driver.\n");
-+
-+#ifdef CONFIG_XEN
-+ bs.current_pages = min(xen_start_info->nr_pages, max_pfn);
-+ totalram_pages = bs.current_pages;
-+#else
-+ bs.current_pages = totalram_pages;
-+#endif
-+ bs.target_pages = bs.current_pages;
-+ bs.balloon_low = 0;
-+ bs.balloon_high = 0;
-+ bs.driver_pages = 0UL;
-+ bs.hard_limit = ~0UL;
-+
-+ init_timer(&balloon_timer);
-+ balloon_timer.data = 0;
-+ balloon_timer.function = balloon_alarm;
-+
-+#ifdef CONFIG_PROC_FS
-+ if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) {
-+ WPRINTK("Unable to create /proc/xen/balloon.\n");
-+ return -1;
-+ }
-+
-+ balloon_pde->read_proc = balloon_read;
-+ balloon_pde->write_proc = balloon_write;
-+#endif
-+ balloon_sysfs_init();
-+
-+#if defined(CONFIG_X86) && defined(CONFIG_XEN)
-+ /* Initialise the balloon with excess memory space. */
-+ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
-+ page = pfn_to_page(pfn);
-+ if (!PageReserved(page))
-+ balloon_append(page);
-+ }
-+#endif
-+
-+ target_watch.callback = watch_target;
-+ xenstore_notifier.notifier_call = balloon_init_watcher;
-+
-+ register_xenstore_notifier(&xenstore_notifier);
-+
-+ return 0;
-+}
-+
-+subsys_initcall(balloon_init);
-+
-+static void balloon_exit(void)
-+{
-+ /* XXX - release balloon here */
-+ return;
-+}
-+
-+module_exit(balloon_exit);
-+
-+void balloon_update_driver_allowance(long delta)
-+{
-+ unsigned long flags;
-+
-+ balloon_lock(flags);
-+ bs.driver_pages += delta;
-+ balloon_unlock(flags);
-+}
-+
-+#ifdef CONFIG_XEN
-+static int dealloc_pte_fn(
-+ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
-+{
-+ unsigned long mfn = pte_mfn(*pte);
-+ int ret;
-+ struct xen_memory_reservation reservation = {
-+ .nr_extents = 1,
-+ .extent_order = 0,
-+ .domid = DOMID_SELF
-+ };
-+ set_xen_guest_handle(reservation.extent_start, &mfn);
-+ set_pte_at(&init_mm, addr, pte, __pte_ma(0));
-+ set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
-+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
-+ BUG_ON(ret != 1);
-+ return 0;
-+}
-+#endif
-+
-+struct page **alloc_empty_pages_and_pagevec(int nr_pages)
-+{
-+ unsigned long vaddr, flags;
-+ struct page *page, **pagevec;
-+ int i, ret;
-+
-+ pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL);
-+ if (pagevec == NULL)
-+ return NULL;
-+
-+ for (i = 0; i < nr_pages; i++) {
-+ page = pagevec[i] = alloc_page(GFP_KERNEL);
-+ if (page == NULL)
-+ goto err;
-+
-+ vaddr = (unsigned long)page_address(page);
-+
-+ scrub_pages(vaddr, 1);
-+
-+ balloon_lock(flags);
-+
-+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
-+ unsigned long gmfn = page_to_pfn(page);
-+ struct xen_memory_reservation reservation = {
-+ .nr_extents = 1,
-+ .extent_order = 0,
-+ .domid = DOMID_SELF
-+ };
-+ set_xen_guest_handle(reservation.extent_start, &gmfn);
-+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+ &reservation);
-+ if (ret == 1)
-+ ret = 0; /* success */
-+ } else {
-+#ifdef CONFIG_XEN
-+ ret = apply_to_page_range(&init_mm, vaddr, PAGE_SIZE,
-+ dealloc_pte_fn, NULL);
-+#else
-+ /* Cannot handle non-auto translate mode. */
-+ ret = 1;
-+#endif
-+ }
-+
-+ if (ret != 0) {
-+ balloon_unlock(flags);
-+ __free_page(page);
-+ goto err;
-+ }
-+
-+ totalram_pages = --bs.current_pages;
-+
-+ balloon_unlock(flags);
-+ }
-+
-+ out:
-+ schedule_work(&balloon_worker);
-+#ifdef CONFIG_XEN
-+ flush_tlb_all();
-+#endif
-+ return pagevec;
-+
-+ err:
-+ balloon_lock(flags);
-+ while (--i >= 0)
-+ balloon_append(pagevec[i]);
-+ balloon_unlock(flags);
-+ kfree(pagevec);
-+ pagevec = NULL;
-+ goto out;
-+}
-+
-+static void _free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages, int free_vec)
-+{
-+ unsigned long flags;
-+ int i;
-+
-+ if (pagevec == NULL)
-+ return;
-+
-+ balloon_lock(flags);
-+ for (i = 0; i < nr_pages; i++) {
-+ BUG_ON(page_count(pagevec[i]) != 1);
-+ balloon_append(pagevec[i]);
-+ }
-+ balloon_unlock(flags);
-+
-+ if (free_vec)
-+ kfree(pagevec);
-+ else
-+ totalram_pages = bs.current_pages -= nr_pages;
-+
-+ schedule_work(&balloon_worker);
-+}
-+
-+void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
-+{
-+ _free_empty_pages_and_pagevec(pagevec, nr_pages, 1);
-+}
-+
-+void free_empty_pages(struct page **pagevec, int nr_pages)
-+{
-+ _free_empty_pages_and_pagevec(pagevec, nr_pages, 0);
-+}
-+
-+void balloon_release_driver_page(struct page *page)
-+{
-+ unsigned long flags;
-+
-+ balloon_lock(flags);
-+ balloon_append(page);
-+ bs.driver_pages--;
-+ balloon_unlock(flags);
-+
-+ schedule_work(&balloon_worker);
-+}
-+
-+EXPORT_SYMBOL_GPL(balloon_update_driver_allowance);
-+EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec);
-+EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec);
-+EXPORT_SYMBOL_GPL(balloon_release_driver_page);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
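
The exported helpers above are how other backend drivers borrow empty page frames from the balloon for grant mappings; blkif_init() in blkback.c later in this patch uses alloc_empty_pages_and_pagevec() exactly this way. The condensed sketch below mirrors that pattern; the example_* names and the request/segment counts are illustrative only.

/* Sketch of a backend driver consuming the balloon exports above. */
static struct page **grant_window;
static int window_pages = 64 * 11;	/* e.g. requests * segments per request */

static int example_backend_init(void)
{
	grant_window = alloc_empty_pages_and_pagevec(window_pages);
	if (!grant_window)
		return -ENOMEM;
	/* ... map foreign grant references into these empty frames ... */
	return 0;
}

static void example_backend_exit(void)
{
	/* Hand the frames back to the balloon and free the vector. */
	free_empty_pages_and_pagevec(grant_window, window_pages);
}
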
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/balloon/common.h ubuntu-gutsy-xen/drivers/xen/balloon/common.h
---- ubuntu-gutsy/drivers/xen/balloon/common.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/balloon/common.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,58 @@
-+/******************************************************************************
-+ * balloon/common.h
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __XEN_BALLOON_COMMON_H__
-+#define __XEN_BALLOON_COMMON_H__
-+
-+#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
-+
-+struct balloon_stats {
-+ /* We aim for 'current allocation' == 'target allocation'. */
-+ unsigned long current_pages;
-+ unsigned long target_pages;
-+ /* We may hit the hard limit in Xen. If we do then we remember it. */
-+ unsigned long hard_limit;
-+ /*
-+ * Drivers may alter the memory reservation independently, but they
-+ * must inform the balloon driver so we avoid hitting the hard limit.
-+ */
-+ unsigned long driver_pages;
-+ /* Number of pages in high- and low-memory balloons. */
-+ unsigned long balloon_low;
-+ unsigned long balloon_high;
-+};
-+
-+extern struct balloon_stats balloon_stats;
-+#define bs balloon_stats
-+
-+int balloon_sysfs_init(void);
-+void balloon_sysfs_exit(void);
-+
-+void balloon_set_new_target(unsigned long target);
-+
-+#endif /* __XEN_BALLOON_COMMON_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/balloon/Makefile ubuntu-gutsy-xen/drivers/xen/balloon/Makefile
---- ubuntu-gutsy/drivers/xen/balloon/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/balloon/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,2 @@
-+
-+obj-y := balloon.o sysfs.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/balloon/sysfs.c ubuntu-gutsy-xen/drivers/xen/balloon/sysfs.c
---- ubuntu-gutsy/drivers/xen/balloon/sysfs.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/balloon/sysfs.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,171 @@
-+/******************************************************************************
-+ * balloon/sysfs.c
-+ *
-+ * Xen balloon driver - sysfs interfaces.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/capability.h>
-+#include <linux/errno.h>
-+#include <linux/stat.h>
-+#include <linux/string.h>
-+#include <linux/sysdev.h>
-+#include <linux/module.h>
-+#include "common.h"
-+
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
-+#define BALLOON_CLASS_NAME "memory"
-+
-+#define BALLOON_SHOW(name, format, args...) \
-+ static ssize_t show_##name(struct sys_device *dev, \
-+ char *buf) \
-+ { \
-+ return sprintf(buf, format, ##args); \
-+ } \
-+ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
-+
-+BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages));
-+BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low));
-+BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high));
-+BALLOON_SHOW(hard_limit_kb,
-+ (bs.hard_limit!=~0UL) ? "%lu\n" : "???\n",
-+ (bs.hard_limit!=~0UL) ? PAGES2KB(bs.hard_limit) : 0);
-+BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages));
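
Each BALLOON_SHOW() invocation above stamps out one read-only sysfs attribute. Expanding the first invocation by hand (reformatted slightly) shows what the preprocessor generates:

/* Hand expansion of BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages)); */
static ssize_t show_current_kb(struct sys_device *dev, char *buf)
{
	return sprintf(buf, "%lu\n", PAGES2KB(bs.current_pages));
}
static SYSDEV_ATTR(current_kb, S_IRUGO, show_current_kb, NULL);
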
-+
-+static ssize_t show_target_kb(struct sys_device *dev, char *buf)
-+{
-+ return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages));
-+}
-+
-+static ssize_t store_target_kb(struct sys_device *dev,
-+ const char *buf,
-+ size_t count)
-+{
-+ char memstring[64], *endchar;
-+ unsigned long long target_bytes;
-+
-+ if (!capable(CAP_SYS_ADMIN))
-+ return -EPERM;
-+
-+ if (count <= 1)
-+ return -EBADMSG; /* runt */
-+ if (count > sizeof(memstring))
-+ return -EFBIG; /* too long */
-+ strcpy(memstring, buf);
-+
-+ target_bytes = memparse(memstring, &endchar);
-+ balloon_set_new_target(target_bytes >> PAGE_SHIFT);
-+
-+ return count;
-+}
-+
-+static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
-+ show_target_kb, store_target_kb);
-+
-+static struct sysdev_attribute *balloon_attrs[] = {
-+ &attr_target_kb,
-+};
-+
-+static struct attribute *balloon_info_attrs[] = {
-+ &attr_current_kb.attr,
-+ &attr_low_kb.attr,
-+ &attr_high_kb.attr,
-+ &attr_hard_limit_kb.attr,
-+ &attr_driver_kb.attr,
-+ NULL
-+};
-+
-+static struct attribute_group balloon_info_group = {
-+ .name = "info",
-+ .attrs = balloon_info_attrs,
-+};
-+
-+static struct sysdev_class balloon_sysdev_class = {
-+ set_kset_name(BALLOON_CLASS_NAME),
-+};
-+
-+static struct sys_device balloon_sysdev;
-+
-+static int register_balloon(struct sys_device *sysdev)
-+{
-+ int i, error;
-+
-+ error = sysdev_class_register(&balloon_sysdev_class);
-+ if (error)
-+ return error;
-+
-+ sysdev->id = 0;
-+ sysdev->cls = &balloon_sysdev_class;
-+
-+ error = sysdev_register(sysdev);
-+ if (error) {
-+ sysdev_class_unregister(&balloon_sysdev_class);
-+ return error;
-+ }
-+
-+ for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
-+ error = sysdev_create_file(sysdev, balloon_attrs[i]);
-+ if (error)
-+ goto fail;
-+ }
-+
-+ error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
-+ if (error)
-+ goto fail;
-+
-+ return 0;
-+
-+ fail:
-+ while (--i >= 0)
-+ sysdev_remove_file(sysdev, balloon_attrs[i]);
-+ sysdev_unregister(sysdev);
-+ sysdev_class_unregister(&balloon_sysdev_class);
-+ return error;
-+}
-+
-+static void unregister_balloon(struct sys_device *sysdev)
-+{
-+ int i;
-+
-+ sysfs_remove_group(&sysdev->kobj, &balloon_info_group);
-+ for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++)
-+ sysdev_remove_file(sysdev, balloon_attrs[i]);
-+ sysdev_unregister(sysdev);
-+ sysdev_class_unregister(&balloon_sysdev_class);
-+}
-+
-+int balloon_sysfs_init(void)
-+{
-+ return register_balloon(&balloon_sysdev);
-+}
-+
-+void balloon_sysfs_exit(void)
-+{
-+ unregister_balloon(&balloon_sysdev);
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blkback/blkback.c ubuntu-gutsy-xen/drivers/xen/blkback/blkback.c
---- ubuntu-gutsy/drivers/xen/blkback/blkback.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blkback/blkback.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,618 @@
-+/******************************************************************************
-+ * arch/xen/drivers/blkif/backend/main.c
-+ *
-+ * Back-end of the driver for virtual block devices. This portion of the
-+ * driver exports a 'unified' block-device interface that can be accessed
-+ * by any operating system that implements a compatible front end. A
-+ * reference front-end implementation can be found in:
-+ * arch/xen/drivers/blkif/frontend
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
-+ * Copyright (c) 2005, Christopher Clark
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/spinlock.h>
-+#include <linux/kthread.h>
-+#include <linux/freezer.h>
-+#include <linux/list.h>
-+#include <xen/balloon.h>
-+#include <asm/hypervisor.h>
-+#include "common.h"
-+
-+/*
-+ * These are rather arbitrary. They are fairly large because adjacent requests
-+ * pulled from a communication ring are quite likely to end up being part of
-+ * the same scatter/gather request at the disc.
-+ *
-+ * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
-+ *
-+ * This will increase the chances of being able to write whole tracks.
-+ * 64 should be enough to keep us competitive with Linux.
-+ */
-+static int blkif_reqs = 64;
-+module_param_named(reqs, blkif_reqs, int, 0);
-+MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
-+
-+/* Run-time switchable: /sys/module/blkback/parameters/ */
-+static unsigned int log_stats = 0;
-+static unsigned int debug_lvl = 0;
-+module_param(log_stats, int, 0644);
-+module_param(debug_lvl, int, 0644);
-+
-+/*
-+ * Each outstanding request that we've passed to the lower device layers has a
-+ * 'pending_req' allocated to it. Each bio that completes decrements
-+ * the pendcnt towards zero. When it hits zero, the specified domain has a
-+ * response queued for it, with the saved 'id' passed back.
-+ */
-+typedef struct {
-+ blkif_t *blkif;
-+ u64 id;
-+ int nr_pages;
-+ atomic_t pendcnt;
-+ unsigned short operation;
-+ int status;
-+ struct list_head free_list;
-+} pending_req_t;
-+
-+static pending_req_t *pending_reqs;
-+static struct list_head pending_free;
-+static DEFINE_SPINLOCK(pending_free_lock);
-+static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
-+
-+#define BLKBACK_INVALID_HANDLE (~0)
-+
-+static struct page **pending_pages;
-+static grant_handle_t *pending_grant_handles;
-+
-+static inline int vaddr_pagenr(pending_req_t *req, int seg)
-+{
-+ return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
-+}
-+
-+static inline unsigned long vaddr(pending_req_t *req, int seg)
-+{
-+ unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
-+ return (unsigned long)pfn_to_kaddr(pfn);
-+}
-+
-+#define pending_handle(_req, _seg) \
-+ (pending_grant_handles[vaddr_pagenr(_req, _seg)])
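
Each pending request owns a contiguous run of BLKIF_MAX_SEGMENTS_PER_REQUEST slots in pending_pages[] and pending_grant_handles[], and vaddr_pagenr() is simply the row-major index into that layout. The standalone check below assumes 11 segments per request, the customary value from the blkif interface headers of this era rather than something stated in this patch.

#include <stdio.h>

#define MAX_SEGS 11	/* assumed value of BLKIF_MAX_SEGMENTS_PER_REQUEST */

/* Same index arithmetic as vaddr_pagenr(): request slot * segments + segment. */
static int pagenr(int req_index, int seg)
{
	return req_index * MAX_SEGS + seg;
}

int main(void)
{
	/* Request slot 3, segment 5 lands in page slot 38 of pending_pages[]. */
	printf("slot = %d\n", pagenr(3, 5));
	return 0;
}
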
-+
-+
-+static int do_block_io_op(blkif_t *blkif);
-+static void dispatch_rw_block_io(blkif_t *blkif,
-+ blkif_request_t *req,
-+ pending_req_t *pending_req);
-+static void make_response(blkif_t *blkif, u64 id,
-+ unsigned short op, int st);
-+
-+/******************************************************************
-+ * misc small helpers
-+ */
-+static pending_req_t* alloc_req(void)
-+{
-+ pending_req_t *req = NULL;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&pending_free_lock, flags);
-+ if (!list_empty(&pending_free)) {
-+ req = list_entry(pending_free.next, pending_req_t, free_list);
-+ list_del(&req->free_list);
-+ }
-+ spin_unlock_irqrestore(&pending_free_lock, flags);
-+ return req;
-+}
-+
-+static void free_req(pending_req_t *req)
-+{
-+ unsigned long flags;
-+ int was_empty;
-+
-+ spin_lock_irqsave(&pending_free_lock, flags);
-+ was_empty = list_empty(&pending_free);
-+ list_add(&req->free_list, &pending_free);
-+ spin_unlock_irqrestore(&pending_free_lock, flags);
-+ if (was_empty)
-+ wake_up(&pending_free_wq);
-+}
-+
-+static void unplug_queue(blkif_t *blkif)
-+{
-+ if (blkif->plug == NULL)
-+ return;
-+ if (blkif->plug->unplug_fn)
-+ blkif->plug->unplug_fn(blkif->plug);
-+ blk_put_queue(blkif->plug);
-+ blkif->plug = NULL;
-+}
-+
-+static void plug_queue(blkif_t *blkif, struct bio *bio)
-+{
-+ request_queue_t *q = bdev_get_queue(bio->bi_bdev);
-+
-+ if (q == blkif->plug)
-+ return;
-+ unplug_queue(blkif);
-+ blk_get_queue(q);
-+ blkif->plug = q;
-+}
-+
-+static void fast_flush_area(pending_req_t *req)
-+{
-+ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+ unsigned int i, invcount = 0;
-+ grant_handle_t handle;
-+ int ret;
-+
-+ for (i = 0; i < req->nr_pages; i++) {
-+ handle = pending_handle(req, i);
-+ if (handle == BLKBACK_INVALID_HANDLE)
-+ continue;
-+ gnttab_set_unmap_op(&unmap[i], vaddr(req, i), GNTMAP_host_map,
-+ handle);
-+ pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
-+ invcount++;
-+ }
-+
-+ ret = HYPERVISOR_grant_table_op(
-+ GNTTABOP_unmap_grant_ref, unmap, invcount);
-+ BUG_ON(ret);
-+}
-+
-+/******************************************************************
-+ * SCHEDULER FUNCTIONS
-+ */
-+
-+static void print_stats(blkif_t *blkif)
-+{
-+ printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
-+ current->comm, blkif->st_oo_req,
-+ blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
-+ blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
-+ blkif->st_rd_req = 0;
-+ blkif->st_wr_req = 0;
-+ blkif->st_oo_req = 0;
-+}
-+
-+int blkif_schedule(void *arg)
-+{
-+ blkif_t *blkif = arg;
-+
-+ blkif_get(blkif);
-+
-+ if (debug_lvl)
-+ printk(KERN_DEBUG "%s: started\n", current->comm);
-+
-+ while (!kthread_should_stop()) {
-+ if (try_to_freeze())
-+ continue;
-+
-+ wait_event_interruptible(
-+ blkif->wq,
-+ blkif->waiting_reqs || kthread_should_stop());
-+ wait_event_interruptible(
-+ pending_free_wq,
-+ !list_empty(&pending_free) || kthread_should_stop());
-+
-+ blkif->waiting_reqs = 0;
-+ smp_mb(); /* clear flag *before* checking for work */
-+
-+ if (do_block_io_op(blkif))
-+ blkif->waiting_reqs = 1;
-+ unplug_queue(blkif);
-+
-+ if (log_stats && time_after(jiffies, blkif->st_print))
-+ print_stats(blkif);
-+ }
-+
-+ if (log_stats)
-+ print_stats(blkif);
-+ if (debug_lvl)
-+ printk(KERN_DEBUG "%s: exiting\n", current->comm);
-+
-+ blkif->xenblkd = NULL;
-+ blkif_put(blkif);
-+
-+ return 0;
-+}
-+
-+/******************************************************************
-+ * COMPLETION CALLBACK -- Called as bh->b_end_io()
-+ */
-+
-+static void __end_block_io_op(pending_req_t *pending_req, int error)
-+{
-+ /* An error fails the entire request. */
-+ if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
-+ (error == -EOPNOTSUPP)) {
-+ DPRINTK("blkback: write barrier op failed, not supported\n");
-+ blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
-+ pending_req->status = BLKIF_RSP_EOPNOTSUPP;
-+ } else if (error) {
-+ DPRINTK("Buffer not up-to-date at end of operation, "
-+ "error=%d\n", error);
-+ pending_req->status = BLKIF_RSP_ERROR;
-+ }
-+
-+ if (atomic_dec_and_test(&pending_req->pendcnt)) {
-+ fast_flush_area(pending_req);
-+ make_response(pending_req->blkif, pending_req->id,
-+ pending_req->operation, pending_req->status);
-+ blkif_put(pending_req->blkif);
-+ free_req(pending_req);
-+ }
-+}
-+
-+static int end_block_io_op(struct bio *bio, unsigned int done, int error)
-+{
-+ if (bio->bi_size != 0)
-+ return 1;
-+ __end_block_io_op(bio->bi_private, error);
-+ bio_put(bio);
-+ return error;
-+}
-+
-+
-+/******************************************************************************
-+ * NOTIFICATION FROM GUEST OS.
-+ */
-+
-+static void blkif_notify_work(blkif_t *blkif)
-+{
-+ blkif->waiting_reqs = 1;
-+ wake_up(&blkif->wq);
-+}
-+
-+irqreturn_t blkif_be_int(int irq, void *dev_id)
-+{
-+ blkif_notify_work(dev_id);
-+ return IRQ_HANDLED;
-+}
-+
-+
-+
-+/******************************************************************
-+ * DOWNWARD CALLS -- These interface with the block-device layer proper.
-+ */
-+
-+static int do_block_io_op(blkif_t *blkif)
-+{
-+ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
-+ blkif_request_t req;
-+ pending_req_t *pending_req;
-+ RING_IDX rc, rp;
-+ int more_to_do = 0;
-+
-+ rc = blk_rings->common.req_cons;
-+ rp = blk_rings->common.sring->req_prod;
-+ rmb(); /* Ensure we see queued requests up to 'rp'. */
-+
-+ while ((rc != rp)) {
-+
-+ if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
-+ break;
-+
-+ pending_req = alloc_req();
-+ if (NULL == pending_req) {
-+ blkif->st_oo_req++;
-+ more_to_do = 1;
-+ break;
-+ }
-+
-+ switch (blkif->blk_protocol) {
-+ case BLKIF_PROTOCOL_NATIVE:
-+ memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
-+ break;
-+ case BLKIF_PROTOCOL_X86_32:
-+ blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
-+ break;
-+ case BLKIF_PROTOCOL_X86_64:
-+ blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
-+ break;
-+ default:
-+ BUG();
-+ }
-+ blk_rings->common.req_cons = ++rc; /* before make_response() */
-+
-+ switch (req.operation) {
-+ case BLKIF_OP_READ:
-+ blkif->st_rd_req++;
-+ dispatch_rw_block_io(blkif, &req, pending_req);
-+ break;
-+ case BLKIF_OP_WRITE_BARRIER:
-+ blkif->st_br_req++;
-+ /* fall through */
-+ case BLKIF_OP_WRITE:
-+ blkif->st_wr_req++;
-+ dispatch_rw_block_io(blkif, &req, pending_req);
-+ break;
-+ default:
-+ DPRINTK("error: unknown block io operation [%d]\n",
-+ req.operation);
-+ make_response(blkif, req.id, req.operation,
-+ BLKIF_RSP_ERROR);
-+ free_req(pending_req);
-+ break;
-+ }
-+ }
-+ return more_to_do;
-+}
-+
-+static void dispatch_rw_block_io(blkif_t *blkif,
-+ blkif_request_t *req,
-+ pending_req_t *pending_req)
-+{
-+ extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
-+ struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+ struct phys_req preq;
-+ struct {
-+ unsigned long buf; unsigned int nsec;
-+ } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+ unsigned int nseg;
-+ struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+ int ret, i, nbio = 0;
-+ int operation;
-+
-+ switch (req->operation) {
-+ case BLKIF_OP_READ:
-+ operation = READ;
-+ break;
-+ case BLKIF_OP_WRITE:
-+ operation = WRITE;
-+ break;
-+ case BLKIF_OP_WRITE_BARRIER:
-+ operation = WRITE_BARRIER;
-+ break;
-+ default:
-+ operation = 0; /* make gcc happy */
-+ BUG();
-+ }
-+
-+ /* Check that number of segments is sane. */
-+ nseg = req->nr_segments;
-+ if (unlikely(nseg == 0) ||
-+ unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
-+ DPRINTK("Bad number of segments in request (%d)\n", nseg);
-+ goto fail_response;
-+ }
-+
-+ preq.dev = req->handle;
-+ preq.sector_number = req->sector_number;
-+ preq.nr_sects = 0;
-+
-+ pending_req->blkif = blkif;
-+ pending_req->id = req->id;
-+ pending_req->operation = req->operation;
-+ pending_req->status = BLKIF_RSP_OKAY;
-+ pending_req->nr_pages = nseg;
-+
-+ for (i = 0; i < nseg; i++) {
-+ uint32_t flags;
-+
-+ seg[i].nsec = req->seg[i].last_sect -
-+ req->seg[i].first_sect + 1;
-+
-+ if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
-+ (req->seg[i].last_sect < req->seg[i].first_sect))
-+ goto fail_response;
-+ preq.nr_sects += seg[i].nsec;
-+
-+ flags = GNTMAP_host_map;
-+ if (operation != READ)
-+ flags |= GNTMAP_readonly;
-+ gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
-+ req->seg[i].gref, blkif->domid);
-+ }
-+
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
-+ BUG_ON(ret);
-+
-+ for (i = 0; i < nseg; i++) {
-+ if (unlikely(map[i].status != 0)) {
-+ DPRINTK("invalid buffer -- could not remap it\n");
-+ map[i].handle = BLKBACK_INVALID_HANDLE;
-+ ret |= 1;
-+ }
-+
-+ pending_handle(pending_req, i) = map[i].handle;
-+
-+ if (ret)
-+ continue;
-+
-+ set_phys_to_machine(__pa(vaddr(
-+ pending_req, i)) >> PAGE_SHIFT,
-+ FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
-+ seg[i].buf = map[i].dev_bus_addr |
-+ (req->seg[i].first_sect << 9);
-+ }
-+
-+ if (ret)
-+ goto fail_flush;
-+
-+ if (vbd_translate(&preq, blkif, operation) != 0) {
-+ DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
-+ operation == READ ? "read" : "write",
-+ preq.sector_number,
-+ preq.sector_number + preq.nr_sects, preq.dev);
-+ goto fail_flush;
-+ }
-+
-+ for (i = 0; i < nseg; i++) {
-+ if (((int)preq.sector_number|(int)seg[i].nsec) &
-+ ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
-+ DPRINTK("Misaligned I/O request from domain %d",
-+ blkif->domid);
-+ goto fail_put_bio;
-+ }
-+
-+ while ((bio == NULL) ||
-+ (bio_add_page(bio,
-+ virt_to_page(vaddr(pending_req, i)),
-+ seg[i].nsec << 9,
-+ seg[i].buf & ~PAGE_MASK) == 0)) {
-+ bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
-+ if (unlikely(bio == NULL))
-+ goto fail_put_bio;
-+
-+ bio->bi_bdev = preq.bdev;
-+ bio->bi_private = pending_req;
-+ bio->bi_end_io = end_block_io_op;
-+ bio->bi_sector = preq.sector_number;
-+ }
-+
-+ preq.sector_number += seg[i].nsec;
-+ }
-+
-+ plug_queue(blkif, bio);
-+ atomic_set(&pending_req->pendcnt, nbio);
-+ blkif_get(blkif);
-+
-+ for (i = 0; i < nbio; i++)
-+ submit_bio(operation, biolist[i]);
-+
-+ if (operation == READ)
-+ blkif->st_rd_sect += preq.nr_sects;
-+ else if (operation == WRITE)
-+ blkif->st_wr_sect += preq.nr_sects;
-+
-+ return;
-+
-+ fail_put_bio:
-+ for (i = 0; i < (nbio-1); i++)
-+ bio_put(biolist[i]);
-+ fail_flush:
-+ fast_flush_area(pending_req);
-+ fail_response:
-+ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
-+ free_req(pending_req);
-+}
-+
-+
-+
-+/******************************************************************
-+ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
-+ */
-+
-+
-+static void make_response(blkif_t *blkif, u64 id,
-+ unsigned short op, int st)
-+{
-+ blkif_response_t resp;
-+ unsigned long flags;
-+ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
-+ int more_to_do = 0;
-+ int notify;
-+
-+ resp.id = id;
-+ resp.operation = op;
-+ resp.status = st;
-+
-+ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-+ /* Place on the response ring for the relevant domain. */
-+ switch (blkif->blk_protocol) {
-+ case BLKIF_PROTOCOL_NATIVE:
-+ memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
-+ &resp, sizeof(resp));
-+ break;
-+ case BLKIF_PROTOCOL_X86_32:
-+ memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
-+ &resp, sizeof(resp));
-+ break;
-+ case BLKIF_PROTOCOL_X86_64:
-+ memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
-+ &resp, sizeof(resp));
-+ break;
-+ default:
-+ BUG();
-+ }
-+ blk_rings->common.rsp_prod_pvt++;
-+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
-+ if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
-+ /*
-+ * Tail check for pending requests. Allows frontend to avoid
-+ * notifications if requests are already in flight (lower
-+ * overheads and promotes batching).
-+ */
-+ RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
-+
-+ } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
-+ more_to_do = 1;
-+ }
-+
-+ spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-+
-+ if (more_to_do)
-+ blkif_notify_work(blkif);
-+ if (notify)
-+ notify_remote_via_irq(blkif->irq);
-+}
-+
-+static int __init blkif_init(void)
-+{
-+ int i, mmap_pages;
-+
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
-+
-+ pending_reqs = kmalloc(sizeof(pending_reqs[0]) *
-+ blkif_reqs, GFP_KERNEL);
-+ pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
-+ mmap_pages, GFP_KERNEL);
-+ pending_pages = alloc_empty_pages_and_pagevec(mmap_pages);
-+
-+ if (!pending_reqs || !pending_grant_handles || !pending_pages)
-+ goto out_of_memory;
-+
-+ for (i = 0; i < mmap_pages; i++)
-+ pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
-+
-+ blkif_interface_init();
-+
-+ memset(pending_reqs, 0, blkif_reqs * sizeof(pending_reqs[0]));
-+ INIT_LIST_HEAD(&pending_free);
-+
-+ for (i = 0; i < blkif_reqs; i++)
-+ list_add_tail(&pending_reqs[i].free_list, &pending_free);
-+
-+ blkif_xenbus_init();
-+
-+ return 0;
-+
-+ out_of_memory:
-+ kfree(pending_reqs);
-+ kfree(pending_grant_handles);
-+ free_empty_pages_and_pagevec(pending_pages, mmap_pages);
-+ printk("%s: out of memory\n", __FUNCTION__);
-+ return -ENOMEM;
-+}
-+
-+module_init(blkif_init);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
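
With the module default of 64 requests, blkif_init() above sizes its pools as blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST empty frames and grant handles. The arithmetic below assumes 11 segments per request and 4 KiB pages; neither number comes from this patch itself.

#include <stdio.h>

#define MAX_SEGS 11	/* assumed BLKIF_MAX_SEGMENTS_PER_REQUEST */

int main(void)
{
	int blkif_reqs = 64;			/* module default above */
	int mmap_pages = blkif_reqs * MAX_SEGS;	/* same sizing as blkif_init() */

	/* 704 empty frames and 704 grant handles, i.e. 2816 KiB of address
	 * space reserved up front for mapping guest segments. */
	printf("mmap_pages = %d (%d KiB)\n", mmap_pages, mmap_pages * 4);
	return 0;
}
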
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blkback/cdrom.c ubuntu-gutsy-xen/drivers/xen/blkback/cdrom.c
---- ubuntu-gutsy/drivers/xen/blkback/cdrom.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blkback/cdrom.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,169 @@
-+/******************************************************************************
-+ * blkback/cdrom.c
-+ *
-+ * Routines for managing cdrom watch and media-present attribute of a
-+ * cdrom type virtual block device (VBD).
-+ *
-+ * Copyright (c) 2003-2005, Keir Fraser & Steve Hand
-+ * Copyright (c) 2007 Pat Campbell
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include "common.h"
-+
-+#undef DPRINTK
-+#define DPRINTK(_f, _a...) \
-+ printk("(%s() file=%s, line=%d) " _f "\n", \
-+ __PRETTY_FUNCTION__, __FILE__ , __LINE__ , ##_a )
-+
-+
-+#define MEDIA_PRESENT "media-present"
-+
-+static void cdrom_media_changed(struct xenbus_watch *, const char **, unsigned int);
-+
-+/**
-+ * Writes media-present=1 attribute for the given vbd device if not
-+ * already there
-+ */
-+static int cdrom_xenstore_write_media_present(struct backend_info *be)
-+{
-+ struct xenbus_device *dev = be->dev;
-+ struct xenbus_transaction xbt;
-+ int err;
-+ int media_present;
-+
-+ DPRINTK(" ");
-+
-+ err = xenbus_scanf(XBT_NIL, dev->nodename, MEDIA_PRESENT, "%d",
-+ &media_present);
-+ if (0 < err) {
-+ DPRINTK("already written, err %d", err);
-+ return(0);
-+ }
-+ media_present = 1;
-+
-+again:
-+ err = xenbus_transaction_start(&xbt);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "starting transaction");
-+ return(-1);
-+ }
-+
-+ err = xenbus_printf(xbt, dev->nodename, MEDIA_PRESENT, "%d", media_present );
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "writing %s/%s",
-+ dev->nodename, MEDIA_PRESENT);
-+ goto abort;
-+ }
-+ err = xenbus_transaction_end(xbt, 0);
-+ if (err == -EAGAIN)
-+ goto again;
-+ if (err)
-+ xenbus_dev_fatal(dev, err, "ending transaction");
-+ return(0);
-+ abort:
-+ xenbus_transaction_end(xbt, 1);
-+ return(-1);
-+}
-+
-+/**
-+ * Return 1 if the VBD is a removable cdrom type device, 0 otherwise.
-+ */
-+int cdrom_is_type(struct backend_info *be)
-+{
-+ DPRINTK( "type:%x", be->blkif->vbd.type );
-+ if ((be->blkif->vbd.type & VDISK_CDROM) && (be->blkif->vbd.type & VDISK_REMOVABLE)) {
-+ return(1);
-+ }
-+ return(0);
-+}
-+
-+/**
-+ * Add a xenstore watch on the media-present node for cdrom type VBDs.
-+ */
-+void cdrom_add_media_watch(struct backend_info *be)
-+{
-+ struct xenbus_device *dev = be->dev;
-+ int err;
-+
-+ DPRINTK( "nodename:%s", dev->nodename);
-+ if (cdrom_is_type(be)) {
-+ DPRINTK("is a cdrom");
-+ if ( cdrom_xenstore_write_media_present(be) == 0 ) {
-+ DPRINTK( "xenstore wrote OK");
-+ err = xenbus_watch_path2(dev, dev->nodename, MEDIA_PRESENT,
-+ &be->backend_cdrom_watch, cdrom_media_changed);
-+ if (err) {
-+ DPRINTK( "media_present watch add failed" );
-+ }
-+ }
-+ }
-+}
-+
-+/**
-+ * Callback received when the "media_present" xenstore node is changed
-+ */
-+static void cdrom_media_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ int err;
-+ unsigned media_present;
-+ struct backend_info *be
-+ = container_of(watch, struct backend_info, backend_cdrom_watch);
-+ struct xenbus_device *dev = be->dev;
-+
-+ DPRINTK(" ");
-+
-+ if ( !(cdrom_is_type(be))) {
-+ DPRINTK("callback not for a cdrom" );
-+ return;
-+ }
-+
-+ err = xenbus_scanf(XBT_NIL, dev->nodename, MEDIA_PRESENT, "%d",
-+ &media_present);
-+ if (err == 0 || err == -ENOENT) {
-+ DPRINTK("xenbus_read of cdrom media_present node error:%d",err);
-+ return;
-+ }
-+
-+ if (media_present == 0) {
-+ vbd_free(&be->blkif->vbd);
-+ }
-+ else {
-+ char *p = strrchr(dev->otherend, '/') + 1;
-+ long handle = simple_strtoul(p, NULL, 0);
-+
-+ if (be->blkif->vbd.bdev == NULL) {
-+ err = vbd_create(be->blkif, handle, be->major, be->minor,
-+ (NULL == strchr(be->mode, 'w')));
-+ if (err) {
-+ be->major = be->minor = 0;
-+ xenbus_dev_fatal(dev, err, "creating vbd structure");
-+ return;
-+ }
-+ }
-+ }
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blkback/common.h ubuntu-gutsy-xen/drivers/xen/blkback/common.h
---- ubuntu-gutsy/drivers/xen/blkback/common.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blkback/common.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,154 @@
-+/*
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __BLKIF__BACKEND__COMMON_H__
-+#define __BLKIF__BACKEND__COMMON_H__
-+
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <linux/blkdev.h>
-+#include <linux/vmalloc.h>
-+#include <linux/wait.h>
-+#include <asm/io.h>
-+#include <asm/setup.h>
-+#include <asm/pgalloc.h>
-+#include <xen/evtchn.h>
-+#include <asm/hypervisor.h>
-+#include <xen/blkif.h>
-+#include <xen/gnttab.h>
-+#include <xen/driver_util.h>
-+#include <xen/xenbus.h>
-+
-+#define DPRINTK(_f, _a...) \
-+ pr_debug("(file=%s, line=%d) " _f, \
-+ __FILE__ , __LINE__ , ## _a )
-+
-+struct vbd {
-+ blkif_vdev_t handle; /* what the domain refers to this vbd as */
-+ unsigned char readonly; /* Non-zero -> read-only */
-+ unsigned char type; /* VDISK_xxx */
-+ u32 pdevice; /* phys device that this vbd maps to */
-+ struct block_device *bdev;
-+};
-+
-+struct backend_info;
-+
-+typedef struct blkif_st {
-+ /* Unique identifier for this interface. */
-+ domid_t domid;
-+ unsigned int handle;
-+ /* Physical parameters of the comms window. */
-+ unsigned int irq;
-+ /* Comms information. */
-+ enum blkif_protocol blk_protocol;
-+ blkif_back_rings_t blk_rings;
-+ struct vm_struct *blk_ring_area;
-+ /* The VBD attached to this interface. */
-+ struct vbd vbd;
-+ /* Back pointer to the backend_info. */
-+ struct backend_info *be;
-+ /* Private fields. */
-+ spinlock_t blk_ring_lock;
-+ atomic_t refcnt;
-+
-+ wait_queue_head_t wq;
-+ struct task_struct *xenblkd;
-+ unsigned int waiting_reqs;
-+ request_queue_t *plug;
-+
-+ /* statistics */
-+ unsigned long st_print;
-+ int st_rd_req;
-+ int st_wr_req;
-+ int st_oo_req;
-+ int st_br_req;
-+ int st_rd_sect;
-+ int st_wr_sect;
-+
-+ wait_queue_head_t waiting_to_free;
-+
-+ grant_handle_t shmem_handle;
-+ grant_ref_t shmem_ref;
-+} blkif_t;
-+
-+struct backend_info
-+{
-+ struct xenbus_device *dev;
-+ blkif_t *blkif;
-+ struct xenbus_watch backend_watch;
-+ struct xenbus_watch backend_cdrom_watch;
-+ unsigned major;
-+ unsigned minor;
-+ char *mode;
-+};
-+
-+blkif_t *blkif_alloc(domid_t domid);
-+void blkif_disconnect(blkif_t *blkif);
-+void blkif_free(blkif_t *blkif);
-+int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
-+
-+#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define blkif_put(_b) \
-+ do { \
-+ if (atomic_dec_and_test(&(_b)->refcnt)) \
-+ wake_up(&(_b)->waiting_to_free);\
-+ } while (0)
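
The reference-counting helpers above pair with the waiting_to_free queue: each in-flight request pins the interface (blkif_get() in dispatch_rw_block_io(), blkif_put() in __end_block_io_op()), and teardown waits for the count to drain. The sketch below is condensed from dispatch_rw_block_io() and blkif_disconnect() elsewhere in this patch, with everything else elided.

/* Condensed view of the blkif reference lifetime. */
static void submit_one_request(blkif_t *blkif)
{
	blkif_get(blkif);	/* the in-flight request now pins the interface */
	/* ... submit bios; the completion path calls blkif_put() ... */
}

static void teardown(blkif_t *blkif)
{
	atomic_dec(&blkif->refcnt);	/* drop the initial reference */
	wait_event(blkif->waiting_to_free,
		   atomic_read(&blkif->refcnt) == 0);
	atomic_inc(&blkif->refcnt);	/* restore it for blkif_free() */
}
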
-+
-+/* Create a vbd. */
-+int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, unsigned major,
-+ unsigned minor, int readonly);
-+void vbd_free(struct vbd *vbd);
-+
-+unsigned long long vbd_size(struct vbd *vbd);
-+unsigned int vbd_info(struct vbd *vbd);
-+unsigned long vbd_secsize(struct vbd *vbd);
-+
-+struct phys_req {
-+ unsigned short dev;
-+ unsigned short nr_sects;
-+ struct block_device *bdev;
-+ blkif_sector_t sector_number;
-+};
-+
-+int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation);
-+
-+void blkif_interface_init(void);
-+
-+void blkif_xenbus_init(void);
-+
-+irqreturn_t blkif_be_int(int irq, void *dev_id);
-+int blkif_schedule(void *arg);
-+
-+int blkback_barrier(struct xenbus_transaction xbt,
-+ struct backend_info *be, int state);
-+
-+/* cdrom media change */
-+int cdrom_is_type(struct backend_info *be);
-+void cdrom_add_media_watch(struct backend_info *be);
-+
-+#endif /* __BLKIF__BACKEND__COMMON_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blkback/interface.c ubuntu-gutsy-xen/drivers/xen/blkback/interface.c
---- ubuntu-gutsy/drivers/xen/blkback/interface.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blkback/interface.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,181 @@
-+/******************************************************************************
-+ * arch/xen/drivers/blkif/backend/interface.c
-+ *
-+ * Block-device interface management.
-+ *
-+ * Copyright (c) 2004, Keir Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include "common.h"
-+#include <xen/evtchn.h>
-+#include <linux/kthread.h>
-+
-+static struct kmem_cache *blkif_cachep;
-+
-+blkif_t *blkif_alloc(domid_t domid)
-+{
-+ blkif_t *blkif;
-+
-+ blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
-+ if (!blkif)
-+ return ERR_PTR(-ENOMEM);
-+
-+ memset(blkif, 0, sizeof(*blkif));
-+ blkif->domid = domid;
-+ spin_lock_init(&blkif->blk_ring_lock);
-+ atomic_set(&blkif->refcnt, 1);
-+ init_waitqueue_head(&blkif->wq);
-+ blkif->st_print = jiffies;
-+ init_waitqueue_head(&blkif->waiting_to_free);
-+
-+ return blkif;
-+}
-+
-+static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
-+{
-+ struct gnttab_map_grant_ref op;
-+
-+ gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
-+ GNTMAP_host_map, shared_page, blkif->domid);
-+
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-+ BUG();
-+
-+ if (op.status) {
-+ DPRINTK(" Grant table operation failure !\n");
-+ return op.status;
-+ }
-+
-+ blkif->shmem_ref = shared_page;
-+ blkif->shmem_handle = op.handle;
-+
-+ return 0;
-+}
-+
-+static void unmap_frontend_page(blkif_t *blkif)
-+{
-+ struct gnttab_unmap_grant_ref op;
-+
-+ gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
-+ GNTMAP_host_map, blkif->shmem_handle);
-+
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-+ BUG();
-+}
-+
-+int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
-+{
-+ int err;
-+
-+ /* Already connected through? */
-+ if (blkif->irq)
-+ return 0;
-+
-+ if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
-+ return -ENOMEM;
-+
-+ err = map_frontend_page(blkif, shared_page);
-+ if (err) {
-+ free_vm_area(blkif->blk_ring_area);
-+ return err;
-+ }
-+
-+ switch (blkif->blk_protocol) {
-+ case BLKIF_PROTOCOL_NATIVE:
-+ {
-+ blkif_sring_t *sring;
-+ sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-+ BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
-+ break;
-+ }
-+ case BLKIF_PROTOCOL_X86_32:
-+ {
-+ blkif_x86_32_sring_t *sring_x86_32;
-+ sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
-+ BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
-+ break;
-+ }
-+ case BLKIF_PROTOCOL_X86_64:
-+ {
-+ blkif_x86_64_sring_t *sring_x86_64;
-+ sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
-+ BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
-+ break;
-+ }
-+ default:
-+ BUG();
-+ }
-+
-+ err = bind_interdomain_evtchn_to_irqhandler(
-+ blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend", blkif);
-+ if (err < 0)
-+ {
-+ unmap_frontend_page(blkif);
-+ free_vm_area(blkif->blk_ring_area);
-+ blkif->blk_rings.common.sring = NULL;
-+ return err;
-+ }
-+ blkif->irq = err;
-+
-+ return 0;
-+}
-+
-+void blkif_disconnect(blkif_t *blkif)
-+{
-+ if (blkif->xenblkd) {
-+ kthread_stop(blkif->xenblkd);
-+ blkif->xenblkd = NULL;
-+ }
-+
-+ atomic_dec(&blkif->refcnt);
-+ wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
-+ atomic_inc(&blkif->refcnt);
-+
-+ if (blkif->irq) {
-+ unbind_from_irqhandler(blkif->irq, blkif);
-+ blkif->irq = 0;
-+ }
-+
-+ if (blkif->blk_rings.common.sring) {
-+ unmap_frontend_page(blkif);
-+ free_vm_area(blkif->blk_ring_area);
-+ blkif->blk_rings.common.sring = NULL;
-+ }
-+}
-+
-+void blkif_free(blkif_t *blkif)
-+{
-+ if (!atomic_dec_and_test(&blkif->refcnt))
-+ BUG();
-+ kmem_cache_free(blkif_cachep, blkif);
-+}
-+
-+void __init blkif_interface_init(void)
-+{
-+ blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t),
-+ 0, 0, NULL, NULL);
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blkback/Makefile ubuntu-gutsy-xen/drivers/xen/blkback/Makefile
---- ubuntu-gutsy/drivers/xen/blkback/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blkback/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,3 @@
-+obj-$(CONFIG_XEN_BLKDEV_BACKEND) := blkbk.o
-+
-+blkbk-y := blkback.o xenbus.o interface.o vbd.o cdrom.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blkback/vbd.c ubuntu-gutsy-xen/drivers/xen/blkback/vbd.c
---- ubuntu-gutsy/drivers/xen/blkback/vbd.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blkback/vbd.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,121 @@
-+/******************************************************************************
-+ * blkback/vbd.c
-+ *
-+ * Routines for managing virtual block devices (VBDs).
-+ *
-+ * Copyright (c) 2003-2005, Keir Fraser & Steve Hand
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include "common.h"
-+
-+#define vbd_sz(_v) ((_v)->bdev->bd_part ? \
-+ (_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
-+
-+unsigned long long vbd_size(struct vbd *vbd)
-+{
-+ return vbd_sz(vbd);
-+}
-+
-+unsigned int vbd_info(struct vbd *vbd)
-+{
-+ return vbd->type | (vbd->readonly?VDISK_READONLY:0);
-+}
-+
-+unsigned long vbd_secsize(struct vbd *vbd)
-+{
-+ return bdev_hardsect_size(vbd->bdev);
-+}
-+
-+int vbd_create(blkif_t *blkif, blkif_vdev_t handle, unsigned major,
-+ unsigned minor, int readonly)
-+{
-+ struct vbd *vbd;
-+ struct block_device *bdev;
-+
-+ vbd = &blkif->vbd;
-+ vbd->handle = handle;
-+ vbd->readonly = readonly;
-+ vbd->type = 0;
-+
-+ vbd->pdevice = MKDEV(major, minor);
-+
-+ bdev = open_by_devnum(vbd->pdevice,
-+ vbd->readonly ? FMODE_READ : FMODE_WRITE);
-+
-+ if (IS_ERR(bdev)) {
-+ DPRINTK("vbd_creat: device %08x could not be opened.\n",
-+ vbd->pdevice);
-+ return -ENOENT;
-+ }
-+
-+ vbd->bdev = bdev;
-+
-+ if (vbd->bdev->bd_disk == NULL) {
-+ DPRINTK("vbd_creat: device %08x doesn't exist.\n",
-+ vbd->pdevice);
-+ vbd_free(vbd);
-+ return -ENOENT;
-+ }
-+
-+ if (vbd->bdev->bd_disk->flags & GENHD_FL_CD)
-+ vbd->type |= VDISK_CDROM;
-+ if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
-+ vbd->type |= VDISK_REMOVABLE;
-+
-+ DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
-+ handle, blkif->domid);
-+ return 0;
-+}
-+
-+void vbd_free(struct vbd *vbd)
-+{
-+ if (vbd->bdev)
-+ blkdev_put(vbd->bdev);
-+ vbd->bdev = NULL;
-+}
-+
-+int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
-+{
-+ struct vbd *vbd = &blkif->vbd;
-+ int rc = -EACCES;
-+
-+ if ((operation != READ) && vbd->readonly)
-+ goto out;
-+
-+ if (vbd->bdev == NULL)
-+ goto out;
-+
-+ if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
-+ goto out;
-+
-+ req->dev = vbd->pdevice;
-+ req->bdev = vbd->bdev;
-+ rc = 0;
-+
-+ out:
-+ return rc;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blkback/xenbus.c ubuntu-gutsy-xen/drivers/xen/blkback/xenbus.c
---- ubuntu-gutsy/drivers/xen/blkback/xenbus.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blkback/xenbus.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,540 @@
-+/* Xenbus code for blkif backend
-+ Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
-+ Copyright (C) 2005 XenSource Ltd
-+
-+ This program is free software; you can redistribute it and/or modify
-+ it under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 2 of the License, or
-+ (at your option) any later version.
-+
-+ This program is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ GNU General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with this program; if not, write to the Free Software
-+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+*/
-+
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <linux/kthread.h>
-+#include "common.h"
-+#include "../core/domctl.h"
-+
-+#undef DPRINTK
-+#define DPRINTK(fmt, args...) \
-+ pr_debug("blkback/xenbus (%s:%d) " fmt ".\n", \
-+ __FUNCTION__, __LINE__, ##args)
-+
-+static void connect(struct backend_info *);
-+static int connect_ring(struct backend_info *);
-+static void backend_changed(struct xenbus_watch *, const char **,
-+ unsigned int);
-+
-+static int blkback_name(blkif_t *blkif, char *buf)
-+{
-+ char *devpath, *devname;
-+ struct xenbus_device *dev = blkif->be->dev;
-+
-+ devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
-+ if (IS_ERR(devpath))
-+ return PTR_ERR(devpath);
-+
-+ if ((devname = strstr(devpath, "/dev/")) != NULL)
-+ devname += strlen("/dev/");
-+ else
-+ devname = devpath;
-+
-+ snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
-+ kfree(devpath);
-+
-+ return 0;
-+}
-+
-+static void update_blkif_status(blkif_t *blkif)
-+{
-+ int err;
-+ char name[TASK_COMM_LEN];
-+
-+ /* Not ready to connect? */
-+ if (!blkif->irq || !blkif->vbd.bdev)
-+ return;
-+
-+ /* Already connected? */
-+ if (blkif->be->dev->state == XenbusStateConnected)
-+ return;
-+
-+ /* Attempt to connect: exit if we fail to. */
-+ connect(blkif->be);
-+ if (blkif->be->dev->state != XenbusStateConnected)
-+ return;
-+
-+ err = blkback_name(blkif, name);
-+ if (err) {
-+ xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
-+ return;
-+ }
-+
-+ blkif->xenblkd = kthread_run(blkif_schedule, blkif, name);
-+ if (IS_ERR(blkif->xenblkd)) {
-+ err = PTR_ERR(blkif->xenblkd);
-+ blkif->xenblkd = NULL;
-+ xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
-+ }
-+}
-+
-+
-+/****************************************************************
-+ * sysfs interface for VBD I/O requests
-+ */
-+
-+#define VBD_SHOW(name, format, args...) \
-+ static ssize_t show_##name(struct device *_dev, \
-+ struct device_attribute *attr, \
-+ char *buf) \
-+ { \
-+ struct xenbus_device *dev = to_xenbus_device(_dev); \
-+ struct backend_info *be = dev->dev.driver_data; \
-+ \
-+ return sprintf(buf, format, ##args); \
-+ } \
-+ DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
-+
-+VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req);
-+VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req);
-+VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req);
-+VBD_SHOW(br_req, "%d\n", be->blkif->st_br_req);
-+VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
-+VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
-+
-+static struct attribute *vbdstat_attrs[] = {
-+ &dev_attr_oo_req.attr,
-+ &dev_attr_rd_req.attr,
-+ &dev_attr_wr_req.attr,
-+ &dev_attr_br_req.attr,
-+ &dev_attr_rd_sect.attr,
-+ &dev_attr_wr_sect.attr,
-+ NULL
-+};
-+
-+static struct attribute_group vbdstat_group = {
-+ .name = "statistics",
-+ .attrs = vbdstat_attrs,
-+};
-+
-+VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
-+VBD_SHOW(mode, "%s\n", be->mode);
-+
-+int xenvbd_sysfs_addif(struct xenbus_device *dev)
-+{
-+ int error;
-+
-+ error = device_create_file(&dev->dev, &dev_attr_physical_device);
-+ if (error)
-+ goto fail1;
-+
-+ error = device_create_file(&dev->dev, &dev_attr_mode);
-+ if (error)
-+ goto fail2;
-+
-+ error = sysfs_create_group(&dev->dev.kobj, &vbdstat_group);
-+ if (error)
-+ goto fail3;
-+
-+ return 0;
-+
-+fail3: sysfs_remove_group(&dev->dev.kobj, &vbdstat_group);
-+fail2: device_remove_file(&dev->dev, &dev_attr_mode);
-+fail1: device_remove_file(&dev->dev, &dev_attr_physical_device);
-+ return error;
-+}
-+
-+void xenvbd_sysfs_delif(struct xenbus_device *dev)
-+{
-+ sysfs_remove_group(&dev->dev.kobj, &vbdstat_group);
-+ device_remove_file(&dev->dev, &dev_attr_mode);
-+ device_remove_file(&dev->dev, &dev_attr_physical_device);
-+}
-+
-+static int blkback_remove(struct xenbus_device *dev)
-+{
-+ struct backend_info *be = dev->dev.driver_data;
-+
-+ DPRINTK("");
-+
-+ if (be->major || be->minor)
-+ xenvbd_sysfs_delif(dev);
-+
-+ if (be->backend_watch.node) {
-+ unregister_xenbus_watch(&be->backend_watch);
-+ kfree(be->backend_watch.node);
-+ be->backend_watch.node = NULL;
-+ }
-+
-+ if (be->backend_cdrom_watch.node) {
-+ unregister_xenbus_watch(&be->backend_cdrom_watch);
-+ kfree(be->backend_cdrom_watch.node);
-+ be->backend_cdrom_watch.node = NULL;
-+ }
-+
-+ if (be->blkif) {
-+ blkif_disconnect(be->blkif);
-+ vbd_free(&be->blkif->vbd);
-+ blkif_free(be->blkif);
-+ be->blkif = NULL;
-+ }
-+
-+ kfree(be);
-+ dev->dev.driver_data = NULL;
-+ return 0;
-+}
-+
-+int blkback_barrier(struct xenbus_transaction xbt,
-+ struct backend_info *be, int state)
-+{
-+ struct xenbus_device *dev = be->dev;
-+ int err;
-+
-+ err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
-+ "%d", state);
-+ if (err)
-+ xenbus_dev_fatal(dev, err, "writing feature-barrier");
-+
-+ return err;
-+}
-+
-+/**
-+ * Entry point to this code when a new device is created. Allocate the basic
-+ * structures, and watch the store waiting for the hotplug scripts to tell us
-+ * the device's physical major and minor numbers. Switch to InitWait.
-+ */
-+static int blkback_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err;
-+ struct backend_info *be = kzalloc(sizeof(struct backend_info),
-+ GFP_KERNEL);
-+ if (!be) {
-+ xenbus_dev_fatal(dev, -ENOMEM,
-+ "allocating backend structure");
-+ return -ENOMEM;
-+ }
-+ be->dev = dev;
-+ dev->dev.driver_data = be;
-+
-+ be->blkif = blkif_alloc(dev->otherend_id);
-+ if (IS_ERR(be->blkif)) {
-+ err = PTR_ERR(be->blkif);
-+ be->blkif = NULL;
-+ xenbus_dev_fatal(dev, err, "creating block interface");
-+ goto fail;
-+ }
-+
-+ /* setup back pointer */
-+ be->blkif->be = be;
-+
-+ err = xenbus_watch_path2(dev, dev->nodename, "physical-device",
-+ &be->backend_watch, backend_changed);
-+ if (err)
-+ goto fail;
-+
-+ err = xenbus_switch_state(dev, XenbusStateInitWait);
-+ if (err)
-+ goto fail;
-+
-+ return 0;
-+
-+fail:
-+ DPRINTK("failed");
-+ blkback_remove(dev);
-+ return err;
-+}
-+
-+
-+/**
-+ * Callback received when the hotplug scripts have placed the physical-device
-+ * node. Read it and the mode node, and create a vbd. If the frontend is
-+ * ready, connect.
-+ */
-+static void backend_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ int err;
-+ unsigned major;
-+ unsigned minor;
-+ struct backend_info *be
-+ = container_of(watch, struct backend_info, backend_watch);
-+ struct xenbus_device *dev = be->dev;
-+
-+ DPRINTK("");
-+
-+ err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
-+ &major, &minor);
-+ if (XENBUS_EXIST_ERR(err)) {
-+ /* Since this watch will fire once immediately after it is
-+ registered, we expect this. Ignore it, and wait for the
-+ hotplug scripts. */
-+ return;
-+ }
-+ if (err != 2) {
-+ xenbus_dev_fatal(dev, err, "reading physical-device");
-+ return;
-+ }
-+
-+ if ((be->major || be->minor) &&
-+ ((be->major != major) || (be->minor != minor))) {
-+ printk(KERN_WARNING
-+ "blkback: changing physical device (from %x:%x to "
-+ "%x:%x) not supported.\n", be->major, be->minor,
-+ major, minor);
-+ return;
-+ }
-+
-+ be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
-+ if (IS_ERR(be->mode)) {
-+ err = PTR_ERR(be->mode);
-+ be->mode = NULL;
-+ xenbus_dev_fatal(dev, err, "reading mode");
-+ return;
-+ }
-+
-+ if (be->major == 0 && be->minor == 0) {
-+ /* Front end dir is a number, which is used as the handle. */
-+
-+ char *p = strrchr(dev->otherend, '/') + 1;
-+ long handle = simple_strtoul(p, NULL, 0);
-+
-+ be->major = major;
-+ be->minor = minor;
-+
-+ err = vbd_create(be->blkif, handle, major, minor,
-+ (NULL == strchr(be->mode, 'w')));
-+ if (err) {
-+ be->major = be->minor = 0;
-+ xenbus_dev_fatal(dev, err, "creating vbd structure");
-+ return;
-+ }
-+
-+ err = xenvbd_sysfs_addif(dev);
-+ if (err) {
-+ vbd_free(&be->blkif->vbd);
-+ be->major = be->minor = 0;
-+ xenbus_dev_fatal(dev, err, "creating sysfs entries");
-+ return;
-+ }
-+
-+ /* We're potentially connected now */
-+ update_blkif_status(be->blkif);
-+
-+ /* Add a watch for cdrom media status if necessary */
-+ cdrom_add_media_watch(be);
-+ }
-+}
-+
-+
-+/**
-+ * Callback received when the frontend's state changes.
-+ */
-+static void frontend_changed(struct xenbus_device *dev,
-+ enum xenbus_state frontend_state)
-+{
-+ struct backend_info *be = dev->dev.driver_data;
-+ int err;
-+
-+ DPRINTK("%s", xenbus_strstate(frontend_state));
-+
-+ switch (frontend_state) {
-+ case XenbusStateInitialising:
-+ if (dev->state == XenbusStateClosed) {
-+ printk(KERN_INFO "%s: %s: prepare for reconnect\n",
-+ __FUNCTION__, dev->nodename);
-+ xenbus_switch_state(dev, XenbusStateInitWait);
-+ }
-+ break;
-+
-+ case XenbusStateInitialised:
-+ case XenbusStateConnected:
-+ /* Ensure we connect even when two watches fire in
-+ close succession and we miss the intermediate value
-+ of frontend_state. */
-+ if (dev->state == XenbusStateConnected)
-+ break;
-+
-+ err = connect_ring(be);
-+ if (err)
-+ break;
-+ update_blkif_status(be->blkif);
-+ break;
-+
-+ case XenbusStateClosing:
-+ blkif_disconnect(be->blkif);
-+ xenbus_switch_state(dev, XenbusStateClosing);
-+ break;
-+
-+ case XenbusStateClosed:
-+ xenbus_switch_state(dev, XenbusStateClosed);
-+ if (xenbus_dev_is_online(dev))
-+ break;
-+ /* fall through if not online */
-+ case XenbusStateUnknown:
-+ device_unregister(&dev->dev);
-+ break;
-+
-+ default:
-+ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
-+ frontend_state);
-+ break;
-+ }
-+}
-+
-+
-+/* ** Connection ** */
-+
-+
-+/**
-+ * Write the physical details regarding the block device to the store, and
-+ * switch to Connected state.
-+ */
-+static void connect(struct backend_info *be)
-+{
-+ struct xenbus_transaction xbt;
-+ int err;
-+ struct xenbus_device *dev = be->dev;
-+
-+ DPRINTK("%s", dev->otherend);
-+
-+ /* Supply the information about the device the frontend needs */
-+again:
-+ err = xenbus_transaction_start(&xbt);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "starting transaction");
-+ return;
-+ }
-+
-+ err = blkback_barrier(xbt, be, 1);
-+ if (err)
-+ goto abort;
-+
-+ err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
-+ vbd_size(&be->blkif->vbd));
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "writing %s/sectors",
-+ dev->nodename);
-+ goto abort;
-+ }
-+
-+ /* FIXME: use a typename instead */
-+ err = xenbus_printf(xbt, dev->nodename, "info", "%u",
-+ vbd_info(&be->blkif->vbd));
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "writing %s/info",
-+ dev->nodename);
-+ goto abort;
-+ }
-+ err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
-+ vbd_secsize(&be->blkif->vbd));
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "writing %s/sector-size",
-+ dev->nodename);
-+ goto abort;
-+ }
-+
-+ err = xenbus_transaction_end(xbt, 0);
-+ if (err == -EAGAIN)
-+ goto again;
-+ if (err)
-+ xenbus_dev_fatal(dev, err, "ending transaction");
-+
-+ err = xenbus_switch_state(dev, XenbusStateConnected);
-+ if (err)
-+ xenbus_dev_fatal(dev, err, "switching to Connected state",
-+ dev->nodename);
-+
-+ return;
-+ abort:
-+ xenbus_transaction_end(xbt, 1);
-+}
-+
-+static int connect_ring(struct backend_info *be)
-+{
-+ struct xenbus_device *dev = be->dev;
-+ unsigned long ring_ref;
-+ unsigned int evtchn;
-+ char protocol[64] = "";
-+ int err;
-+
-+ DPRINTK("%s", dev->otherend);
-+
-+ err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu", &ring_ref,
-+ "event-channel", "%u", &evtchn, NULL);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err,
-+ "reading %s/ring-ref and event-channel",
-+ dev->otherend);
-+ return err;
-+ }
-+
-+ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
-+ err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
-+ "%63s", protocol, NULL);
-+ if (err) {
-+ strcpy(protocol, "unspecified");
-+ be->blkif->blk_protocol = xen_guest_blkif_protocol(be->blkif->domid);
-+ }
-+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
-+ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
-+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
-+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
-+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
-+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
-+#if 1 /* maintain compatibility with early sles10-sp1 and paravirt netware betas */
-+ else if (0 == strcmp(protocol, "1"))
-+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
-+ else if (0 == strcmp(protocol, "2"))
-+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
-+#endif
-+ else {
-+ xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
-+ return -1;
-+ }
-+ printk(KERN_INFO
-+ "blkback: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
-+ ring_ref, evtchn, be->blkif->blk_protocol, protocol);
-+
-+ /* Map the shared frame, irq etc. */
-+ err = blkif_map(be->blkif, ring_ref, evtchn);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
-+ ring_ref, evtchn);
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+
-+/* ** Driver Registration ** */
-+
-+
-+static struct xenbus_device_id blkback_ids[] = {
-+ { "vbd" },
-+ { "" }
-+};
-+
-+
-+static struct xenbus_driver blkback = {
-+ .name = "vbd",
-+ .owner = THIS_MODULE,
-+ .ids = blkback_ids,
-+ .probe = blkback_probe,
-+ .remove = blkback_remove,
-+ .otherend_changed = frontend_changed
-+};
-+
-+
-+void blkif_xenbus_init(void)
-+{
-+ xenbus_register_backend(&blkback);
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blkfront/blkfront.c ubuntu-gutsy-xen/drivers/xen/blkfront/blkfront.c
---- ubuntu-gutsy/drivers/xen/blkfront/blkfront.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blkfront/blkfront.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,912 @@
-+/******************************************************************************
-+ * blkfront.c
-+ *
-+ * XenLinux virtual block-device driver.
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
-+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
-+ * Copyright (c) 2004, Christian Limpach
-+ * Copyright (c) 2004, Andrew Warfield
-+ * Copyright (c) 2005, Christopher Clark
-+ * Copyright (c) 2005, XenSource Ltd
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/version.h>
-+#include "block.h"
-+#include <linux/cdrom.h>
-+#include <linux/sched.h>
-+#include <linux/interrupt.h>
-+#include <scsi/scsi.h>
-+#include <xen/evtchn.h>
-+#include <xen/xenbus.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/interface/io/protocols.h>
-+#include <xen/gnttab.h>
-+#include <asm/hypervisor.h>
-+#include <asm/maddr.h>
-+
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
-+#define BLKIF_STATE_DISCONNECTED 0
-+#define BLKIF_STATE_CONNECTED 1
-+#define BLKIF_STATE_SUSPENDED 2
-+
-+#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
-+ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
-+#define GRANT_INVALID_REF 0
-+
-+static void connect(struct blkfront_info *);
-+static void blkfront_closing(struct xenbus_device *);
-+static int blkfront_remove(struct xenbus_device *);
-+static int talk_to_backend(struct xenbus_device *, struct blkfront_info *);
-+static int setup_blkring(struct xenbus_device *, struct blkfront_info *);
-+
-+static void kick_pending_request_queues(struct blkfront_info *);
-+
-+static irqreturn_t blkif_int(int irq, void *dev_id);
-+static void blkif_restart_queue(struct work_struct *arg);
-+static void blkif_recover(struct blkfront_info *);
-+static void blkif_completion(struct blk_shadow *);
-+static void blkif_free(struct blkfront_info *, int);
-+
-+
-+/**
-+ * Entry point to this code when a new device is created. Allocate the basic
-+ * structures and the ring buffer for communication with the backend, and
-+ * inform the backend of the appropriate details for those. Switch to
-+ * Initialised state.
-+ */
-+static int blkfront_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err, vdevice, i;
-+ struct blkfront_info *info;
-+
-+ /* FIXME: Use dynamic device id if this is not set. */
-+ err = xenbus_scanf(XBT_NIL, dev->nodename,
-+ "virtual-device", "%i", &vdevice);
-+ if (err != 1) {
-+ xenbus_dev_fatal(dev, err, "reading virtual-device");
-+ return err;
-+ }
-+
-+ info = kzalloc(sizeof(*info), GFP_KERNEL);
-+ if (!info) {
-+ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
-+ return -ENOMEM;
-+ }
-+
-+ info->xbdev = dev;
-+ info->vdevice = vdevice;
-+ info->connected = BLKIF_STATE_DISCONNECTED;
-+ INIT_WORK(&info->work, blkif_restart_queue);
-+
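-+ /* Chain the shadow entries into a free list through their req.id
-+ fields; the last entry holds a terminator value. */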
-+ for (i = 0; i < BLK_RING_SIZE; i++)
-+ info->shadow[i].req.id = i+1;
-+ info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
-+
-+ /* Front end dir is a number, which is used as the id. */
-+ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0);
-+ dev->dev.driver_data = info;
-+
-+ err = talk_to_backend(dev, info);
-+ if (err) {
-+ kfree(info);
-+ dev->dev.driver_data = NULL;
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+
-+/**
-+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
-+ * driver restart. We tear down our blkif structure and recreate it, but
-+ * leave the device-layer structures intact so that this is transparent to the
-+ * rest of the kernel.
-+ */
-+static int blkfront_resume(struct xenbus_device *dev)
-+{
-+ struct blkfront_info *info = dev->dev.driver_data;
-+ int err;
-+
-+ DPRINTK("blkfront_resume: %s\n", dev->nodename);
-+
-+ blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
-+
-+ err = talk_to_backend(dev, info);
-+ if (info->connected == BLKIF_STATE_SUSPENDED && !err)
-+ blkif_recover(info);
-+
-+ return err;
-+}
-+
-+
-+/* Common code used when first setting up, and when resuming. */
-+static int talk_to_backend(struct xenbus_device *dev,
-+ struct blkfront_info *info)
-+{
-+ const char *message = NULL;
-+ struct xenbus_transaction xbt;
-+ int err;
-+
-+ /* Create shared ring, alloc event channel. */
-+ err = setup_blkring(dev, info);
-+ if (err)
-+ goto out;
-+
-+again:
-+ err = xenbus_transaction_start(&xbt);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "starting transaction");
-+ goto destroy_blkring;
-+ }
-+
-+ err = xenbus_printf(xbt, dev->nodename,
-+ "ring-ref","%u", info->ring_ref);
-+ if (err) {
-+ message = "writing ring-ref";
-+ goto abort_transaction;
-+ }
-+ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
-+ irq_to_evtchn_port(info->irq));
-+ if (err) {
-+ message = "writing event-channel";
-+ goto abort_transaction;
-+ }
-+ err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
-+ XEN_IO_PROTO_ABI_NATIVE);
-+ if (err) {
-+ message = "writing protocol";
-+ goto abort_transaction;
-+ }
-+
-+ err = xenbus_transaction_end(xbt, 0);
-+ if (err) {
-+ if (err == -EAGAIN)
-+ goto again;
-+ xenbus_dev_fatal(dev, err, "completing transaction");
-+ goto destroy_blkring;
-+ }
-+
-+ xenbus_switch_state(dev, XenbusStateInitialised);
-+
-+ return 0;
-+
-+ abort_transaction:
-+ xenbus_transaction_end(xbt, 1);
-+ if (message)
-+ xenbus_dev_fatal(dev, err, "%s", message);
-+ destroy_blkring:
-+ blkif_free(info, 0);
-+ out:
-+ return err;
-+}
-+
-+
-+static int setup_blkring(struct xenbus_device *dev,
-+ struct blkfront_info *info)
-+{
-+ blkif_sring_t *sring;
-+ int err;
-+
-+ info->ring_ref = GRANT_INVALID_REF;
-+
-+ sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
-+ if (!sring) {
-+ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
-+ return -ENOMEM;
-+ }
-+ SHARED_RING_INIT(sring);
-+ FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
-+
-+ err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
-+ if (err < 0) {
-+ free_page((unsigned long)sring);
-+ info->ring.sring = NULL;
-+ goto fail;
-+ }
-+ info->ring_ref = err;
-+
-+ err = bind_listening_port_to_irqhandler(
-+ dev->otherend_id, blkif_int, IRQF_SAMPLE_RANDOM, "blkif", info);
-+ if (err <= 0) {
-+ xenbus_dev_fatal(dev, err,
-+ "bind_listening_port_to_irqhandler");
-+ goto fail;
-+ }
-+ info->irq = err;
-+
-+ return 0;
-+fail:
-+ blkif_free(info, 0);
-+ return err;
-+}
-+
-+
-+/**
-+ * Callback received when the backend's state changes.
-+ */
-+static void backend_changed(struct xenbus_device *dev,
-+ enum xenbus_state backend_state)
-+{
-+ struct blkfront_info *info = dev->dev.driver_data;
-+ struct block_device *bd;
-+
-+ DPRINTK("blkfront:backend_changed.\n");
-+
-+ switch (backend_state) {
-+ case XenbusStateInitialising:
-+ case XenbusStateInitWait:
-+ case XenbusStateInitialised:
-+ case XenbusStateUnknown:
-+ case XenbusStateClosed:
-+ break;
-+
-+ case XenbusStateConnected:
-+ connect(info);
-+ break;
-+
-+ case XenbusStateClosing:
-+ bd = bdget(info->dev);
-+ if (bd == NULL)
-+ xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-+ down(&bd->bd_sem);
-+#else
-+ mutex_lock(&bd->bd_mutex);
-+#endif
-+ if (info->users > 0)
-+ xenbus_dev_error(dev, -EBUSY,
-+ "Device in use; refusing to close");
-+ else
-+ blkfront_closing(dev);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-+ up(&bd->bd_sem);
-+#else
-+ mutex_unlock(&bd->bd_mutex);
-+#endif
-+ bdput(bd);
-+ break;
-+ }
-+}
-+
-+
-+/* ** Connection ** */
-+
-+
-+/*
-+ * Invoked when the backend is finally 'ready' (and has produced the
-+ * details about the physical device - #sectors, size, etc).
-+ */
-+static void connect(struct blkfront_info *info)
-+{
-+ unsigned long long sectors;
-+ unsigned long sector_size;
-+ unsigned int binfo;
-+ int err;
-+
-+ if ((info->connected == BLKIF_STATE_CONNECTED) ||
-+ (info->connected == BLKIF_STATE_SUSPENDED) )
-+ return;
-+
-+ DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend);
-+
-+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-+ "sectors", "%Lu", &sectors,
-+ "info", "%u", &binfo,
-+ "sector-size", "%lu", &sector_size,
-+ NULL);
-+ if (err) {
-+ xenbus_dev_fatal(info->xbdev, err,
-+ "reading backend fields at %s",
-+ info->xbdev->otherend);
-+ return;
-+ }
-+
-+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-+ "feature-barrier", "%lu", &info->feature_barrier,
-+ NULL);
-+ if (err)
-+ info->feature_barrier = 0;
-+
-+ err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
-+ if (err) {
-+ xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
-+ info->xbdev->otherend);
-+ return;
-+ }
-+
-+ (void)xenbus_switch_state(info->xbdev, XenbusStateConnected);
-+
-+ /* Kick pending requests. */
-+ spin_lock_irq(&blkif_io_lock);
-+ info->connected = BLKIF_STATE_CONNECTED;
-+ kick_pending_request_queues(info);
-+ spin_unlock_irq(&blkif_io_lock);
-+
-+ add_disk(info->gd);
-+
-+ info->is_ready = 1;
-+}
-+
-+/**
-+ * Handle the change of state of the backend to Closing. We must delete our
-+ * device-layer structures now, to ensure that writes are flushed through to
-+ * the backend. Once this is done, we can switch to Closed in
-+ * acknowledgement.
-+ */
-+static void blkfront_closing(struct xenbus_device *dev)
-+{
-+ struct blkfront_info *info = dev->dev.driver_data;
-+ unsigned long flags;
-+
-+ DPRINTK("blkfront_closing: %s removed\n", dev->nodename);
-+
-+ if (info->rq == NULL)
-+ goto out;
-+
-+ spin_lock_irqsave(&blkif_io_lock, flags);
-+ /* No more blkif_request(). */
-+ blk_stop_queue(info->rq);
-+ /* No more gnttab callback work. */
-+ gnttab_cancel_free_callback(&info->callback);
-+ spin_unlock_irqrestore(&blkif_io_lock, flags);
-+
-+ /* Flush gnttab callback work. Must be done with no locks held. */
-+ flush_scheduled_work();
-+
-+ xlvbd_del(info);
-+
-+ out:
-+ xenbus_frontend_closed(dev);
-+}
-+
-+
-+static int blkfront_remove(struct xenbus_device *dev)
-+{
-+ struct blkfront_info *info = dev->dev.driver_data;
-+
-+ DPRINTK("blkfront_remove: %s removed\n", dev->nodename);
-+
-+ blkif_free(info, 0);
-+
-+ kfree(info);
-+
-+ return 0;
-+}
-+
-+
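-+/* The shadow free list is threaded through the otherwise unused req.id
-+ * fields: claiming an entry poisons its id for debugging, and releasing
-+ * it pushes the entry back onto the head of the list. */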
-+static inline int GET_ID_FROM_FREELIST(
-+ struct blkfront_info *info)
-+{
-+ unsigned long free = info->shadow_free;
-+ BUG_ON(free > BLK_RING_SIZE);
-+ info->shadow_free = info->shadow[free].req.id;
-+ info->shadow[free].req.id = 0x0fffffee; /* debug */
-+ return free;
-+}
-+
-+static inline void ADD_ID_TO_FREELIST(
-+ struct blkfront_info *info, unsigned long id)
-+{
-+ info->shadow[id].req.id = info->shadow_free;
-+ info->shadow[id].request = 0;
-+ info->shadow_free = id;
-+}
-+
-+static inline void flush_requests(struct blkfront_info *info)
-+{
-+ int notify;
-+
-+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
-+
-+ if (notify)
-+ notify_remote_via_irq(info->irq);
-+}
-+
-+static void kick_pending_request_queues(struct blkfront_info *info)
-+{
-+ if (!RING_FULL(&info->ring)) {
-+ /* Re-enable calldowns. */
-+ blk_start_queue(info->rq);
-+ /* Kick things off immediately. */
-+ do_blkif_request(info->rq);
-+ }
-+}
-+
-+static void blkif_restart_queue(struct work_struct *arg)
-+{
-+ struct blkfront_info *info = container_of(arg, struct blkfront_info, work);
-+ spin_lock_irq(&blkif_io_lock);
-+ if (info->connected == BLKIF_STATE_CONNECTED)
-+ kick_pending_request_queues(info);
-+ spin_unlock_irq(&blkif_io_lock);
-+}
-+
-+static void blkif_restart_queue_callback(void *arg)
-+{
-+ struct blkfront_info *info = (struct blkfront_info *)arg;
-+ schedule_work(&info->work);
-+}
-+
-+int blkif_open(struct inode *inode, struct file *filep)
-+{
-+ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
-+ info->users++;
-+ return 0;
-+}
-+
-+
-+int blkif_release(struct inode *inode, struct file *filep)
-+{
-+ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
-+ info->users--;
-+ if (info->users == 0) {
-+ /* Check whether we have been instructed to close. We will
-+ have ignored this request initially, as the device was
-+ still mounted. */
-+ struct xenbus_device * dev = info->xbdev;
-+ enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
-+
-+ if (state == XenbusStateClosing)
-+ blkfront_closing(dev);
-+ }
-+ return 0;
-+}
-+
-+
-+int blkif_ioctl(struct inode *inode, struct file *filep,
-+ unsigned command, unsigned long argument)
-+{
-+ int i;
-+
-+ DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
-+ command, (long)argument, inode->i_rdev);
-+
-+ switch (command) {
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
-+ case HDIO_GETGEO: {
-+ struct block_device *bd = inode->i_bdev;
-+ struct hd_geometry geo;
-+ int ret;
-+
-+ if (!argument)
-+ return -EINVAL;
-+
-+ geo.start = get_start_sect(bd);
-+ ret = blkif_getgeo(bd, &geo);
-+ if (ret)
-+ return ret;
-+
-+ if (copy_to_user((struct hd_geometry __user *)argument, &geo,
-+ sizeof(geo)))
-+ return -EFAULT;
-+
-+ return 0;
-+ }
-+#endif
-+ case CDROMMULTISESSION:
-+ DPRINTK("FIXME: support multisession CDs later\n");
-+ for (i = 0; i < sizeof(struct cdrom_multisession); i++)
-+ if (put_user(0, (char __user *)(argument + i)))
-+ return -EFAULT;
-+ return 0;
-+
-+ default:
-+ /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
-+ command);*/
-+ return -EINVAL; /* same return as native Linux */
-+ }
-+
-+ return 0;
-+}
-+
-+
-+int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
-+{
-+ /* We don't have real geometry info, but let's at least return
-+ values consistent with the size of the device */
-+ sector_t nsect = get_capacity(bd->bd_disk);
-+ sector_t cylinders = nsect;
-+
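-+ /* Advertise a fixed 255-head, 63-sector geometry and derive the
-+ cylinder count from the capacity, clamping it when the disk is
-+ too large for CHS to describe exactly. */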
-+ hg->heads = 0xff;
-+ hg->sectors = 0x3f;
-+ sector_div(cylinders, hg->heads * hg->sectors);
-+ hg->cylinders = cylinders;
-+ if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
-+ hg->cylinders = 0xffff;
-+ return 0;
-+}
-+
-+
-+/*
-+ * blkif_queue_request
-+ *
-+ * request block io
-+ *
-+ * id: for guest use only.
-+ * operation: BLKIF_OP_{READ,WRITE,PROBE}
-+ * buffer: buffer to read/write into. this should be a
-+ * virtual address in the guest os.
-+ */
-+static int blkif_queue_request(struct request *req)
-+{
-+ struct blkfront_info *info = req->rq_disk->private_data;
-+ unsigned long buffer_mfn;
-+ blkif_request_t *ring_req;
-+ struct bio *bio;
-+ struct bio_vec *bvec;
-+ int idx;
-+ unsigned long id;
-+ unsigned int fsect, lsect;
-+ int ref;
-+ grant_ref_t gref_head;
-+
-+ if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
-+ return 1;
-+
-+ if (gnttab_alloc_grant_references(
-+ BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
-+ gnttab_request_free_callback(
-+ &info->callback,
-+ blkif_restart_queue_callback,
-+ info,
-+ BLKIF_MAX_SEGMENTS_PER_REQUEST);
-+ return 1;
-+ }
-+
-+ /* Fill out a communications ring structure. */
-+ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
-+ id = GET_ID_FROM_FREELIST(info);
-+ info->shadow[id].request = (unsigned long)req;
-+
-+ ring_req->id = id;
-+ ring_req->sector_number = (blkif_sector_t)req->sector;
-+ ring_req->handle = info->handle;
-+
-+ ring_req->operation = rq_data_dir(req) ?
-+ BLKIF_OP_WRITE : BLKIF_OP_READ;
-+ if (blk_barrier_rq(req))
-+ ring_req->operation = BLKIF_OP_WRITE_BARRIER;
-+
-+ ring_req->nr_segments = 0;
-+ rq_for_each_bio (bio, req) {
-+ bio_for_each_segment (bvec, bio, idx) {
-+ BUG_ON(ring_req->nr_segments
-+ == BLKIF_MAX_SEGMENTS_PER_REQUEST);
-+ buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
-+ fsect = bvec->bv_offset >> 9;
-+ lsect = fsect + (bvec->bv_len >> 9) - 1;
-+ /* install a grant reference. */
-+ ref = gnttab_claim_grant_reference(&gref_head);
-+ BUG_ON(ref == -ENOSPC);
-+
-+ gnttab_grant_foreign_access_ref(
-+ ref,
-+ info->xbdev->otherend_id,
-+ buffer_mfn,
-+ rq_data_dir(req) );
-+
-+ info->shadow[id].frame[ring_req->nr_segments] =
-+ mfn_to_pfn(buffer_mfn);
-+
-+ ring_req->seg[ring_req->nr_segments] =
-+ (struct blkif_request_segment) {
-+ .gref = ref,
-+ .first_sect = fsect,
-+ .last_sect = lsect };
-+
-+ ring_req->nr_segments++;
-+ }
-+ }
-+
-+ info->ring.req_prod_pvt++;
-+
-+ /* Keep a private copy so we can reissue requests when recovering. */
-+ info->shadow[id].req = *ring_req;
-+
-+ gnttab_free_grant_references(gref_head);
-+
-+ return 0;
-+}
-+
-+/*
-+ * do_blkif_request
-+ * read a block; request is in a request queue
-+ */
-+void do_blkif_request(request_queue_t *rq)
-+{
-+ struct blkfront_info *info = NULL;
-+ struct request *req;
-+ int queued;
-+
-+ DPRINTK("Entered do_blkif_request\n");
-+
-+ queued = 0;
-+
-+ while ((req = elv_next_request(rq)) != NULL) {
-+ info = req->rq_disk->private_data;
-+ if (!blk_fs_request(req)) {
-+ end_request(req, 0);
-+ continue;
-+ }
-+
-+ if (RING_FULL(&info->ring))
-+ goto wait;
-+
-+ DPRINTK("do_blk_req %p: cmd %p, sec %llx, "
-+ "(%u/%li) buffer:%p [%s]\n",
-+ req, req->cmd, (long long)req->sector,
-+ req->current_nr_sectors,
-+ req->nr_sectors, req->buffer,
-+ rq_data_dir(req) ? "write" : "read");
-+
-+
-+ blkdev_dequeue_request(req);
-+ if (blkif_queue_request(req)) {
-+ blk_requeue_request(rq, req);
-+ wait:
-+ /* Avoid pointless unplugs. */
-+ blk_stop_queue(rq);
-+ break;
-+ }
-+
-+ queued++;
-+ }
-+
-+ if (queued != 0)
-+ flush_requests(info);
-+}
-+
-+
-+static irqreturn_t blkif_int(int irq, void *dev_id)
-+{
-+ struct request *req;
-+ blkif_response_t *bret;
-+ RING_IDX i, rp;
-+ unsigned long flags;
-+ struct blkfront_info *info = (struct blkfront_info *)dev_id;
-+ int uptodate;
-+
-+ spin_lock_irqsave(&blkif_io_lock, flags);
-+
-+ if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
-+ spin_unlock_irqrestore(&blkif_io_lock, flags);
-+ return IRQ_HANDLED;
-+ }
-+
-+ again:
-+ rp = info->ring.sring->rsp_prod;
-+ rmb(); /* Ensure we see queued responses up to 'rp'. */
-+
-+ for (i = info->ring.rsp_cons; i != rp; i++) {
-+ unsigned long id;
-+ int ret;
-+
-+ bret = RING_GET_RESPONSE(&info->ring, i);
-+ id = bret->id;
-+ req = (struct request *)info->shadow[id].request;
-+
-+ blkif_completion(&info->shadow[id]);
-+
-+ ADD_ID_TO_FREELIST(info, id);
-+
-+ uptodate = (bret->status == BLKIF_RSP_OKAY);
-+ switch (bret->operation) {
-+ case BLKIF_OP_WRITE_BARRIER:
-+ if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
-+ printk("blkfront: %s: write barrier op failed\n",
-+ info->gd->disk_name);
-+ uptodate = -EOPNOTSUPP;
-+ info->feature_barrier = 0;
-+ xlvbd_barrier(info);
-+ }
-+ /* fall through */
-+ case BLKIF_OP_READ:
-+ case BLKIF_OP_WRITE:
-+ if (unlikely(bret->status != BLKIF_RSP_OKAY))
-+ DPRINTK("Bad return from blkdev data "
-+ "request: %x\n", bret->status);
-+
-+ ret = end_that_request_first(req, uptodate,
-+ req->hard_nr_sectors);
-+ BUG_ON(ret);
-+ end_that_request_last(req, uptodate);
-+ break;
-+ default:
-+ BUG();
-+ }
-+ }
-+
-+ info->ring.rsp_cons = i;
-+
-+ if (i != info->ring.req_prod_pvt) {
-+ int more_to_do;
-+ RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
-+ if (more_to_do)
-+ goto again;
-+ } else
-+ info->ring.sring->rsp_event = i + 1;
-+
-+ kick_pending_request_queues(info);
-+
-+ spin_unlock_irqrestore(&blkif_io_lock, flags);
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static void blkif_free(struct blkfront_info *info, int suspend)
-+{
-+ /* Prevent new requests being issued until we fix things up. */
-+ spin_lock_irq(&blkif_io_lock);
-+ info->connected = suspend ?
-+ BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
-+ /* No more blkif_request(). */
-+ if (info->rq)
-+ blk_stop_queue(info->rq);
-+ /* No more gnttab callback work. */
-+ gnttab_cancel_free_callback(&info->callback);
-+ spin_unlock_irq(&blkif_io_lock);
-+
-+ /* Flush gnttab callback work. Must be done with no locks held. */
-+ flush_scheduled_work();
-+
-+ /* Free resources associated with old device channel. */
-+ if (info->ring_ref != GRANT_INVALID_REF) {
-+ gnttab_end_foreign_access(info->ring_ref, 0,
-+ (unsigned long)info->ring.sring);
-+ info->ring_ref = GRANT_INVALID_REF;
-+ info->ring.sring = NULL;
-+ }
-+ if (info->irq)
-+ unbind_from_irqhandler(info->irq, info);
-+ info->irq = 0;
-+}
-+
-+static void blkif_completion(struct blk_shadow *s)
-+{
-+ int i;
-+ for (i = 0; i < s->req.nr_segments; i++)
-+ gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
-+}
-+
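-+/* Re-issue the requests that were in flight when the connection was torn
-+ * down (e.g. across suspend/resume), rebuilding their grant references
-+ * for the new backend. */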
-+static void blkif_recover(struct blkfront_info *info)
-+{
-+ int i;
-+ blkif_request_t *req;
-+ struct blk_shadow *copy;
-+ int j;
-+
-+ /* Stage 1: Make a safe copy of the shadow state. */
-+ copy = kmalloc(sizeof(info->shadow), GFP_KERNEL | __GFP_NOFAIL);
-+ memcpy(copy, info->shadow, sizeof(info->shadow));
-+
-+ /* Stage 2: Set up free list. */
-+ memset(&info->shadow, 0, sizeof(info->shadow));
-+ for (i = 0; i < BLK_RING_SIZE; i++)
-+ info->shadow[i].req.id = i+1;
-+ info->shadow_free = info->ring.req_prod_pvt;
-+ info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
-+
-+ /* Stage 3: Find pending requests and requeue them. */
-+ for (i = 0; i < BLK_RING_SIZE; i++) {
-+ /* Not in use? */
-+ if (copy[i].request == 0)
-+ continue;
-+
-+ /* Grab a request slot and copy shadow state into it. */
-+ req = RING_GET_REQUEST(
-+ &info->ring, info->ring.req_prod_pvt);
-+ *req = copy[i].req;
-+
-+ /* We get a new request id, and must reset the shadow state. */
-+ req->id = GET_ID_FROM_FREELIST(info);
-+ memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
-+
-+ /* Rewrite any grant references invalidated by susp/resume. */
-+ for (j = 0; j < req->nr_segments; j++)
-+ gnttab_grant_foreign_access_ref(
-+ req->seg[j].gref,
-+ info->xbdev->otherend_id,
-+ pfn_to_mfn(info->shadow[req->id].frame[j]),
-+ rq_data_dir(
-+ (struct request *)
-+ info->shadow[req->id].request));
-+ info->shadow[req->id].req = *req;
-+
-+ info->ring.req_prod_pvt++;
-+ }
-+
-+ kfree(copy);
-+
-+ (void)xenbus_switch_state(info->xbdev, XenbusStateConnected);
-+
-+ spin_lock_irq(&blkif_io_lock);
-+
-+ /* Now safe for us to use the shared ring */
-+ info->connected = BLKIF_STATE_CONNECTED;
-+
-+ /* Send off requeued requests */
-+ flush_requests(info);
-+
-+ /* Kick any other new requests queued since we resumed */
-+ kick_pending_request_queues(info);
-+
-+ spin_unlock_irq(&blkif_io_lock);
-+}
-+
-+int blkfront_is_ready(struct xenbus_device *dev)
-+{
-+ struct blkfront_info *info = dev->dev.driver_data;
-+
-+ return info->is_ready;
-+}
-+
-+
-+/* ** Driver Registration ** */
-+
-+
-+static struct xenbus_device_id blkfront_ids[] = {
-+ { "vbd" },
-+ { "" }
-+};
-+
-+
-+static struct xenbus_driver blkfront = {
-+ .name = "vbd",
-+ .owner = THIS_MODULE,
-+ .ids = blkfront_ids,
-+ .probe = blkfront_probe,
-+ .remove = blkfront_remove,
-+ .resume = blkfront_resume,
-+ .otherend_changed = backend_changed,
-+ .is_ready = blkfront_is_ready,
-+};
-+
-+
-+static int __init xlblk_init(void)
-+{
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ return xenbus_register_frontend(&blkfront);
-+}
-+module_init(xlblk_init);
-+
-+
-+static void xlblk_exit(void)
-+{
-+ return xenbus_unregister_driver(&blkfront);
-+}
-+module_exit(xlblk_exit);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blkfront/block.h ubuntu-gutsy-xen/drivers/xen/blkfront/block.h
---- ubuntu-gutsy/drivers/xen/blkfront/block.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blkfront/block.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,143 @@
-+/******************************************************************************
-+ * block.h
-+ *
-+ * Shared definitions between all levels of XenLinux Virtual block devices.
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
-+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
-+ * Copyright (c) 2004-2005, Christian Limpach
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __XEN_DRIVERS_BLOCK_H__
-+#define __XEN_DRIVERS_BLOCK_H__
-+
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/fs.h>
-+#include <linux/hdreg.h>
-+#include <linux/blkdev.h>
-+#include <linux/major.h>
-+#include <asm/hypervisor.h>
-+#include <xen/xenbus.h>
-+#include <xen/gnttab.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/io/blkif.h>
-+#include <xen/interface/io/ring.h>
-+#include <asm/io.h>
-+#include <asm/atomic.h>
-+#include <asm/uaccess.h>
-+
-+#define DPRINTK(_f, _a...) pr_debug(_f, ## _a)
-+
-+#if 0
-+#define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a)
-+#else
-+#define DPRINTK_IOCTL(_f, _a...) ((void)0)
-+#endif
-+
-+struct xlbd_type_info
-+{
-+ int partn_shift;
-+ int disks_per_major;
-+ char *devname;
-+ char *diskname;
-+};
-+
-+struct xlbd_major_info
-+{
-+ int major;
-+ int index;
-+ int usage;
-+ struct xlbd_type_info *type;
-+};
-+
-+struct blk_shadow {
-+ blkif_request_t req;
-+ unsigned long request;
-+ unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+};
-+
-+#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
-+
-+/*
-+ * We have one of these per vbd, whether ide, scsi or 'other'. They
-+ * hang in private_data off the gendisk structure. We may end up
-+ * putting all kinds of interesting stuff here :-)
-+ */
-+struct blkfront_info
-+{
-+ struct xenbus_device *xbdev;
-+ dev_t dev;
-+ struct gendisk *gd;
-+ int vdevice;
-+ blkif_vdev_t handle;
-+ int connected;
-+ int ring_ref;
-+ blkif_front_ring_t ring;
-+ unsigned int irq;
-+ struct xlbd_major_info *mi;
-+ request_queue_t *rq;
-+ struct work_struct work;
-+ struct gnttab_free_callback callback;
-+ struct blk_shadow shadow[BLK_RING_SIZE];
-+ unsigned long shadow_free;
-+ int feature_barrier;
-+ int is_ready;
-+
-+ /**
-+ * The number of people holding this device open. We won't allow a
-+ * hot-unplug unless this is 0.
-+ */
-+ int users;
-+};
-+
-+extern spinlock_t blkif_io_lock;
-+
-+extern int blkif_open(struct inode *inode, struct file *filep);
-+extern int blkif_release(struct inode *inode, struct file *filep);
-+extern int blkif_ioctl(struct inode *inode, struct file *filep,
-+ unsigned command, unsigned long argument);
-+extern int blkif_getgeo(struct block_device *, struct hd_geometry *);
-+extern int blkif_check(dev_t dev);
-+extern int blkif_revalidate(dev_t dev);
-+extern void do_blkif_request (request_queue_t *rq);
-+
-+/* Virtual block-device subsystem. */
-+/* Note that xlvbd_add doesn't call add_disk for you: you're expected
-+ to call add_disk on info->gd once the disk is properly connected
-+ up. */
-+int xlvbd_add(blkif_sector_t capacity, int device,
-+ u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
-+void xlvbd_del(struct blkfront_info *info);
-+int xlvbd_barrier(struct blkfront_info *info);
-+
-+#endif /* __XEN_DRIVERS_BLOCK_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blkfront/Makefile ubuntu-gutsy-xen/drivers/xen/blkfront/Makefile
---- ubuntu-gutsy/drivers/xen/blkfront/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blkfront/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,5 @@
-+
-+obj-$(CONFIG_XEN_BLKDEV_FRONTEND) := xenblk.o
-+
-+xenblk-objs := blkfront.o vbd.o
-+
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blkfront/vbd.c ubuntu-gutsy-xen/drivers/xen/blkfront/vbd.c
---- ubuntu-gutsy/drivers/xen/blkfront/vbd.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blkfront/vbd.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,372 @@
-+/******************************************************************************
-+ * vbd.c
-+ *
-+ * XenLinux virtual block-device driver (xvd).
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
-+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
-+ * Copyright (c) 2004-2005, Christian Limpach
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include "block.h"
-+#include <linux/blkdev.h>
-+#include <linux/list.h>
-+
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
-+#define BLKIF_MAJOR(dev) ((dev)>>8)
-+#define BLKIF_MINOR(dev) ((dev) & 0xff)
-+
-+/*
-+ * For convenience we distinguish between ide, scsi and 'other' (i.e.,
-+ * potentially combinations of the two) in the naming scheme and in a few other
-+ * places.
-+ */
-+
-+#define NUM_IDE_MAJORS 10
-+#define NUM_SCSI_MAJORS 17
-+#define NUM_VBD_MAJORS 1
-+
-+static struct xlbd_type_info xlbd_ide_type = {
-+ .partn_shift = 6,
-+ .disks_per_major = 2,
-+ .devname = "ide",
-+ .diskname = "hd",
-+};
-+
-+static struct xlbd_type_info xlbd_scsi_type = {
-+ .partn_shift = 4,
-+ .disks_per_major = 16,
-+ .devname = "sd",
-+ .diskname = "sd",
-+};
-+
-+static struct xlbd_type_info xlbd_vbd_type = {
-+ .partn_shift = 4,
-+ .disks_per_major = 16,
-+ .devname = "xvd",
-+ .diskname = "xvd",
-+};
-+
-+static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
-+ NUM_VBD_MAJORS];
-+
-+#define XLBD_MAJOR_IDE_START 0
-+#define XLBD_MAJOR_SCSI_START (NUM_IDE_MAJORS)
-+#define XLBD_MAJOR_VBD_START (NUM_IDE_MAJORS + NUM_SCSI_MAJORS)
-+
-+#define XLBD_MAJOR_IDE_RANGE XLBD_MAJOR_IDE_START ... XLBD_MAJOR_SCSI_START - 1
-+#define XLBD_MAJOR_SCSI_RANGE XLBD_MAJOR_SCSI_START ... XLBD_MAJOR_VBD_START - 1
-+#define XLBD_MAJOR_VBD_RANGE XLBD_MAJOR_VBD_START ... XLBD_MAJOR_VBD_START + NUM_VBD_MAJORS - 1
-+
-+/* Information about our VBDs. */
-+#define MAX_VBDS 64
-+static LIST_HEAD(vbds_list);
-+
-+static struct block_device_operations xlvbd_block_fops =
-+{
-+ .owner = THIS_MODULE,
-+ .open = blkif_open,
-+ .release = blkif_release,
-+ .ioctl = blkif_ioctl,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ .getgeo = blkif_getgeo
-+#endif
-+};
-+
-+DEFINE_SPINLOCK(blkif_io_lock);
-+
-+static struct xlbd_major_info *
-+xlbd_alloc_major_info(int major, int minor, int index)
-+{
-+ struct xlbd_major_info *ptr;
-+
-+ ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL);
-+ if (ptr == NULL)
-+ return NULL;
-+
-+ ptr->major = major;
-+
-+ switch (index) {
-+ case XLBD_MAJOR_IDE_RANGE:
-+ ptr->type = &xlbd_ide_type;
-+ ptr->index = index - XLBD_MAJOR_IDE_START;
-+ break;
-+ case XLBD_MAJOR_SCSI_RANGE:
-+ ptr->type = &xlbd_scsi_type;
-+ ptr->index = index - XLBD_MAJOR_SCSI_START;
-+ break;
-+ case XLBD_MAJOR_VBD_RANGE:
-+ ptr->type = &xlbd_vbd_type;
-+ ptr->index = index - XLBD_MAJOR_VBD_START;
-+ break;
-+ }
-+
-+ if (register_blkdev(ptr->major, ptr->type->devname)) {
-+ kfree(ptr);
-+ return NULL;
-+ }
-+
-+ printk("xen-vbd: registered block device major %i\n", ptr->major);
-+ major_info[index] = ptr;
-+ return ptr;
-+}
-+
-+static struct xlbd_major_info *
-+xlbd_get_major_info(int vdevice)
-+{
-+ struct xlbd_major_info *mi;
-+ int major, minor, index;
-+
-+ major = BLKIF_MAJOR(vdevice);
-+ minor = BLKIF_MINOR(vdevice);
-+
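-+ /* Map the Linux block major onto a slot in major_info[]: slots 0-9
-+ cover the IDE majors, 10-26 the SCSI majors, and 27 everything
-+ else (the xvd range). */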
-+ switch (major) {
-+ case IDE0_MAJOR: index = 0; break;
-+ case IDE1_MAJOR: index = 1; break;
-+ case IDE2_MAJOR: index = 2; break;
-+ case IDE3_MAJOR: index = 3; break;
-+ case IDE4_MAJOR: index = 4; break;
-+ case IDE5_MAJOR: index = 5; break;
-+ case IDE6_MAJOR: index = 6; break;
-+ case IDE7_MAJOR: index = 7; break;
-+ case IDE8_MAJOR: index = 8; break;
-+ case IDE9_MAJOR: index = 9; break;
-+ case SCSI_DISK0_MAJOR: index = 10; break;
-+ case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
-+ index = 11 + major - SCSI_DISK1_MAJOR;
-+ break;
-+ case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR:
-+ index = 18 + major - SCSI_DISK8_MAJOR;
-+ break;
-+ case SCSI_CDROM_MAJOR: index = 26; break;
-+ default: index = 27; break;
-+ }
-+
-+ mi = ((major_info[index] != NULL) ? major_info[index] :
-+ xlbd_alloc_major_info(major, minor, index));
-+ if (mi)
-+ mi->usage++;
-+ return mi;
-+}
-+
-+static void
-+xlbd_put_major_info(struct xlbd_major_info *mi)
-+{
-+ mi->usage--;
-+ /* XXX: release major if 0 */
-+}
-+
-+static int
-+xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
-+{
-+ request_queue_t *rq;
-+
-+ rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
-+ if (rq == NULL)
-+ return -1;
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
-+ elevator_init(rq, "noop");
-+#else
-+ elevator_init(rq, &elevator_noop);
-+#endif
-+
-+ /* Hard sector size and max sectors impersonate the equiv. hardware. */
-+ blk_queue_hardsect_size(rq, sector_size);
-+ blk_queue_max_sectors(rq, 512);
-+
-+ /* Each segment in a request is up to an aligned page in size. */
-+ blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
-+ blk_queue_max_segment_size(rq, PAGE_SIZE);
-+
-+ /* Ensure a merged request will fit in a single I/O ring slot. */
-+ blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-+ blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-+
-+ /* Make sure buffer addresses are sector-aligned. */
-+ blk_queue_dma_alignment(rq, 511);
-+
-+ gd->queue = rq;
-+
-+ return 0;
-+}
-+
-+static int
-+xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity, int vdevice,
-+ u16 vdisk_info, u16 sector_size,
-+ struct blkfront_info *info)
-+{
-+ struct gendisk *gd;
-+ struct xlbd_major_info *mi;
-+ int nr_minors = 1;
-+ int err = -ENODEV;
-+ unsigned int offset;
-+
-+ BUG_ON(info->gd != NULL);
-+ BUG_ON(info->mi != NULL);
-+ BUG_ON(info->rq != NULL);
-+
-+ mi = xlbd_get_major_info(vdevice);
-+ if (mi == NULL)
-+ goto out;
-+ info->mi = mi;
-+
-+ if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0)
-+ nr_minors = 1 << mi->type->partn_shift;
-+
-+ gd = alloc_disk(nr_minors);
-+ if (gd == NULL)
-+ goto out;
-+
-+ offset = mi->index * mi->type->disks_per_major +
-+ (minor >> mi->type->partn_shift);
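-+ /* Build the disk name: offsets 0-25 map to a single drive letter,
-+ larger offsets to two letters ("aa", "ab", ...); devices that
-+ represent a single partition also carry the partition number. */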
-+ if (nr_minors > 1) {
-+ if (offset < 26) {
-+ sprintf(gd->disk_name, "%s%c",
-+ mi->type->diskname, 'a' + offset );
-+ }
-+ else {
-+ sprintf(gd->disk_name, "%s%c%c",
-+ mi->type->diskname,
-+ 'a' + ((offset/26)-1), 'a' + (offset%26) );
-+ }
-+ }
-+ else {
-+ if (offset < 26) {
-+ sprintf(gd->disk_name, "%s%c%d",
-+ mi->type->diskname,
-+ 'a' + offset,
-+ minor & ((1 << mi->type->partn_shift) - 1));
-+ }
-+ else {
-+ sprintf(gd->disk_name, "%s%c%c%d",
-+ mi->type->diskname,
-+ 'a' + ((offset/26)-1), 'a' + (offset%26),
-+ minor & ((1 << mi->type->partn_shift) - 1));
-+ }
-+ }
-+
-+ gd->major = mi->major;
-+ gd->first_minor = minor;
-+ gd->fops = &xlvbd_block_fops;
-+ gd->private_data = info;
-+ gd->driverfs_dev = &(info->xbdev->dev);
-+ set_capacity(gd, capacity);
-+
-+ if (xlvbd_init_blk_queue(gd, sector_size)) {
-+ del_gendisk(gd);
-+ goto out;
-+ }
-+
-+ info->rq = gd->queue;
-+ info->gd = gd;
-+
-+ if (info->feature_barrier)
-+ xlvbd_barrier(info);
-+
-+ if (vdisk_info & VDISK_READONLY)
-+ set_disk_ro(gd, 1);
-+
-+ if (vdisk_info & VDISK_REMOVABLE)
-+ gd->flags |= GENHD_FL_REMOVABLE;
-+
-+ if (vdisk_info & VDISK_CDROM)
-+ gd->flags |= GENHD_FL_CD;
-+
-+ return 0;
-+
-+ out:
-+ if (mi)
-+ xlbd_put_major_info(mi);
-+ info->mi = NULL;
-+ return err;
-+}
-+
-+int
-+xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info,
-+ u16 sector_size, struct blkfront_info *info)
-+{
-+ struct block_device *bd;
-+ int err = 0;
-+
-+ info->dev = MKDEV(BLKIF_MAJOR(vdevice), BLKIF_MINOR(vdevice));
-+
-+ bd = bdget(info->dev);
-+ if (bd == NULL)
-+ return -ENODEV;
-+
-+ err = xlvbd_alloc_gendisk(BLKIF_MINOR(vdevice), capacity, vdevice,
-+ vdisk_info, sector_size, info);
-+
-+ bdput(bd);
-+ return err;
-+}
-+
-+void
-+xlvbd_del(struct blkfront_info *info)
-+{
-+ if (info->mi == NULL)
-+ return;
-+
-+ BUG_ON(info->gd == NULL);
-+ del_gendisk(info->gd);
-+ put_disk(info->gd);
-+ info->gd = NULL;
-+
-+ xlbd_put_major_info(info->mi);
-+ info->mi = NULL;
-+
-+ BUG_ON(info->rq == NULL);
-+ blk_cleanup_queue(info->rq);
-+ info->rq = NULL;
-+}
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+int
-+xlvbd_barrier(struct blkfront_info *info)
-+{
-+ int err;
-+
-+ err = blk_queue_ordered(info->rq,
-+ info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL);
-+ if (err)
-+ return err;
-+ printk("blkfront: %s: barriers %s\n",
-+ info->gd->disk_name, info->feature_barrier ? "enabled" : "disabled");
-+ return 0;
-+}
-+#else
-+int
-+xlvbd_barrier(struct blkfront_info *info)
-+{
-+ printk("blkfront: %s: barriers disabled\n", info->gd->disk_name);
-+ return -ENOSYS;
-+}
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blktap/blktap.c ubuntu-gutsy-xen/drivers/xen/blktap/blktap.c
---- ubuntu-gutsy/drivers/xen/blktap/blktap.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blktap/blktap.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,1526 @@
-+/******************************************************************************
-+ * drivers/xen/blktap/blktap.c
-+ *
-+ * Back-end driver for user level virtual block devices. This portion of the
-+ * driver exports a 'unified' block-device interface that can be accessed
-+ * by any operating system that implements a compatible front end. Requests
-+ * are remapped to a user-space memory region.
-+ *
-+ * Based on the blkback driver code.
-+ *
-+ * Copyright (c) 2004-2005, Andrew Warfield and Julian Chesterfield
-+ *
-+ * Clean ups and fix ups:
-+ * Copyright (c) 2006, Steven Rostedt - Red Hat, Inc.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/spinlock.h>
-+#include <linux/kthread.h>
-+#include <linux/freezer.h>
-+#include <linux/list.h>
-+#include <asm/hypervisor.h>
-+#include "common.h"
-+#include <xen/balloon.h>
-+#include <xen/driver_util.h>
-+#include <linux/kernel.h>
-+#include <linux/fs.h>
-+#include <linux/mm.h>
-+#include <linux/errno.h>
-+#include <linux/major.h>
-+#include <linux/gfp.h>
-+#include <linux/poll.h>
-+#include <asm/tlbflush.h>
-+
-+#define MAX_TAP_DEV 256 /*the maximum number of tapdisk ring devices */
-+#define MAX_DEV_NAME 100 /*the max tapdisk ring device name e.g. blktap0 */
-+
-+/*
-+ * The maximum number of requests that can be outstanding at any time
-+ * is determined by
-+ *
-+ * [mmap_alloc * MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST]
-+ *
-+ * where mmap_alloc < MAX_DYNAMIC_MEM.
-+ *
-+ * TODO:
-+ * mmap_alloc is initialised to 2 and should be adjustable on the fly via
-+ * sysfs.
-+ */
-+#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
-+#define MAX_DYNAMIC_MEM BLK_RING_SIZE
-+#define MAX_PENDING_REQS BLK_RING_SIZE
-+#define MMAP_PAGES (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
-+#define MMAP_VADDR(_start, _req,_seg) \
-+ (_start + \
-+ ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \
-+ ((_seg) * PAGE_SIZE))
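-+/* Each pending request owns BLKIF_MAX_SEGMENTS_PER_REQUEST consecutive pages
-+ * in the mapped area; MMAP_VADDR() locates the page backing a given
-+ * (request, segment) pair. */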
-+static int blkif_reqs = MAX_PENDING_REQS;
-+static int mmap_pages = MMAP_PAGES;
-+
-+#define RING_PAGES 1 /* BLKTAP - immediately before the mmap area, we
-+ * have a bunch of pages reserved for shared
-+ * memory rings.
-+ */
-+
-+/*Data struct handed back to userspace for tapdisk device to VBD mapping*/
-+typedef struct domid_translate {
-+ unsigned short domid;
-+ unsigned short busid;
-+} domid_translate_t ;
-+
-+/*Data struct associated with each of the tapdisk devices*/
-+typedef struct tap_blkif {
-+ struct vm_area_struct *vma; /*Shared memory area */
-+ unsigned long rings_vstart; /*Kernel memory mapping */
-+ unsigned long user_vstart; /*User memory mapping */
-+ unsigned long dev_inuse; /*One process opens device at a time. */
-+ unsigned long dev_pending; /*In process of being opened */
-+ unsigned long ring_ok; /*make this ring->state */
-+ blkif_front_ring_t ufe_ring; /*Rings up to user space. */
-+ wait_queue_head_t wait; /*for poll */
-+ unsigned long mode; /*current switching mode */
-+ int minor; /*Minor number for tapdisk device */
-+ pid_t pid; /*tapdisk process id */
-+ enum { RUNNING, CLEANSHUTDOWN } status; /*Detect a clean userspace
-+ shutdown */
-+ unsigned long *idx_map; /*Record the user ring id to kern
-+ [req id, idx] tuple */
-+ blkif_t *blkif; /*Associate blkif with tapdev */
-+ struct domid_translate trans; /*Translation from domid to bus. */
-+} tap_blkif_t;
-+
-+static struct tap_blkif *tapfds[MAX_TAP_DEV];
-+static int blktap_next_minor;
-+
-+module_param(blkif_reqs, int, 0);
-+/* Run-time switchable: /sys/module/blktap/parameters/ */
-+static unsigned int log_stats = 0;
-+static unsigned int debug_lvl = 0;
-+module_param(log_stats, int, 0644);
-+module_param(debug_lvl, int, 0644);
-+
-+/*
-+ * Each outstanding request that we've passed to the lower device layers has a
-+ * 'pending_req' allocated to it. Each buffer_head that completes decrements
-+ * the pendcnt towards zero. When it hits zero, the specified domain has a
-+ * response queued for it, with the saved 'id' passed back.
-+ */
-+typedef struct {
-+ blkif_t *blkif;
-+ u64 id;
-+ unsigned short mem_idx;
-+ int nr_pages;
-+ atomic_t pendcnt;
-+ unsigned short operation;
-+ int status;
-+ struct list_head free_list;
-+ int inuse;
-+} pending_req_t;
-+
-+static pending_req_t *pending_reqs[MAX_PENDING_REQS];
-+static struct list_head pending_free;
-+static DEFINE_SPINLOCK(pending_free_lock);
-+static DECLARE_WAIT_QUEUE_HEAD (pending_free_wq);
-+static int alloc_pending_reqs;
-+
-+typedef unsigned int PEND_RING_IDX;
-+
-+static inline int MASK_PEND_IDX(int i) {
-+ return (i & (MAX_PENDING_REQS-1));
-+}
-+
-+static inline unsigned int RTN_PEND_IDX(pending_req_t *req, int idx) {
-+ return (req - pending_reqs[idx]);
-+}
-+
-+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
-+
-+#define BLKBACK_INVALID_HANDLE (~0)
-+
-+static struct page **foreign_pages[MAX_DYNAMIC_MEM];
-+static inline unsigned long idx_to_kaddr(
-+ unsigned int mmap_idx, unsigned int req_idx, unsigned int sg_idx)
-+{
-+ unsigned int arr_idx = req_idx*BLKIF_MAX_SEGMENTS_PER_REQUEST + sg_idx;
-+ unsigned long pfn = page_to_pfn(foreign_pages[mmap_idx][arr_idx]);
-+ return (unsigned long)pfn_to_kaddr(pfn);
-+}
-+
-+static unsigned short mmap_alloc = 0;
-+static unsigned short mmap_lock = 0;
-+static unsigned short mmap_inuse = 0;
-+
-+/******************************************************************
-+ * GRANT HANDLES
-+ */
-+
-+/* When using grant tables to map a frame for device access, the
-+ * handle returned must be used to unmap the frame. This is needed to
-+ * drop the ref count on the frame.
-+ */
-+struct grant_handle_pair
-+{
-+ grant_handle_t kernel;
-+ grant_handle_t user;
-+};
-+#define INVALID_GRANT_HANDLE 0xFFFF
-+
-+static struct grant_handle_pair
-+ pending_grant_handles[MAX_DYNAMIC_MEM][MMAP_PAGES];
-+#define pending_handle(_id, _idx, _i) \
-+ (pending_grant_handles[_id][((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) \
-+ + (_i)])
-+
-+
-+static int blktap_read_ufe_ring(tap_blkif_t *info); /*local prototypes*/
-+
-+#define BLKTAP_MINOR 0 /*/dev/xen/blktap has a dynamic major */
-+#define BLKTAP_DEV_DIR "/dev/xen"
-+
-+static int blktap_major;
-+
-+/* blktap IOCTLs: */
-+#define BLKTAP_IOCTL_KICK_FE 1
-+#define BLKTAP_IOCTL_KICK_BE 2 /* currently unused */
-+#define BLKTAP_IOCTL_SETMODE 3
-+#define BLKTAP_IOCTL_SENDPID 4
-+#define BLKTAP_IOCTL_NEWINTF 5
-+#define BLKTAP_IOCTL_MINOR 6
-+#define BLKTAP_IOCTL_MAJOR 7
-+#define BLKTAP_QUERY_ALLOC_REQS 8
-+#define BLKTAP_IOCTL_FREEINTF 9
-+#define BLKTAP_IOCTL_PRINT_IDXS 100
-+
-+/* blktap switching modes: (Set with BLKTAP_IOCTL_SETMODE) */
-+#define BLKTAP_MODE_PASSTHROUGH 0x00000000 /* default */
-+#define BLKTAP_MODE_INTERCEPT_FE 0x00000001
-+#define BLKTAP_MODE_INTERCEPT_BE 0x00000002 /* unimp. */
-+
-+#define BLKTAP_MODE_INTERPOSE \
-+ (BLKTAP_MODE_INTERCEPT_FE | BLKTAP_MODE_INTERCEPT_BE)
-+
-+
-+static inline int BLKTAP_MODE_VALID(unsigned long arg)
-+{
-+ return ((arg == BLKTAP_MODE_PASSTHROUGH ) ||
-+ (arg == BLKTAP_MODE_INTERCEPT_FE) ||
-+ (arg == BLKTAP_MODE_INTERPOSE ));
-+}
-+
-+/* Requests passing through the tap to userspace are re-assigned an ID.
-+ * We must record a mapping between the BE [IDX,ID] tuple and the userspace
-+ * ring ID.
-+ */
-+
-+static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx)
-+{
-+ return ((fe_dom << 16) | MASK_PEND_IDX(idx));
-+}
-+
-+extern inline PEND_RING_IDX ID_TO_IDX(unsigned long id)
-+{
-+ return (PEND_RING_IDX)(id & 0x0000ffff);
-+}
-+
-+extern inline int ID_TO_MIDX(unsigned long id)
-+{
-+ return (int)(id >> 16);
-+}
-+
-+#define INVALID_REQ 0xdead0000
-+
-+/*TODO: Convert to a free list*/
-+static inline int GET_NEXT_REQ(unsigned long *idx_map)
-+{
-+ int i;
-+ for (i = 0; i < MAX_PENDING_REQS; i++)
-+ if (idx_map[i] == INVALID_REQ)
-+ return i;
-+
-+ return INVALID_REQ;
-+}
-+
-+
-+#define BLKTAP_INVALID_HANDLE(_g) \
-+ (((_g->kernel) == INVALID_GRANT_HANDLE) && \
-+ ((_g->user) == INVALID_GRANT_HANDLE))
-+
-+#define BLKTAP_INVALIDATE_HANDLE(_g) do { \
-+ (_g)->kernel = INVALID_GRANT_HANDLE; (_g)->user = INVALID_GRANT_HANDLE; \
-+ } while(0)
-+
-+
-+/******************************************************************
-+ * BLKTAP VM OPS
-+ */
-+
-+static struct page *blktap_nopage(struct vm_area_struct *vma,
-+ unsigned long address,
-+ int *type)
-+{
-+ /*
-+ * if the page has not been mapped in by the driver then return
-+ * NOPAGE_SIGBUS to the domain.
-+ */
-+
-+ return NOPAGE_SIGBUS;
-+}
-+
-+struct vm_operations_struct blktap_vm_ops = {
-+ nopage: blktap_nopage,
-+};
-+
-+/******************************************************************
-+ * BLKTAP FILE OPS
-+ */
-+
-+/*Function Declarations*/
-+static tap_blkif_t *get_next_free_dev(void);
-+static int blktap_open(struct inode *inode, struct file *filp);
-+static int blktap_release(struct inode *inode, struct file *filp);
-+static int blktap_mmap(struct file *filp, struct vm_area_struct *vma);
-+static int blktap_ioctl(struct inode *inode, struct file *filp,
-+ unsigned int cmd, unsigned long arg);
-+static unsigned int blktap_poll(struct file *file, poll_table *wait);
-+
-+static const struct file_operations blktap_fops = {
-+ .owner = THIS_MODULE,
-+ .poll = blktap_poll,
-+ .ioctl = blktap_ioctl,
-+ .open = blktap_open,
-+ .release = blktap_release,
-+ .mmap = blktap_mmap,
-+};
-+
-+
-+static tap_blkif_t *get_next_free_dev(void)
-+{
-+ struct class *class;
-+ tap_blkif_t *info;
-+ int minor;
-+
-+ /*
-+ * This is called only from the ioctl, which
-+ * means we should always have interrupts enabled.
-+ */
-+ BUG_ON(irqs_disabled());
-+
-+ spin_lock_irq(&pending_free_lock);
-+
-+ /* tapfds[0] is always NULL */
-+
-+ for (minor = 1; minor < blktap_next_minor; minor++) {
-+ info = tapfds[minor];
-+ /* we could have failed a previous attempt. */
-+ if (!info ||
-+ ((info->dev_inuse == 0) &&
-+ (info->dev_pending == 0)) ) {
-+ info->dev_pending = 1;
-+ goto found;
-+ }
-+ }
-+ info = NULL;
-+ minor = -1;
-+
-+ /*
-+	 * We didn't find a free device. If we can still allocate
-+ * more, then we grab the next device minor that is
-+ * available. This is done while we are still under
-+ * the protection of the pending_free_lock.
-+ */
-+ if (blktap_next_minor < MAX_TAP_DEV)
-+ minor = blktap_next_minor++;
-+found:
-+ spin_unlock_irq(&pending_free_lock);
-+
-+ if (!info && minor > 0) {
-+ info = kzalloc(sizeof(*info), GFP_KERNEL);
-+ if (unlikely(!info)) {
-+ /*
-+ * If we failed here, try to put back
-+ * the next minor number. But if one
-+ * was just taken, then we just lose this
-+ * minor. We can try to allocate this
-+ * minor again later.
-+ */
-+ spin_lock_irq(&pending_free_lock);
-+ if (blktap_next_minor == minor+1)
-+ blktap_next_minor--;
-+ spin_unlock_irq(&pending_free_lock);
-+ goto out;
-+ }
-+
-+ info->minor = minor;
-+ /*
-+ * Make sure that we have a minor before others can
-+ * see us.
-+ */
-+ wmb();
-+ tapfds[minor] = info;
-+
-+ if ((class = get_xen_class()) != NULL)
-+ class_device_create(class, NULL,
-+ MKDEV(blktap_major, minor), NULL,
-+ "blktap%d", minor);
-+ }
-+
-+out:
-+ return info;
-+}
-+
-+int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif)
-+{
-+ tap_blkif_t *info;
-+ int i;
-+
-+ for (i = 1; i < blktap_next_minor; i++) {
-+ info = tapfds[i];
-+ if ( info &&
-+ (info->trans.domid == domid) &&
-+ (info->trans.busid == xenbus_id) ) {
-+ info->blkif = blkif;
-+ info->status = RUNNING;
-+ return i;
-+ }
-+ }
-+ return -1;
-+}
-+
-+void signal_tapdisk(int idx)
-+{
-+ tap_blkif_t *info;
-+ struct task_struct *ptask;
-+
-+ info = tapfds[idx];
-+ if ((idx < 0) || (idx > MAX_TAP_DEV) || !info)
-+ return;
-+
-+ if (info->pid > 0) {
-+ ptask = find_task_by_pid(info->pid);
-+ if (ptask)
-+ info->status = CLEANSHUTDOWN;
-+ }
-+ info->blkif = NULL;
-+
-+ return;
-+}
-+
-+static int blktap_open(struct inode *inode, struct file *filp)
-+{
-+ blkif_sring_t *sring;
-+ int idx = iminor(inode) - BLKTAP_MINOR;
-+ tap_blkif_t *info;
-+ int i;
-+
-+ /* ctrl device, treat differently */
-+ if (!idx)
-+ return 0;
-+
-+ info = tapfds[idx];
-+
-+ if ((idx < 0) || (idx > MAX_TAP_DEV) || !info) {
-+ WPRINTK("Unable to open device /dev/xen/blktap%d\n",
-+ idx);
-+ return -ENODEV;
-+ }
-+
-+ DPRINTK("Opening device /dev/xen/blktap%d\n",idx);
-+
-+ /*Only one process can access device at a time*/
-+ if (test_and_set_bit(0, &info->dev_inuse))
-+ return -EBUSY;
-+
-+ info->dev_pending = 0;
-+
-+ /* Allocate the fe ring. */
-+ sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
-+ if (sring == NULL)
-+ goto fail_nomem;
-+
-+ SetPageReserved(virt_to_page(sring));
-+
-+ SHARED_RING_INIT(sring);
-+ FRONT_RING_INIT(&info->ufe_ring, sring, PAGE_SIZE);
-+
-+ filp->private_data = info;
-+ info->vma = NULL;
-+
-+ info->idx_map = kmalloc(sizeof(unsigned long) * MAX_PENDING_REQS,
-+ GFP_KERNEL);
-+
-+ if (idx > 0) {
-+ init_waitqueue_head(&info->wait);
-+ for (i = 0; i < MAX_PENDING_REQS; i++)
-+ info->idx_map[i] = INVALID_REQ;
-+ }
-+
-+ DPRINTK("Tap open: device /dev/xen/blktap%d\n",idx);
-+ return 0;
-+
-+ fail_nomem:
-+ return -ENOMEM;
-+}
-+
-+static int blktap_release(struct inode *inode, struct file *filp)
-+{
-+ tap_blkif_t *info = filp->private_data;
-+
-+ /* check for control device */
-+ if (!info)
-+ return 0;
-+
-+ info->dev_inuse = 0;
-+ DPRINTK("Freeing device [/dev/xen/blktap%d]\n",info->minor);
-+
-+ /* Free the ring page. */
-+ ClearPageReserved(virt_to_page(info->ufe_ring.sring));
-+ free_page((unsigned long) info->ufe_ring.sring);
-+
-+ /* Clear any active mappings and free foreign map table */
-+ if (info->vma) {
-+ zap_page_range(
-+ info->vma, info->vma->vm_start,
-+ info->vma->vm_end - info->vma->vm_start, NULL);
-+ info->vma = NULL;
-+ }
-+
-+ if ( (info->status != CLEANSHUTDOWN) && (info->blkif != NULL) ) {
-+ if (info->blkif->xenblkd != NULL) {
-+ kthread_stop(info->blkif->xenblkd);
-+ info->blkif->xenblkd = NULL;
-+ }
-+ info->status = CLEANSHUTDOWN;
-+ }
-+ return 0;
-+}
-+
-+
-+/* Note on mmap:
-+ * We need to map pages to user space in a way that will allow the block
-+ * subsystem to set up direct IO to them. This couldn't be done before, because
-+ * there isn't really a sane way to translate a user virtual address down to a
-+ * physical address when the page belongs to another domain.
-+ *
-+ * My first approach was to map the page in to kernel memory, add an entry
-+ * for it in the physical frame list (using alloc_lomem_region as in blkback)
-+ * and then attempt to map that page up to user space. This is disallowed
-+ * by xen though, which realizes that we don't really own the machine frame
-+ * underlying the physical page.
-+ *
-+ * The new approach is to provide explicit support for this in xen linux.
-+ * The VMA now has a flag, VM_FOREIGN, to indicate that it contains pages
-+ * mapped from other vms. vma->vm_private_data is set up as a mapping
-+ * from pages to actual page structs. There is a new clause in get_user_pages
-+ * that does the right thing for this sort of mapping.
-+ */
-+static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
-+{
-+ int size;
-+ struct page **map;
-+ int i;
-+ tap_blkif_t *info = filp->private_data;
-+
-+ if (info == NULL) {
-+ WPRINTK("blktap: mmap, retrieving idx failed\n");
-+ return -ENOMEM;
-+ }
-+
-+ vma->vm_flags |= VM_RESERVED;
-+ vma->vm_ops = &blktap_vm_ops;
-+
-+ size = vma->vm_end - vma->vm_start;
-+ if (size != ((mmap_pages + RING_PAGES) << PAGE_SHIFT)) {
-+ WPRINTK("you _must_ map exactly %d pages!\n",
-+ mmap_pages + RING_PAGES);
-+ return -EAGAIN;
-+ }
-+
-+ size >>= PAGE_SHIFT;
-+ info->rings_vstart = vma->vm_start;
-+ info->user_vstart = info->rings_vstart + (RING_PAGES << PAGE_SHIFT);
-+
-+ /* Map the ring pages to the start of the region and reserve it. */
-+ if (remap_pfn_range(vma, vma->vm_start,
-+ __pa(info->ufe_ring.sring) >> PAGE_SHIFT,
-+ PAGE_SIZE, vma->vm_page_prot)) {
-+ WPRINTK("Mapping user ring failed!\n");
-+ goto fail;
-+ }
-+
-+ /* Mark this VM as containing foreign pages, and set up mappings. */
-+ map = kzalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
-+		      * sizeof(struct page *),
-+ GFP_KERNEL);
-+ if (map == NULL) {
-+ WPRINTK("Couldn't alloc VM_FOREIGN map.\n");
-+ goto fail;
-+ }
-+
-+ for (i = 0; i < ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
-+ map[i] = NULL;
-+
-+ vma->vm_private_data = map;
-+ vma->vm_flags |= VM_FOREIGN;
-+
-+ info->vma = vma;
-+ info->ring_ok = 1;
-+ return 0;
-+ fail:
-+ /* Clear any active mappings. */
-+ zap_page_range(vma, vma->vm_start,
-+ vma->vm_end - vma->vm_start, NULL);
-+
-+ return -ENOMEM;
-+}
-+
-+
-+static int blktap_ioctl(struct inode *inode, struct file *filp,
-+ unsigned int cmd, unsigned long arg)
-+{
-+ tap_blkif_t *info = filp->private_data;
-+
-+ switch(cmd) {
-+ case BLKTAP_IOCTL_KICK_FE:
-+ {
-+ /* There are fe messages to process. */
-+ return blktap_read_ufe_ring(info);
-+ }
-+ case BLKTAP_IOCTL_SETMODE:
-+ {
-+ if (info) {
-+ if (BLKTAP_MODE_VALID(arg)) {
-+ info->mode = arg;
-+ /* XXX: may need to flush rings here. */
-+ DPRINTK("blktap: set mode to %lx\n",
-+ arg);
-+ return 0;
-+ }
-+ }
-+ return 0;
-+ }
-+ case BLKTAP_IOCTL_PRINT_IDXS:
-+ {
-+ if (info) {
-+ printk("User Rings: \n-----------\n");
-+ printk("UF: rsp_cons: %2d, req_prod_prv: %2d "
-+ "| req_prod: %2d, rsp_prod: %2d\n",
-+ info->ufe_ring.rsp_cons,
-+ info->ufe_ring.req_prod_pvt,
-+ info->ufe_ring.sring->req_prod,
-+ info->ufe_ring.sring->rsp_prod);
-+ }
-+ return 0;
-+ }
-+ case BLKTAP_IOCTL_SENDPID:
-+ {
-+ if (info) {
-+ info->pid = (pid_t)arg;
-+ DPRINTK("blktap: pid received %d\n",
-+ info->pid);
-+ }
-+ return 0;
-+ }
-+ case BLKTAP_IOCTL_NEWINTF:
-+ {
-+ uint64_t val = (uint64_t)arg;
-+ domid_translate_t *tr = (domid_translate_t *)&val;
-+
-+ DPRINTK("NEWINTF Req for domid %d and bus id %d\n",
-+ tr->domid, tr->busid);
-+ info = get_next_free_dev();
-+ if (!info) {
-+ WPRINTK("Error initialising /dev/xen/blktap - "
-+ "No more devices\n");
-+ return -1;
-+ }
-+ info->trans.domid = tr->domid;
-+ info->trans.busid = tr->busid;
-+ return info->minor;
-+ }
-+ case BLKTAP_IOCTL_FREEINTF:
-+ {
-+ unsigned long dev = arg;
-+ unsigned long flags;
-+
-+ info = tapfds[dev];
-+
-+ if ((dev > MAX_TAP_DEV) || !info)
-+ return 0; /* should this be an error? */
-+
-+ spin_lock_irqsave(&pending_free_lock, flags);
-+ if (info->dev_pending)
-+ info->dev_pending = 0;
-+ spin_unlock_irqrestore(&pending_free_lock, flags);
-+
-+ return 0;
-+ }
-+ case BLKTAP_IOCTL_MINOR:
-+ {
-+ unsigned long dev = arg;
-+
-+ info = tapfds[dev];
-+
-+ if ((dev > MAX_TAP_DEV) || !info)
-+ return -EINVAL;
-+
-+ return info->minor;
-+ }
-+ case BLKTAP_IOCTL_MAJOR:
-+ return blktap_major;
-+
-+ case BLKTAP_QUERY_ALLOC_REQS:
-+ {
-+ WPRINTK("BLKTAP_QUERY_ALLOC_REQS ioctl: %d/%d\n",
-+ alloc_pending_reqs, blkif_reqs);
-+ return (alloc_pending_reqs/blkif_reqs) * 100;
-+ }
-+ }
-+ return -ENOIOCTLCMD;
-+}
-+
-+static unsigned int blktap_poll(struct file *filp, poll_table *wait)
-+{
-+ tap_blkif_t *info = filp->private_data;
-+
-+ /* do not work on the control device */
-+ if (!info)
-+ return 0;
-+
-+ poll_wait(filp, &info->wait, wait);
-+ if (info->ufe_ring.req_prod_pvt != info->ufe_ring.sring->req_prod) {
-+ RING_PUSH_REQUESTS(&info->ufe_ring);
-+ return POLLIN | POLLRDNORM;
-+ }
-+ return 0;
-+}
-+
-+void blktap_kick_user(int idx)
-+{
-+ tap_blkif_t *info;
-+
-+ info = tapfds[idx];
-+
-+ if ((idx < 0) || (idx > MAX_TAP_DEV) || !info)
-+ return;
-+
-+ wake_up_interruptible(&info->wait);
-+
-+ return;
-+}
-+
-+static int do_block_io_op(blkif_t *blkif);
-+static void dispatch_rw_block_io(blkif_t *blkif,
-+ blkif_request_t *req,
-+ pending_req_t *pending_req);
-+static void make_response(blkif_t *blkif, u64 id,
-+ unsigned short op, int st);
-+
-+/******************************************************************
-+ * misc small helpers
-+ */
-+static int req_increase(void)
-+{
-+ int i, j;
-+
-+ if (mmap_alloc >= MAX_PENDING_REQS || mmap_lock)
-+ return -EINVAL;
-+
-+ pending_reqs[mmap_alloc] = kzalloc(sizeof(pending_req_t)
-+ * blkif_reqs, GFP_KERNEL);
-+ foreign_pages[mmap_alloc] = alloc_empty_pages_and_pagevec(mmap_pages);
-+
-+ if (!pending_reqs[mmap_alloc] || !foreign_pages[mmap_alloc])
-+ goto out_of_memory;
-+
-+ DPRINTK("%s: reqs=%d, pages=%d\n",
-+ __FUNCTION__, blkif_reqs, mmap_pages);
-+
-+ for (i = 0; i < MAX_PENDING_REQS; i++) {
-+ list_add_tail(&pending_reqs[mmap_alloc][i].free_list,
-+ &pending_free);
-+ pending_reqs[mmap_alloc][i].mem_idx = mmap_alloc;
-+ for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
-+ BLKTAP_INVALIDATE_HANDLE(&pending_handle(mmap_alloc,
-+ i, j));
-+ }
-+
-+ mmap_alloc++;
-+ DPRINTK("# MMAPs increased to %d\n",mmap_alloc);
-+ return 0;
-+
-+ out_of_memory:
-+ free_empty_pages_and_pagevec(foreign_pages[mmap_alloc], mmap_pages);
-+ kfree(pending_reqs[mmap_alloc]);
-+ WPRINTK("%s: out of memory\n", __FUNCTION__);
-+ return -ENOMEM;
-+}
-+
-+static void mmap_req_del(int mmap)
-+{
-+ BUG_ON(!spin_is_locked(&pending_free_lock));
-+
-+ kfree(pending_reqs[mmap]);
-+ pending_reqs[mmap] = NULL;
-+
-+	free_empty_pages_and_pagevec(foreign_pages[mmap], mmap_pages);
-+ foreign_pages[mmap] = NULL;
-+
-+ mmap_lock = 0;
-+ DPRINTK("# MMAPs decreased to %d\n",mmap_alloc);
-+ mmap_alloc--;
-+}
-+
-+static pending_req_t* alloc_req(void)
-+{
-+ pending_req_t *req = NULL;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&pending_free_lock, flags);
-+
-+ if (!list_empty(&pending_free)) {
-+ req = list_entry(pending_free.next, pending_req_t, free_list);
-+ list_del(&req->free_list);
-+ }
-+
-+ if (req) {
-+ req->inuse = 1;
-+ alloc_pending_reqs++;
-+ }
-+ spin_unlock_irqrestore(&pending_free_lock, flags);
-+
-+ return req;
-+}
-+
-+static void free_req(pending_req_t *req)
-+{
-+ unsigned long flags;
-+ int was_empty;
-+
-+ spin_lock_irqsave(&pending_free_lock, flags);
-+
-+ alloc_pending_reqs--;
-+ req->inuse = 0;
-+ if (mmap_lock && (req->mem_idx == mmap_alloc-1)) {
-+ mmap_inuse--;
-+ if (mmap_inuse == 0) mmap_req_del(mmap_alloc-1);
-+ spin_unlock_irqrestore(&pending_free_lock, flags);
-+ return;
-+ }
-+ was_empty = list_empty(&pending_free);
-+ list_add(&req->free_list, &pending_free);
-+
-+ spin_unlock_irqrestore(&pending_free_lock, flags);
-+
-+ if (was_empty)
-+ wake_up(&pending_free_wq);
-+}
-+
-+static void fast_flush_area(pending_req_t *req, int k_idx, int u_idx,
-+ int tapidx)
-+{
-+ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
-+ unsigned int i, invcount = 0;
-+ struct grant_handle_pair *khandle;
-+ uint64_t ptep;
-+ int ret, mmap_idx;
-+ unsigned long kvaddr, uvaddr;
-+ tap_blkif_t *info;
-+
-+
-+ info = tapfds[tapidx];
-+
-+ if ((tapidx < 0) || (tapidx > MAX_TAP_DEV) || !info) {
-+ WPRINTK("fast_flush: Couldn't get info!\n");
-+ return;
-+ }
-+
-+ if (info->vma != NULL &&
-+ xen_feature(XENFEAT_auto_translated_physmap)) {
-+ down_write(&info->vma->vm_mm->mmap_sem);
-+ zap_page_range(info->vma,
-+ MMAP_VADDR(info->user_vstart, u_idx, 0),
-+ req->nr_pages << PAGE_SHIFT, NULL);
-+ up_write(&info->vma->vm_mm->mmap_sem);
-+ }
-+
-+ mmap_idx = req->mem_idx;
-+
-+ for (i = 0; i < req->nr_pages; i++) {
-+ kvaddr = idx_to_kaddr(mmap_idx, k_idx, i);
-+ uvaddr = MMAP_VADDR(info->user_vstart, u_idx, i);
-+
-+ khandle = &pending_handle(mmap_idx, k_idx, i);
-+
-+ if (khandle->kernel != INVALID_GRANT_HANDLE) {
-+ gnttab_set_unmap_op(&unmap[invcount],
-+ idx_to_kaddr(mmap_idx, k_idx, i),
-+ GNTMAP_host_map, khandle->kernel);
-+ invcount++;
-+ }
-+
-+ if (khandle->user != INVALID_GRANT_HANDLE) {
-+ BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
-+ if (create_lookup_pte_addr(
-+ info->vma->vm_mm,
-+ MMAP_VADDR(info->user_vstart, u_idx, i),
-+ &ptep) !=0) {
-+ WPRINTK("Couldn't get a pte addr!\n");
-+ return;
-+ }
-+
-+ gnttab_set_unmap_op(&unmap[invcount], ptep,
-+ GNTMAP_host_map
-+ | GNTMAP_application_map
-+ | GNTMAP_contains_pte,
-+ khandle->user);
-+ invcount++;
-+ }
-+
-+ BLKTAP_INVALIDATE_HANDLE(khandle);
-+ }
-+ ret = HYPERVISOR_grant_table_op(
-+ GNTTABOP_unmap_grant_ref, unmap, invcount);
-+ BUG_ON(ret);
-+
-+ if (info->vma != NULL && !xen_feature(XENFEAT_auto_translated_physmap))
-+ zap_page_range(info->vma,
-+ MMAP_VADDR(info->user_vstart, u_idx, 0),
-+ req->nr_pages << PAGE_SHIFT, NULL);
-+}
-+
-+/******************************************************************
-+ * SCHEDULER FUNCTIONS
-+ */
-+
-+static void print_stats(blkif_t *blkif)
-+{
-+ printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d\n",
-+ current->comm, blkif->st_oo_req,
-+ blkif->st_rd_req, blkif->st_wr_req);
-+ blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
-+ blkif->st_rd_req = 0;
-+ blkif->st_wr_req = 0;
-+ blkif->st_oo_req = 0;
-+}
-+
-+int tap_blkif_schedule(void *arg)
-+{
-+ blkif_t *blkif = arg;
-+
-+ blkif_get(blkif);
-+
-+ if (debug_lvl)
-+ printk(KERN_DEBUG "%s: started\n", current->comm);
-+
-+ while (!kthread_should_stop()) {
-+ if (try_to_freeze())
-+ continue;
-+
-+ wait_event_interruptible(
-+ blkif->wq,
-+ blkif->waiting_reqs || kthread_should_stop());
-+ wait_event_interruptible(
-+ pending_free_wq,
-+ !list_empty(&pending_free) || kthread_should_stop());
-+
-+ blkif->waiting_reqs = 0;
-+ smp_mb(); /* clear flag *before* checking for work */
-+
-+ if (do_block_io_op(blkif))
-+ blkif->waiting_reqs = 1;
-+
-+ if (log_stats && time_after(jiffies, blkif->st_print))
-+ print_stats(blkif);
-+ }
-+
-+ if (log_stats)
-+ print_stats(blkif);
-+ if (debug_lvl)
-+ printk(KERN_DEBUG "%s: exiting\n", current->comm);
-+
-+ blkif->xenblkd = NULL;
-+ blkif_put(blkif);
-+
-+ return 0;
-+}
-+
-+/******************************************************************
-+ * COMPLETION CALLBACK -- Called by user level ioctl()
-+ */
-+
-+static int blktap_read_ufe_ring(tap_blkif_t *info)
-+{
-+ /* This is called to read responses from the UFE ring. */
-+ RING_IDX i, j, rp;
-+ blkif_response_t *resp;
-+ blkif_t *blkif=NULL;
-+ int pending_idx, usr_idx, mmap_idx;
-+ pending_req_t *pending_req;
-+
-+ if (!info)
-+ return 0;
-+
-+ /* We currently only forward packets in INTERCEPT_FE mode. */
-+ if (!(info->mode & BLKTAP_MODE_INTERCEPT_FE))
-+ return 0;
-+
-+ /* for each outstanding message on the UFEring */
-+ rp = info->ufe_ring.sring->rsp_prod;
-+ rmb();
-+
-+ for (i = info->ufe_ring.rsp_cons; i != rp; i++) {
-+ blkif_response_t res;
-+ resp = RING_GET_RESPONSE(&info->ufe_ring, i);
-+ memcpy(&res, resp, sizeof(res));
-+ mb(); /* rsp_cons read by RING_FULL() in do_block_io_op(). */
-+ ++info->ufe_ring.rsp_cons;
-+
-+ /*retrieve [usr_idx] to [mmap_idx,pending_idx] mapping*/
-+ usr_idx = (int)res.id;
-+ pending_idx = MASK_PEND_IDX(ID_TO_IDX(info->idx_map[usr_idx]));
-+ mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]);
-+
-+ if ( (mmap_idx >= mmap_alloc) ||
-+ (ID_TO_IDX(info->idx_map[usr_idx]) >= MAX_PENDING_REQS) )
-+ WPRINTK("Incorrect req map"
-+ "[%d], internal map [%d,%d (%d)]\n",
-+ usr_idx, mmap_idx,
-+ ID_TO_IDX(info->idx_map[usr_idx]),
-+ MASK_PEND_IDX(
-+ ID_TO_IDX(info->idx_map[usr_idx])));
-+
-+ pending_req = &pending_reqs[mmap_idx][pending_idx];
-+ blkif = pending_req->blkif;
-+
-+ for (j = 0; j < pending_req->nr_pages; j++) {
-+
-+ unsigned long kvaddr, uvaddr;
-+ struct page **map = info->vma->vm_private_data;
-+ struct page *pg;
-+ int offset;
-+
-+ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, j);
-+ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, j);
-+
-+ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
-+ ClearPageReserved(pg);
-+ offset = (uvaddr - info->vma->vm_start)
-+ >> PAGE_SHIFT;
-+ map[offset] = NULL;
-+ }
-+ fast_flush_area(pending_req, pending_idx, usr_idx, info->minor);
-+ info->idx_map[usr_idx] = INVALID_REQ;
-+ make_response(blkif, pending_req->id, res.operation,
-+ res.status);
-+ blkif_put(pending_req->blkif);
-+ free_req(pending_req);
-+ }
-+
-+ return 0;
-+}
-+
-+
-+/******************************************************************************
-+ * NOTIFICATION FROM GUEST OS.
-+ */
-+
-+static void blkif_notify_work(blkif_t *blkif)
-+{
-+ blkif->waiting_reqs = 1;
-+ wake_up(&blkif->wq);
-+}
-+
-+irqreturn_t tap_blkif_be_int(int irq, void *dev_id)
-+{
-+ blkif_notify_work(dev_id);
-+ return IRQ_HANDLED;
-+}
-+
-+
-+
-+/******************************************************************
-+ * DOWNWARD CALLS -- These interface with the block-device layer proper.
-+ */
-+static int print_dbug = 1;
-+static int do_block_io_op(blkif_t *blkif)
-+{
-+ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
-+ blkif_request_t req;
-+ pending_req_t *pending_req;
-+ RING_IDX rc, rp;
-+ int more_to_do = 0;
-+ tap_blkif_t *info;
-+
-+ rc = blk_rings->common.req_cons;
-+ rp = blk_rings->common.sring->req_prod;
-+ rmb(); /* Ensure we see queued requests up to 'rp'. */
-+
-+ /*Check blkif has corresponding UE ring*/
-+ if (blkif->dev_num < 0) {
-+ /*oops*/
-+ if (print_dbug) {
-+ WPRINTK("Corresponding UE "
-+ "ring does not exist!\n");
-+ print_dbug = 0; /*We only print this message once*/
-+ }
-+ return 0;
-+ }
-+
-+ info = tapfds[blkif->dev_num];
-+
-+ if (blkif->dev_num > MAX_TAP_DEV || !info || !info->dev_inuse) {
-+ if (print_dbug) {
-+ WPRINTK("Can't get UE info!\n");
-+ print_dbug = 0;
-+ }
-+ return 0;
-+ }
-+
-+ while (rc != rp) {
-+
-+ if (RING_FULL(&info->ufe_ring)) {
-+ WPRINTK("RING_FULL! More to do\n");
-+ more_to_do = 1;
-+ break;
-+ }
-+
-+ if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) {
-+ WPRINTK("RING_REQUEST_CONS_OVERFLOW!"
-+ " More to do\n");
-+ more_to_do = 1;
-+ break;
-+ }
-+
-+ pending_req = alloc_req();
-+ if (NULL == pending_req) {
-+ blkif->st_oo_req++;
-+ more_to_do = 1;
-+ break;
-+ }
-+
-+ switch (blkif->blk_protocol) {
-+ case BLKIF_PROTOCOL_NATIVE:
-+ memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
-+ sizeof(req));
-+ break;
-+ case BLKIF_PROTOCOL_X86_32:
-+ blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
-+ break;
-+ case BLKIF_PROTOCOL_X86_64:
-+ blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
-+ break;
-+ default:
-+ BUG();
-+ }
-+ blk_rings->common.req_cons = ++rc; /* before make_response() */
-+
-+ switch (req.operation) {
-+ case BLKIF_OP_READ:
-+ blkif->st_rd_req++;
-+ dispatch_rw_block_io(blkif, &req, pending_req);
-+ break;
-+
-+ case BLKIF_OP_WRITE:
-+ blkif->st_wr_req++;
-+ dispatch_rw_block_io(blkif, &req, pending_req);
-+ break;
-+
-+ default:
-+ WPRINTK("unknown operation [%d]\n",
-+ req.operation);
-+ make_response(blkif, req.id, req.operation,
-+ BLKIF_RSP_ERROR);
-+ free_req(pending_req);
-+ break;
-+ }
-+ }
-+
-+ blktap_kick_user(blkif->dev_num);
-+
-+ return more_to_do;
-+}
-+
-+static void dispatch_rw_block_io(blkif_t *blkif,
-+ blkif_request_t *req,
-+ pending_req_t *pending_req)
-+{
-+ extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
-+ int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
-+ struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
-+ unsigned int nseg;
-+ int ret, i, nr_sects = 0;
-+ tap_blkif_t *info;
-+ uint64_t sector;
-+ blkif_request_t *target;
-+ int pending_idx = RTN_PEND_IDX(pending_req,pending_req->mem_idx);
-+ int usr_idx;
-+ uint16_t mmap_idx = pending_req->mem_idx;
-+
-+ if (blkif->dev_num < 0 || blkif->dev_num > MAX_TAP_DEV)
-+ goto fail_response;
-+
-+ info = tapfds[blkif->dev_num];
-+ if (info == NULL)
-+ goto fail_response;
-+
-+ /* Check we have space on user ring - should never fail. */
-+ usr_idx = GET_NEXT_REQ(info->idx_map);
-+ if (usr_idx == INVALID_REQ) {
-+ BUG();
-+ goto fail_response;
-+ }
-+
-+ /* Check that number of segments is sane. */
-+ nseg = req->nr_segments;
-+ if ( unlikely(nseg == 0) ||
-+ unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) ) {
-+ WPRINTK("Bad number of segments in request (%d)\n", nseg);
-+ goto fail_response;
-+ }
-+
-+ /* Make sure userspace is ready. */
-+ if (!info->ring_ok) {
-+ WPRINTK("blktap: ring not ready for requests!\n");
-+ goto fail_response;
-+ }
-+
-+ if (RING_FULL(&info->ufe_ring)) {
-+ WPRINTK("blktap: fe_ring is full, can't add "
-+ "IO Request will be dropped. %d %d\n",
-+ RING_SIZE(&info->ufe_ring),
-+ RING_SIZE(&blkif->blk_rings.common));
-+ goto fail_response;
-+ }
-+
-+ pending_req->blkif = blkif;
-+ pending_req->id = req->id;
-+ pending_req->operation = operation;
-+ pending_req->status = BLKIF_RSP_OKAY;
-+ pending_req->nr_pages = nseg;
-+ op = 0;
-+ for (i = 0; i < nseg; i++) {
-+ unsigned long uvaddr;
-+ unsigned long kvaddr;
-+ uint64_t ptep;
-+ uint32_t flags;
-+
-+ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
-+ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
-+
-+ sector = req->sector_number + ((PAGE_SIZE / 512) * i);
-+ if( (blkif->sectors > 0) && (sector >= blkif->sectors) ) {
-+ WPRINTK("BLKTAP: Sector request greater"
-+ "than size\n");
-+ WPRINTK("BLKTAP: %s request sector"
-+ "[%llu,%llu], Total [%llu]\n",
-+ (req->operation ==
-+ BLKIF_OP_WRITE ? "WRITE" : "READ"),
-+ (long long unsigned) sector,
-+ (long long unsigned) sector>>9,
-+ (long long unsigned) blkif->sectors);
-+ }
-+
-+ flags = GNTMAP_host_map;
-+ if (operation == WRITE)
-+ flags |= GNTMAP_readonly;
-+ gnttab_set_map_op(&map[op], kvaddr, flags,
-+ req->seg[i].gref, blkif->domid);
-+ op++;
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ /* Now map it to user. */
-+ ret = create_lookup_pte_addr(info->vma->vm_mm,
-+ uvaddr, &ptep);
-+ if (ret) {
-+ WPRINTK("Couldn't get a pte addr!\n");
-+ goto fail_flush;
-+ }
-+
-+ flags = GNTMAP_host_map | GNTMAP_application_map
-+ | GNTMAP_contains_pte;
-+ if (operation == WRITE)
-+ flags |= GNTMAP_readonly;
-+ gnttab_set_map_op(&map[op], ptep, flags,
-+ req->seg[i].gref, blkif->domid);
-+ op++;
-+ }
-+
-+ nr_sects += (req->seg[i].last_sect -
-+ req->seg[i].first_sect + 1);
-+ }
-+
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, op);
-+ BUG_ON(ret);
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ for (i = 0; i < (nseg*2); i+=2) {
-+ unsigned long uvaddr;
-+ unsigned long kvaddr;
-+ unsigned long offset;
-+ struct page *pg;
-+
-+ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i/2);
-+ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i/2);
-+
-+ if (unlikely(map[i].status != 0)) {
-+ WPRINTK("invalid kernel buffer -- "
-+ "could not remap it\n");
-+ ret |= 1;
-+ map[i].handle = INVALID_GRANT_HANDLE;
-+ }
-+
-+ if (unlikely(map[i+1].status != 0)) {
-+ WPRINTK("invalid user buffer -- "
-+ "could not remap it\n");
-+ ret |= 1;
-+ map[i+1].handle = INVALID_GRANT_HANDLE;
-+ }
-+
-+ pending_handle(mmap_idx, pending_idx, i/2).kernel
-+ = map[i].handle;
-+ pending_handle(mmap_idx, pending_idx, i/2).user
-+ = map[i+1].handle;
-+
-+ if (ret)
-+ continue;
-+
-+ set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
-+ FOREIGN_FRAME(map[i].dev_bus_addr
-+ >> PAGE_SHIFT));
-+ offset = (uvaddr - info->vma->vm_start) >> PAGE_SHIFT;
-+ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
-+ ((struct page **)info->vma->vm_private_data)[offset] =
-+ pg;
-+ }
-+ } else {
-+ for (i = 0; i < nseg; i++) {
-+ unsigned long uvaddr;
-+ unsigned long kvaddr;
-+ unsigned long offset;
-+ struct page *pg;
-+
-+ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
-+ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
-+
-+ if (unlikely(map[i].status != 0)) {
-+ WPRINTK("invalid kernel buffer -- "
-+ "could not remap it\n");
-+ ret |= 1;
-+ map[i].handle = INVALID_GRANT_HANDLE;
-+ }
-+
-+ pending_handle(mmap_idx, pending_idx, i).kernel
-+ = map[i].handle;
-+
-+ if (ret)
-+ continue;
-+
-+ offset = (uvaddr - info->vma->vm_start) >> PAGE_SHIFT;
-+ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
-+ ((struct page **)info->vma->vm_private_data)[offset] =
-+ pg;
-+ }
-+ }
-+
-+ if (ret)
-+ goto fail_flush;
-+
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ down_write(&info->vma->vm_mm->mmap_sem);
-+ /* Mark mapped pages as reserved: */
-+ for (i = 0; i < req->nr_segments; i++) {
-+ unsigned long kvaddr;
-+ struct page *pg;
-+
-+ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
-+ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
-+ SetPageReserved(pg);
-+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
-+ ret = vm_insert_page(info->vma,
-+ MMAP_VADDR(info->user_vstart,
-+ usr_idx, i), pg);
-+ if (ret) {
-+ up_write(&info->vma->vm_mm->mmap_sem);
-+ goto fail_flush;
-+ }
-+ }
-+ }
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ up_write(&info->vma->vm_mm->mmap_sem);
-+
-+ /*record [mmap_idx,pending_idx] to [usr_idx] mapping*/
-+ info->idx_map[usr_idx] = MAKE_ID(mmap_idx, pending_idx);
-+
-+ blkif_get(blkif);
-+ /* Finally, write the request message to the user ring. */
-+ target = RING_GET_REQUEST(&info->ufe_ring,
-+ info->ufe_ring.req_prod_pvt);
-+ memcpy(target, req, sizeof(*req));
-+ target->id = usr_idx;
-+ wmb(); /* blktap_poll() reads req_prod_pvt asynchronously */
-+ info->ufe_ring.req_prod_pvt++;
-+
-+ if (operation == READ)
-+ blkif->st_rd_sect += nr_sects;
-+ else if (operation == WRITE)
-+ blkif->st_wr_sect += nr_sects;
-+
-+ return;
-+
-+ fail_flush:
-+ WPRINTK("Reached Fail_flush\n");
-+ fast_flush_area(pending_req, pending_idx, usr_idx, blkif->dev_num);
-+ fail_response:
-+ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
-+ free_req(pending_req);
-+}
-+
-+
-+
-+/******************************************************************
-+ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
-+ */
-+
-+
-+static void make_response(blkif_t *blkif, u64 id,
-+ unsigned short op, int st)
-+{
-+ blkif_response_t resp;
-+ unsigned long flags;
-+ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
-+ int more_to_do = 0;
-+ int notify;
-+
-+ resp.id = id;
-+ resp.operation = op;
-+ resp.status = st;
-+
-+ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-+ /* Place on the response ring for the relevant domain. */
-+ switch (blkif->blk_protocol) {
-+ case BLKIF_PROTOCOL_NATIVE:
-+ memcpy(RING_GET_RESPONSE(&blk_rings->native,
-+ blk_rings->native.rsp_prod_pvt),
-+ &resp, sizeof(resp));
-+ break;
-+ case BLKIF_PROTOCOL_X86_32:
-+ memcpy(RING_GET_RESPONSE(&blk_rings->x86_32,
-+ blk_rings->x86_32.rsp_prod_pvt),
-+ &resp, sizeof(resp));
-+ break;
-+ case BLKIF_PROTOCOL_X86_64:
-+ memcpy(RING_GET_RESPONSE(&blk_rings->x86_64,
-+ blk_rings->x86_64.rsp_prod_pvt),
-+ &resp, sizeof(resp));
-+ break;
-+ default:
-+ BUG();
-+ }
-+ blk_rings->common.rsp_prod_pvt++;
-+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
-+
-+ if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
-+ /*
-+ * Tail check for pending requests. Allows frontend to avoid
-+ * notifications if requests are already in flight (lower
-+ * overheads and promotes batching).
-+ */
-+ RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
-+ } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
-+ more_to_do = 1;
-+ }
-+
-+ spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-+ if (more_to_do)
-+ blkif_notify_work(blkif);
-+ if (notify)
-+ notify_remote_via_irq(blkif->irq);
-+}
-+
-+static int __init blkif_init(void)
-+{
-+ int i, ret;
-+ struct class *class;
-+
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ INIT_LIST_HEAD(&pending_free);
-+ for(i = 0; i < 2; i++) {
-+ ret = req_increase();
-+ if (ret)
-+ break;
-+ }
-+ if (i == 0)
-+ return ret;
-+
-+ tap_blkif_interface_init();
-+
-+ alloc_pending_reqs = 0;
-+
-+ tap_blkif_xenbus_init();
-+
-+ /* Dynamically allocate a major for this device */
-+ ret = register_chrdev(0, "blktap", &blktap_fops);
-+
-+ if (ret < 0) {
-+ WPRINTK("Couldn't register /dev/xen/blktap\n");
-+ return -ENOMEM;
-+ }
-+
-+ blktap_major = ret;
-+
-+ /* tapfds[0] is always NULL */
-+ blktap_next_minor++;
-+
-+ DPRINTK("Created misc_dev [/dev/xen/blktap%d]\n",i);
-+
-+ /* Make sure the xen class exists */
-+ if ((class = get_xen_class()) != NULL) {
-+ /*
-+ * This will allow udev to create the blktap ctrl device.
-+ * We only want to create blktap0 first. We don't want
-+ * to flood the sysfs system with needless blktap devices.
-+		 * We only create the device when a request for a new device is
-+ * made.
-+ */
-+ class_device_create(class, NULL,
-+ MKDEV(blktap_major, 0), NULL,
-+ "blktap0");
-+ } else {
-+ /* this is bad, but not fatal */
-+ WPRINTK("blktap: sysfs xen_class not created\n");
-+ }
-+
-+ DPRINTK("Blktap device successfully created\n");
-+
-+ return 0;
-+}
-+
-+module_init(blkif_init);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blktap/blocktap.c ubuntu-gutsy-xen/drivers/xen/blktap/blocktap.c
---- ubuntu-gutsy/drivers/xen/blktap/blocktap.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blktap/blocktap.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1 @@
-+#include "blktap.c"
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blktap/common.h ubuntu-gutsy-xen/drivers/xen/blktap/common.h
---- ubuntu-gutsy/drivers/xen/blktap/common.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blktap/common.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,121 @@
-+/*
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __BLKIF__BACKEND__COMMON_H__
-+#define __BLKIF__BACKEND__COMMON_H__
-+
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <linux/blkdev.h>
-+#include <linux/vmalloc.h>
-+#include <asm/io.h>
-+#include <asm/setup.h>
-+#include <asm/pgalloc.h>
-+#include <xen/evtchn.h>
-+#include <asm/hypervisor.h>
-+#include <xen/blkif.h>
-+#include <xen/gnttab.h>
-+#include <xen/driver_util.h>
-+
-+#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
-+ __FILE__ , __LINE__ , ## _a )
-+
-+#define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
-+
-+struct backend_info;
-+
-+typedef struct blkif_st {
-+ /* Unique identifier for this interface. */
-+ domid_t domid;
-+ unsigned int handle;
-+ /* Physical parameters of the comms window. */
-+ unsigned int irq;
-+ /* Comms information. */
-+ enum blkif_protocol blk_protocol;
-+ blkif_back_rings_t blk_rings;
-+ struct vm_struct *blk_ring_area;
-+ /* Back pointer to the backend_info. */
-+ struct backend_info *be;
-+ /* Private fields. */
-+ spinlock_t blk_ring_lock;
-+ atomic_t refcnt;
-+
-+ wait_queue_head_t wq;
-+ struct task_struct *xenblkd;
-+ unsigned int waiting_reqs;
-+ request_queue_t *plug;
-+
-+ /* statistics */
-+ unsigned long st_print;
-+ int st_rd_req;
-+ int st_wr_req;
-+ int st_oo_req;
-+ int st_rd_sect;
-+ int st_wr_sect;
-+
-+ wait_queue_head_t waiting_to_free;
-+
-+ grant_handle_t shmem_handle;
-+ grant_ref_t shmem_ref;
-+
-+ int dev_num;
-+ uint64_t sectors;
-+} blkif_t;
-+
-+blkif_t *tap_alloc_blkif(domid_t domid);
-+void tap_blkif_free(blkif_t *blkif);
-+int tap_blkif_map(blkif_t *blkif, unsigned long shared_page,
-+ unsigned int evtchn);
-+void tap_blkif_unmap(blkif_t *blkif);
-+
-+#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define blkif_put(_b) \
-+ do { \
-+ if (atomic_dec_and_test(&(_b)->refcnt)) \
-+ wake_up(&(_b)->waiting_to_free);\
-+ } while (0)
-+
-+
-+struct phys_req {
-+ unsigned short dev;
-+ unsigned short nr_sects;
-+ struct block_device *bdev;
-+ blkif_sector_t sector_number;
-+};
-+
-+void tap_blkif_interface_init(void);
-+
-+void tap_blkif_xenbus_init(void);
-+
-+irqreturn_t tap_blkif_be_int(int irq, void *dev_id);
-+int tap_blkif_schedule(void *arg);
-+
-+int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif);
-+void signal_tapdisk(int idx);
-+
-+#endif /* __BLKIF__BACKEND__COMMON_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blktap/interface.c ubuntu-gutsy-xen/drivers/xen/blktap/interface.c
---- ubuntu-gutsy/drivers/xen/blktap/interface.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blktap/interface.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,174 @@
-+/******************************************************************************
-+ * drivers/xen/blktap/interface.c
-+ *
-+ * Block-device interface management.
-+ *
-+ * Copyright (c) 2004, Keir Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+
-+ */
-+
-+#include "common.h"
-+#include <xen/evtchn.h>
-+
-+static struct kmem_cache *blkif_cachep;
-+
-+blkif_t *tap_alloc_blkif(domid_t domid)
-+{
-+ blkif_t *blkif;
-+
-+ blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
-+ if (!blkif)
-+ return ERR_PTR(-ENOMEM);
-+
-+ memset(blkif, 0, sizeof(*blkif));
-+ blkif->domid = domid;
-+ spin_lock_init(&blkif->blk_ring_lock);
-+ atomic_set(&blkif->refcnt, 1);
-+ init_waitqueue_head(&blkif->wq);
-+ blkif->st_print = jiffies;
-+ init_waitqueue_head(&blkif->waiting_to_free);
-+
-+ return blkif;
-+}
-+
-+static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
-+{
-+ struct gnttab_map_grant_ref op;
-+
-+ gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
-+ GNTMAP_host_map, shared_page, blkif->domid);
-+
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-+ BUG();
-+
-+ if (op.status) {
-+ DPRINTK(" Grant table operation failure !\n");
-+ return op.status;
-+ }
-+
-+ blkif->shmem_ref = shared_page;
-+ blkif->shmem_handle = op.handle;
-+
-+ return 0;
-+}
-+
-+static void unmap_frontend_page(blkif_t *blkif)
-+{
-+ struct gnttab_unmap_grant_ref op;
-+
-+ gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
-+ GNTMAP_host_map, blkif->shmem_handle);
-+
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-+ BUG();
-+}
-+
-+int tap_blkif_map(blkif_t *blkif, unsigned long shared_page,
-+ unsigned int evtchn)
-+{
-+ int err;
-+
-+ /* Already connected through? */
-+ if (blkif->irq)
-+ return 0;
-+
-+ if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
-+ return -ENOMEM;
-+
-+ err = map_frontend_page(blkif, shared_page);
-+ if (err) {
-+ free_vm_area(blkif->blk_ring_area);
-+ return err;
-+ }
-+
-+ switch (blkif->blk_protocol) {
-+ case BLKIF_PROTOCOL_NATIVE:
-+ {
-+ blkif_sring_t *sring;
-+ sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-+ BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
-+ break;
-+ }
-+ case BLKIF_PROTOCOL_X86_32:
-+ {
-+ blkif_x86_32_sring_t *sring_x86_32;
-+ sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
-+ BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
-+ break;
-+ }
-+ case BLKIF_PROTOCOL_X86_64:
-+ {
-+ blkif_x86_64_sring_t *sring_x86_64;
-+ sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
-+ BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
-+ break;
-+ }
-+ default:
-+ BUG();
-+ }
-+
-+ err = bind_interdomain_evtchn_to_irqhandler(
-+ blkif->domid, evtchn, tap_blkif_be_int,
-+ 0, "blkif-backend", blkif);
-+ if (err < 0) {
-+ unmap_frontend_page(blkif);
-+ free_vm_area(blkif->blk_ring_area);
-+ blkif->blk_rings.common.sring = NULL;
-+ return err;
-+ }
-+ blkif->irq = err;
-+
-+ return 0;
-+}
-+
-+void tap_blkif_unmap(blkif_t *blkif)
-+{
-+ if (blkif->irq) {
-+ unbind_from_irqhandler(blkif->irq, blkif);
-+ blkif->irq = 0;
-+ }
-+ if (blkif->blk_rings.common.sring) {
-+ unmap_frontend_page(blkif);
-+ free_vm_area(blkif->blk_ring_area);
-+ blkif->blk_rings.common.sring = NULL;
-+ }
-+}
-+
-+void tap_blkif_free(blkif_t *blkif)
-+{
-+ atomic_dec(&blkif->refcnt);
-+ wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
-+
-+ tap_blkif_unmap(blkif);
-+ kmem_cache_free(blkif_cachep, blkif);
-+}
-+
-+void __init tap_blkif_interface_init(void)
-+{
-+ blkif_cachep = kmem_cache_create("blktapif_cache", sizeof(blkif_t),
-+ 0, 0, NULL, NULL);
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blktap/Makefile ubuntu-gutsy-xen/drivers/xen/blktap/Makefile
---- ubuntu-gutsy/drivers/xen/blktap/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blktap/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,5 @@
-+LINUXINCLUDE += -I../xen/include/public/io
-+
-+obj-$(CONFIG_XEN_BLKDEV_TAP) := blktap.o
-+
-+blktap-y := xenbus.o interface.o blocktap.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/blktap/xenbus.c ubuntu-gutsy-xen/drivers/xen/blktap/xenbus.c
---- ubuntu-gutsy/drivers/xen/blktap/xenbus.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/blktap/xenbus.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,481 @@
-+/* drivers/xen/blktap/xenbus.c
-+ *
-+ * Xenbus code for blktap
-+ *
-+ * Copyright (c) 2004-2005, Andrew Warfield and Julian Chesterfield
-+ *
-+ * Based on the blkback xenbus code:
-+ *
-+ * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
-+ * Copyright (C) 2005 XenSource Ltd
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <linux/kthread.h>
-+#include <xen/xenbus.h>
-+#include "common.h"
-+#include "../core/domctl.h"
-+
-+
-+struct backend_info
-+{
-+ struct xenbus_device *dev;
-+ blkif_t *blkif;
-+ struct xenbus_watch backend_watch;
-+ int xenbus_id;
-+ int group_added;
-+};
-+
-+
-+static void connect(struct backend_info *);
-+static int connect_ring(struct backend_info *);
-+static int blktap_remove(struct xenbus_device *dev);
-+static int blktap_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id);
-+static void tap_backend_changed(struct xenbus_watch *, const char **,
-+ unsigned int);
-+static void tap_frontend_changed(struct xenbus_device *dev,
-+ enum xenbus_state frontend_state);
-+
-+static int strsep_len(const char *str, char c, unsigned int len)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; str[i]; i++)
-+ if (str[i] == c) {
-+ if (len == 0)
-+ return i;
-+ len--;
-+ }
-+ return (len == 0) ? i : -ERANGE;
-+}
-+
-+static long get_id(const char *str)
-+{
-+ int len,end;
-+ const char *ptr;
-+ char *tptr, num[10];
-+
-+ len = strsep_len(str, '/', 2);
-+ end = strlen(str);
-+ if ( (len < 0) || (end < 0) ) return -1;
-+
-+ ptr = str + len + 1;
-+ strncpy(num,ptr,end - len);
-+ tptr = num + (end - (len + 1));
-+ *tptr = '\0';
-+ DPRINTK("Get_id called for %s (%s)\n",str,num);
-+
-+ return simple_strtol(num, NULL, 10);
-+}
-+
-+static int blktap_name(blkif_t *blkif, char *buf)
-+{
-+ char *devpath, *devname;
-+ struct xenbus_device *dev = blkif->be->dev;
-+
-+ devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
-+ if (IS_ERR(devpath))
-+ return PTR_ERR(devpath);
-+
-+ if ((devname = strstr(devpath, "/dev/")) != NULL)
-+ devname += strlen("/dev/");
-+ else
-+ devname = devpath;
-+
-+ snprintf(buf, TASK_COMM_LEN, "blktap.%d.%s", blkif->domid, devname);
-+ kfree(devpath);
-+
-+ return 0;
-+}
-+
-+/****************************************************************
-+ * sysfs interface for VBD I/O requests
-+ */
-+
-+#define VBD_SHOW(name, format, args...) \
-+ static ssize_t show_##name(struct device *_dev, \
-+ struct device_attribute *attr, \
-+ char *buf) \
-+ { \
-+ struct xenbus_device *dev = to_xenbus_device(_dev); \
-+ struct backend_info *be = dev->dev.driver_data; \
-+ \
-+ return sprintf(buf, format, ##args); \
-+ } \
-+ DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
-+
-+VBD_SHOW(tap_oo_req, "%d\n", be->blkif->st_oo_req);
-+VBD_SHOW(tap_rd_req, "%d\n", be->blkif->st_rd_req);
-+VBD_SHOW(tap_wr_req, "%d\n", be->blkif->st_wr_req);
-+VBD_SHOW(tap_rd_sect, "%d\n", be->blkif->st_rd_sect);
-+VBD_SHOW(tap_wr_sect, "%d\n", be->blkif->st_wr_sect);
-+
-+static struct attribute *tapstat_attrs[] = {
-+ &dev_attr_tap_oo_req.attr,
-+ &dev_attr_tap_rd_req.attr,
-+ &dev_attr_tap_wr_req.attr,
-+ &dev_attr_tap_rd_sect.attr,
-+ &dev_attr_tap_wr_sect.attr,
-+ NULL
-+};
-+
-+static struct attribute_group tapstat_group = {
-+ .name = "statistics",
-+ .attrs = tapstat_attrs,
-+};
-+
-+int xentap_sysfs_addif(struct xenbus_device *dev)
-+{
-+ int err;
-+ struct backend_info *be = dev->dev.driver_data;
-+ err = sysfs_create_group(&dev->dev.kobj, &tapstat_group);
-+ if (!err)
-+ be->group_added = 1;
-+ return err;
-+}
-+
-+void xentap_sysfs_delif(struct xenbus_device *dev)
-+{
-+ sysfs_remove_group(&dev->dev.kobj, &tapstat_group);
-+}
-+
-+static int blktap_remove(struct xenbus_device *dev)
-+{
-+ struct backend_info *be = dev->dev.driver_data;
-+
-+ if (be->group_added)
-+ xentap_sysfs_delif(be->dev);
-+ if (be->backend_watch.node) {
-+ unregister_xenbus_watch(&be->backend_watch);
-+ kfree(be->backend_watch.node);
-+ be->backend_watch.node = NULL;
-+ }
-+ if (be->blkif) {
-+ if (be->blkif->xenblkd)
-+ kthread_stop(be->blkif->xenblkd);
-+ signal_tapdisk(be->blkif->dev_num);
-+ tap_blkif_free(be->blkif);
-+ be->blkif = NULL;
-+ }
-+ kfree(be);
-+ dev->dev.driver_data = NULL;
-+ return 0;
-+}
-+
-+static void tap_update_blkif_status(blkif_t *blkif)
-+{
-+ int err;
-+ char name[TASK_COMM_LEN];
-+
-+ /* Not ready to connect? */
-+ if(!blkif->irq || !blkif->sectors) {
-+ return;
-+ }
-+
-+ /* Already connected? */
-+ if (blkif->be->dev->state == XenbusStateConnected)
-+ return;
-+
-+ /* Attempt to connect: exit if we fail to. */
-+ connect(blkif->be);
-+ if (blkif->be->dev->state != XenbusStateConnected)
-+ return;
-+
-+ err = blktap_name(blkif, name);
-+ if (err) {
-+ xenbus_dev_error(blkif->be->dev, err, "get blktap dev name");
-+ return;
-+ }
-+
-+ err = xentap_sysfs_addif(blkif->be->dev);
-+ if (err) {
-+ xenbus_dev_fatal(blkif->be->dev, err,
-+ "creating sysfs entries");
-+ return;
-+ }
-+
-+ blkif->xenblkd = kthread_run(tap_blkif_schedule, blkif, name);
-+ if (IS_ERR(blkif->xenblkd)) {
-+ err = PTR_ERR(blkif->xenblkd);
-+ blkif->xenblkd = NULL;
-+ xenbus_dev_fatal(blkif->be->dev, err, "start xenblkd");
-+ WPRINTK("Error starting thread\n");
-+ }
-+}
-+
-+/**
-+ * Entry point to this code when a new device is created. Allocate
-+ * the basic structures, and watch the store waiting for the
-+ * user-space program to tell us the physical device info. Switch to
-+ * InitWait.
-+ */
-+static int blktap_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err;
-+ struct backend_info *be = kzalloc(sizeof(struct backend_info),
-+ GFP_KERNEL);
-+ if (!be) {
-+ xenbus_dev_fatal(dev, -ENOMEM,
-+ "allocating backend structure");
-+ return -ENOMEM;
-+ }
-+
-+ be->dev = dev;
-+ dev->dev.driver_data = be;
-+ be->xenbus_id = get_id(dev->nodename);
-+
-+ be->blkif = tap_alloc_blkif(dev->otherend_id);
-+ if (IS_ERR(be->blkif)) {
-+ err = PTR_ERR(be->blkif);
-+ be->blkif = NULL;
-+ xenbus_dev_fatal(dev, err, "creating block interface");
-+ goto fail;
-+ }
-+
-+ /* setup back pointer */
-+ be->blkif->be = be;
-+ be->blkif->sectors = 0;
-+
-+ /* set a watch on disk info, waiting for userspace to update details*/
-+ err = xenbus_watch_path2(dev, dev->nodename, "info",
-+ &be->backend_watch, tap_backend_changed);
-+ if (err)
-+ goto fail;
-+
-+ err = xenbus_switch_state(dev, XenbusStateInitWait);
-+ if (err)
-+ goto fail;
-+ return 0;
-+
-+fail:
-+ DPRINTK("blktap probe failed\n");
-+ blktap_remove(dev);
-+ return err;
-+}
-+
-+
-+/**
-+ * Callback received when the user space code has placed the device
-+ * information in xenstore.
-+ */
-+static void tap_backend_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ int err;
-+ unsigned long info;
-+ struct backend_info *be
-+ = container_of(watch, struct backend_info, backend_watch);
-+ struct xenbus_device *dev = be->dev;
-+
-+ /**
-+ * Check to see whether userspace code has opened the image
-+ * and written sector
-+ * and disk info to xenstore
-+ */
-+ err = xenbus_gather(XBT_NIL, dev->nodename, "info", "%lu", &info,
-+ NULL);
-+ if (XENBUS_EXIST_ERR(err))
-+ return;
-+ if (err) {
-+ xenbus_dev_error(dev, err, "getting info");
-+ return;
-+ }
-+
-+ DPRINTK("Userspace update on disk info, %lu\n",info);
-+
-+ err = xenbus_gather(XBT_NIL, dev->nodename, "sectors", "%llu",
-+ &be->blkif->sectors, NULL);
-+
-+ /* Associate tap dev with domid*/
-+ be->blkif->dev_num = dom_to_devid(be->blkif->domid, be->xenbus_id,
-+ be->blkif);
-+ DPRINTK("Thread started for domid [%d], connecting disk\n",
-+ be->blkif->dev_num);
-+
-+ tap_update_blkif_status(be->blkif);
-+}
-+
-+/**
-+ * Callback received when the frontend's state changes.
-+ */
-+static void tap_frontend_changed(struct xenbus_device *dev,
-+ enum xenbus_state frontend_state)
-+{
-+ struct backend_info *be = dev->dev.driver_data;
-+ int err;
-+
-+ DPRINTK("\n");
-+
-+ switch (frontend_state) {
-+ case XenbusStateInitialising:
-+ if (dev->state == XenbusStateClosed) {
-+ printk(KERN_INFO "%s: %s: prepare for reconnect\n",
-+ __FUNCTION__, dev->nodename);
-+ xenbus_switch_state(dev, XenbusStateInitWait);
-+ }
-+ break;
-+
-+ case XenbusStateInitialised:
-+ case XenbusStateConnected:
-+ /* Ensure we connect even when two watches fire in
-+		   close succession and we miss the intermediate value
-+ of frontend_state. */
-+ if (dev->state == XenbusStateConnected)
-+ break;
-+
-+ err = connect_ring(be);
-+ if (err)
-+ break;
-+ tap_update_blkif_status(be->blkif);
-+ break;
-+
-+ case XenbusStateClosing:
-+ if (be->blkif->xenblkd) {
-+ kthread_stop(be->blkif->xenblkd);
-+ be->blkif->xenblkd = NULL;
-+ }
-+ xenbus_switch_state(dev, XenbusStateClosing);
-+ break;
-+
-+ case XenbusStateClosed:
-+ xenbus_switch_state(dev, XenbusStateClosed);
-+ if (xenbus_dev_is_online(dev))
-+ break;
-+ /* fall through if not online */
-+ case XenbusStateUnknown:
-+ device_unregister(&dev->dev);
-+ break;
-+
-+ default:
-+ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
-+ frontend_state);
-+ break;
-+ }
-+}
-+
-+
-+/**
-+ * Switch to Connected state.
-+ */
-+static void connect(struct backend_info *be)
-+{
-+ int err;
-+
-+ struct xenbus_device *dev = be->dev;
-+
-+ err = xenbus_switch_state(dev, XenbusStateConnected);
-+ if (err)
-+ xenbus_dev_fatal(dev, err, "switching to Connected state",
-+ dev->nodename);
-+
-+ return;
-+}
-+
-+static int connect_ring(struct backend_info *be)
-+{
-+ struct xenbus_device *dev = be->dev;
-+ unsigned long ring_ref;
-+ unsigned int evtchn;
-+ char protocol[64];
-+ int err;
-+
-+ DPRINTK("%s\n", dev->otherend);
-+
-+ err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
-+ &ring_ref, "event-channel", "%u", &evtchn, NULL);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err,
-+ "reading %s/ring-ref and event-channel",
-+ dev->otherend);
-+ return err;
-+ }
-+
-+ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
-+ err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
-+ "%63s", protocol, NULL);
-+ if (err) {
-+ strcpy(protocol, "unspecified");
-+ be->blkif->blk_protocol = xen_guest_blkif_protocol(be->blkif->domid);
-+ }
-+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
-+ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
-+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
-+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
-+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
-+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
-+#if 1 /* maintain compatibility with early sles10-sp1 and paravirt netware betas */
-+ else if (0 == strcmp(protocol, "1"))
-+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
-+ else if (0 == strcmp(protocol, "2"))
-+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
-+#endif
-+ else {
-+ xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
-+ return -1;
-+ }
-+ printk(KERN_INFO
-+ "blktap: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
-+ ring_ref, evtchn, be->blkif->blk_protocol, protocol);
-+
-+ /* Map the shared frame, irq etc. */
-+ err = tap_blkif_map(be->blkif, ring_ref, evtchn);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
-+ ring_ref, evtchn);
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+
-+/* ** Driver Registration ** */
-+
-+
-+static struct xenbus_device_id blktap_ids[] = {
-+ { "tap" },
-+ { "" }
-+};
-+
-+
-+static struct xenbus_driver blktap = {
-+ .name = "tap",
-+ .owner = THIS_MODULE,
-+ .ids = blktap_ids,
-+ .probe = blktap_probe,
-+ .remove = blktap_remove,
-+ .otherend_changed = tap_frontend_changed
-+};
-+
-+
-+void tap_blkif_xenbus_init(void)
-+{
-+ xenbus_register_backend(&blktap);
-+}
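For context, connect_ring() above expects the frontend to have published "ring-ref", "event-channel" and, optionally, "protocol" under its own xenstore directory. A minimal frontend-side sketch, assuming the standard xenbus_printf() helper and a caller-supplied transaction xbt (both hypothetical here, not part of this patch):

	/* Sketch only: publish the nodes that the blktap backend's
	 * connect_ring() later reads back with xenbus_gather(). */
	err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", ring_ref);
	if (!err)
		err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", evtchn);
	if (!err)
		err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
				    XEN_IO_PROTO_ABI_NATIVE);

If "protocol" is absent, connect_ring() falls back to xen_guest_blkif_protocol() (drivers/xen/core/domctl.c, later in this patch) to guess the frontend's ABI.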
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/char/Makefile ubuntu-gutsy-xen/drivers/xen/char/Makefile
---- ubuntu-gutsy/drivers/xen/char/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/char/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,2 @@
-+
-+obj-y := mem.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/char/mem.c ubuntu-gutsy-xen/drivers/xen/char/mem.c
---- ubuntu-gutsy/drivers/xen/char/mem.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/char/mem.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,202 @@
-+/*
-+ * Originally from linux/drivers/char/mem.c
-+ *
-+ * Copyright (C) 1991, 1992 Linus Torvalds
-+ *
-+ * Added devfs support.
-+ * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
-+ * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/miscdevice.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/mman.h>
-+#include <linux/random.h>
-+#include <linux/init.h>
-+#include <linux/raw.h>
-+#include <linux/tty.h>
-+#include <linux/capability.h>
-+#include <linux/ptrace.h>
-+#include <linux/device.h>
-+#include <asm/pgalloc.h>
-+#include <asm/uaccess.h>
-+#include <asm/io.h>
-+#include <asm/hypervisor.h>
-+
-+#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
-+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
-+{
-+ return 1;
-+}
-+#endif
-+
-+/*
-+ * This function reads the *physical* memory. The f_pos points directly to the
-+ * memory location.
-+ */
-+static ssize_t read_mem(struct file * file, char __user * buf,
-+ size_t count, loff_t *ppos)
-+{
-+ unsigned long p = *ppos, ignored;
-+ ssize_t read = 0, sz;
-+ void __iomem *v;
-+
-+ if (!valid_phys_addr_range(p, count))
-+ return -EFAULT;
-+
-+ while (count > 0) {
-+ /*
-+ * Handle first page in case it's not aligned
-+ */
-+ if (-p & (PAGE_SIZE - 1))
-+ sz = -p & (PAGE_SIZE - 1);
-+ else
-+ sz = PAGE_SIZE;
-+
-+ sz = min_t(unsigned long, sz, count);
-+
-+ v = xlate_dev_mem_ptr(p, sz);
-+ if (IS_ERR(v) || v == NULL) {
-+ /*
-+ * Some programs (e.g., dmidecode) groove off into
-+ * weird RAM areas where no tables can possibly exist
-+ * (because Xen will have stomped on them!). These
-+ * programs get rather upset if we let them know that
-+ * Xen failed their access, so we fake out a read of
-+ * all zeroes.
-+ */
-+ if (clear_user(buf, count))
-+ return -EFAULT;
-+ read += count;
-+ break;
-+ }
-+
-+ ignored = copy_to_user(buf, v, sz);
-+ xlate_dev_mem_ptr_unmap(v);
-+ if (ignored)
-+ return -EFAULT;
-+ buf += sz;
-+ p += sz;
-+ count -= sz;
-+ read += sz;
-+ }
-+
-+ *ppos += read;
-+ return read;
-+}
-+
-+static ssize_t write_mem(struct file * file, const char __user * buf,
-+ size_t count, loff_t *ppos)
-+{
-+ unsigned long p = *ppos, ignored;
-+ ssize_t written = 0, sz;
-+ void __iomem *v;
-+
-+ if (!valid_phys_addr_range(p, count))
-+ return -EFAULT;
-+
-+ while (count > 0) {
-+ /*
-+ * Handle first page in case it's not aligned
-+ */
-+ if (-p & (PAGE_SIZE - 1))
-+ sz = -p & (PAGE_SIZE - 1);
-+ else
-+ sz = PAGE_SIZE;
-+
-+ sz = min_t(unsigned long, sz, count);
-+
-+ v = xlate_dev_mem_ptr(p, sz);
-+ if (v == NULL)
-+ break;
-+ if (IS_ERR(v)) {
-+ if (written == 0)
-+ return PTR_ERR(v);
-+ break;
-+ }
-+
-+ ignored = copy_from_user(v, buf, sz);
-+ xlate_dev_mem_ptr_unmap(v);
-+ if (ignored) {
-+ written += sz - ignored;
-+ if (written)
-+ break;
-+ return -EFAULT;
-+ }
-+ buf += sz;
-+ p += sz;
-+ count -= sz;
-+ written += sz;
-+ }
-+
-+ *ppos += written;
-+ return written;
-+}
-+
-+#ifndef ARCH_HAS_DEV_MEM_MMAP_MEM
-+static inline int uncached_access(struct file *file)
-+{
-+ if (file->f_flags & O_SYNC)
-+ return 1;
-+ /* Xen sets correct MTRR type on non-RAM for us. */
-+ return 0;
-+}
-+
-+static int xen_mmap_mem(struct file * file, struct vm_area_struct * vma)
-+{
-+ size_t size = vma->vm_end - vma->vm_start;
-+
-+ if (uncached_access(file))
-+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-+
-+ /* We want to return the real error code, not EAGAIN. */
-+ return direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-+ size, vma->vm_page_prot, DOMID_IO);
-+}
-+#endif
-+
-+/*
-+ * The memory devices use the full 32/64 bits of the offset, and so we cannot
-+ * check against negative addresses: they are ok. The return value is weird,
-+ * though, in that case (0).
-+ *
-+ * also note that seeking relative to the "end of file" isn't supported:
-+ * it has no meaning, so it returns -EINVAL.
-+ */
-+static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
-+{
-+ loff_t ret;
-+
-+ mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
-+ switch (orig) {
-+ case 0:
-+ file->f_pos = offset;
-+ ret = file->f_pos;
-+ force_successful_syscall_return();
-+ break;
-+ case 1:
-+ file->f_pos += offset;
-+ ret = file->f_pos;
-+ force_successful_syscall_return();
-+ break;
-+ default:
-+ ret = -EINVAL;
-+ }
-+ mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
-+ return ret;
-+}
-+
-+static int open_mem(struct inode * inode, struct file * filp)
-+{
-+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
-+}
-+
-+const struct file_operations mem_fops = {
-+ .llseek = memory_lseek,
-+ .read = read_mem,
-+ .write = write_mem,
-+ .mmap = xen_mmap_mem,
-+ .open = open_mem,
-+};
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/console/console.c ubuntu-gutsy-xen/drivers/xen/console/console.c
---- ubuntu-gutsy/drivers/xen/console/console.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/console/console.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,689 @@
-+/******************************************************************************
-+ * console.c
-+ *
-+ * Virtual console driver.
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/errno.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/interrupt.h>
-+#include <linux/tty.h>
-+#include <linux/tty_flip.h>
-+#include <linux/serial.h>
-+#include <linux/major.h>
-+#include <linux/ptrace.h>
-+#include <linux/ioport.h>
-+#include <linux/mm.h>
-+#include <linux/slab.h>
-+#include <linux/init.h>
-+#include <linux/console.h>
-+#include <linux/bootmem.h>
-+#include <linux/sysrq.h>
-+#include <linux/screen_info.h>
-+#include <linux/vt.h>
-+#include <asm/io.h>
-+#include <asm/irq.h>
-+#include <asm/uaccess.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/event_channel.h>
-+#include <asm/hypervisor.h>
-+#include <xen/evtchn.h>
-+#include <xen/xenbus.h>
-+#include <xen/xencons.h>
-+
-+/*
-+ * Modes:
-+ * 'xencons=off' [XC_OFF]: Console is disabled.
-+ * 'xencons=tty' [XC_TTY]: Console attached to '/dev/tty[0-9]+'.
-+ * 'xencons=ttyS' [XC_SERIAL]: Console attached to '/dev/ttyS[0-9]+'.
-+ * 'xencons=xvc' [XC_XVC]: Console attached to '/dev/xvc0'.
-+ * default: DOM0 -> XC_SERIAL ; all others -> XC_TTY.
-+ *
-+ * NB. In mode XC_TTY, we create dummy consoles for tty2-63. This suppresses
-+ * warnings from standard distro startup scripts.
-+ */
-+static enum {
-+ XC_OFF, XC_TTY, XC_SERIAL, XC_XVC
-+} xc_mode;
-+static int xc_num = -1;
-+
-+/* /dev/xvc0 device number allocated by lanana.org. */
-+#define XEN_XVC_MAJOR 204
-+#define XEN_XVC_MINOR 191
-+
-+void xencons_early_setup(void)
-+{
-+ if (is_initial_xendomain()) {
-+#ifdef CONFIG_XEN_DISABLE_SERIAL
-+ xc_mode = XC_SERIAL;
-+#endif
-+ } else {
-+#ifdef CONFIG_XEN_FRAMEBUFFER
-+ xc_mode = XC_XVC;
-+#else
-+ extern int console_use_vt;
-+
-+ xc_mode = XC_TTY;
-+ console_use_vt = 0;
-+#endif
-+ }
-+}
-+
-+static int __init xencons_setup(char *str)
-+{
-+ char *q;
-+ int n;
-+ extern int console_use_vt;
-+
-+ console_use_vt = 1;
-+ if (!strncmp(str, "ttyS", 4)) {
-+ xc_mode = XC_SERIAL;
-+ str += 4;
-+ } else if (!strncmp(str, "tty", 3)) {
-+ xc_mode = XC_TTY;
-+ str += 3;
-+ console_use_vt = 0;
-+ } else if (!strncmp(str, "xvc", 3)) {
-+ xc_mode = XC_XVC;
-+ str += 3;
-+ } else if (!strncmp(str, "off", 3)) {
-+ xc_mode = XC_OFF;
-+ str += 3;
-+ }
-+
-+ n = simple_strtol(str, &q, 10);
-+ if (q != str)
-+ xc_num = n;
-+
-+ return 1;
-+}
-+__setup("xencons=", xencons_setup);
-+
-+/* The kernel and user-land drivers share a common transmit buffer. */
-+static unsigned int wbuf_size = 4096;
-+#define WBUF_MASK(_i) ((_i)&(wbuf_size-1))
-+static char *wbuf;
-+static unsigned int wc, wp; /* write_cons, write_prod */
-+
-+static int __init xencons_bufsz_setup(char *str)
-+{
-+ unsigned int goal;
-+ goal = simple_strtoul(str, NULL, 0);
-+ if (goal) {
-+ goal = roundup_pow_of_two(goal);
-+ if (wbuf_size < goal)
-+ wbuf_size = goal;
-+ }
-+ return 1;
-+}
-+__setup("xencons_bufsz=", xencons_bufsz_setup);
-+
-+/* This lock protects accesses to the common transmit buffer. */
-+static DEFINE_SPINLOCK(xencons_lock);
-+
-+/* Common transmit-kick routine. */
-+static void __xencons_tx_flush(void);
-+
-+static struct tty_driver *xencons_driver;
-+
-+/******************** Kernel console driver ********************************/
-+
-+static void kcons_write(struct console *c, const char *s, unsigned int count)
-+{
-+ int i = 0;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+
-+ while (i < count) {
-+ for (; i < count; i++) {
-+ if ((wp - wc) >= (wbuf_size - 1))
-+ break;
-+ if ((wbuf[WBUF_MASK(wp++)] = s[i]) == '\n')
-+ wbuf[WBUF_MASK(wp++)] = '\r';
-+ }
-+
-+ __xencons_tx_flush();
-+ }
-+
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static void kcons_write_dom0(struct console *c, const char *s, unsigned int count)
-+{
-+
-+ while (count > 0) {
-+ int rc;
-+ rc = HYPERVISOR_console_io( CONSOLEIO_write, count, (char *)s);
-+ if (rc <= 0)
-+ break;
-+ count -= rc;
-+ s += rc;
-+ }
-+}
-+
-+static struct tty_driver *kcons_device(struct console *c, int *index)
-+{
-+ *index = 0;
-+ return xencons_driver;
-+}
-+
-+static struct console kcons_info = {
-+ .device = kcons_device,
-+ .flags = CON_PRINTBUFFER | CON_ENABLED,
-+ .index = -1,
-+};
-+
-+static int __init xen_console_init(void)
-+{
-+ if (!is_running_on_xen())
-+ goto out;
-+
-+ if (is_initial_xendomain()) {
-+ kcons_info.write = kcons_write_dom0;
-+ } else {
-+ if (!xen_start_info->console.domU.evtchn)
-+ goto out;
-+ kcons_info.write = kcons_write;
-+ }
-+
-+ switch (xc_mode) {
-+ case XC_XVC:
-+ strcpy(kcons_info.name, "xvc");
-+ if (xc_num == -1)
-+ xc_num = 0;
-+ break;
-+
-+ case XC_SERIAL:
-+ strcpy(kcons_info.name, "ttyS");
-+ if (xc_num == -1)
-+ xc_num = 0;
-+ break;
-+
-+ case XC_TTY:
-+ strcpy(kcons_info.name, "tty");
-+ if (xc_num == -1)
-+ xc_num = 1;
-+ break;
-+
-+ default:
-+ goto out;
-+ }
-+
-+ wbuf = alloc_bootmem(wbuf_size);
-+
-+ register_console(&kcons_info);
-+
-+ out:
-+ return 0;
-+}
-+console_initcall(xen_console_init);
-+
-+/*** Useful function for console debugging -- goes straight to Xen. ***/
-+asmlinkage int xprintk(const char *fmt, ...)
-+{
-+ va_list args;
-+ int printk_len;
-+ static char printk_buf[1024];
-+
-+ /* Emit the output into the temporary buffer */
-+ va_start(args, fmt);
-+ printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
-+ va_end(args);
-+
-+ /* Send the processed output directly to Xen. */
-+ kcons_write_dom0(NULL, printk_buf, printk_len);
-+
-+ return 0;
-+}
-+
-+/*** Forcibly flush console data before dying. ***/
-+void xencons_force_flush(void)
-+{
-+ int sz;
-+
-+ /* Emergency console is synchronous, so there's nothing to flush. */
-+ if (!is_running_on_xen() ||
-+ is_initial_xendomain() ||
-+ !xen_start_info->console.domU.evtchn)
-+ return;
-+
-+ /* Spin until console data is flushed through to the daemon. */
-+ while (wc != wp) {
-+ int sent = 0;
-+ if ((sz = wp - wc) == 0)
-+ continue;
-+ sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
-+ if (sent > 0)
-+ wc += sent;
-+ }
-+}
-+
-+
-+/******************** User-space console driver (/dev/console) ************/
-+
-+#define DRV(_d) (_d)
-+#define DUMMY_TTY(_tty) ((xc_mode == XC_TTY) && \
-+ ((_tty)->index != (xc_num - 1)))
-+
-+static struct ktermios *xencons_termios[MAX_NR_CONSOLES];
-+static struct ktermios *xencons_termios_locked[MAX_NR_CONSOLES];
-+static struct tty_struct *xencons_tty;
-+static int xencons_priv_irq;
-+static char x_char;
-+
-+void xencons_rx(char *buf, unsigned len)
-+{
-+ int i;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+ if (xencons_tty == NULL)
-+ goto out;
-+
-+ for (i = 0; i < len; i++) {
-+#ifdef CONFIG_MAGIC_SYSRQ
-+ if (sysrq_on()) {
-+ static unsigned long sysrq_requested;
-+
-+ if (buf[i] == '\x0f') { /* ^O */
-+ if (!sysrq_requested) {
-+ sysrq_requested = jiffies;
-+ continue; /* don't print sysrq key */
-+ }
-+ sysrq_requested = 0;
-+ } else if (sysrq_requested) {
-+ unsigned long sysrq_timeout =
-+ sysrq_requested + HZ*2;
-+ sysrq_requested = 0;
-+ if (time_before(jiffies, sysrq_timeout)) {
-+ spin_unlock_irqrestore(
-+ &xencons_lock, flags);
-+ handle_sysrq(buf[i], xencons_tty);
-+ spin_lock_irqsave(
-+ &xencons_lock, flags);
-+ continue;
-+ }
-+ }
-+ }
-+#endif
-+ tty_insert_flip_char(xencons_tty, buf[i], 0);
-+ }
-+ tty_flip_buffer_push(xencons_tty);
-+
-+ out:
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static void __xencons_tx_flush(void)
-+{
-+ int sent, sz, work_done = 0;
-+
-+ if (x_char) {
-+ if (is_initial_xendomain())
-+ kcons_write_dom0(NULL, &x_char, 1);
-+ else
-+ while (x_char)
-+ if (xencons_ring_send(&x_char, 1) == 1)
-+ break;
-+ x_char = 0;
-+ work_done = 1;
-+ }
-+
-+ while (wc != wp) {
-+ sz = wp - wc;
-+ if (sz > (wbuf_size - WBUF_MASK(wc)))
-+ sz = wbuf_size - WBUF_MASK(wc);
-+ if (is_initial_xendomain()) {
-+ kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
-+ wc += sz;
-+ } else {
-+ sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
-+ if (sent == 0)
-+ break;
-+ wc += sent;
-+ }
-+ work_done = 1;
-+ }
-+
-+ if (work_done && (xencons_tty != NULL)) {
-+ wake_up_interruptible(&xencons_tty->write_wait);
-+ if ((xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
-+ (xencons_tty->ldisc.write_wakeup != NULL))
-+ (xencons_tty->ldisc.write_wakeup)(xencons_tty);
-+ }
-+}
-+
-+void xencons_tx(void)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+ __xencons_tx_flush();
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+/* Privileged receive callback and transmit kicker. */
-+static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id)
-+{
-+ static char rbuf[16];
-+ int l;
-+
-+ while ((l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0)
-+ xencons_rx(rbuf, l);
-+
-+ xencons_tx();
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static int xencons_write_room(struct tty_struct *tty)
-+{
-+ return wbuf_size - (wp - wc);
-+}
-+
-+static int xencons_chars_in_buffer(struct tty_struct *tty)
-+{
-+ return wp - wc;
-+}
-+
-+static void xencons_send_xchar(struct tty_struct *tty, char ch)
-+{
-+ unsigned long flags;
-+
-+ if (DUMMY_TTY(tty))
-+ return;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+ x_char = ch;
-+ __xencons_tx_flush();
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static void xencons_throttle(struct tty_struct *tty)
-+{
-+ if (DUMMY_TTY(tty))
-+ return;
-+
-+ if (I_IXOFF(tty))
-+ xencons_send_xchar(tty, STOP_CHAR(tty));
-+}
-+
-+static void xencons_unthrottle(struct tty_struct *tty)
-+{
-+ if (DUMMY_TTY(tty))
-+ return;
-+
-+ if (I_IXOFF(tty)) {
-+ if (x_char != 0)
-+ x_char = 0;
-+ else
-+ xencons_send_xchar(tty, START_CHAR(tty));
-+ }
-+}
-+
-+static void xencons_flush_buffer(struct tty_struct *tty)
-+{
-+ unsigned long flags;
-+
-+ if (DUMMY_TTY(tty))
-+ return;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+ wc = wp = 0;
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static inline int __xencons_put_char(int ch)
-+{
-+ char _ch = (char)ch;
-+ if ((wp - wc) == wbuf_size)
-+ return 0;
-+ wbuf[WBUF_MASK(wp++)] = _ch;
-+ return 1;
-+}
-+
-+static int xencons_write(
-+ struct tty_struct *tty,
-+ const unsigned char *buf,
-+ int count)
-+{
-+ int i;
-+ unsigned long flags;
-+
-+ if (DUMMY_TTY(tty))
-+ return count;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+
-+ for (i = 0; i < count; i++)
-+ if (!__xencons_put_char(buf[i]))
-+ break;
-+
-+ if (i != 0)
-+ __xencons_tx_flush();
-+
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+
-+ return i;
-+}
-+
-+static void xencons_put_char(struct tty_struct *tty, u_char ch)
-+{
-+ unsigned long flags;
-+
-+ if (DUMMY_TTY(tty))
-+ return;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+ (void)__xencons_put_char(ch);
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static void xencons_flush_chars(struct tty_struct *tty)
-+{
-+ unsigned long flags;
-+
-+ if (DUMMY_TTY(tty))
-+ return;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+ __xencons_tx_flush();
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
-+{
-+ unsigned long orig_jiffies = jiffies;
-+
-+ if (DUMMY_TTY(tty))
-+ return;
-+
-+ while (DRV(tty->driver)->chars_in_buffer(tty)) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ schedule_timeout(1);
-+ if (signal_pending(current))
-+ break;
-+ if (timeout && time_after(jiffies, orig_jiffies + timeout))
-+ break;
-+ }
-+
-+ set_current_state(TASK_RUNNING);
-+}
-+
-+static int xencons_open(struct tty_struct *tty, struct file *filp)
-+{
-+ unsigned long flags;
-+
-+ if (DUMMY_TTY(tty))
-+ return 0;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+ tty->driver_data = NULL;
-+ if (xencons_tty == NULL)
-+ xencons_tty = tty;
-+ __xencons_tx_flush();
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+
-+ return 0;
-+}
-+
-+static void xencons_close(struct tty_struct *tty, struct file *filp)
-+{
-+ unsigned long flags;
-+
-+ if (DUMMY_TTY(tty))
-+ return;
-+
-+ mutex_lock(&tty_mutex);
-+
-+ if (tty->count != 1) {
-+ mutex_unlock(&tty_mutex);
-+ return;
-+ }
-+
-+ /* Prevent other threads from re-opening this tty. */
-+ set_bit(TTY_CLOSING, &tty->flags);
-+ mutex_unlock(&tty_mutex);
-+
-+ tty->closing = 1;
-+ tty_wait_until_sent(tty, 0);
-+ if (DRV(tty->driver)->flush_buffer != NULL)
-+ DRV(tty->driver)->flush_buffer(tty);
-+ if (tty->ldisc.flush_buffer != NULL)
-+ tty->ldisc.flush_buffer(tty);
-+ tty->closing = 0;
-+ spin_lock_irqsave(&xencons_lock, flags);
-+ xencons_tty = NULL;
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static struct tty_operations xencons_ops = {
-+ .open = xencons_open,
-+ .close = xencons_close,
-+ .write = xencons_write,
-+ .write_room = xencons_write_room,
-+ .put_char = xencons_put_char,
-+ .flush_chars = xencons_flush_chars,
-+ .chars_in_buffer = xencons_chars_in_buffer,
-+ .send_xchar = xencons_send_xchar,
-+ .flush_buffer = xencons_flush_buffer,
-+ .throttle = xencons_throttle,
-+ .unthrottle = xencons_unthrottle,
-+ .wait_until_sent = xencons_wait_until_sent,
-+};
-+
-+static int __init xencons_init(void)
-+{
-+ int rc;
-+
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ if (xc_mode == XC_OFF)
-+ return 0;
-+
-+ if (!is_initial_xendomain()) {
-+ rc = xencons_ring_init();
-+ if (rc)
-+ return rc;
-+ }
-+
-+ xencons_driver = alloc_tty_driver((xc_mode == XC_TTY) ?
-+ MAX_NR_CONSOLES : 1);
-+ if (xencons_driver == NULL)
-+ return -ENOMEM;
-+
-+ DRV(xencons_driver)->name = "xencons";
-+ DRV(xencons_driver)->major = TTY_MAJOR;
-+ DRV(xencons_driver)->type = TTY_DRIVER_TYPE_SERIAL;
-+ DRV(xencons_driver)->subtype = SERIAL_TYPE_NORMAL;
-+ DRV(xencons_driver)->init_termios = tty_std_termios;
-+ DRV(xencons_driver)->flags =
-+ TTY_DRIVER_REAL_RAW |
-+ TTY_DRIVER_RESET_TERMIOS;
-+ DRV(xencons_driver)->termios = xencons_termios;
-+ DRV(xencons_driver)->termios_locked = xencons_termios_locked;
-+
-+ switch (xc_mode) {
-+ case XC_XVC:
-+ printk(KERN_INFO "xencons_init: Initializing xen vfb;"
-+ " pass xencons=tty to prevent this\n");
-+ DRV(xencons_driver)->name = "xvc";
-+ DRV(xencons_driver)->major = XEN_XVC_MAJOR;
-+ DRV(xencons_driver)->minor_start = XEN_XVC_MINOR;
-+ DRV(xencons_driver)->name_base = xc_num;
-+ break;
-+ case XC_SERIAL:
-+ DRV(xencons_driver)->name = "ttyS";
-+ DRV(xencons_driver)->minor_start = 64 + xc_num;
-+ DRV(xencons_driver)->name_base = xc_num;
-+ break;
-+ default:
-+ DRV(xencons_driver)->name = "tty";
-+ DRV(xencons_driver)->minor_start = 1;
-+ DRV(xencons_driver)->name_base = 1;
-+ break;
-+ }
-+
-+ tty_set_operations(xencons_driver, &xencons_ops);
-+
-+ if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
-+ printk("WARNING: Failed to register Xen virtual "
-+ "console driver as '%s%d'\n",
-+ DRV(xencons_driver)->name,
-+ DRV(xencons_driver)->name_base);
-+ put_tty_driver(xencons_driver);
-+ xencons_driver = NULL;
-+ return rc;
-+ }
-+
-+ if (is_initial_xendomain()) {
-+ xencons_priv_irq = bind_virq_to_irqhandler(
-+ VIRQ_CONSOLE,
-+ 0,
-+ xencons_priv_interrupt,
-+ 0,
-+ "console",
-+ NULL);
-+ BUG_ON(xencons_priv_irq < 0);
-+ }
-+
-+ printk("Xen virtual console successfully installed as %s%d\n",
-+ DRV(xencons_driver)->name, xc_num);
-+
-+ return 0;
-+}
-+
-+module_init(xencons_init);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
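The console driver above is configured from the kernel command line; the accepted forms follow directly from xencons_setup() and xencons_bufsz_setup(). Illustrative values only (the numeric suffix is optional, and the buffer size is rounded up to a power of two and only ever enlarged):

	xencons=off          disable the Xen virtual console
	xencons=tty1         attach to /dev/tty1 (VT console stays disabled)
	xencons=ttyS0        attach to /dev/ttyS0
	xencons=xvc0         attach to /dev/xvc0
	xencons_bufsz=16384  enlarge the shared transmit buffer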
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/console/dom0.c ubuntu-gutsy-xen/drivers/xen/console/dom0.c
---- ubuntu-gutsy/drivers/xen/console/dom0.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/console/dom0.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,70 @@
-+/******************************************************************************
-+ * dom0.c
-+ *
-+ * Dom0 console parameter initialization.
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/screen_info.h>
-+#include <xen/interface/xen.h>
-+#include <xen/xencons.h>
-+
-+void dom0_init_screen_info(const struct dom0_vga_console_info *info)
-+{
-+ switch (info->video_type) {
-+ case XEN_VGATYPE_TEXT_MODE_3:
-+ screen_info.orig_video_mode = 3;
-+ screen_info.orig_video_ega_bx = 3;
-+ screen_info.orig_video_isVGA = 1;
-+ screen_info.orig_video_lines = info->u.text_mode_3.rows;
-+ screen_info.orig_video_cols = info->u.text_mode_3.columns;
-+ screen_info.orig_x = info->u.text_mode_3.cursor_x;
-+ screen_info.orig_y = info->u.text_mode_3.cursor_y;
-+ screen_info.orig_video_points =
-+ info->u.text_mode_3.font_height;
-+ break;
-+ case XEN_VGATYPE_VESA_LFB:
-+ screen_info.orig_video_isVGA = VIDEO_TYPE_VLFB;
-+ screen_info.lfb_width = info->u.vesa_lfb.width;
-+ screen_info.lfb_height = info->u.vesa_lfb.height;
-+ screen_info.lfb_depth = info->u.vesa_lfb.bits_per_pixel;
-+ screen_info.lfb_base = info->u.vesa_lfb.lfb_base;
-+ screen_info.lfb_size = info->u.vesa_lfb.lfb_size;
-+ screen_info.lfb_linelength = info->u.vesa_lfb.bytes_per_line;
-+ screen_info.red_size = info->u.vesa_lfb.red_size;
-+ screen_info.red_pos = info->u.vesa_lfb.red_pos;
-+ screen_info.green_size = info->u.vesa_lfb.green_size;
-+ screen_info.green_pos = info->u.vesa_lfb.green_pos;
-+ screen_info.blue_size = info->u.vesa_lfb.blue_size;
-+ screen_info.blue_pos = info->u.vesa_lfb.blue_pos;
-+ screen_info.rsvd_size = info->u.vesa_lfb.rsvd_size;
-+ screen_info.rsvd_pos = info->u.vesa_lfb.rsvd_pos;
-+ break;
-+ }
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/console/Makefile ubuntu-gutsy-xen/drivers/xen/console/Makefile
---- ubuntu-gutsy/drivers/xen/console/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/console/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,3 @@
-+
-+obj-$(CONFIG_XEN_CONSOLE) := console.o xencons_ring.o
-+obj-$(CONFIG_XEN_PRIVILEGED_GUEST) += dom0.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/console/xencons_ring.c ubuntu-gutsy-xen/drivers/xen/console/xencons_ring.c
---- ubuntu-gutsy/drivers/xen/console/xencons_ring.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/console/xencons_ring.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,143 @@
-+/*
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/errno.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/interrupt.h>
-+#include <linux/tty.h>
-+#include <linux/tty_flip.h>
-+#include <linux/serial.h>
-+#include <linux/major.h>
-+#include <linux/ptrace.h>
-+#include <linux/ioport.h>
-+#include <linux/mm.h>
-+#include <linux/slab.h>
-+
-+#include <asm/hypervisor.h>
-+#include <xen/evtchn.h>
-+#include <xen/xencons.h>
-+#include <linux/wait.h>
-+#include <linux/interrupt.h>
-+#include <linux/sched.h>
-+#include <linux/err.h>
-+#include <xen/interface/io/console.h>
-+
-+static int xencons_irq;
-+
-+static inline struct xencons_interface *xencons_interface(void)
-+{
-+ return mfn_to_virt(xen_start_info->console.domU.mfn);
-+}
-+
-+static inline void notify_daemon(void)
-+{
-+ /* Use evtchn: this is called early, before irq is set up. */
-+ notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
-+}
-+
-+int xencons_ring_send(const char *data, unsigned len)
-+{
-+ int sent = 0;
-+ struct xencons_interface *intf = xencons_interface();
-+ XENCONS_RING_IDX cons, prod;
-+
-+ cons = intf->out_cons;
-+ prod = intf->out_prod;
-+ mb();
-+ BUG_ON((prod - cons) > sizeof(intf->out));
-+
-+ while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
-+ intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
-+
-+ wmb();
-+ intf->out_prod = prod;
-+
-+ notify_daemon();
-+
-+ return sent;
-+}
-+
-+static irqreturn_t handle_input(int irq, void *unused)
-+{
-+ struct xencons_interface *intf = xencons_interface();
-+ XENCONS_RING_IDX cons, prod;
-+
-+ cons = intf->in_cons;
-+ prod = intf->in_prod;
-+ mb();
-+ BUG_ON((prod - cons) > sizeof(intf->in));
-+
-+ while (cons != prod) {
-+ xencons_rx(intf->in+MASK_XENCONS_IDX(cons,intf->in), 1);
-+ cons++;
-+ }
-+
-+ mb();
-+ intf->in_cons = cons;
-+
-+ notify_daemon();
-+
-+ xencons_tx();
-+
-+ return IRQ_HANDLED;
-+}
-+
-+int xencons_ring_init(void)
-+{
-+ int irq;
-+
-+ if (xencons_irq)
-+ unbind_from_irqhandler(xencons_irq, NULL);
-+ xencons_irq = 0;
-+
-+ if (!is_running_on_xen() ||
-+ is_initial_xendomain() ||
-+ !xen_start_info->console.domU.evtchn)
-+ return -ENODEV;
-+
-+ irq = bind_caller_port_to_irqhandler(
-+ xen_start_info->console.domU.evtchn,
-+ handle_input, 0, "xencons", NULL);
-+ if (irq < 0) {
-+ printk(KERN_ERR "XEN console request irq failed %i\n", irq);
-+ return irq;
-+ }
-+
-+ xencons_irq = irq;
-+
-+ /* In case we have in-flight data after save/restore... */
-+ notify_daemon();
-+
-+ return 0;
-+}
-+
-+void xencons_resume(void)
-+{
-+ (void)xencons_ring_init();
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/core/cpu_hotplug.c ubuntu-gutsy-xen/drivers/xen/core/cpu_hotplug.c
---- ubuntu-gutsy/drivers/xen/core/cpu_hotplug.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/core/cpu_hotplug.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,172 @@
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/notifier.h>
-+#include <linux/cpu.h>
-+#include <xen/cpu_hotplug.h>
-+#include <xen/xenbus.h>
-+
-+/*
-+ * Set of CPUs that remote admin software will allow us to bring online.
-+ * Notified to us via xenbus.
-+ */
-+static cpumask_t xenbus_allowed_cpumask;
-+
-+/* Set of CPUs that local admin will allow us to bring online. */
-+static cpumask_t local_allowed_cpumask = CPU_MASK_ALL;
-+
-+static int local_cpu_hotplug_request(void)
-+{
-+ /*
-+ * We assume a CPU hotplug request comes from local admin if it is made
-+ * via a userspace process (i.e., one with a real mm_struct).
-+ */
-+ return (current->mm != NULL);
-+}
-+
-+static void vcpu_hotplug(unsigned int cpu)
-+{
-+ int err;
-+ char dir[32], state[32];
-+
-+ if ((cpu >= NR_CPUS) || !cpu_possible(cpu))
-+ return;
-+
-+ sprintf(dir, "cpu/%d", cpu);
-+ err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
-+ if (err != 1) {
-+ printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
-+ return;
-+ }
-+
-+ if (strcmp(state, "online") == 0) {
-+ cpu_set(cpu, xenbus_allowed_cpumask);
-+ (void)cpu_up(cpu);
-+ } else if (strcmp(state, "offline") == 0) {
-+ cpu_clear(cpu, xenbus_allowed_cpumask);
-+ (void)cpu_down(cpu);
-+ } else {
-+ printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
-+ state, cpu);
-+ }
-+}
-+
-+static void handle_vcpu_hotplug_event(
-+ struct xenbus_watch *watch, const char **vec, unsigned int len)
-+{
-+ int cpu;
-+ char *cpustr;
-+ const char *node = vec[XS_WATCH_PATH];
-+
-+ if ((cpustr = strstr(node, "cpu/")) != NULL) {
-+ sscanf(cpustr, "cpu/%d", &cpu);
-+ vcpu_hotplug(cpu);
-+ }
-+}
-+
-+static int smpboot_cpu_notify(struct notifier_block *notifier,
-+ unsigned long action, void *hcpu)
-+{
-+ int cpu = (long)hcpu;
-+
-+ /*
-+ * We do this in a callback notifier rather than __cpu_disable()
-+ * because local_cpu_hotplug_request() does not work in the latter
-+ * as it's always executed from within a stopmachine kthread.
-+ */
-+ if ((action == CPU_DOWN_PREPARE) && local_cpu_hotplug_request())
-+ cpu_clear(cpu, local_allowed_cpumask);
-+
-+ return NOTIFY_OK;
-+}
-+
-+static int setup_cpu_watcher(struct notifier_block *notifier,
-+ unsigned long event, void *data)
-+{
-+ int i;
-+
-+ static struct xenbus_watch cpu_watch = {
-+ .node = "cpu",
-+ .callback = handle_vcpu_hotplug_event,
-+ .flags = XBWF_new_thread };
-+ (void)register_xenbus_watch(&cpu_watch);
-+
-+ if (!is_initial_xendomain()) {
-+ for_each_possible_cpu(i)
-+ vcpu_hotplug(i);
-+ printk(KERN_INFO "Brought up %ld CPUs\n",
-+ (long)num_online_cpus());
-+ }
-+
-+ return NOTIFY_DONE;
-+}
-+
-+static int __init setup_vcpu_hotplug_event(void)
-+{
-+ static struct notifier_block hotplug_cpu = {
-+ .notifier_call = smpboot_cpu_notify };
-+ static struct notifier_block xsn_cpu = {
-+ .notifier_call = setup_cpu_watcher };
-+
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ register_cpu_notifier(&hotplug_cpu);
-+ register_xenstore_notifier(&xsn_cpu);
-+
-+ return 0;
-+}
-+
-+arch_initcall(setup_vcpu_hotplug_event);
-+
-+int smp_suspend(void)
-+{
-+ int cpu, err;
-+
-+ for_each_online_cpu(cpu) {
-+ if (cpu == 0)
-+ continue;
-+ err = cpu_down(cpu);
-+ if (err) {
-+ printk(KERN_CRIT "Failed to take all CPUs "
-+ "down: %d.\n", err);
-+ for_each_possible_cpu(cpu)
-+ vcpu_hotplug(cpu);
-+ return err;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+void smp_resume(void)
-+{
-+ int cpu;
-+
-+ for_each_possible_cpu(cpu)
-+ vcpu_hotplug(cpu);
-+}
-+
-+int cpu_up_check(unsigned int cpu)
-+{
-+ int rc = 0;
-+
-+ if (local_cpu_hotplug_request()) {
-+ cpu_set(cpu, local_allowed_cpumask);
-+ if (!cpu_isset(cpu, xenbus_allowed_cpumask)) {
-+ printk("%s: attempt to bring up CPU %u disallowed by "
-+ "remote admin.\n", __FUNCTION__, cpu);
-+ rc = -EBUSY;
-+ }
-+ } else if (!cpu_isset(cpu, local_allowed_cpumask) ||
-+ !cpu_isset(cpu, xenbus_allowed_cpumask)) {
-+ rc = -EBUSY;
-+ }
-+
-+ return rc;
-+}
-+
-+void init_xenbus_allowed_cpumask(void)
-+{
-+ xenbus_allowed_cpumask = cpu_present_map;
-+}
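For reference, vcpu_hotplug() above is driven by a per-CPU xenstore node; the watch registered in setup_cpu_watcher() fires on any write under "cpu". An illustrative layout (the paths match what xenbus_scanf() reads here; whichever toolstack writes them is outside this patch):

	cpu/0/availability = "online"
	cpu/1/availability = "offline"

Writing "offline" leads to cpu_down() being attempted for that CPU, and "online" to cpu_up(), subject to the allowed-CPU masks that cpu_up_check() enforces.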
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/core/domctl.c ubuntu-gutsy-xen/drivers/xen/core/domctl.c
---- ubuntu-gutsy/drivers/xen/core/domctl.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/core/domctl.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,133 @@
-+/*
-+ * !!! dirty hack alert !!!
-+ *
-+ * Problem: old guest kernels don't have a "protocol" node
-+ * in the frontend xenstore directory, so mixing
-+ * 32 and 64bit domains doesn't work.
-+ *
-+ * Upstream plans to solve this in the tools, by letting them
-+ * create a protocol node, which certainly makes sense.
-+ * But it isn't trivial and isn't done yet. Too bad.
-+ *
-+ * So for the time being we use the get_address_size domctl
-+ * hypercall for a pretty good guess. Not nice as the domctl
-+ * hypercall isn't supposed to be used by the kernel, because
-+ * we don't want to have dependencies between dom0 kernel and
-+ * xen kernel versions. Now we have one. Ouch.
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <asm/hypervisor.h>
-+#include <xen/blkif.h>
-+
-+#include "domctl.h"
-+
-+/* stuff copied from xen/interface/domctl.h, which we can't
-+ * include directly for the reasons outlined above .... */
-+
-+#define XEN_DOMCTL_set_address_size 35
-+#define XEN_DOMCTL_get_address_size 36
-+typedef struct xen_domctl_address_size {
-+ uint32_t size;
-+} xen_domctl_address_size_t;
-+
-+#define native_address_size (sizeof(unsigned long)*8)
-+
-+/* v4: sles10 sp1: xen 3.0.4 + 32-on-64 patches */
-+struct xen_domctl_v4 {
-+ uint32_t cmd;
-+ uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
-+ domid_t domain;
-+ union {
-+ /* left out lots of other struct xen_domctl_foobar */
-+ struct xen_domctl_address_size address_size;
-+ uint64_t dummy_align;
-+ uint8_t dummy_pad[128];
-+ } u;
-+};
-+
-+/* v5: upstream: xen 3.0.5 */
-+typedef __attribute__((aligned(8))) uint64_t uint64_aligned_t;
-+struct xen_domctl_v5 {
-+ uint32_t cmd;
-+ uint32_t interface_version;
-+ domid_t domain;
-+ union {
-+ struct xen_domctl_address_size address_size;
-+ uint64_aligned_t dummy_align;
-+ uint8_t dummy_pad[128];
-+ } u;
-+};
-+
-+/* The actual code comes here */
-+
-+static int xen_guest_address_size_v4(int domid)
-+{
-+ struct xen_domctl_v4 domctl;
-+ int rc;
-+
-+ memset(&domctl, 0, sizeof(domctl));
-+ domctl.cmd = XEN_DOMCTL_get_address_size;
-+ domctl.interface_version = 4;
-+ domctl.domain = domid;
-+ if (0 != (rc = _hypercall1(int, domctl, &domctl)))
-+ return rc;
-+ return domctl.u.address_size.size;
-+}
-+
-+static int xen_guest_address_size_v5(int domid)
-+{
-+ struct xen_domctl_v5 domctl;
-+ int rc;
-+
-+ memset(&domctl, 0, sizeof(domctl));
-+ domctl.cmd = XEN_DOMCTL_get_address_size;
-+ domctl.interface_version = 5;
-+ domctl.domain = domid;
-+ if (0 != (rc = _hypercall1(int, domctl, &domctl)))
-+ return rc;
-+ return domctl.u.address_size.size;
-+}
-+
-+int xen_guest_address_size(int domid)
-+{
-+ int ret;
-+
-+ ret = xen_guest_address_size_v4(domid);
-+ if (ret == 32 || ret == 64) {
-+ printk("%s: v4 domctl worked ok: %d\n", __FUNCTION__, ret);
-+ goto done;
-+ }
-+
-+ ret = xen_guest_address_size_v5(domid);
-+ if (ret == 32 || ret == 64) {
-+ printk("%s: v5 domctl worked ok: %d\n", __FUNCTION__, ret);
-+ goto done;
-+ }
-+
-+ ret = native_address_size;
-+ printk("%s: v4,v5 domctls failed, assuming native: %d\n",
-+ __FUNCTION__, ret);
-+
-+ done:
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(xen_guest_address_size);
-+
-+int xen_guest_blkif_protocol(int domid)
-+{
-+ int address_size;
-+
-+ address_size = xen_guest_address_size(domid);
-+ printk(KERN_DEBUG "%s: domain %d: got address size %d\n",
-+ __FUNCTION__, domid, address_size);
-+ if (address_size == native_address_size)
-+ return BLKIF_PROTOCOL_NATIVE;
-+ if (address_size == 32)
-+ return BLKIF_PROTOCOL_X86_32;
-+ if (address_size == 64)
-+ return BLKIF_PROTOCOL_X86_64;
-+ return BLKIF_PROTOCOL_NATIVE;
-+}
-+EXPORT_SYMBOL_GPL(xen_guest_blkif_protocol);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/core/domctl.h ubuntu-gutsy-xen/drivers/xen/core/domctl.h
---- ubuntu-gutsy/drivers/xen/core/domctl.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/core/domctl.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,2 @@
-+int xen_guest_address_size(int domid);
-+int xen_guest_blkif_protocol(int domid);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/core/evtchn.c ubuntu-gutsy-xen/drivers/xen/core/evtchn.c
---- ubuntu-gutsy/drivers/xen/core/evtchn.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/core/evtchn.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,1021 @@
-+/******************************************************************************
-+ * evtchn.c
-+ *
-+ * Communication via Xen event channels.
-+ *
-+ * Copyright (c) 2002-2005, K A Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/irq.h>
-+#include <linux/interrupt.h>
-+#include <linux/sched.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/version.h>
-+#include <asm/atomic.h>
-+#include <asm/system.h>
-+#include <asm/ptrace.h>
-+#include <asm/synch_bitops.h>
-+#include <xen/evtchn.h>
-+#include <xen/interface/event_channel.h>
-+#include <xen/interface/physdev.h>
-+#include <asm/hypervisor.h>
-+#include <linux/mc146818rtc.h> /* RTC_IRQ */
-+
-+/*
-+ * This lock protects updates to the following mapping and reference-count
-+ * arrays. The lock does not need to be acquired to read the mapping tables.
-+ */
-+static DEFINE_SPINLOCK(irq_mapping_update_lock);
-+
-+/* IRQ <-> event-channel mappings. */
-+static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
-+ [0 ... NR_EVENT_CHANNELS-1] = -1 };
-+
-+/* Packed IRQ information: binding type, sub-type index, and event channel. */
-+static u32 irq_info[NR_IRQS];
-+
-+/* Binding types. */
-+enum {
-+ IRQT_UNBOUND,
-+ IRQT_PIRQ,
-+ IRQT_VIRQ,
-+ IRQT_IPI,
-+ IRQT_LOCAL_PORT,
-+ IRQT_CALLER_PORT
-+};
-+
-+/* Constructor for packed IRQ information. */
-+static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
-+{
-+ return ((type << 24) | (index << 16) | evtchn);
-+}
-+
-+/* Convenient shorthand for packed representation of an unbound IRQ. */
-+#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
-+
-+/*
-+ * Accessors for packed IRQ information.
-+ */
-+
-+static inline unsigned int evtchn_from_irq(int irq)
-+{
-+ return (u16)(irq_info[irq]);
-+}
-+
-+static inline unsigned int index_from_irq(int irq)
-+{
-+ return (u8)(irq_info[irq] >> 16);
-+}
-+
-+static inline unsigned int type_from_irq(int irq)
-+{
-+ return (u8)(irq_info[irq] >> 24);
-+}
-+
-+/* IRQ <-> VIRQ mapping. */
-+DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
-+
-+/* IRQ <-> IPI mapping. */
-+#ifndef NR_IPIS
-+#define NR_IPIS 1
-+#endif
-+DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};
-+
-+/* Reference counts for bindings to IRQs. */
-+static int irq_bindcount[NR_IRQS];
-+
-+/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
-+static DECLARE_BITMAP(pirq_needs_eoi, NR_PIRQS);
-+
-+#ifdef CONFIG_SMP
-+
-+static u8 cpu_evtchn[NR_EVENT_CHANNELS];
-+static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
-+
-+static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
-+ unsigned int idx)
-+{
-+ return (sh->evtchn_pending[idx] &
-+ cpu_evtchn_mask[cpu][idx] &
-+ ~sh->evtchn_mask[idx]);
-+}
-+
-+static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
-+{
-+ int irq = evtchn_to_irq[chn];
-+
-+ BUG_ON(irq == -1);
-+ irq_desc[irq].affinity = cpumask_of_cpu(cpu);
-+
-+ clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
-+ set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
-+ cpu_evtchn[chn] = cpu;
-+}
-+
-+static void init_evtchn_cpu_bindings(void)
-+{
-+ int i;
-+
-+ /* By default all event channels notify CPU#0. */
-+ for (i = 0; i < NR_IRQS; i++)
-+ irq_desc[i].affinity = cpumask_of_cpu(0);
-+
-+ memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
-+ memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
-+}
-+
-+static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
-+{
-+ return cpu_evtchn[evtchn];
-+}
-+
-+void mask_evtchn_local(void)
-+{
-+ unsigned i, cpu = smp_processor_id();
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+
-+ for (i = 0; i < NR_EVENT_CHANNELS; ++i)
-+ if (cpu_evtchn[i] == cpu)
-+ synch_set_bit(i, &s->evtchn_mask[0]);
-+}
-+
-+#else
-+
-+static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
-+ unsigned int idx)
-+{
-+ return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
-+}
-+
-+static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
-+{
-+}
-+
-+static void init_evtchn_cpu_bindings(void)
-+{
-+}
-+
-+static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
-+{
-+ return 0;
-+}
-+
-+#endif
-+
-+/* Upcall to generic IRQ layer. */
-+#ifdef CONFIG_X86
-+extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
-+void __init xen_init_IRQ(void);
-+void __init init_IRQ(void)
-+{
-+ irq_ctx_init(0);
-+ xen_init_IRQ();
-+}
-+#if defined (__i386__)
-+static inline void exit_idle(void) {}
-+#define IRQ_REG orig_eax
-+#elif defined (__x86_64__)
-+#include <asm/idle.h>
-+#define IRQ_REG orig_rax
-+#endif
-+#define do_IRQ(irq, regs) do { \
-+ (regs)->IRQ_REG = ~(irq); \
-+ do_IRQ((regs)); \
-+} while (0)
-+#endif
-+
-+/* Xen will never allocate port zero for any purpose. */
-+#define VALID_EVTCHN(chn) ((chn) != 0)
-+
-+/*
-+ * Force a proper event-channel callback from Xen after clearing the
-+ * callback mask. We do this in a very simple manner, by making a call
-+ * down into Xen. The pending flag will be checked by Xen on return.
-+ */
-+void force_evtchn_callback(void)
-+{
-+ (void)HYPERVISOR_xen_version(0, NULL);
-+}
-+/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
-+EXPORT_SYMBOL(force_evtchn_callback);
-+
-+static DEFINE_PER_CPU(unsigned int, upcall_count) = { 0 };
-+
-+/* NB. Interrupts are disabled on entry. */
-+asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
-+{
-+ unsigned long l1, l2;
-+ unsigned int l1i, l2i, port, count;
-+ int irq, cpu = smp_processor_id();
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
-+
-+ do {
-+ /* Avoid a callback storm when we reenable delivery. */
-+ vcpu_info->evtchn_upcall_pending = 0;
-+
-+ /* Nested invocations bail immediately. */
-+ if (unlikely(per_cpu(upcall_count, cpu)++))
-+ return;
-+
-+#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
-+ /* Clear master flag /before/ clearing selector flag. */
-+ rmb();
-+#endif
-+ l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
-+ while (l1 != 0) {
-+ l1i = __ffs(l1);
-+ l1 &= ~(1UL << l1i);
-+
-+ while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
-+ l2i = __ffs(l2);
-+
-+ port = (l1i * BITS_PER_LONG) + l2i;
-+ if ((irq = evtchn_to_irq[port]) != -1)
-+ do_IRQ(irq, regs);
-+ else {
-+ exit_idle();
-+ evtchn_device_upcall(port);
-+ }
-+ }
-+ }
-+
-+ /* If there were nested callbacks then we have more to do. */
-+ count = per_cpu(upcall_count, cpu);
-+ per_cpu(upcall_count, cpu) = 0;
-+ } while (unlikely(count != 1));
-+}
-+
-+static int find_unbound_irq(void)
-+{
-+ static int warned;
-+ int dynirq, irq;
-+
-+ for (dynirq = 0; dynirq < NR_DYNIRQS; dynirq++) {
-+ irq = dynirq_to_irq(dynirq);
-+ if (irq_bindcount[irq] == 0)
-+ return irq;
-+ }
-+
-+ if (!warned) {
-+ warned = 1;
-+ printk(KERN_WARNING "No available IRQ to bind to: "
-+ "increase NR_DYNIRQS.\n");
-+ }
-+
-+ return -ENOSPC;
-+}
-+
-+static int bind_caller_port_to_irq(unsigned int caller_port)
-+{
-+ int irq;
-+
-+ spin_lock(&irq_mapping_update_lock);
-+
-+ if ((irq = evtchn_to_irq[caller_port]) == -1) {
-+ if ((irq = find_unbound_irq()) < 0)
-+ goto out;
-+
-+ evtchn_to_irq[caller_port] = irq;
-+ irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
-+ }
-+
-+ irq_bindcount[irq]++;
-+
-+ out:
-+ spin_unlock(&irq_mapping_update_lock);
-+ return irq;
-+}
-+
-+static int bind_local_port_to_irq(unsigned int local_port)
-+{
-+ int irq;
-+
-+ spin_lock(&irq_mapping_update_lock);
-+
-+ BUG_ON(evtchn_to_irq[local_port] != -1);
-+
-+ if ((irq = find_unbound_irq()) < 0) {
-+ struct evtchn_close close = { .port = local_port };
-+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
-+ BUG();
-+ goto out;
-+ }
-+
-+ evtchn_to_irq[local_port] = irq;
-+ irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
-+ irq_bindcount[irq]++;
-+
-+ out:
-+ spin_unlock(&irq_mapping_update_lock);
-+ return irq;
-+}
-+
-+static int bind_listening_port_to_irq(unsigned int remote_domain)
-+{
-+ struct evtchn_alloc_unbound alloc_unbound;
-+ int err;
-+
-+ alloc_unbound.dom = DOMID_SELF;
-+ alloc_unbound.remote_dom = remote_domain;
-+
-+ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
-+ &alloc_unbound);
-+
-+ return err ? : bind_local_port_to_irq(alloc_unbound.port);
-+}
-+
-+static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
-+ unsigned int remote_port)
-+{
-+ struct evtchn_bind_interdomain bind_interdomain;
-+ int err;
-+
-+ bind_interdomain.remote_dom = remote_domain;
-+ bind_interdomain.remote_port = remote_port;
-+
-+ err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
-+ &bind_interdomain);
-+
-+ return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
-+}
-+
-+static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
-+{
-+ struct evtchn_bind_virq bind_virq;
-+ int evtchn, irq;
-+
-+ spin_lock(&irq_mapping_update_lock);
-+
-+ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
-+ if ((irq = find_unbound_irq()) < 0)
-+ goto out;
-+
-+ bind_virq.virq = virq;
-+ bind_virq.vcpu = cpu;
-+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
-+ &bind_virq) != 0)
-+ BUG();
-+ evtchn = bind_virq.port;
-+
-+ evtchn_to_irq[evtchn] = irq;
-+ irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
-+
-+ per_cpu(virq_to_irq, cpu)[virq] = irq;
-+
-+ bind_evtchn_to_cpu(evtchn, cpu);
-+ }
-+
-+ irq_bindcount[irq]++;
-+
-+ out:
-+ spin_unlock(&irq_mapping_update_lock);
-+ return irq;
-+}
-+
-+static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
-+{
-+ struct evtchn_bind_ipi bind_ipi;
-+ int evtchn, irq;
-+
-+ spin_lock(&irq_mapping_update_lock);
-+
-+ if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
-+ if ((irq = find_unbound_irq()) < 0)
-+ goto out;
-+
-+ bind_ipi.vcpu = cpu;
-+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
-+ &bind_ipi) != 0)
-+ BUG();
-+ evtchn = bind_ipi.port;
-+
-+ evtchn_to_irq[evtchn] = irq;
-+ irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-+
-+ per_cpu(ipi_to_irq, cpu)[ipi] = irq;
-+
-+ bind_evtchn_to_cpu(evtchn, cpu);
-+ }
-+
-+ irq_bindcount[irq]++;
-+
-+ out:
-+ spin_unlock(&irq_mapping_update_lock);
-+ return irq;
-+}
-+
-+static void unbind_from_irq(unsigned int irq)
-+{
-+ struct evtchn_close close;
-+ int cpu, evtchn = evtchn_from_irq(irq);
-+
-+ spin_lock(&irq_mapping_update_lock);
-+
-+ if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
-+ close.port = evtchn;
-+ if ((type_from_irq(irq) != IRQT_CALLER_PORT) &&
-+ HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
-+ BUG();
-+
-+ switch (type_from_irq(irq)) {
-+ case IRQT_VIRQ:
-+ per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
-+ [index_from_irq(irq)] = -1;
-+ break;
-+ case IRQT_IPI:
-+ per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
-+ [index_from_irq(irq)] = -1;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ /* Closed ports are implicitly re-bound to VCPU0. */
-+ bind_evtchn_to_cpu(evtchn, 0);
-+
-+ evtchn_to_irq[evtchn] = -1;
-+ irq_info[irq] = IRQ_UNBOUND;
-+
-+ /* Zap stats across IRQ changes of use. */
-+ for_each_possible_cpu(cpu)
-+ kstat_cpu(cpu).irqs[irq] = 0;
-+ }
-+
-+ spin_unlock(&irq_mapping_update_lock);
-+}
-+
-+int bind_caller_port_to_irqhandler(
-+ unsigned int caller_port,
-+ irq_handler_t handler,
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id)
-+{
-+ int irq, retval;
-+
-+ irq = bind_caller_port_to_irq(caller_port);
-+ if (irq < 0)
-+ return irq;
-+
-+ retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+ if (retval != 0) {
-+ unbind_from_irq(irq);
-+ return retval;
-+ }
-+
-+ return irq;
-+}
-+EXPORT_SYMBOL_GPL(bind_caller_port_to_irqhandler);
-+
-+int bind_listening_port_to_irqhandler(
-+ unsigned int remote_domain,
-+ irq_handler_t handler,
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id)
-+{
-+ int irq, retval;
-+
-+ irq = bind_listening_port_to_irq(remote_domain);
-+ if (irq < 0)
-+ return irq;
-+
-+ retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+ if (retval != 0) {
-+ unbind_from_irq(irq);
-+ return retval;
-+ }
-+
-+ return irq;
-+}
-+EXPORT_SYMBOL_GPL(bind_listening_port_to_irqhandler);
-+
-+int bind_interdomain_evtchn_to_irqhandler(
-+ unsigned int remote_domain,
-+ unsigned int remote_port,
-+ irq_handler_t handler,
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id)
-+{
-+ int irq, retval;
-+
-+ irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
-+ if (irq < 0)
-+ return irq;
-+
-+ retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+ if (retval != 0) {
-+ unbind_from_irq(irq);
-+ return retval;
-+ }
-+
-+ return irq;
-+}
-+EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
-+
-+int bind_virq_to_irqhandler(
-+ unsigned int virq,
-+ unsigned int cpu,
-+ irq_handler_t handler,
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id)
-+{
-+ int irq, retval;
-+
-+ irq = bind_virq_to_irq(virq, cpu);
-+ if (irq < 0)
-+ return irq;
-+
-+ retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+ if (retval != 0) {
-+ unbind_from_irq(irq);
-+ return retval;
-+ }
-+
-+ return irq;
-+}
-+EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
-+
-+int bind_ipi_to_irqhandler(
-+ unsigned int ipi,
-+ unsigned int cpu,
-+ irq_handler_t handler,
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id)
-+{
-+ int irq, retval;
-+
-+ irq = bind_ipi_to_irq(ipi, cpu);
-+ if (irq < 0)
-+ return irq;
-+
-+ retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+ if (retval != 0) {
-+ unbind_from_irq(irq);
-+ return retval;
-+ }
-+
-+ return irq;
-+}
-+EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);
-+
-+void unbind_from_irqhandler(unsigned int irq, void *dev_id)
-+{
-+ free_irq(irq, dev_id);
-+ unbind_from_irq(irq);
-+}
-+EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
-+
-+#ifdef CONFIG_SMP
-+/* Rebind an evtchn so that it gets delivered to a specific cpu */
-+static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
-+{
-+ struct evtchn_bind_vcpu bind_vcpu;
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (!VALID_EVTCHN(evtchn))
-+ return;
-+
-+ /* Send future instances of this interrupt to other vcpu. */
-+ bind_vcpu.port = evtchn;
-+ bind_vcpu.vcpu = tcpu;
-+
-+ /*
-+ * If this fails, it usually just indicates that we're dealing with a
-+ * virq or IPI channel, which doesn't actually need to be rebound. Ignore
-+ * it, but don't do the xenlinux-level rebind in that case.
-+ */
-+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
-+ bind_evtchn_to_cpu(evtchn, tcpu);
-+}
-+
-+static void set_affinity_irq(unsigned irq, cpumask_t dest)
-+{
-+ unsigned tcpu = first_cpu(dest);
-+ rebind_irq_to_cpu(irq, tcpu);
-+}
-+#endif
-+
-+int resend_irq_on_evtchn(unsigned int irq)
-+{
-+ int masked, evtchn = evtchn_from_irq(irq);
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+
-+ if (!VALID_EVTCHN(evtchn))
-+ return 1;
-+
-+ masked = synch_test_and_set_bit(evtchn, s->evtchn_mask);
-+ synch_set_bit(evtchn, s->evtchn_pending);
-+ if (!masked)
-+ unmask_evtchn(evtchn);
-+
-+ return 1;
-+}
-+
-+/*
-+ * Interface to generic handling in irq.c
-+ */
-+
-+static unsigned int startup_dynirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn))
-+ unmask_evtchn(evtchn);
-+ return 0;
-+}
-+
-+static void unmask_dynirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn))
-+ unmask_evtchn(evtchn);
-+}
-+
-+static void mask_dynirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn))
-+ mask_evtchn(evtchn);
-+}
-+
-+static void ack_dynirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ move_native_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn)) {
-+ mask_evtchn(evtchn);
-+ clear_evtchn(evtchn);
-+ }
-+}
-+
-+static void end_dynirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
-+ unmask_evtchn(evtchn);
-+}
-+
-+static struct irq_chip dynirq_chip = {
-+ .name = "Dynamic-irq",
-+ .startup = startup_dynirq,
-+ .mask = mask_dynirq,
-+ .unmask = unmask_dynirq,
-+ .mask_ack = ack_dynirq,
-+ .ack = ack_dynirq,
-+ .end = end_dynirq,
-+#ifdef CONFIG_SMP
-+ .set_affinity = set_affinity_irq,
-+#endif
-+ .retrigger = resend_irq_on_evtchn,
-+};
-+
-+static inline void pirq_unmask_notify(int pirq)
-+{
-+ struct physdev_eoi eoi = { .irq = pirq };
-+ if (unlikely(test_bit(pirq, pirq_needs_eoi)))
-+ (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
-+}
-+
-+static inline void pirq_query_unmask(int pirq)
-+{
-+ struct physdev_irq_status_query irq_status;
-+ irq_status.irq = pirq;
-+ (void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
-+ clear_bit(pirq, pirq_needs_eoi);
-+ if (irq_status.flags & XENIRQSTAT_needs_eoi)
-+ set_bit(pirq, pirq_needs_eoi);
-+}
-+
-+/*
-+ * On startup, if there is no action associated with the IRQ then we are
-+ * probing. In this case we should not share with others as it will confuse us.
-+ */
-+#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
-+
-+static unsigned int startup_pirq(unsigned int irq)
-+{
-+ struct evtchn_bind_pirq bind_pirq;
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn))
-+ goto out;
-+
-+ bind_pirq.pirq = irq;
-+ /* NB. We are happy to share unless we are probing. */
-+ bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
-+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
-+ if (!probing_irq(irq))
-+ printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
-+ irq);
-+ return 0;
-+ }
-+ evtchn = bind_pirq.port;
-+
-+ pirq_query_unmask(irq_to_pirq(irq));
-+
-+ evtchn_to_irq[evtchn] = irq;
-+ bind_evtchn_to_cpu(evtchn, 0);
-+ irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
-+
-+ out:
-+ unmask_evtchn(evtchn);
-+ pirq_unmask_notify(irq_to_pirq(irq));
-+
-+ return 0;
-+}
-+
-+static void shutdown_pirq(unsigned int irq)
-+{
-+ struct evtchn_close close;
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (!VALID_EVTCHN(evtchn))
-+ return;
-+
-+ mask_evtchn(evtchn);
-+
-+ close.port = evtchn;
-+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
-+ BUG();
-+
-+ bind_evtchn_to_cpu(evtchn, 0);
-+ evtchn_to_irq[evtchn] = -1;
-+ irq_info[irq] = IRQ_UNBOUND;
-+}
-+
-+static void unmask_pirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn)) {
-+ unmask_evtchn(evtchn);
-+ pirq_unmask_notify(irq_to_pirq(irq));
-+ }
-+}
-+
-+static void mask_pirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn))
-+ mask_evtchn(evtchn);
-+}
-+
-+static void ack_pirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ move_native_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn)) {
-+ mask_evtchn(evtchn);
-+ clear_evtchn(evtchn);
-+ }
-+}
-+
-+static void end_pirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
-+ unmask_evtchn(evtchn);
-+ pirq_unmask_notify(irq_to_pirq(irq));
-+ }
-+}
-+
-+static struct irq_chip pirq_chip = {
-+ .name = "Phys-irq",
-+ .typename = "Phys-irq",
-+ .startup = startup_pirq,
-+ .shutdown = shutdown_pirq,
-+ .mask = mask_pirq,
-+ .unmask = unmask_pirq,
-+ .mask_ack = ack_pirq,
-+ .ack = ack_pirq,
-+ .end = end_pirq,
-+#ifdef CONFIG_SMP
-+ .set_affinity = set_affinity_irq,
-+#endif
-+ .retrigger = resend_irq_on_evtchn,
-+};
-+
-+int irq_ignore_unhandled(unsigned int irq)
-+{
-+ struct physdev_irq_status_query irq_status = { .irq = irq };
-+
-+ if (!is_running_on_xen())
-+ return 0;
-+
-+ (void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
-+ return !!(irq_status.flags & XENIRQSTAT_shared);
-+}
-+
-+void notify_remote_via_irq(int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn))
-+ notify_remote_via_evtchn(evtchn);
-+}
-+EXPORT_SYMBOL_GPL(notify_remote_via_irq);
-+
-+int irq_to_evtchn_port(int irq)
-+{
-+ return evtchn_from_irq(irq);
-+}
-+EXPORT_SYMBOL_GPL(irq_to_evtchn_port);
-+
-+void mask_evtchn(int port)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ synch_set_bit(port, s->evtchn_mask);
-+}
-+EXPORT_SYMBOL_GPL(mask_evtchn);
-+
-+void unmask_evtchn(int port)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ unsigned int cpu = smp_processor_id();
-+ vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
-+
-+ BUG_ON(!irqs_disabled());
-+
-+ /* Slow path (hypercall) if this is a non-local port. */
-+ if (unlikely(cpu != cpu_from_evtchn(port))) {
-+ struct evtchn_unmask unmask = { .port = port };
-+ (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
-+ return;
-+ }
-+
-+ synch_clear_bit(port, s->evtchn_mask);
-+
-+ /* Did we miss an interrupt 'edge'? Re-fire if so. */
-+ if (synch_test_bit(port, s->evtchn_pending) &&
-+ !synch_test_and_set_bit(port / BITS_PER_LONG,
-+ &vcpu_info->evtchn_pending_sel))
-+ vcpu_info->evtchn_upcall_pending = 1;
-+}
-+EXPORT_SYMBOL_GPL(unmask_evtchn);
-+
-+static void restore_cpu_virqs(int cpu)
-+{
-+ struct evtchn_bind_virq bind_virq;
-+ int virq, irq, evtchn;
-+
-+ for (virq = 0; virq < NR_VIRQS; virq++) {
-+ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
-+ continue;
-+
-+ BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
-+
-+ /* Get a new binding from Xen. */
-+ bind_virq.virq = virq;
-+ bind_virq.vcpu = cpu;
-+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
-+ &bind_virq) != 0)
-+ BUG();
-+ evtchn = bind_virq.port;
-+
-+ /* Record the new mapping. */
-+ evtchn_to_irq[evtchn] = irq;
-+ irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
-+ bind_evtchn_to_cpu(evtchn, cpu);
-+
-+ /* Ready for use. */
-+ unmask_evtchn(evtchn);
-+ }
-+}
-+
-+static void restore_cpu_ipis(int cpu)
-+{
-+ struct evtchn_bind_ipi bind_ipi;
-+ int ipi, irq, evtchn;
-+
-+ for (ipi = 0; ipi < NR_IPIS; ipi++) {
-+ if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
-+ continue;
-+
-+ BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
-+
-+ /* Get a new binding from Xen. */
-+ bind_ipi.vcpu = cpu;
-+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
-+ &bind_ipi) != 0)
-+ BUG();
-+ evtchn = bind_ipi.port;
-+
-+ /* Record the new mapping. */
-+ evtchn_to_irq[evtchn] = irq;
-+ irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-+ bind_evtchn_to_cpu(evtchn, cpu);
-+
-+ /* Ready for use. */
-+ unmask_evtchn(evtchn);
-+
-+ }
-+}
-+
-+void irq_resume(void)
-+{
-+ int cpu, pirq, irq, evtchn;
-+
-+ init_evtchn_cpu_bindings();
-+
-+ /* New event-channel space is not 'live' yet. */
-+ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
-+ mask_evtchn(evtchn);
-+
-+ /* Check that no PIRQs are still bound. */
-+ for (pirq = 0; pirq < NR_PIRQS; pirq++)
-+ BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);
-+
-+ /* No IRQ <-> event-channel mappings. */
-+ for (irq = 0; irq < NR_IRQS; irq++)
-+ irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
-+ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
-+ evtchn_to_irq[evtchn] = -1;
-+
-+ for_each_possible_cpu(cpu) {
-+ restore_cpu_virqs(cpu);
-+ restore_cpu_ipis(cpu);
-+ }
-+
-+}
-+
-+void __init xen_init_IRQ(void)
-+{
-+ int i;
-+
-+ init_evtchn_cpu_bindings();
-+
-+ /* No event channels are 'live' right now. */
-+ for (i = 0; i < NR_EVENT_CHANNELS; i++)
-+ mask_evtchn(i);
-+
-+ /* No IRQ -> event-channel mappings. */
-+ for (i = 0; i < NR_IRQS; i++)
-+ irq_info[i] = IRQ_UNBOUND;
-+
-+ /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
-+ for (i = 0; i < NR_DYNIRQS; i++) {
-+ irq_bindcount[dynirq_to_irq(i)] = 0;
-+
-+ irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
-+ irq_desc[dynirq_to_irq(i)].action = NULL;
-+ irq_desc[dynirq_to_irq(i)].depth = 1;
-+ set_irq_chip_and_handler_name(dynirq_to_irq(i), &dynirq_chip,
-+ handle_level_irq, "level");
-+ }
-+
-+ /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
-+ for (i = 0; i < NR_PIRQS; i++) {
-+ irq_bindcount[pirq_to_irq(i)] = 1;
-+
-+#ifdef RTC_IRQ
-+ /* If not domain 0, force our RTC driver to fail its probe. */
-+ if ((i == RTC_IRQ) && !is_initial_xendomain())
-+ continue;
-+#endif
-+
-+ irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
-+ irq_desc[pirq_to_irq(i)].action = NULL;
-+ irq_desc[pirq_to_irq(i)].depth = 1;
-+ set_irq_chip_and_handler_name(pirq_to_irq(i), &pirq_chip,
-+ handle_level_irq, "level");
-+ }
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/core/features.c ubuntu-gutsy-xen/drivers/xen/core/features.c
---- ubuntu-gutsy/drivers/xen/core/features.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/core/features.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,34 @@
-+/******************************************************************************
-+ * features.c
-+ *
-+ * Xen feature flags.
-+ *
-+ * Copyright (c) 2006, Ian Campbell, XenSource Inc.
-+ */
-+#include <linux/types.h>
-+#include <linux/cache.h>
-+#include <linux/module.h>
-+#include <asm/hypervisor.h>
-+#include <xen/features.h>
-+
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
-+u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly;
-+/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
-+EXPORT_SYMBOL(xen_features);
-+
-+void setup_xen_features(void)
-+{
-+ xen_feature_info_t fi;
-+ int i, j;
-+
-+ for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) {
-+ fi.submap_idx = i;
-+ if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
-+ break;
-+ for (j=0; j<32; j++)
-+ xen_features[i*32+j] = !!(fi.submap & 1<<j);
-+ }
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/core/firmware.c ubuntu-gutsy-xen/drivers/xen/core/firmware.c
---- ubuntu-gutsy/drivers/xen/core/firmware.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/core/firmware.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,74 @@
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/init.h>
-+#include <linux/edd.h>
-+#include <video/edid.h>
-+#include <xen/interface/platform.h>
-+#include <asm/hypervisor.h>
-+
-+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
-+void __init copy_edd(void)
-+{
-+ int ret;
-+ struct xen_platform_op op;
-+
-+ if (!is_initial_xendomain())
-+ return;
-+
-+ op.cmd = XENPF_firmware_info;
-+
-+ op.u.firmware_info.type = XEN_FW_DISK_INFO;
-+ for (op.u.firmware_info.index = 0;
-+ edd.edd_info_nr < EDDMAXNR;
-+ op.u.firmware_info.index++) {
-+ struct edd_info *info = edd.edd_info + edd.edd_info_nr;
-+
-+ info->params.length = sizeof(info->params);
-+ set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
-+ &info->params);
-+ ret = HYPERVISOR_platform_op(&op);
-+ if (ret)
-+ break;
-+
-+#define C(x) info->x = op.u.firmware_info.u.disk_info.x
-+ C(device);
-+ C(version);
-+ C(interface_support);
-+ C(legacy_max_cylinder);
-+ C(legacy_max_head);
-+ C(legacy_sectors_per_track);
-+#undef C
-+
-+ edd.edd_info_nr++;
-+ }
-+
-+ op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
-+ for (op.u.firmware_info.index = 0;
-+ edd.mbr_signature_nr < EDD_MBR_SIG_MAX;
-+ op.u.firmware_info.index++) {
-+ ret = HYPERVISOR_platform_op(&op);
-+ if (ret)
-+ break;
-+ edd.mbr_signature[edd.mbr_signature_nr++] =
-+ op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
-+ }
-+}
-+#endif
-+
-+void __init copy_edid(void)
-+{
-+#if defined(CONFIG_FIRMWARE_EDID) && defined(CONFIG_X86)
-+ struct xen_platform_op op;
-+
-+ if (!is_initial_xendomain())
-+ return;
-+
-+ op.cmd = XENPF_firmware_info;
-+ op.u.firmware_info.index = 0;
-+ op.u.firmware_info.type = XEN_FW_VBEDDC_INFO;
-+ set_xen_guest_handle(op.u.firmware_info.u.vbeddc_info.edid,
-+ edid_info.dummy);
-+ if (HYPERVISOR_platform_op(&op) != 0)
-+ memset(edid_info.dummy, 0x13, sizeof(edid_info.dummy));
-+#endif
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/core/gnttab.c ubuntu-gutsy-xen/drivers/xen/core/gnttab.c
---- ubuntu-gutsy/drivers/xen/core/gnttab.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/core/gnttab.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,755 @@
-+/******************************************************************************
-+ * gnttab.c
-+ *
-+ * Granting foreign access to our memory reservation.
-+ *
-+ * Copyright (c) 2005-2006, Christopher Clark
-+ * Copyright (c) 2004-2005, K A Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/seqlock.h>
-+#include <xen/interface/xen.h>
-+#include <xen/gnttab.h>
-+#include <asm/pgtable.h>
-+#include <asm/uaccess.h>
-+#include <asm/synch_bitops.h>
-+#include <asm/io.h>
-+#include <xen/interface/memory.h>
-+#include <xen/driver_util.h>
-+#include <asm/gnttab_dma.h>
-+
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
-+/* External tools reserve the first few grant table entries. */
-+#define NR_RESERVED_ENTRIES 8
-+#define GNTTAB_LIST_END 0xffffffff
-+#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t))
-+
-+static grant_ref_t **gnttab_list;
-+static unsigned int nr_grant_frames;
-+static unsigned int boot_max_nr_grant_frames;
-+static int gnttab_free_count;
-+static grant_ref_t gnttab_free_head;
-+static DEFINE_SPINLOCK(gnttab_list_lock);
-+
-+static struct grant_entry *shared;
-+
-+static struct gnttab_free_callback *gnttab_free_callback_list;
-+
-+static DEFINE_SEQLOCK(gnttab_dma_lock);
-+
-+static int gnttab_expand(unsigned int req_entries);
-+
-+#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
-+#define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP])
-+
-+static int get_free_entries(int count)
-+{
-+ unsigned long flags;
-+ int ref, rc;
-+ grant_ref_t head;
-+
-+ spin_lock_irqsave(&gnttab_list_lock, flags);
-+
-+ if ((gnttab_free_count < count) &&
-+ ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
-+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+ return rc;
-+ }
-+
-+ ref = head = gnttab_free_head;
-+ gnttab_free_count -= count;
-+ while (count-- > 1)
-+ head = gnttab_entry(head);
-+ gnttab_free_head = gnttab_entry(head);
-+ gnttab_entry(head) = GNTTAB_LIST_END;
-+
-+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+
-+ return ref;
-+}
-+
-+#define get_free_entry() get_free_entries(1)
-+
-+static void do_free_callbacks(void)
-+{
-+ struct gnttab_free_callback *callback, *next;
-+
-+ callback = gnttab_free_callback_list;
-+ gnttab_free_callback_list = NULL;
-+
-+ while (callback != NULL) {
-+ next = callback->next;
-+ if (gnttab_free_count >= callback->count) {
-+ callback->next = NULL;
-+ callback->fn(callback->arg);
-+ } else {
-+ callback->next = gnttab_free_callback_list;
-+ gnttab_free_callback_list = callback;
-+ }
-+ callback = next;
-+ }
-+}
-+
-+static inline void check_free_callbacks(void)
-+{
-+ if (unlikely(gnttab_free_callback_list))
-+ do_free_callbacks();
-+}
-+
-+static void put_free_entry(grant_ref_t ref)
-+{
-+ unsigned long flags;
-+ spin_lock_irqsave(&gnttab_list_lock, flags);
-+ gnttab_entry(ref) = gnttab_free_head;
-+ gnttab_free_head = ref;
-+ gnttab_free_count++;
-+ check_free_callbacks();
-+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+}
-+
-+/*
-+ * Public grant-issuing interface functions
-+ */
-+
-+int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
-+ int readonly)
-+{
-+ int ref;
-+
-+ if (unlikely((ref = get_free_entry()) < 0))
-+ return -ENOSPC;
-+
-+ shared[ref].frame = frame;
-+ shared[ref].domid = domid;
-+ wmb();
-+ shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
-+
-+ return ref;
-+}
-+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
-+
-+void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
-+ unsigned long frame, int readonly)
-+{
-+ shared[ref].frame = frame;
-+ shared[ref].domid = domid;
-+ wmb();
-+ shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
-+}
-+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
-+
-+
-+int gnttab_query_foreign_access(grant_ref_t ref)
-+{
-+ u16 nflags;
-+
-+ nflags = shared[ref].flags;
-+
-+ return (nflags & (GTF_reading|GTF_writing));
-+}
-+EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
-+
-+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
-+{
-+ u16 flags, nflags;
-+
-+ nflags = shared[ref].flags;
-+ do {
-+ if ((flags = nflags) & (GTF_reading|GTF_writing)) {
-+ printk(KERN_ALERT "WARNING: g.e. still in use!\n");
-+ return 0;
-+ }
-+ } while ((nflags = synch_cmpxchg_subword(&shared[ref].flags, flags, 0)) !=
-+ flags);
-+
-+ return 1;
-+}
-+EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
-+
-+void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
-+ unsigned long page)
-+{
-+ if (gnttab_end_foreign_access_ref(ref, readonly)) {
-+ put_free_entry(ref);
-+ if (page != 0)
-+ free_page(page);
-+ } else {
-+ /* XXX This needs to be fixed so that the ref and page are
-+ placed on a list to be freed up later. */
-+ printk(KERN_WARNING
-+ "WARNING: leaking g.e. and page still in use!\n");
-+ }
-+}
-+EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
-+
-+int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
-+{
-+ int ref;
-+
-+ if (unlikely((ref = get_free_entry()) < 0))
-+ return -ENOSPC;
-+ gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
-+
-+ return ref;
-+}
-+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
-+
-+void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
-+ unsigned long pfn)
-+{
-+ shared[ref].frame = pfn;
-+ shared[ref].domid = domid;
-+ wmb();
-+ shared[ref].flags = GTF_accept_transfer;
-+}
-+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
-+
-+unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
-+{
-+ unsigned long frame;
-+ u16 flags;
-+
-+ /*
-+ * If a transfer has not yet started, try to reclaim the grant
-+ * reference and return failure (== 0).
-+ */
-+ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
-+ if (synch_cmpxchg_subword(&shared[ref].flags, flags, 0) == flags)
-+ return 0;
-+ cpu_relax();
-+ }
-+
-+ /* If a transfer is in progress then wait until it is completed. */
-+ while (!(flags & GTF_transfer_completed)) {
-+ flags = shared[ref].flags;
-+ cpu_relax();
-+ }
-+
-+ /* Read the frame number /after/ reading completion status. */
-+ rmb();
-+ frame = shared[ref].frame;
-+ BUG_ON(frame == 0);
-+
-+ return frame;
-+}
-+EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
-+
-+unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
-+{
-+ unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
-+ put_free_entry(ref);
-+ return frame;
-+}
-+EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
-+
-+void gnttab_free_grant_reference(grant_ref_t ref)
-+{
-+ put_free_entry(ref);
-+}
-+EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
-+
-+void gnttab_free_grant_references(grant_ref_t head)
-+{
-+ grant_ref_t ref;
-+ unsigned long flags;
-+ int count = 1;
-+ if (head == GNTTAB_LIST_END)
-+ return;
-+ spin_lock_irqsave(&gnttab_list_lock, flags);
-+ ref = head;
-+ while (gnttab_entry(ref) != GNTTAB_LIST_END) {
-+ ref = gnttab_entry(ref);
-+ count++;
-+ }
-+ gnttab_entry(ref) = gnttab_free_head;
-+ gnttab_free_head = head;
-+ gnttab_free_count += count;
-+ check_free_callbacks();
-+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+}
-+EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
-+
-+int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
-+{
-+ int h = get_free_entries(count);
-+
-+ if (h < 0)
-+ return -ENOSPC;
-+
-+ *head = h;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
-+
-+int gnttab_empty_grant_references(const grant_ref_t *private_head)
-+{
-+ return (*private_head == GNTTAB_LIST_END);
-+}
-+EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
-+
-+int gnttab_claim_grant_reference(grant_ref_t *private_head)
-+{
-+ grant_ref_t g = *private_head;
-+ if (unlikely(g == GNTTAB_LIST_END))
-+ return -ENOSPC;
-+ *private_head = gnttab_entry(g);
-+ return g;
-+}
-+EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
-+
-+void gnttab_release_grant_reference(grant_ref_t *private_head,
-+ grant_ref_t release)
-+{
-+ gnttab_entry(release) = *private_head;
-+ *private_head = release;
-+}
-+EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
-+
-+void gnttab_request_free_callback(struct gnttab_free_callback *callback,
-+ void (*fn)(void *), void *arg, u16 count)
-+{
-+ unsigned long flags;
-+ spin_lock_irqsave(&gnttab_list_lock, flags);
-+ if (callback->next)
-+ goto out;
-+ callback->fn = fn;
-+ callback->arg = arg;
-+ callback->count = count;
-+ callback->next = gnttab_free_callback_list;
-+ gnttab_free_callback_list = callback;
-+ check_free_callbacks();
-+out:
-+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+}
-+EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
-+
-+void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
-+{
-+ struct gnttab_free_callback **pcb;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&gnttab_list_lock, flags);
-+ for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
-+ if (*pcb == callback) {
-+ *pcb = callback->next;
-+ break;
-+ }
-+ }
-+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+}
-+EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
-+
-+static int grow_gnttab_list(unsigned int more_frames)
-+{
-+ unsigned int new_nr_grant_frames, extra_entries, i;
-+
-+ new_nr_grant_frames = nr_grant_frames + more_frames;
-+ extra_entries = more_frames * GREFS_PER_GRANT_FRAME;
-+
-+ for (i = nr_grant_frames; i < new_nr_grant_frames; i++)
-+ {
-+ gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
-+ if (!gnttab_list[i])
-+ goto grow_nomem;
-+ }
-+
-+
-+ for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
-+ i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
-+ gnttab_entry(i) = i + 1;
-+
-+ gnttab_entry(i) = gnttab_free_head;
-+ gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
-+ gnttab_free_count += extra_entries;
-+
-+ nr_grant_frames = new_nr_grant_frames;
-+
-+ check_free_callbacks();
-+
-+ return 0;
-+
-+grow_nomem:
-+ for ( ; i >= nr_grant_frames; i--)
-+ free_page((unsigned long) gnttab_list[i]);
-+ return -ENOMEM;
-+}
-+
-+static unsigned int __max_nr_grant_frames(void)
-+{
-+ struct gnttab_query_size query;
-+ int rc;
-+
-+ query.dom = DOMID_SELF;
-+
-+ rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
-+ if ((rc < 0) || (query.status != GNTST_okay))
-+ return 4; /* Legacy max supported number of frames */
-+
-+ return query.max_nr_frames;
-+}
-+
-+static inline unsigned int max_nr_grant_frames(void)
-+{
-+ unsigned int xen_max = __max_nr_grant_frames();
-+
-+ if (xen_max > boot_max_nr_grant_frames)
-+ return boot_max_nr_grant_frames;
-+ return xen_max;
-+}
-+
-+#ifdef CONFIG_XEN
-+
-+#ifndef __ia64__
-+static int map_pte_fn(pte_t *pte, struct page *pmd_page,
-+ unsigned long addr, void *data)
-+{
-+ unsigned long **frames = (unsigned long **)data;
-+
-+ set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL));
-+ (*frames)++;
-+ return 0;
-+}
-+
-+static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
-+ unsigned long addr, void *data)
-+{
-+
-+ set_pte_at(&init_mm, addr, pte, __pte(0));
-+ return 0;
-+}
-+#endif
-+
-+static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
-+{
-+ struct gnttab_setup_table setup;
-+ unsigned long *frames;
-+ unsigned int nr_gframes = end_idx + 1;
-+ int rc;
-+
-+ frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
-+ if (!frames)
-+ return -ENOMEM;
-+
-+ setup.dom = DOMID_SELF;
-+ setup.nr_frames = nr_gframes;
-+ set_xen_guest_handle(setup.frame_list, frames);
-+
-+ rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
-+ if (rc == -ENOSYS) {
-+ kfree(frames);
-+ return -ENOSYS;
-+ }
-+
-+ BUG_ON(rc || setup.status);
-+
-+#ifndef __ia64__
-+ if (shared == NULL) {
-+ struct vm_struct *area;
-+ area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames());
-+ BUG_ON(area == NULL);
-+ shared = area->addr;
-+ }
-+ rc = apply_to_page_range(&init_mm, (unsigned long)shared,
-+ PAGE_SIZE * nr_gframes,
-+ map_pte_fn, &frames);
-+ BUG_ON(rc);
-+ frames -= nr_gframes; /* adjust after map_pte_fn() */
-+#else
-+ shared = __va(frames[0] << PAGE_SHIFT);
-+#endif
-+
-+ kfree(frames);
-+
-+ return 0;
-+}
-+
-+static void gnttab_page_free(struct page *page)
-+{
-+ ClearPageForeign(page);
-+ gnttab_reset_grant_page(page);
-+ put_page(page);
-+}
-+
-+/*
-+ * Must not be called with IRQs off. This should only be used on the
-+ * slow path.
-+ *
-+ * Copy a foreign granted page to local memory.
-+ */
-+int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep)
-+{
-+ struct gnttab_unmap_and_replace unmap;
-+ mmu_update_t mmu;
-+ struct page *page;
-+ struct page *new_page;
-+ void *new_addr;
-+ void *addr;
-+ paddr_t pfn;
-+ maddr_t mfn;
-+ maddr_t new_mfn;
-+ int err;
-+
-+ page = *pagep;
-+ if (!get_page_unless_zero(page))
-+ return -ENOENT;
-+
-+ err = -ENOMEM;
-+ new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
-+ if (!new_page)
-+ goto out;
-+
-+ new_addr = page_address(new_page);
-+ addr = page_address(page);
-+ memcpy(new_addr, addr, PAGE_SIZE);
-+
-+ pfn = page_to_pfn(page);
-+ mfn = pfn_to_mfn(pfn);
-+ new_mfn = virt_to_mfn(new_addr);
-+
-+ write_seqlock(&gnttab_dma_lock);
-+
-+ /* Make seq visible before checking page_mapped. */
-+ smp_mb();
-+
-+ /* Has the page been DMA-mapped? */
-+ if (unlikely(page_mapped(page))) {
-+ write_sequnlock(&gnttab_dma_lock);
-+ put_page(new_page);
-+ err = -EBUSY;
-+ goto out;
-+ }
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap))
-+ set_phys_to_machine(pfn, new_mfn);
-+
-+ gnttab_set_replace_op(&unmap, (unsigned long)addr,
-+ (unsigned long)new_addr, ref);
-+
-+ err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
-+ &unmap, 1);
-+ BUG_ON(err);
-+ BUG_ON(unmap.status);
-+
-+ write_sequnlock(&gnttab_dma_lock);
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);
-+
-+ mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-+ mmu.val = pfn;
-+ err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF);
-+ BUG_ON(err);
-+ }
-+
-+ new_page->mapping = page->mapping;
-+ new_page->index = page->index;
-+ set_bit(PG_foreign, &new_page->flags);
-+ *pagep = new_page;
-+
-+ SetPageForeign(page, gnttab_page_free);
-+ page->mapping = NULL;
-+
-+out:
-+ put_page(page);
-+ return err;
-+}
-+EXPORT_SYMBOL(gnttab_copy_grant_page);
-+
-+/*
-+ * Keep track of foreign pages marked as PageForeign so that we don't
-+ * return them to the remote domain prematurely.
-+ *
-+ * PageForeign pages are pinned down by increasing their mapcount.
-+ *
-+ * All other pages are simply returned as is.
-+ */
-+void __gnttab_dma_map_page(struct page *page)
-+{
-+ unsigned int seq;
-+
-+ if (!is_running_on_xen() || !PageForeign(page))
-+ return;
-+
-+ do {
-+ seq = read_seqbegin(&gnttab_dma_lock);
-+
-+ if (gnttab_dma_local_pfn(page))
-+ break;
-+
-+ atomic_set(&page->_mapcount, 0);
-+
-+ /* Make _mapcount visible before read_seqretry. */
-+ smp_mb();
-+ } while (unlikely(read_seqretry(&gnttab_dma_lock, seq)));
-+}
-+
-+int gnttab_resume(void)
-+{
-+ if (max_nr_grant_frames() < nr_grant_frames)
-+ return -ENOSYS;
-+ return gnttab_map(0, nr_grant_frames - 1);
-+}
-+
-+int gnttab_suspend(void)
-+{
-+#ifndef __ia64__
-+ apply_to_page_range(&init_mm, (unsigned long)shared,
-+ PAGE_SIZE * nr_grant_frames,
-+ unmap_pte_fn, NULL);
-+#endif
-+ return 0;
-+}
-+
-+#else /* !CONFIG_XEN */
-+
-+#include <platform-pci.h>
-+
-+static unsigned long resume_frames;
-+
-+static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
-+{
-+ struct xen_add_to_physmap xatp;
-+ unsigned int i = end_idx;
-+
-+ /* Loop backwards, so that the first hypercall has the largest index,
-+ * ensuring that the table will grow only once.
-+ */
-+ do {
-+ xatp.domid = DOMID_SELF;
-+ xatp.idx = i;
-+ xatp.space = XENMAPSPACE_grant_table;
-+ xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i;
-+ if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
-+ BUG();
-+ } while (i-- > start_idx);
-+
-+ return 0;
-+}
-+
-+int gnttab_resume(void)
-+{
-+ unsigned int max_nr_gframes, nr_gframes;
-+
-+ nr_gframes = nr_grant_frames;
-+ max_nr_gframes = max_nr_grant_frames();
-+ if (max_nr_gframes < nr_gframes)
-+ return -ENOSYS;
-+
-+ if (!resume_frames) {
-+ resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
-+ shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes);
-+ if (shared == NULL) {
-+ printk("error to ioremap gnttab share frames\n");
-+ return -1;
-+ }
-+ }
-+
-+ gnttab_map(0, nr_gframes - 1);
-+
-+ return 0;
-+}
-+
-+#endif /* !CONFIG_XEN */
-+
-+static int gnttab_expand(unsigned int req_entries)
-+{
-+ int rc;
-+ unsigned int cur, extra;
-+
-+ cur = nr_grant_frames;
-+ extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
-+ GREFS_PER_GRANT_FRAME);
-+ if (cur + extra > max_nr_grant_frames())
-+ return -ENOSPC;
-+
-+ if ((rc = gnttab_map(cur, cur + extra - 1)) == 0)
-+ rc = grow_gnttab_list(extra);
-+
-+ return rc;
-+}
-+
-+int __devinit gnttab_init(void)
-+{
-+ int i;
-+ unsigned int max_nr_glist_frames;
-+ unsigned int nr_init_grefs;
-+
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ nr_grant_frames = 1;
-+ boot_max_nr_grant_frames = __max_nr_grant_frames();
-+
-+ /* Determine the maximum number of frames required for the
-+ * grant reference free list on the current hypervisor.
-+ */
-+ max_nr_glist_frames = (boot_max_nr_grant_frames *
-+ GREFS_PER_GRANT_FRAME /
-+ (PAGE_SIZE / sizeof(grant_ref_t)));
-+
-+ gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
-+ GFP_KERNEL);
-+ if (gnttab_list == NULL)
-+ return -ENOMEM;
-+
-+ for (i = 0; i < nr_grant_frames; i++) {
-+ gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
-+ if (gnttab_list[i] == NULL)
-+ goto ini_nomem;
-+ }
-+
-+ if (gnttab_resume() < 0)
-+ return -ENODEV;
-+
-+ nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
-+
-+ for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
-+ gnttab_entry(i) = i + 1;
-+
-+ gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
-+ gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
-+ gnttab_free_head = NR_RESERVED_ENTRIES;
-+
-+ return 0;
-+
-+ ini_nomem:
-+ for (i--; i >= 0; i--)
-+ free_page((unsigned long)gnttab_list[i]);
-+ kfree(gnttab_list);
-+ return -ENOMEM;
-+}
-+
-+#ifdef CONFIG_XEN
-+core_initcall(gnttab_init);
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/core/hypervisor_sysfs.c ubuntu-gutsy-xen/drivers/xen/core/hypervisor_sysfs.c
---- ubuntu-gutsy/drivers/xen/core/hypervisor_sysfs.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/core/hypervisor_sysfs.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,56 @@
-+/*
-+ * copyright (c) 2006 IBM Corporation
-+ * Authored by: Mike D. Day <ncmike@us.ibm.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/kobject.h>
-+#include <xen/hypervisor_sysfs.h>
-+
-+static ssize_t hyp_sysfs_show(struct kobject *kobj,
-+ struct attribute *attr,
-+ char *buffer)
-+{
-+ struct hyp_sysfs_attr *hyp_attr;
-+ hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
-+ if (hyp_attr->show)
-+ return hyp_attr->show(hyp_attr, buffer);
-+ return 0;
-+}
-+
-+static ssize_t hyp_sysfs_store(struct kobject *kobj,
-+ struct attribute *attr,
-+ const char *buffer,
-+ size_t len)
-+{
-+ struct hyp_sysfs_attr *hyp_attr;
-+ hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
-+ if (hyp_attr->store)
-+ return hyp_attr->store(hyp_attr, buffer, len);
-+ return 0;
-+}
-+
-+static struct sysfs_ops hyp_sysfs_ops = {
-+ .show = hyp_sysfs_show,
-+ .store = hyp_sysfs_store,
-+};
-+
-+static struct kobj_type hyp_sysfs_kobj_type = {
-+ .sysfs_ops = &hyp_sysfs_ops,
-+};
-+
-+static int __init hypervisor_subsys_init(void)
-+{
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ hypervisor_subsys.kobj.ktype = &hyp_sysfs_kobj_type;
-+ return 0;
-+}
-+
-+device_initcall(hypervisor_subsys_init);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/core/machine_kexec.c ubuntu-gutsy-xen/drivers/xen/core/machine_kexec.c
---- ubuntu-gutsy/drivers/xen/core/machine_kexec.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/core/machine_kexec.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,189 @@
-+/*
-+ * drivers/xen/core/machine_kexec.c
-+ * handle transition of Linux booting another kernel
-+ */
-+
-+#include <linux/kexec.h>
-+#include <xen/interface/kexec.h>
-+#include <linux/mm.h>
-+#include <linux/bootmem.h>
-+
-+extern void machine_kexec_setup_load_arg(xen_kexec_image_t *xki,
-+ struct kimage *image);
-+
-+int xen_max_nr_phys_cpus;
-+struct resource xen_hypervisor_res;
-+struct resource *xen_phys_cpus;
-+
-+void xen_machine_kexec_setup_resources(void)
-+{
-+ xen_kexec_range_t range;
-+ struct resource *res;
-+ int k = 0;
-+
-+ if (!is_initial_xendomain())
-+ return;
-+
-+ /* determine maximum number of physical cpus */
-+
-+ while (1) {
-+ memset(&range, 0, sizeof(range));
-+ range.range = KEXEC_RANGE_MA_CPU;
-+ range.nr = k;
-+
-+ if(HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
-+ break;
-+
-+ k++;
-+ }
-+
-+ if (k == 0)
-+ return;
-+
-+ xen_max_nr_phys_cpus = k;
-+
-+ /* allocate xen_phys_cpus */
-+
-+ xen_phys_cpus = alloc_bootmem_low(k * sizeof(struct resource));
-+ BUG_ON(xen_phys_cpus == NULL);
-+
-+ /* fill in xen_phys_cpus with per-cpu crash note information */
-+
-+ for (k = 0; k < xen_max_nr_phys_cpus; k++) {
-+ memset(&range, 0, sizeof(range));
-+ range.range = KEXEC_RANGE_MA_CPU;
-+ range.nr = k;
-+
-+ if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
-+ goto err;
-+
-+ res = xen_phys_cpus + k;
-+
-+ memset(res, 0, sizeof(*res));
-+ res->name = "Crash note";
-+ res->start = range.start;
-+ res->end = range.start + range.size - 1;
-+ res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
-+ }
-+
-+ /* fill in xen_hypervisor_res with hypervisor machine address range */
-+
-+ memset(&range, 0, sizeof(range));
-+ range.range = KEXEC_RANGE_MA_XEN;
-+
-+ if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
-+ goto err;
-+
-+ xen_hypervisor_res.name = "Hypervisor code and data";
-+ xen_hypervisor_res.start = range.start;
-+ xen_hypervisor_res.end = range.start + range.size - 1;
-+ xen_hypervisor_res.flags = IORESOURCE_BUSY | IORESOURCE_MEM;
-+
-+ /* fill in crashk_res if range is reserved by hypervisor */
-+
-+ memset(&range, 0, sizeof(range));
-+ range.range = KEXEC_RANGE_MA_CRASH;
-+
-+ if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
-+ return;
-+
-+ if (range.size) {
-+ crashk_res.start = range.start;
-+ crashk_res.end = range.start + range.size - 1;
-+ }
-+
-+ return;
-+
-+ err:
-+ /*
-+ * It isn't possible to free xen_phys_cpus this early in the
-+ * boot. Failure at this stage is unexpected and the amount of
-+ * memory is small, so we tolerate the potential leak.
-+ */
-+ xen_max_nr_phys_cpus = 0;
-+ return;
-+}
-+
-+void xen_machine_kexec_register_resources(struct resource *res)
-+{
-+ int k;
-+
-+ request_resource(res, &xen_hypervisor_res);
-+
-+ for (k = 0; k < xen_max_nr_phys_cpus; k++)
-+ request_resource(&xen_hypervisor_res, xen_phys_cpus + k);
-+
-+}
-+
-+static void setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
-+{
-+ machine_kexec_setup_load_arg(xki, image);
-+
-+ xki->indirection_page = image->head;
-+ xki->start_address = image->start;
-+}
-+
-+/*
-+ * Load the image into xen so xen can kdump itself
-+ * This might have been done in prepare, but prepare
-+ * is currently called too early. It might make sense
-+ * to move prepare, but for now, just add an extra hook.
-+ */
-+int xen_machine_kexec_load(struct kimage *image)
-+{
-+ xen_kexec_load_t xkl;
-+
-+ memset(&xkl, 0, sizeof(xkl));
-+ xkl.type = image->type;
-+ setup_load_arg(&xkl.image, image);
-+ return HYPERVISOR_kexec_op(KEXEC_CMD_kexec_load, &xkl);
-+}
-+
-+/*
-+ * Unload the image that was stored by machine_kexec_load()
-+ * This might have been done in machine_kexec_cleanup(), but it
-+ * is called too late, and it's possible Xen could try to kdump
-+ * using resources that have already been freed.
-+ */
-+void xen_machine_kexec_unload(struct kimage *image)
-+{
-+ xen_kexec_load_t xkl;
-+
-+ memset(&xkl, 0, sizeof(xkl));
-+ xkl.type = image->type;
-+ HYPERVISOR_kexec_op(KEXEC_CMD_kexec_unload, &xkl);
-+}
-+
-+/*
-+ * Do not allocate memory (or fail in any way) in machine_kexec().
-+ * We are past the point of no return, committed to rebooting now.
-+ *
-+ * This has the hypervisor move to the preferred reboot CPU,
-+ * stop all CPUs and kexec. That is, it combines machine_shutdown()
-+ * and machine_kexec() in Linux kexec terms.
-+ */
-+NORET_TYPE void machine_kexec(struct kimage *image)
-+{
-+ xen_kexec_exec_t xke;
-+
-+ memset(&xke, 0, sizeof(xke));
-+ xke.type = image->type;
-+ HYPERVISOR_kexec_op(KEXEC_CMD_kexec, &xke);
-+ panic("KEXEC_CMD_kexec hypercall should not return\n");
-+}
-+
-+void machine_shutdown(void)
-+{
-+ /* do nothing */
-+}
-+
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/core/machine_reboot.c ubuntu-gutsy-xen/drivers/xen/core/machine_reboot.c
---- ubuntu-gutsy/drivers/xen/core/machine_reboot.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/core/machine_reboot.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,241 @@
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/unistd.h>
-+#include <linux/module.h>
-+#include <linux/reboot.h>
-+#include <linux/sysrq.h>
-+#include <linux/stringify.h>
-+#include <linux/stop_machine.h>
-+#include <asm/irq.h>
-+#include <asm/mmu_context.h>
-+#include <xen/evtchn.h>
-+#include <asm/hypervisor.h>
-+#include <xen/xenbus.h>
-+#include <linux/cpu.h>
-+#include <linux/kthread.h>
-+#include <xen/gnttab.h>
-+#include <xen/xencons.h>
-+#include <xen/cpu_hotplug.h>
-+#include <xen/interface/vcpu.h>
-+
-+#if defined(__i386__) || defined(__x86_64__)
-+
-+/*
-+ * Power off function, if any
-+ */
-+void (*pm_power_off)(void);
-+EXPORT_SYMBOL(pm_power_off);
-+
-+void machine_emergency_restart(void)
-+{
-+ /* We really want to get pending console data out before we die. */
-+ xencons_force_flush();
-+ HYPERVISOR_shutdown(SHUTDOWN_reboot);
-+}
-+
-+void machine_restart(char * __unused)
-+{
-+ machine_emergency_restart();
-+}
-+
-+void machine_halt(void)
-+{
-+ machine_power_off();
-+}
-+
-+void machine_power_off(void)
-+{
-+ /* We really want to get pending console data out before we die. */
-+ xencons_force_flush();
-+ if (pm_power_off)
-+ pm_power_off();
-+ HYPERVISOR_shutdown(SHUTDOWN_poweroff);
-+}
-+
-+int reboot_thru_bios = 0; /* for dmi_scan.c */
-+EXPORT_SYMBOL(machine_restart);
-+EXPORT_SYMBOL(machine_halt);
-+EXPORT_SYMBOL(machine_power_off);
-+
-+static void pre_suspend(void)
-+{
-+ HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-+ HYPERVISOR_update_va_mapping(fix_to_virt(FIX_SHARED_INFO),
-+ __pte_ma(0), 0);
-+
-+ xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
-+ xen_start_info->console.domU.mfn =
-+ mfn_to_pfn(xen_start_info->console.domU.mfn);
-+}
-+
-+static void post_suspend(int suspend_cancelled)
-+{
-+ int i, j, k, fpp;
-+ unsigned long shinfo_mfn;
-+ extern unsigned long max_pfn;
-+ extern unsigned long *pfn_to_mfn_frame_list_list;
-+ extern unsigned long *pfn_to_mfn_frame_list[];
-+
-+ if (suspend_cancelled) {
-+ xen_start_info->store_mfn =
-+ pfn_to_mfn(xen_start_info->store_mfn);
-+ xen_start_info->console.domU.mfn =
-+ pfn_to_mfn(xen_start_info->console.domU.mfn);
-+ } else {
-+#ifdef CONFIG_SMP
-+ cpu_initialized_map = cpu_online_map;
-+#endif
-+ }
-+
-+ shinfo_mfn = xen_start_info->shared_info >> PAGE_SHIFT;
-+ HYPERVISOR_update_va_mapping(fix_to_virt(FIX_SHARED_INFO),
-+ pfn_pte_ma(shinfo_mfn, PAGE_KERNEL), 0);
-+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-+
-+ memset(empty_zero_page, 0, PAGE_SIZE);
-+
-+ fpp = PAGE_SIZE/sizeof(unsigned long);
-+ for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
-+ if ((j % fpp) == 0) {
-+ k++;
-+ pfn_to_mfn_frame_list_list[k] =
-+ virt_to_mfn(pfn_to_mfn_frame_list[k]);
-+ j = 0;
-+ }
-+ pfn_to_mfn_frame_list[k][j] =
-+ virt_to_mfn(&phys_to_machine_mapping[i]);
-+ }
-+ HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
-+ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-+ virt_to_mfn(pfn_to_mfn_frame_list_list);
-+}
-+
-+#else /* !(defined(__i386__) || defined(__x86_64__)) */
-+
-+#ifndef HAVE_XEN_PRE_SUSPEND
-+#define xen_pre_suspend() ((void)0)
-+#endif
-+
-+#ifndef HAVE_XEN_POST_SUSPEND
-+#define xen_post_suspend(x) ((void)0)
-+#endif
-+
-+#define switch_idle_mm() ((void)0)
-+#define mm_pin_all() ((void)0)
-+#define pre_suspend() xen_pre_suspend()
-+#define post_suspend(x) xen_post_suspend(x)
-+
-+#endif
-+
-+static int take_machine_down(void *p_fast_suspend)
-+{
-+ int fast_suspend = *(int *)p_fast_suspend;
-+ int suspend_cancelled, err;
-+ extern void time_resume(void);
-+
-+ if (fast_suspend) {
-+ BUG_ON(!irqs_disabled());
-+ } else {
-+ BUG_ON(irqs_disabled());
-+
-+ for (;;) {
-+ err = smp_suspend();
-+ if (err)
-+ return err;
-+
-+ xenbus_suspend();
-+ preempt_disable();
-+
-+ if (num_online_cpus() == 1)
-+ break;
-+
-+ preempt_enable();
-+ xenbus_suspend_cancel();
-+ }
-+
-+ local_irq_disable();
-+ }
-+
-+ mm_pin_all();
-+ gnttab_suspend();
-+ pre_suspend();
-+
-+ /*
-+ * This hypercall returns 1 if suspend was cancelled or the domain was
-+ * merely checkpointed, and 0 if it is resuming in a new domain.
-+ */
-+ suspend_cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
-+
-+ post_suspend(suspend_cancelled);
-+ gnttab_resume();
-+ if (!suspend_cancelled) {
-+ irq_resume();
-+#ifdef __x86_64__
-+ /*
-+ * Older versions of Xen do not save/restore the user %cr3.
-+ * We do it here just in case, but there's no need if we are
-+ * in fast-suspend mode as that implies a new enough Xen.
-+ */
-+ if (!fast_suspend) {
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_NEW_USER_BASEPTR;
-+ op.arg1.mfn = pfn_to_mfn(__pa(__user_pgd(
-+ current->active_mm->pgd)) >> PAGE_SHIFT);
-+ if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
-+ BUG();
-+ }
-+#endif
-+ }
-+ time_resume();
-+
-+ if (!fast_suspend)
-+ local_irq_enable();
-+
-+ return suspend_cancelled;
-+}
-+
-+int __xen_suspend(int fast_suspend)
-+{
-+ int err, suspend_cancelled;
-+
-+ BUG_ON(smp_processor_id() != 0);
-+ BUG_ON(in_interrupt());
-+
-+#if defined(__i386__) || defined(__x86_64__)
-+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
-+ printk(KERN_WARNING "Cannot suspend in "
-+ "auto_translated_physmap mode.\n");
-+ return -EOPNOTSUPP;
-+ }
-+#endif
-+
-+ /* If we are definitely UP then 'slow mode' is actually faster. */
-+ if (num_possible_cpus() == 1)
-+ fast_suspend = 0;
-+
-+ if (fast_suspend) {
-+ xenbus_suspend();
-+ err = stop_machine_run(take_machine_down, &fast_suspend, 0);
-+ if (err < 0)
-+ xenbus_suspend_cancel();
-+ } else {
-+ err = take_machine_down(&fast_suspend);
-+ }
-+
-+ if (err < 0)
-+ return err;
-+
-+ suspend_cancelled = err;
-+ if (!suspend_cancelled) {
-+ xencons_resume();
-+ xenbus_resume();
-+ } else {
-+ xenbus_suspend_cancel();
-+ }
-+
-+ if (!fast_suspend)
-+ smp_resume();
-+
-+ return 0;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/core/Makefile ubuntu-gutsy-xen/drivers/xen/core/Makefile
---- ubuntu-gutsy/drivers/xen/core/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/core/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,12 @@
-+#
-+# Makefile for the linux kernel.
-+#
-+
-+obj-y := evtchn.o gnttab.o features.o reboot.o machine_reboot.o firmware.o domctl.o
-+
-+obj-$(CONFIG_PROC_FS) += xen_proc.o
-+obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor_sysfs.o
-+obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
-+obj-$(CONFIG_XEN_SYSFS) += xen_sysfs.o
-+obj-$(CONFIG_XEN_SMPBOOT) += smpboot.o
-+obj-$(CONFIG_KEXEC) += machine_kexec.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/core/reboot.c ubuntu-gutsy-xen/drivers/xen/core/reboot.c
---- ubuntu-gutsy/drivers/xen/core/reboot.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/core/reboot.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,249 @@
-+#define __KERNEL_SYSCALLS__
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/unistd.h>
-+#include <linux/module.h>
-+#include <linux/reboot.h>
-+#include <linux/sysrq.h>
-+#include <asm/hypervisor.h>
-+#include <xen/xenbus.h>
-+#include <linux/kthread.h>
-+
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+#define SHUTDOWN_INVALID -1
-+#define SHUTDOWN_POWEROFF 0
-+#define SHUTDOWN_SUSPEND 2
-+/* Code 3 is SHUTDOWN_CRASH, which we don't use because the domain can only
-+ * report a crash, not be instructed to crash!
-+ * HALT is the same as POWEROFF, as far as we're concerned. The tools use
-+ * the distinction when we return the reason code to them.
-+ */
-+#define SHUTDOWN_HALT 4
-+
-+/* Ignore multiple shutdown requests. */
-+static int shutting_down = SHUTDOWN_INVALID;
-+
-+/* Can we leave APs online when we suspend? */
-+static int fast_suspend;
-+
-+static void __shutdown_handler(struct work_struct *unused);
-+static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler);
-+
-+int __xen_suspend(int fast_suspend);
-+
-+static int shutdown_process(void *__unused)
-+{
-+ static char *envp[] = { "HOME=/", "TERM=linux",
-+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
-+ static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
-+
-+ extern asmlinkage long sys_reboot(int magic1, int magic2,
-+ unsigned int cmd, void *arg);
-+
-+ if ((shutting_down == SHUTDOWN_POWEROFF) ||
-+ (shutting_down == SHUTDOWN_HALT)) {
-+ if (call_usermodehelper("/sbin/poweroff", poweroff_argv,
-+ envp, 0) < 0) {
-+#ifdef CONFIG_XEN
-+ sys_reboot(LINUX_REBOOT_MAGIC1,
-+ LINUX_REBOOT_MAGIC2,
-+ LINUX_REBOOT_CMD_POWER_OFF,
-+ NULL);
-+#endif /* CONFIG_XEN */
-+ }
-+ }
-+
-+ shutting_down = SHUTDOWN_INVALID; /* could try again */
-+
-+ return 0;
-+}
-+
-+static int xen_suspend(void *__unused)
-+{
-+ int err = __xen_suspend(fast_suspend);
-+ if (err)
-+ printk(KERN_ERR "Xen suspend failed (%d)\n", err);
-+ shutting_down = SHUTDOWN_INVALID;
-+ return 0;
-+}
-+
-+static int kthread_create_on_cpu(int (*f)(void *arg),
-+ void *arg,
-+ const char *name,
-+ int cpu)
-+{
-+ struct task_struct *p;
-+ p = kthread_create(f, arg, name);
-+ if (IS_ERR(p))
-+ return PTR_ERR(p);
-+ kthread_bind(p, cpu);
-+ wake_up_process(p);
-+ return 0;
-+}
-+
-+static void __shutdown_handler(struct work_struct *unused)
-+{
-+ int err;
-+
-+ if (shutting_down != SHUTDOWN_SUSPEND)
-+ err = kernel_thread(shutdown_process, NULL,
-+ CLONE_FS | CLONE_FILES);
-+ else
-+ err = kthread_create_on_cpu(xen_suspend, NULL, "suspend", 0);
-+
-+ if (err < 0) {
-+ printk(KERN_WARNING "Error creating shutdown process (%d): "
-+ "retrying...\n", -err);
-+ schedule_delayed_work(&shutdown_work, HZ/2);
-+ }
-+}
-+
-+static void shutdown_handler(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ extern void ctrl_alt_del(void);
-+ char *str;
-+ struct xenbus_transaction xbt;
-+ int err;
-+
-+ if (shutting_down != SHUTDOWN_INVALID)
-+ return;
-+
-+ again:
-+ err = xenbus_transaction_start(&xbt);
-+ if (err)
-+ return;
-+
-+ str = (char *)xenbus_read(xbt, "control", "shutdown", NULL);
-+ /* Ignore read errors and empty reads. */
-+ if (XENBUS_IS_ERR_READ(str)) {
-+ xenbus_transaction_end(xbt, 1);
-+ return;
-+ }
-+
-+ xenbus_write(xbt, "control", "shutdown", "");
-+
-+ err = xenbus_transaction_end(xbt, 0);
-+ if (err == -EAGAIN) {
-+ kfree(str);
-+ goto again;
-+ }
-+
-+ if (strcmp(str, "poweroff") == 0)
-+ shutting_down = SHUTDOWN_POWEROFF;
-+ else if (strcmp(str, "reboot") == 0)
-+ ctrl_alt_del();
-+ else if (strcmp(str, "suspend") == 0)
-+ shutting_down = SHUTDOWN_SUSPEND;
-+ else if (strcmp(str, "halt") == 0)
-+ shutting_down = SHUTDOWN_HALT;
-+ else {
-+ printk("Ignoring shutdown request: %s\n", str);
-+ shutting_down = SHUTDOWN_INVALID;
-+ }
-+
-+ if (shutting_down != SHUTDOWN_INVALID)
-+ schedule_delayed_work(&shutdown_work, 0);
-+
-+ kfree(str);
-+}
-+
-+static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
-+ unsigned int len)
-+{
-+ char sysrq_key = '\0';
-+ struct xenbus_transaction xbt;
-+ int err;
-+
-+ again:
-+ err = xenbus_transaction_start(&xbt);
-+ if (err)
-+ return;
-+ if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
-+ printk(KERN_ERR "Unable to read sysrq code in "
-+ "control/sysrq\n");
-+ xenbus_transaction_end(xbt, 1);
-+ return;
-+ }
-+
-+ if (sysrq_key != '\0')
-+ xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
-+
-+ err = xenbus_transaction_end(xbt, 0);
-+ if (err == -EAGAIN)
-+ goto again;
-+
-+#ifdef CONFIG_MAGIC_SYSRQ
-+ if (sysrq_key != '\0')
-+ handle_sysrq(sysrq_key, NULL);
-+#endif
-+}
-+
-+static struct xenbus_watch shutdown_watch = {
-+ .node = "control/shutdown",
-+ .callback = shutdown_handler
-+};
-+
-+static struct xenbus_watch sysrq_watch = {
-+ .node = "control/sysrq",
-+ .callback = sysrq_handler
-+};
-+
-+static int setup_shutdown_watcher(void)
-+{
-+ int err;
-+
-+ xenbus_scanf(XBT_NIL, "control",
-+ "platform-feature-multiprocessor-suspend",
-+ "%d", &fast_suspend);
-+
-+ err = register_xenbus_watch(&shutdown_watch);
-+ if (err) {
-+ printk(KERN_ERR "Failed to set shutdown watcher\n");
-+ return err;
-+ }
-+
-+ err = register_xenbus_watch(&sysrq_watch);
-+ if (err) {
-+ printk(KERN_ERR "Failed to set sysrq watcher\n");
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+#ifdef CONFIG_XEN
-+
-+static int shutdown_event(struct notifier_block *notifier,
-+ unsigned long event,
-+ void *data)
-+{
-+ setup_shutdown_watcher();
-+ return NOTIFY_DONE;
-+}
-+
-+static int __init setup_shutdown_event(void)
-+{
-+ static struct notifier_block xenstore_notifier = {
-+ .notifier_call = shutdown_event
-+ };
-+ register_xenstore_notifier(&xenstore_notifier);
-+
-+ return 0;
-+}
-+
-+subsys_initcall(setup_shutdown_event);
-+
-+#else /* !defined(CONFIG_XEN) */
-+
-+int xen_reboot_init(void)
-+{
-+ return setup_shutdown_watcher();
-+}
-+
-+#endif /* !defined(CONFIG_XEN) */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/core/smpboot.c ubuntu-gutsy-xen/drivers/xen/core/smpboot.c
---- ubuntu-gutsy/drivers/xen/core/smpboot.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/core/smpboot.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,459 @@
-+/*
-+ * Xen SMP booting functions
-+ *
-+ * See arch/i386/kernel/smpboot.c for copyright and credits for derived
-+ * portions of this file.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/sched.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/smp_lock.h>
-+#include <linux/irq.h>
-+#include <linux/bootmem.h>
-+#include <linux/notifier.h>
-+#include <linux/cpu.h>
-+#include <linux/percpu.h>
-+#include <asm/desc.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/pgalloc.h>
-+#include <xen/evtchn.h>
-+#include <xen/interface/vcpu.h>
-+#include <xen/cpu_hotplug.h>
-+#include <xen/xenbus.h>
-+
-+extern irqreturn_t smp_reschedule_interrupt(int, void *);
-+extern irqreturn_t smp_call_function_interrupt(int, void *);
-+
-+extern int local_setup_timer(unsigned int cpu);
-+extern void local_teardown_timer(unsigned int cpu);
-+
-+extern void hypervisor_callback(void);
-+extern void failsafe_callback(void);
-+extern void system_call(void);
-+extern void smp_trap_init(trap_info_t *);
-+
-+/* Number of siblings per CPU package */
-+int smp_num_siblings = 1;
-+int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
-+EXPORT_SYMBOL(phys_proc_id);
-+int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
-+EXPORT_SYMBOL(cpu_core_id);
-+
-+cpumask_t cpu_online_map;
-+EXPORT_SYMBOL(cpu_online_map);
-+cpumask_t cpu_possible_map;
-+EXPORT_SYMBOL(cpu_possible_map);
-+cpumask_t cpu_initialized_map;
-+
-+struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-+EXPORT_SYMBOL(cpu_data);
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+DEFINE_PER_CPU(int, cpu_state) = { 0 };
-+#endif
-+
-+static DEFINE_PER_CPU(int, resched_irq);
-+static DEFINE_PER_CPU(int, callfunc_irq);
-+static char resched_name[NR_CPUS][15];
-+static char callfunc_name[NR_CPUS][15];
-+
-+u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-+
-+void *xquad_portio;
-+
-+cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
-+cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
-+EXPORT_SYMBOL(cpu_core_map);
-+
-+#if defined(__i386__)
-+u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
-+EXPORT_SYMBOL(x86_cpu_to_apicid);
-+#endif
-+
-+void __init prefill_possible_map(void)
-+{
-+ int i, rc;
-+
-+ for_each_possible_cpu(i)
-+ if (i != smp_processor_id())
-+ return;
-+
-+ for (i = 0; i < NR_CPUS; i++) {
-+ rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
-+ if (rc >= 0)
-+ cpu_set(i, cpu_possible_map);
-+ }
-+}
-+
-+void __init smp_alloc_memory(void)
-+{
-+}
-+
-+static inline void
-+set_cpu_sibling_map(int cpu)
-+{
-+ phys_proc_id[cpu] = cpu;
-+ cpu_core_id[cpu] = 0;
-+
-+ cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
-+ cpu_core_map[cpu] = cpumask_of_cpu(cpu);
-+
-+ cpu_data[cpu].booted_cores = 1;
-+}
-+
-+static void
-+remove_siblinginfo(int cpu)
-+{
-+ phys_proc_id[cpu] = BAD_APICID;
-+ cpu_core_id[cpu] = BAD_APICID;
-+
-+ cpus_clear(cpu_sibling_map[cpu]);
-+ cpus_clear(cpu_core_map[cpu]);
-+
-+ cpu_data[cpu].booted_cores = 0;
-+}
-+
-+static int xen_smp_intr_init(unsigned int cpu)
-+{
-+ int rc;
-+
-+ per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;
-+
-+ sprintf(resched_name[cpu], "resched%d", cpu);
-+ rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
-+ cpu,
-+ smp_reschedule_interrupt,
-+ IRQF_DISABLED,
-+ resched_name[cpu],
-+ NULL);
-+ if (rc < 0)
-+ goto fail;
-+ per_cpu(resched_irq, cpu) = rc;
-+
-+ sprintf(callfunc_name[cpu], "callfunc%d", cpu);
-+ rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
-+ cpu,
-+ smp_call_function_interrupt,
-+ IRQF_DISABLED,
-+ callfunc_name[cpu],
-+ NULL);
-+ if (rc < 0)
-+ goto fail;
-+ per_cpu(callfunc_irq, cpu) = rc;
-+
-+ if ((cpu != 0) && ((rc = local_setup_timer(cpu)) != 0))
-+ goto fail;
-+
-+ return 0;
-+
-+ fail:
-+ if (per_cpu(resched_irq, cpu) >= 0)
-+ unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-+ if (per_cpu(callfunc_irq, cpu) >= 0)
-+ unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-+ return rc;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+static void xen_smp_intr_exit(unsigned int cpu)
-+{
-+ if (cpu != 0)
-+ local_teardown_timer(cpu);
-+
-+ unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-+ unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-+}
-+#endif
-+
-+void cpu_bringup(void)
-+{
-+ cpu_init();
-+ touch_softlockup_watchdog();
-+ preempt_disable();
-+ local_irq_enable();
-+}
-+
-+static void cpu_bringup_and_idle(void)
-+{
-+ cpu_bringup();
-+ cpu_idle();
-+}
-+
-+static void cpu_initialize_context(unsigned int cpu)
-+{
-+ vcpu_guest_context_t ctxt;
-+ struct task_struct *idle = idle_task(cpu);
-+#ifdef __x86_64__
-+ struct desc_ptr *gdt_descr = &cpu_gdt_descr[cpu];
-+#endif
-+
-+ if (cpu_test_and_set(cpu, cpu_initialized_map))
-+ return;
-+
-+ memset(&ctxt, 0, sizeof(ctxt));
-+
-+ ctxt.flags = VGCF_IN_KERNEL;
-+ ctxt.user_regs.ds = __USER_DS;
-+ ctxt.user_regs.es = __USER_DS;
-+ ctxt.user_regs.fs = 0;
-+ ctxt.user_regs.gs = 0;
-+ ctxt.user_regs.ss = __KERNEL_DS;
-+ ctxt.user_regs.eip = (unsigned long)cpu_bringup_and_idle;
-+ ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000; /* IOPL_RING1 */
-+
-+ memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
-+
-+ smp_trap_init(ctxt.trap_ctxt);
-+
-+ ctxt.ldt_ents = 0;
-+
-+#ifdef __i386__
-+ ctxt.gdt_frames[0] = virt_to_mfn(get_cpu_gdt_table(cpu));
-+ ctxt.gdt_ents = GDT_SIZE / 8;
-+
-+ ctxt.user_regs.cs = __KERNEL_CS;
-+ ctxt.user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
-+
-+ ctxt.kernel_ss = __KERNEL_DS;
-+ ctxt.kernel_sp = idle->thread.esp0;
-+
-+ ctxt.user_regs.fs = __KERNEL_PERCPU;
-+
-+ ctxt.event_callback_cs = __KERNEL_CS;
-+ ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
-+ ctxt.failsafe_callback_cs = __KERNEL_CS;
-+ ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
-+
-+ ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
-+#else /* __x86_64__ */
-+ ctxt.gdt_frames[0] = virt_to_mfn(gdt_descr->address);
-+ ctxt.gdt_ents = gdt_descr->size / 8;
-+
-+ ctxt.user_regs.cs = __KERNEL_CS;
-+ ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
-+
-+ ctxt.kernel_ss = __KERNEL_DS;
-+ ctxt.kernel_sp = idle->thread.rsp0;
-+
-+ ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
-+ ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
-+ ctxt.syscall_callback_eip = (unsigned long)system_call;
-+
-+ ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt));
-+
-+ ctxt.gs_base_kernel = (unsigned long)(cpu_pda(cpu));
-+#endif
-+
-+ BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt));
-+}
-+
-+void __init smp_prepare_cpus(unsigned int max_cpus)
-+{
-+ int cpu;
-+ struct task_struct *idle;
-+#ifdef __x86_64__
-+ struct desc_ptr *gdt_descr;
-+#endif
-+ void *gdt_addr;
-+
-+ boot_cpu_data.apicid = 0;
-+ cpu_data[0] = boot_cpu_data;
-+
-+ cpu_2_logical_apicid[0] = 0;
-+ x86_cpu_to_apicid[0] = 0;
-+
-+ current_thread_info()->cpu = 0;
-+
-+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
-+ cpus_clear(cpu_sibling_map[cpu]);
-+ cpus_clear(cpu_core_map[cpu]);
-+ }
-+
-+ set_cpu_sibling_map(0);
-+
-+ if (xen_smp_intr_init(0))
-+ BUG();
-+
-+ cpu_initialized_map = cpumask_of_cpu(0);
-+
-+ /* Restrict the possible_map according to max_cpus. */
-+ while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
-+ for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--)
-+ continue;
-+ cpu_clear(cpu, cpu_possible_map);
-+ }
-+
-+ for_each_possible_cpu (cpu) {
-+ if (cpu == 0)
-+ continue;
-+
-+ idle = fork_idle(cpu);
-+ if (IS_ERR(idle))
-+ panic("failed fork for CPU %d", cpu);
-+
-+#ifdef __x86_64__
-+ gdt_descr = &cpu_gdt_descr[cpu];
-+ gdt_descr->address = get_zeroed_page(GFP_KERNEL);
-+ if (unlikely(!gdt_descr->address)) {
-+ printk(KERN_CRIT "CPU%d failed to allocate GDT\n",
-+ cpu);
-+ continue;
-+ }
-+ gdt_descr->size = GDT_SIZE;
-+ memcpy((void *)gdt_descr->address, cpu_gdt_table, GDT_SIZE);
-+ gdt_addr = (void *)gdt_descr->address;
-+#else
-+ init_gdt(cpu);
-+ gdt_addr = get_cpu_gdt_table(cpu);
-+#endif
-+ make_page_readonly(gdt_addr,
-+ XENFEAT_writable_descriptor_tables);
-+
-+ cpu_data[cpu] = boot_cpu_data;
-+ cpu_data[cpu].apicid = cpu;
-+
-+ cpu_2_logical_apicid[cpu] = cpu;
-+ x86_cpu_to_apicid[cpu] = cpu;
-+
-+#ifdef __x86_64__
-+ cpu_pda(cpu)->pcurrent = idle;
-+ cpu_pda(cpu)->cpunumber = cpu;
-+ clear_ti_thread_flag(task_thread_info(idle), TIF_FORK);
-+#else
-+ per_cpu(current_task, cpu) = idle;
-+#endif
-+
-+ irq_ctx_init(cpu);
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+ if (is_initial_xendomain())
-+ cpu_set(cpu, cpu_present_map);
-+#else
-+ cpu_set(cpu, cpu_present_map);
-+#endif
-+ }
-+
-+ init_xenbus_allowed_cpumask();
-+
-+#ifdef CONFIG_X86_IO_APIC
-+ /*
-+ * Here we can be sure that there is an IO-APIC in the system. Let's
-+ * go and set it up:
-+ */
-+ if (!skip_ioapic_setup && nr_ioapics)
-+ setup_IO_APIC();
-+#endif
-+}
-+
-+void __init smp_prepare_boot_cpu(void)
-+{
-+#ifdef __i386__
-+ init_gdt(smp_processor_id());
-+ switch_to_new_gdt();
-+#endif
-+ prefill_possible_map();
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+
-+/*
-+ * Initialize cpu_present_map late to skip SMP boot code in init/main.c.
-+ * But do it early enough to catch critical for_each_present_cpu() loops
-+ * in i386-specific code.
-+ */
-+static int __init initialize_cpu_present_map(void)
-+{
-+ cpu_present_map = cpu_possible_map;
-+ return 0;
-+}
-+core_initcall(initialize_cpu_present_map);
-+
-+int __cpu_disable(void)
-+{
-+ cpumask_t map = cpu_online_map;
-+ int cpu = smp_processor_id();
-+
-+ if (cpu == 0)
-+ return -EBUSY;
-+
-+ remove_siblinginfo(cpu);
-+
-+ cpu_clear(cpu, map);
-+ fixup_irqs(map);
-+ cpu_clear(cpu, cpu_online_map);
-+
-+ return 0;
-+}
-+
-+void __cpu_die(unsigned int cpu)
-+{
-+ while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
-+ current->state = TASK_UNINTERRUPTIBLE;
-+ schedule_timeout(HZ/10);
-+ }
-+
-+ xen_smp_intr_exit(cpu);
-+
-+ if (num_online_cpus() == 1)
-+ alternatives_smp_switch(0);
-+}
-+
-+#else /* !CONFIG_HOTPLUG_CPU */
-+
-+int __cpu_disable(void)
-+{
-+ return -ENOSYS;
-+}
-+
-+void __cpu_die(unsigned int cpu)
-+{
-+ BUG();
-+}
-+
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+int __devinit __cpu_up(unsigned int cpu)
-+{
-+ int rc;
-+
-+ rc = cpu_up_check(cpu);
-+ if (rc)
-+ return rc;
-+
-+ cpu_initialize_context(cpu);
-+
-+ if (num_online_cpus() == 1)
-+ alternatives_smp_switch(1);
-+
-+ /* This must be done before setting cpu_online_map */
-+ set_cpu_sibling_map(cpu);
-+ wmb();
-+
-+ rc = xen_smp_intr_init(cpu);
-+ if (rc) {
-+ remove_siblinginfo(cpu);
-+ return rc;
-+ }
-+
-+ cpu_set(cpu, cpu_online_map);
-+
-+ rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
-+ BUG_ON(rc);
-+
-+ return 0;
-+}
-+
-+void __init smp_cpus_done(unsigned int max_cpus)
-+{
-+}
-+
-+#ifndef CONFIG_X86_LOCAL_APIC
-+int setup_profiling_timer(unsigned int multiplier)
-+{
-+ return -EINVAL;
-+}
-+#endif
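
xen_smp_intr_init() above uses a sentinel-and-unwind error path: both per-CPU IRQ slots start at -1, each successful bind records its result, and the failure label releases only the slots that are >= 0. A minimal userspace C sketch of the same shape, with acquire_a/acquire_b/release as hypothetical stand-ins:

#include <stdio.h>

/* Hypothetical resource handles; -1 is the "not acquired" sentinel. */
static int acquire_a(void) { return 10; }   /* succeeds */
static int acquire_b(void) { return -5; }   /* fails, like a bind error */
static void release(int h) { printf("released %d\n", h); }

static int setup(void)
{
	int a = -1, b = -1;
	int rc;

	rc = acquire_a();
	if (rc < 0)
		goto fail;
	a = rc;

	rc = acquire_b();
	if (rc < 0)
		goto fail;
	b = rc;

	return 0;

fail:
	/* Unwind only what was actually acquired. */
	if (a >= 0)
		release(a);
	if (b >= 0)
		release(b);
	return rc;
}

int main(void)
{
	return setup() < 0 ? 1 : 0;
}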
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/core/xen_proc.c ubuntu-gutsy-xen/drivers/xen/core/xen_proc.c
---- ubuntu-gutsy/drivers/xen/core/xen_proc.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/core/xen_proc.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,23 @@
-+
-+#include <linux/module.h>
-+#include <linux/proc_fs.h>
-+#include <xen/xen_proc.h>
-+
-+static struct proc_dir_entry *xen_base;
-+
-+struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
-+{
-+ if ( xen_base == NULL )
-+ if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
-+ panic("Couldn't create /proc/xen");
-+ return create_proc_entry(name, mode, xen_base);
-+}
-+
-+EXPORT_SYMBOL_GPL(create_xen_proc_entry);
-+
-+void remove_xen_proc_entry(const char *name)
-+{
-+ remove_proc_entry(name, xen_base);
-+}
-+
-+EXPORT_SYMBOL_GPL(remove_xen_proc_entry);
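
create_xen_proc_entry() above creates the /proc/xen directory lazily, on the first request for an entry. A rough userspace analogue of that create-parent-on-first-use pattern, using POSIX mkdir under a hypothetical base path:

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>

/* Hypothetical base directory standing in for /proc/xen. */
static const char *base = "/tmp/xen-demo";
static int base_created;

static FILE *create_entry(const char *name)
{
	char path[256];

	if (!base_created) {
		if (mkdir(base, 0755) != 0 && errno != EEXIST)
			return NULL;	/* the kernel code panics here instead */
		base_created = 1;
	}
	snprintf(path, sizeof(path), "%s/%s", base, name);
	return fopen(path, "w");
}

int main(void)
{
	FILE *f = create_entry("capabilities");
	if (!f)
		return 1;
	fputs("example\n", f);
	fclose(f);
	return 0;
}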
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/core/xen_sysfs.c ubuntu-gutsy-xen/drivers/xen/core/xen_sysfs.c
---- ubuntu-gutsy/drivers/xen/core/xen_sysfs.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/core/xen_sysfs.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,378 @@
-+/*
-+ * copyright (c) 2006 IBM Corporation
-+ * Authored by: Mike D. Day <ncmike@us.ibm.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include <linux/err.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <asm/hypervisor.h>
-+#include <xen/features.h>
-+#include <xen/hypervisor_sysfs.h>
-+#include <xen/xenbus.h>
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Mike D. Day <ncmike@us.ibm.com>");
-+
-+static ssize_t type_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+ return sprintf(buffer, "xen\n");
-+}
-+
-+HYPERVISOR_ATTR_RO(type);
-+
-+static int __init xen_sysfs_type_init(void)
-+{
-+ return sysfs_create_file(&hypervisor_subsys.kobj, &type_attr.attr);
-+}
-+
-+static void xen_sysfs_type_destroy(void)
-+{
-+ sysfs_remove_file(&hypervisor_subsys.kobj, &type_attr.attr);
-+}
-+
-+/* xen version attributes */
-+static ssize_t major_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+ int version = HYPERVISOR_xen_version(XENVER_version, NULL);
-+ if (version)
-+ return sprintf(buffer, "%d\n", version >> 16);
-+ return -ENODEV;
-+}
-+
-+HYPERVISOR_ATTR_RO(major);
-+
-+static ssize_t minor_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+ int version = HYPERVISOR_xen_version(XENVER_version, NULL);
-+ if (version)
-+ return sprintf(buffer, "%d\n", version & 0xff);
-+ return -ENODEV;
-+}
-+
-+HYPERVISOR_ATTR_RO(minor);
-+
-+static ssize_t extra_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+ int ret = -ENOMEM;
-+ char *extra;
-+
-+ extra = kmalloc(XEN_EXTRAVERSION_LEN, GFP_KERNEL);
-+ if (extra) {
-+ ret = HYPERVISOR_xen_version(XENVER_extraversion, extra);
-+ if (!ret)
-+ ret = sprintf(buffer, "%s\n", extra);
-+ kfree(extra);
-+ }
-+
-+ return ret;
-+}
-+
-+HYPERVISOR_ATTR_RO(extra);
-+
-+static struct attribute *version_attrs[] = {
-+ &major_attr.attr,
-+ &minor_attr.attr,
-+ &extra_attr.attr,
-+ NULL
-+};
-+
-+static struct attribute_group version_group = {
-+ .name = "version",
-+ .attrs = version_attrs,
-+};
-+
-+static int __init xen_sysfs_version_init(void)
-+{
-+ return sysfs_create_group(&hypervisor_subsys.kobj,
-+ &version_group);
-+}
-+
-+static void xen_sysfs_version_destroy(void)
-+{
-+ sysfs_remove_group(&hypervisor_subsys.kobj, &version_group);
-+}
-+
-+/* UUID */
-+
-+static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+ char *vm, *val;
-+ int ret;
-+
-+ vm = xenbus_read(XBT_NIL, "vm", "", NULL);
-+ if (IS_ERR(vm))
-+ return PTR_ERR(vm);
-+ val = xenbus_read(XBT_NIL, vm, "uuid", NULL);
-+ kfree(vm);
-+ if (IS_ERR(val))
-+ return PTR_ERR(val);
-+ ret = sprintf(buffer, "%s\n", val);
-+ kfree(val);
-+ return ret;
-+}
-+
-+HYPERVISOR_ATTR_RO(uuid);
-+
-+static int __init xen_sysfs_uuid_init(void)
-+{
-+ return sysfs_create_file(&hypervisor_subsys.kobj, &uuid_attr.attr);
-+}
-+
-+static void xen_sysfs_uuid_destroy(void)
-+{
-+ sysfs_remove_file(&hypervisor_subsys.kobj, &uuid_attr.attr);
-+}
-+
-+/* xen compilation attributes */
-+
-+static ssize_t compiler_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+ int ret = -ENOMEM;
-+ struct xen_compile_info *info;
-+
-+ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
-+ if (info) {
-+ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
-+ if (!ret)
-+ ret = sprintf(buffer, "%s\n", info->compiler);
-+ kfree(info);
-+ }
-+
-+ return ret;
-+}
-+
-+HYPERVISOR_ATTR_RO(compiler);
-+
-+static ssize_t compiled_by_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+ int ret = -ENOMEM;
-+ struct xen_compile_info *info;
-+
-+ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
-+ if (info) {
-+ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
-+ if (!ret)
-+ ret = sprintf(buffer, "%s\n", info->compile_by);
-+ kfree(info);
-+ }
-+
-+ return ret;
-+}
-+
-+HYPERVISOR_ATTR_RO(compiled_by);
-+
-+static ssize_t compile_date_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+ int ret = -ENOMEM;
-+ struct xen_compile_info *info;
-+
-+ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
-+ if (info) {
-+ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
-+ if (!ret)
-+ ret = sprintf(buffer, "%s\n", info->compile_date);
-+ kfree(info);
-+ }
-+
-+ return ret;
-+}
-+
-+HYPERVISOR_ATTR_RO(compile_date);
-+
-+static struct attribute *xen_compile_attrs[] = {
-+ &compiler_attr.attr,
-+ &compiled_by_attr.attr,
-+ &compile_date_attr.attr,
-+ NULL
-+};
-+
-+static struct attribute_group xen_compilation_group = {
-+ .name = "compilation",
-+ .attrs = xen_compile_attrs,
-+};
-+
-+static int __init xen_compilation_init(void)
-+{
-+ return sysfs_create_group(&hypervisor_subsys.kobj,
-+ &xen_compilation_group);
-+}
-+
-+static void xen_compilation_destroy(void)
-+{
-+ sysfs_remove_group(&hypervisor_subsys.kobj,
-+ &xen_compilation_group);
-+}
-+
-+/* xen properties info */
-+
-+static ssize_t capabilities_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+ int ret = -ENOMEM;
-+ char *caps;
-+
-+ caps = kmalloc(XEN_CAPABILITIES_INFO_LEN, GFP_KERNEL);
-+ if (caps) {
-+ ret = HYPERVISOR_xen_version(XENVER_capabilities, caps);
-+ if (!ret)
-+ ret = sprintf(buffer, "%s\n", caps);
-+ kfree(caps);
-+ }
-+
-+ return ret;
-+}
-+
-+HYPERVISOR_ATTR_RO(capabilities);
-+
-+static ssize_t changeset_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+ int ret = -ENOMEM;
-+ char *cset;
-+
-+ cset = kmalloc(XEN_CHANGESET_INFO_LEN, GFP_KERNEL);
-+ if (cset) {
-+ ret = HYPERVISOR_xen_version(XENVER_changeset, cset);
-+ if (!ret)
-+ ret = sprintf(buffer, "%s\n", cset);
-+ kfree(cset);
-+ }
-+
-+ return ret;
-+}
-+
-+HYPERVISOR_ATTR_RO(changeset);
-+
-+static ssize_t virtual_start_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+ int ret = -ENOMEM;
-+ struct xen_platform_parameters *parms;
-+
-+ parms = kmalloc(sizeof(struct xen_platform_parameters), GFP_KERNEL);
-+ if (parms) {
-+ ret = HYPERVISOR_xen_version(XENVER_platform_parameters,
-+ parms);
-+ if (!ret)
-+ ret = sprintf(buffer, "%lx\n", parms->virt_start);
-+ kfree(parms);
-+ }
-+
-+ return ret;
-+}
-+
-+HYPERVISOR_ATTR_RO(virtual_start);
-+
-+static ssize_t pagesize_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+ int ret;
-+
-+ ret = HYPERVISOR_xen_version(XENVER_pagesize, NULL);
-+ if (ret > 0)
-+ ret = sprintf(buffer, "%x\n", ret);
-+
-+ return ret;
-+}
-+
-+HYPERVISOR_ATTR_RO(pagesize);
-+
-+/* eventually there will be several more features to export */
-+static ssize_t xen_feature_show(int index, char *buffer)
-+{
-+ int ret = -ENOMEM;
-+ struct xen_feature_info *info;
-+
-+ info = kmalloc(sizeof(struct xen_feature_info), GFP_KERNEL);
-+ if (info) {
-+ info->submap_idx = index;
-+ ret = HYPERVISOR_xen_version(XENVER_get_features, info);
-+ if (!ret)
-+ ret = sprintf(buffer, "%d\n", info->submap);
-+ kfree(info);
-+ }
-+
-+ return ret;
-+}
-+
-+static ssize_t writable_pt_show(struct hyp_sysfs_attr *attr, char *buffer)
-+{
-+ return xen_feature_show(XENFEAT_writable_page_tables, buffer);
-+}
-+
-+HYPERVISOR_ATTR_RO(writable_pt);
-+
-+static struct attribute *xen_properties_attrs[] = {
-+ &capabilities_attr.attr,
-+ &changeset_attr.attr,
-+ &virtual_start_attr.attr,
-+ &pagesize_attr.attr,
-+ &writable_pt_attr.attr,
-+ NULL
-+};
-+
-+static struct attribute_group xen_properties_group = {
-+ .name = "properties",
-+ .attrs = xen_properties_attrs,
-+};
-+
-+static int __init xen_properties_init(void)
-+{
-+ return sysfs_create_group(&hypervisor_subsys.kobj,
-+ &xen_properties_group);
-+}
-+
-+static void xen_properties_destroy(void)
-+{
-+ sysfs_remove_group(&hypervisor_subsys.kobj,
-+ &xen_properties_group);
-+}
-+
-+static int __init hyper_sysfs_init(void)
-+{
-+ int ret;
-+
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ ret = xen_sysfs_type_init();
-+ if (ret)
-+ goto out;
-+ ret = xen_sysfs_version_init();
-+ if (ret)
-+ goto version_out;
-+ ret = xen_compilation_init();
-+ if (ret)
-+ goto comp_out;
-+ ret = xen_sysfs_uuid_init();
-+ if (ret)
-+ goto uuid_out;
-+ ret = xen_properties_init();
-+ if (!ret)
-+ goto out;
-+
-+ xen_sysfs_uuid_destroy();
-+uuid_out:
-+ xen_compilation_destroy();
-+comp_out:
-+ xen_sysfs_version_destroy();
-+version_out:
-+ xen_sysfs_type_destroy();
-+out:
-+ return ret;
-+}
-+
-+static void hyper_sysfs_exit(void)
-+{
-+ xen_properties_destroy();
-+ xen_compilation_destroy();
-+ xen_sysfs_uuid_destroy();
-+ xen_sysfs_version_destroy();
-+ xen_sysfs_type_destroy();
-+
-+}
-+
-+module_init(hyper_sysfs_init);
-+module_exit(hyper_sysfs_exit);
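
hyper_sysfs_init() above is a textbook cascading-goto cleanup ladder: a failure at step N falls through the labels that undo steps N-1 down to 1 in reverse order, while full success jumps straight to out. A compact userspace C sketch of the idiom; init_a/init_b/init_c and the destroy functions are hypothetical:

#include <stdio.h>

static int  init_a(void)    { puts("a up");   return 0; }
static void destroy_a(void) { puts("a down"); }
static int  init_b(void)    { puts("b up");   return 0; }
static void destroy_b(void) { puts("b down"); }
static int  init_c(void)    { puts("c failed"); return -1; }  /* simulate a failure */

static int init_all(void)
{
	int ret;

	ret = init_a();
	if (ret)
		goto out;
	ret = init_b();
	if (ret)
		goto a_out;
	ret = init_c();
	if (!ret)
		goto out;	/* full success, like xen_properties_init() above */

	/* c failed: unwind b, then a, in reverse order of setup. */
	destroy_b();
a_out:
	destroy_a();
out:
	return ret;
}

int main(void)
{
	return init_all() ? 1 : 0;
}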
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/evtchn/evtchn.c ubuntu-gutsy-xen/drivers/xen/evtchn/evtchn.c
---- ubuntu-gutsy/drivers/xen/evtchn/evtchn.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/evtchn/evtchn.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,469 @@
-+/******************************************************************************
-+ * evtchn.c
-+ *
-+ * Driver for receiving and demuxing event-channel signals.
-+ *
-+ * Copyright (c) 2004-2005, K A Fraser
-+ * Multi-process extensions Copyright (c) 2004, Steven Smith
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/fs.h>
-+#include <linux/errno.h>
-+#include <linux/miscdevice.h>
-+#include <linux/major.h>
-+#include <linux/proc_fs.h>
-+#include <linux/stat.h>
-+#include <linux/poll.h>
-+#include <linux/irq.h>
-+#include <linux/init.h>
-+#include <linux/gfp.h>
-+#include <linux/mutex.h>
-+#include <xen/evtchn.h>
-+#include <xen/public/evtchn.h>
-+
-+struct per_user_data {
-+ /* Notification ring, accessed via /dev/xen/evtchn. */
-+#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t))
-+#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
-+ evtchn_port_t *ring;
-+ unsigned int ring_cons, ring_prod, ring_overflow;
-+ struct mutex ring_cons_mutex; /* protect against concurrent readers */
-+
-+ /* Processes wait on this queue when ring is empty. */
-+ wait_queue_head_t evtchn_wait;
-+ struct fasync_struct *evtchn_async_queue;
-+};
-+
-+/* Who's bound to each port? */
-+static struct per_user_data *port_user[NR_EVENT_CHANNELS];
-+static spinlock_t port_user_lock;
-+
-+void evtchn_device_upcall(int port)
-+{
-+ struct per_user_data *u;
-+
-+ spin_lock(&port_user_lock);
-+
-+ mask_evtchn(port);
-+ clear_evtchn(port);
-+
-+ if ((u = port_user[port]) != NULL) {
-+ if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
-+ u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
-+ if (u->ring_cons == u->ring_prod++) {
-+ wake_up_interruptible(&u->evtchn_wait);
-+ kill_fasync(&u->evtchn_async_queue,
-+ SIGIO, POLL_IN);
-+ }
-+ } else {
-+ u->ring_overflow = 1;
-+ }
-+ }
-+
-+ spin_unlock(&port_user_lock);
-+}
-+
-+static ssize_t evtchn_read(struct file *file, char __user *buf,
-+ size_t count, loff_t *ppos)
-+{
-+ int rc;
-+ unsigned int c, p, bytes1 = 0, bytes2 = 0;
-+ struct per_user_data *u = file->private_data;
-+
-+ /* Whole number of ports. */
-+ count &= ~(sizeof(evtchn_port_t)-1);
-+
-+ if (count == 0)
-+ return 0;
-+
-+ if (count > PAGE_SIZE)
-+ count = PAGE_SIZE;
-+
-+ for (;;) {
-+ mutex_lock(&u->ring_cons_mutex);
-+
-+ rc = -EFBIG;
-+ if (u->ring_overflow)
-+ goto unlock_out;
-+
-+ if ((c = u->ring_cons) != (p = u->ring_prod))
-+ break;
-+
-+ mutex_unlock(&u->ring_cons_mutex);
-+
-+ if (file->f_flags & O_NONBLOCK)
-+ return -EAGAIN;
-+
-+ rc = wait_event_interruptible(
-+ u->evtchn_wait, u->ring_cons != u->ring_prod);
-+ if (rc)
-+ return rc;
-+ }
-+
-+ /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
-+ if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
-+ bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
-+ sizeof(evtchn_port_t);
-+ bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
-+ } else {
-+ bytes1 = (p - c) * sizeof(evtchn_port_t);
-+ bytes2 = 0;
-+ }
-+
-+ /* Truncate chunks according to caller's maximum byte count. */
-+ if (bytes1 > count) {
-+ bytes1 = count;
-+ bytes2 = 0;
-+ } else if ((bytes1 + bytes2) > count) {
-+ bytes2 = count - bytes1;
-+ }
-+
-+ rc = -EFAULT;
-+ if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
-+ ((bytes2 != 0) &&
-+ copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
-+ goto unlock_out;
-+
-+ u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
-+ rc = bytes1 + bytes2;
-+
-+ unlock_out:
-+ mutex_unlock(&u->ring_cons_mutex);
-+ return rc;
-+}
-+
-+static ssize_t evtchn_write(struct file *file, const char __user *buf,
-+ size_t count, loff_t *ppos)
-+{
-+ int rc, i;
-+ evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
-+ struct per_user_data *u = file->private_data;
-+
-+ if (kbuf == NULL)
-+ return -ENOMEM;
-+
-+ /* Whole number of ports. */
-+ count &= ~(sizeof(evtchn_port_t)-1);
-+
-+ rc = 0;
-+ if (count == 0)
-+ goto out;
-+
-+ if (count > PAGE_SIZE)
-+ count = PAGE_SIZE;
-+
-+ rc = -EFAULT;
-+ if (copy_from_user(kbuf, buf, count) != 0)
-+ goto out;
-+
-+ spin_lock_irq(&port_user_lock);
-+ for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
-+ if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
-+ unmask_evtchn(kbuf[i]);
-+ spin_unlock_irq(&port_user_lock);
-+
-+ rc = count;
-+
-+ out:
-+ free_page((unsigned long)kbuf);
-+ return rc;
-+}
-+
-+static void evtchn_bind_to_user(struct per_user_data *u, int port)
-+{
-+ spin_lock_irq(&port_user_lock);
-+ BUG_ON(port_user[port] != NULL);
-+ port_user[port] = u;
-+ unmask_evtchn(port);
-+ spin_unlock_irq(&port_user_lock);
-+}
-+
-+static int evtchn_ioctl(struct inode *inode, struct file *file,
-+ unsigned int cmd, unsigned long arg)
-+{
-+ int rc;
-+ struct per_user_data *u = file->private_data;
-+ void __user *uarg = (void __user *) arg;
-+
-+ switch (cmd) {
-+ case IOCTL_EVTCHN_BIND_VIRQ: {
-+ struct ioctl_evtchn_bind_virq bind;
-+ struct evtchn_bind_virq bind_virq;
-+
-+ rc = -EFAULT;
-+ if (copy_from_user(&bind, uarg, sizeof(bind)))
-+ break;
-+
-+ bind_virq.virq = bind.virq;
-+ bind_virq.vcpu = 0;
-+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
-+ &bind_virq);
-+ if (rc != 0)
-+ break;
-+
-+ rc = bind_virq.port;
-+ evtchn_bind_to_user(u, rc);
-+ break;
-+ }
-+
-+ case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
-+ struct ioctl_evtchn_bind_interdomain bind;
-+ struct evtchn_bind_interdomain bind_interdomain;
-+
-+ rc = -EFAULT;
-+ if (copy_from_user(&bind, uarg, sizeof(bind)))
-+ break;
-+
-+ bind_interdomain.remote_dom = bind.remote_domain;
-+ bind_interdomain.remote_port = bind.remote_port;
-+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
-+ &bind_interdomain);
-+ if (rc != 0)
-+ break;
-+
-+ rc = bind_interdomain.local_port;
-+ evtchn_bind_to_user(u, rc);
-+ break;
-+ }
-+
-+ case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
-+ struct ioctl_evtchn_bind_unbound_port bind;
-+ struct evtchn_alloc_unbound alloc_unbound;
-+
-+ rc = -EFAULT;
-+ if (copy_from_user(&bind, uarg, sizeof(bind)))
-+ break;
-+
-+ alloc_unbound.dom = DOMID_SELF;
-+ alloc_unbound.remote_dom = bind.remote_domain;
-+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
-+ &alloc_unbound);
-+ if (rc != 0)
-+ break;
-+
-+ rc = alloc_unbound.port;
-+ evtchn_bind_to_user(u, rc);
-+ break;
-+ }
-+
-+ case IOCTL_EVTCHN_UNBIND: {
-+ struct ioctl_evtchn_unbind unbind;
-+ struct evtchn_close close;
-+ int ret;
-+
-+ rc = -EFAULT;
-+ if (copy_from_user(&unbind, uarg, sizeof(unbind)))
-+ break;
-+
-+ rc = -EINVAL;
-+ if (unbind.port >= NR_EVENT_CHANNELS)
-+ break;
-+
-+ spin_lock_irq(&port_user_lock);
-+
-+ rc = -ENOTCONN;
-+ if (port_user[unbind.port] != u) {
-+ spin_unlock_irq(&port_user_lock);
-+ break;
-+ }
-+
-+ port_user[unbind.port] = NULL;
-+ mask_evtchn(unbind.port);
-+
-+ spin_unlock_irq(&port_user_lock);
-+
-+ close.port = unbind.port;
-+ ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
-+ BUG_ON(ret);
-+
-+ rc = 0;
-+ break;
-+ }
-+
-+ case IOCTL_EVTCHN_NOTIFY: {
-+ struct ioctl_evtchn_notify notify;
-+
-+ rc = -EFAULT;
-+ if (copy_from_user(&notify, uarg, sizeof(notify)))
-+ break;
-+
-+ if (notify.port >= NR_EVENT_CHANNELS) {
-+ rc = -EINVAL;
-+ } else if (port_user[notify.port] != u) {
-+ rc = -ENOTCONN;
-+ } else {
-+ notify_remote_via_evtchn(notify.port);
-+ rc = 0;
-+ }
-+ break;
-+ }
-+
-+ case IOCTL_EVTCHN_RESET: {
-+ /* Initialise the ring to empty. Clear errors. */
-+ mutex_lock(&u->ring_cons_mutex);
-+ spin_lock_irq(&port_user_lock);
-+ u->ring_cons = u->ring_prod = u->ring_overflow = 0;
-+ spin_unlock_irq(&port_user_lock);
-+ mutex_unlock(&u->ring_cons_mutex);
-+ rc = 0;
-+ break;
-+ }
-+
-+ default:
-+ rc = -ENOSYS;
-+ break;
-+ }
-+
-+ return rc;
-+}
-+
-+static unsigned int evtchn_poll(struct file *file, poll_table *wait)
-+{
-+ unsigned int mask = POLLOUT | POLLWRNORM;
-+ struct per_user_data *u = file->private_data;
-+
-+ poll_wait(file, &u->evtchn_wait, wait);
-+ if (u->ring_cons != u->ring_prod)
-+ mask |= POLLIN | POLLRDNORM;
-+ if (u->ring_overflow)
-+ mask = POLLERR;
-+ return mask;
-+}
-+
-+static int evtchn_fasync(int fd, struct file *filp, int on)
-+{
-+ struct per_user_data *u = filp->private_data;
-+ return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
-+}
-+
-+static int evtchn_open(struct inode *inode, struct file *filp)
-+{
-+ struct per_user_data *u;
-+
-+ if ((u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL)
-+ return -ENOMEM;
-+
-+ memset(u, 0, sizeof(*u));
-+ init_waitqueue_head(&u->evtchn_wait);
-+
-+ u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
-+ if (u->ring == NULL) {
-+ kfree(u);
-+ return -ENOMEM;
-+ }
-+
-+ mutex_init(&u->ring_cons_mutex);
-+
-+ filp->private_data = u;
-+
-+ return 0;
-+}
-+
-+static int evtchn_release(struct inode *inode, struct file *filp)
-+{
-+ int i;
-+ struct per_user_data *u = filp->private_data;
-+ struct evtchn_close close;
-+
-+ spin_lock_irq(&port_user_lock);
-+
-+ free_page((unsigned long)u->ring);
-+
-+ for (i = 0; i < NR_EVENT_CHANNELS; i++) {
-+ int ret;
-+ if (port_user[i] != u)
-+ continue;
-+
-+ port_user[i] = NULL;
-+ mask_evtchn(i);
-+
-+ close.port = i;
-+ ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
-+ BUG_ON(ret);
-+ }
-+
-+ spin_unlock_irq(&port_user_lock);
-+
-+ kfree(u);
-+
-+ return 0;
-+}
-+
-+static const struct file_operations evtchn_fops = {
-+ .owner = THIS_MODULE,
-+ .read = evtchn_read,
-+ .write = evtchn_write,
-+ .ioctl = evtchn_ioctl,
-+ .poll = evtchn_poll,
-+ .fasync = evtchn_fasync,
-+ .open = evtchn_open,
-+ .release = evtchn_release,
-+};
-+
-+static struct miscdevice evtchn_miscdev = {
-+ .minor = MISC_DYNAMIC_MINOR,
-+ .name = "evtchn",
-+ .fops = &evtchn_fops,
-+};
-+
-+static int __init evtchn_init(void)
-+{
-+ int err;
-+
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ spin_lock_init(&port_user_lock);
-+ memset(port_user, 0, sizeof(port_user));
-+
-+ /* Create '/dev/misc/evtchn'. */
-+ err = misc_register(&evtchn_miscdev);
-+ if (err != 0) {
-+ printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
-+ return err;
-+ }
-+
-+ printk("Event-channel device installed.\n");
-+
-+ return 0;
-+}
-+
-+static void evtchn_cleanup(void)
-+{
-+ misc_deregister(&evtchn_miscdev);
-+}
-+
-+module_init(evtchn_init);
-+module_exit(evtchn_cleanup);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/evtchn/Makefile ubuntu-gutsy-xen/drivers/xen/evtchn/Makefile
---- ubuntu-gutsy/drivers/xen/evtchn/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/evtchn/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,2 @@
-+
-+obj-y := evtchn.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/fbfront/Makefile ubuntu-gutsy-xen/drivers/xen/fbfront/Makefile
---- ubuntu-gutsy/drivers/xen/fbfront/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/fbfront/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,2 @@
-+obj-$(CONFIG_XEN_FRAMEBUFFER) := xenfb.o
-+obj-$(CONFIG_XEN_KEYBOARD) += xenkbd.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/fbfront/xenfb.c ubuntu-gutsy-xen/drivers/xen/fbfront/xenfb.c
---- ubuntu-gutsy/drivers/xen/fbfront/xenfb.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/fbfront/xenfb.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,752 @@
-+/*
-+ * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
-+ *
-+ * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
-+ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
-+ *
-+ * Based on linux/drivers/video/q40fb.c
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License. See the file COPYING in the main directory of this archive for
-+ * more details.
-+ */
-+
-+/*
-+ * TODO:
-+ *
-+ * Switch to grant tables when they become capable of dealing with the
-+ * frame buffer.
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/fb.h>
-+#include <linux/module.h>
-+#include <linux/vmalloc.h>
-+#include <linux/mm.h>
-+#include <linux/mutex.h>
-+#include <linux/freezer.h>
-+#include <asm/hypervisor.h>
-+#include <xen/evtchn.h>
-+#include <xen/interface/io/fbif.h>
-+#include <xen/interface/io/protocols.h>
-+#include <xen/xenbus.h>
-+#include <linux/kthread.h>
-+
-+struct xenfb_mapping
-+{
-+ struct list_head link;
-+ struct vm_area_struct *vma;
-+ atomic_t map_refs;
-+ int faults;
-+ struct xenfb_info *info;
-+};
-+
-+struct xenfb_info
-+{
-+ struct task_struct *kthread;
-+ wait_queue_head_t wq;
-+
-+ unsigned char *fb;
-+ struct fb_info *fb_info;
-+ struct timer_list refresh;
-+ int dirty;
-+ int x1, y1, x2, y2; /* dirty rectangle,
-+ protected by dirty_lock */
-+ spinlock_t dirty_lock;
-+ struct mutex mm_lock;
-+ int nr_pages;
-+ struct page **pages;
-+ struct list_head mappings; /* protected by mm_lock */
-+
-+ int irq;
-+ struct xenfb_page *page;
-+ unsigned long *mfns;
-+ int update_wanted; /* XENFB_TYPE_UPDATE wanted */
-+
-+ struct xenbus_device *xbdev;
-+};
-+
-+/*
-+ * How the locks work together
-+ *
-+ * There are two locks: spinlock dirty_lock protecting the dirty
-+ * rectangle, and mutex mm_lock protecting mappings.
-+ *
-+ * The problem is that dirty rectangle and mappings aren't
-+ * independent: the dirty rectangle must cover all faulted pages in
-+ * mappings. We need to prove that our locking maintains this
-+ * invariant.
-+ *
-+ * There are several kinds of critical regions:
-+ *
-+ * 1. Holding only dirty_lock: xenfb_refresh(). May run in
-+ * interrupts. Extends the dirty rectangle. Trivially preserves
-+ * invariant.
-+ *
-+ * 2. Holding only mm_lock: xenfb_mmap() and xenfb_vm_close(). Touch
-+ * only mappings. The former creates unfaulted pages. Preserves
-+ * invariant. The latter removes pages. Preserves invariant.
-+ *
-+ * 3. Holding both locks: xenfb_vm_nopage(). Extends the dirty
-+ * rectangle and updates mappings consistently. Preserves
-+ * invariant.
-+ *
-+ * 4. The ugliest one: xenfb_update_screen(). Clear the dirty
-+ * rectangle and update mappings consistently.
-+ *
-+ * We can't simply hold both locks, because zap_page_range() cannot
-+ * be called with a spinlock held.
-+ *
-+ * Therefore, we first clear the dirty rectangle with both locks
-+ * held. Then we unlock dirty_lock and update the mappings.
-+ * Critical regions that hold only dirty_lock may interfere with
-+ * that. This can only be region 1: xenfb_refresh(). But that
-+ * just extends the dirty rectangle, which can't harm the
-+ * invariant.
-+ *
-+ * But FIXME: the invariant is too weak. It misses that the fault
-+ * record in mappings must be consistent with the mapping of pages in
-+ * the associated address space! do_no_page() updates the PTE after
-+ * xenfb_vm_nopage() returns, i.e. outside the critical region. This
-+ * allows the following race:
-+ *
-+ * X writes to some address in the Xen frame buffer
-+ * Fault - call do_no_page()
-+ * call xenfb_vm_nopage()
-+ * grab mm_lock
-+ * map->faults++;
-+ * release mm_lock
-+ * return back to do_no_page()
-+ * (preempted, or SMP)
-+ * Xen worker thread runs.
-+ * grab mm_lock
-+ * look at mappings
-+ * find this mapping, zaps its pages (but page not in pte yet)
-+ * clear map->faults
-+ * releases mm_lock
-+ * (back to X process)
-+ * put page in X's pte
-+ *
-+ * Oh well, we won't be updating the writes to this page anytime soon.
-+ */
-+
-+static int xenfb_fps = 20;
-+static unsigned long xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;
-+
-+static int xenfb_remove(struct xenbus_device *);
-+static void xenfb_init_shared_page(struct xenfb_info *);
-+static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
-+static void xenfb_disconnect_backend(struct xenfb_info *);
-+
-+static void xenfb_do_update(struct xenfb_info *info,
-+ int x, int y, int w, int h)
-+{
-+ union xenfb_out_event event;
-+ __u32 prod;
-+
-+ event.type = XENFB_TYPE_UPDATE;
-+ event.update.x = x;
-+ event.update.y = y;
-+ event.update.width = w;
-+ event.update.height = h;
-+
-+ prod = info->page->out_prod;
-+ /* caller ensures !xenfb_queue_full() */
-+ mb(); /* ensure ring space available */
-+ XENFB_OUT_RING_REF(info->page, prod) = event;
-+ wmb(); /* ensure ring contents visible */
-+ info->page->out_prod = prod + 1;
-+
-+ notify_remote_via_irq(info->irq);
-+}
-+
-+static int xenfb_queue_full(struct xenfb_info *info)
-+{
-+ __u32 cons, prod;
-+
-+ prod = info->page->out_prod;
-+ cons = info->page->out_cons;
-+ return prod - cons == XENFB_OUT_RING_LEN;
-+}
-+
-+static void xenfb_update_screen(struct xenfb_info *info)
-+{
-+ unsigned long flags;
-+ int y1, y2, x1, x2;
-+ struct xenfb_mapping *map;
-+
-+ if (!info->update_wanted)
-+ return;
-+ if (xenfb_queue_full(info))
-+ return;
-+
-+ mutex_lock(&info->mm_lock);
-+
-+ spin_lock_irqsave(&info->dirty_lock, flags);
-+ y1 = info->y1;
-+ y2 = info->y2;
-+ x1 = info->x1;
-+ x2 = info->x2;
-+ info->x1 = info->y1 = INT_MAX;
-+ info->x2 = info->y2 = 0;
-+ spin_unlock_irqrestore(&info->dirty_lock, flags);
-+
-+ list_for_each_entry(map, &info->mappings, link) {
-+ if (!map->faults)
-+ continue;
-+ zap_page_range(map->vma, map->vma->vm_start,
-+ map->vma->vm_end - map->vma->vm_start, NULL);
-+ map->faults = 0;
-+ }
-+
-+ mutex_unlock(&info->mm_lock);
-+
-+ xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
-+}
-+
-+static int xenfb_thread(void *data)
-+{
-+ struct xenfb_info *info = data;
-+
-+ while (!kthread_should_stop()) {
-+ if (info->dirty) {
-+ info->dirty = 0;
-+ xenfb_update_screen(info);
-+ }
-+ wait_event_interruptible(info->wq,
-+ kthread_should_stop() || info->dirty);
-+ try_to_freeze();
-+ }
-+ return 0;
-+}
-+
-+static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
-+ unsigned blue, unsigned transp,
-+ struct fb_info *info)
-+{
-+ u32 v;
-+
-+ if (regno > info->cmap.len)
-+ return 1;
-+
-+ red >>= (16 - info->var.red.length);
-+ green >>= (16 - info->var.green.length);
-+ blue >>= (16 - info->var.blue.length);
-+
-+ v = (red << info->var.red.offset) |
-+ (green << info->var.green.offset) |
-+ (blue << info->var.blue.offset);
-+
-+ /* FIXME is this sane? check against xxxfb_setcolreg()! */
-+ switch (info->var.bits_per_pixel) {
-+ case 16:
-+ case 24:
-+ case 32:
-+ ((u32 *)info->pseudo_palette)[regno] = v;
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+static void xenfb_timer(unsigned long data)
-+{
-+ struct xenfb_info *info = (struct xenfb_info *)data;
-+ info->dirty = 1;
-+ wake_up(&info->wq);
-+}
-+
-+static void __xenfb_refresh(struct xenfb_info *info,
-+ int x1, int y1, int w, int h)
-+{
-+ int y2, x2;
-+
-+ y2 = y1 + h;
-+ x2 = x1 + w;
-+
-+ if (info->y1 > y1)
-+ info->y1 = y1;
-+ if (info->y2 < y2)
-+ info->y2 = y2;
-+ if (info->x1 > x1)
-+ info->x1 = x1;
-+ if (info->x2 < x2)
-+ info->x2 = x2;
-+
-+ if (timer_pending(&info->refresh))
-+ return;
-+
-+ mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
-+}
-+
-+static void xenfb_refresh(struct xenfb_info *info,
-+ int x1, int y1, int w, int h)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&info->dirty_lock, flags);
-+ __xenfb_refresh(info, x1, y1, w, h);
-+ spin_unlock_irqrestore(&info->dirty_lock, flags);
-+}
-+
-+static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
-+{
-+ struct xenfb_info *info = p->par;
-+
-+ cfb_fillrect(p, rect);
-+ xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
-+}
-+
-+static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
-+{
-+ struct xenfb_info *info = p->par;
-+
-+ cfb_imageblit(p, image);
-+ xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
-+}
-+
-+static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
-+{
-+ struct xenfb_info *info = p->par;
-+
-+ cfb_copyarea(p, area);
-+ xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
-+}
-+
-+static void xenfb_vm_open(struct vm_area_struct *vma)
-+{
-+ struct xenfb_mapping *map = vma->vm_private_data;
-+ atomic_inc(&map->map_refs);
-+}
-+
-+static void xenfb_vm_close(struct vm_area_struct *vma)
-+{
-+ struct xenfb_mapping *map = vma->vm_private_data;
-+ struct xenfb_info *info = map->info;
-+
-+ mutex_lock(&info->mm_lock);
-+ if (atomic_dec_and_test(&map->map_refs)) {
-+ list_del(&map->link);
-+ kfree(map);
-+ }
-+ mutex_unlock(&info->mm_lock);
-+}
-+
-+static struct page *xenfb_vm_nopage(struct vm_area_struct *vma,
-+ unsigned long vaddr, int *type)
-+{
-+ struct xenfb_mapping *map = vma->vm_private_data;
-+ struct xenfb_info *info = map->info;
-+ int pgnr = (vaddr - vma->vm_start) >> PAGE_SHIFT;
-+ unsigned long flags;
-+ struct page *page;
-+ int y1, y2;
-+
-+ if (pgnr >= info->nr_pages)
-+ return NOPAGE_SIGBUS;
-+
-+ mutex_lock(&info->mm_lock);
-+ spin_lock_irqsave(&info->dirty_lock, flags);
-+ page = info->pages[pgnr];
-+ get_page(page);
-+ map->faults++;
-+
-+ y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
-+ y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
-+ if (y2 > info->fb_info->var.yres)
-+ y2 = info->fb_info->var.yres;
-+ __xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
-+ spin_unlock_irqrestore(&info->dirty_lock, flags);
-+ mutex_unlock(&info->mm_lock);
-+
-+ if (type)
-+ *type = VM_FAULT_MINOR;
-+
-+ return page;
-+}
-+
-+static struct vm_operations_struct xenfb_vm_ops = {
-+ .open = xenfb_vm_open,
-+ .close = xenfb_vm_close,
-+ .nopage = xenfb_vm_nopage,
-+};
-+
-+static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
-+{
-+ struct xenfb_info *info = fb_info->par;
-+ struct xenfb_mapping *map;
-+ int map_pages;
-+
-+ if (!(vma->vm_flags & VM_WRITE))
-+ return -EINVAL;
-+ if (!(vma->vm_flags & VM_SHARED))
-+ return -EINVAL;
-+ if (vma->vm_pgoff != 0)
-+ return -EINVAL;
-+
-+ map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
-+ if (map_pages > info->nr_pages)
-+ return -EINVAL;
-+
-+ map = kzalloc(sizeof(*map), GFP_KERNEL);
-+ if (map == NULL)
-+ return -ENOMEM;
-+
-+ map->vma = vma;
-+ map->faults = 0;
-+ map->info = info;
-+ atomic_set(&map->map_refs, 1);
-+
-+ mutex_lock(&info->mm_lock);
-+ list_add(&map->link, &info->mappings);
-+ mutex_unlock(&info->mm_lock);
-+
-+ vma->vm_ops = &xenfb_vm_ops;
-+ vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
-+ vma->vm_private_data = map;
-+
-+ return 0;
-+}
-+
-+static struct fb_ops xenfb_fb_ops = {
-+ .owner = THIS_MODULE,
-+ .fb_setcolreg = xenfb_setcolreg,
-+ .fb_fillrect = xenfb_fillrect,
-+ .fb_copyarea = xenfb_copyarea,
-+ .fb_imageblit = xenfb_imageblit,
-+ .fb_mmap = xenfb_mmap,
-+};
-+
-+static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
-+{
-+ /*
-+ * No in events recognized, simply ignore them all.
-+ * If you need to recognize some, see xenkbd's input_handler()
-+ * for how to do that.
-+ */
-+ struct xenfb_info *info = dev_id;
-+ struct xenfb_page *page = info->page;
-+
-+ if (page->in_cons != page->in_prod) {
-+ info->page->in_cons = info->page->in_prod;
-+ notify_remote_via_irq(info->irq);
-+ }
-+ return IRQ_HANDLED;
-+}
-+
-+static unsigned long vmalloc_to_mfn(void *address)
-+{
-+ return pfn_to_mfn(vmalloc_to_pfn(address));
-+}
-+
-+static int __devinit xenfb_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ struct xenfb_info *info;
-+ struct fb_info *fb_info;
-+ int ret;
-+
-+ info = kzalloc(sizeof(*info), GFP_KERNEL);
-+ if (info == NULL) {
-+ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
-+ return -ENOMEM;
-+ }
-+ dev->dev.driver_data = info;
-+ info->xbdev = dev;
-+ info->irq = -1;
-+ info->x1 = info->y1 = INT_MAX;
-+ spin_lock_init(&info->dirty_lock);
-+ mutex_init(&info->mm_lock);
-+ init_waitqueue_head(&info->wq);
-+ init_timer(&info->refresh);
-+ info->refresh.function = xenfb_timer;
-+ info->refresh.data = (unsigned long)info;
-+ INIT_LIST_HEAD(&info->mappings);
-+
-+ info->fb = vmalloc(xenfb_mem_len);
-+ if (info->fb == NULL)
-+ goto error_nomem;
-+ memset(info->fb, 0, xenfb_mem_len);
-+
-+ info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-+
-+ info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
-+ GFP_KERNEL);
-+ if (info->pages == NULL)
-+ goto error_nomem;
-+
-+ info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
-+ if (!info->mfns)
-+ goto error_nomem;
-+
-+ /* set up shared page */
-+ info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-+ if (!info->page)
-+ goto error_nomem;
-+
-+ xenfb_init_shared_page(info);
-+
-+ fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
-+ /* see fishy hackery below */
-+ if (fb_info == NULL)
-+ goto error_nomem;
-+
-+ /* FIXME fishy hackery */
-+ fb_info->pseudo_palette = fb_info->par;
-+ fb_info->par = info;
-+ /* /FIXME */
-+ fb_info->screen_base = info->fb;
-+
-+ fb_info->fbops = &xenfb_fb_ops;
-+ fb_info->var.xres_virtual = fb_info->var.xres = info->page->width;
-+ fb_info->var.yres_virtual = fb_info->var.yres = info->page->height;
-+ fb_info->var.bits_per_pixel = info->page->depth;
-+
-+ fb_info->var.red = (struct fb_bitfield){16, 8, 0};
-+ fb_info->var.green = (struct fb_bitfield){8, 8, 0};
-+ fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
-+
-+ fb_info->var.activate = FB_ACTIVATE_NOW;
-+ fb_info->var.height = -1;
-+ fb_info->var.width = -1;
-+ fb_info->var.vmode = FB_VMODE_NONINTERLACED;
-+
-+ fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
-+ fb_info->fix.line_length = info->page->line_length;
-+ fb_info->fix.smem_start = 0;
-+ fb_info->fix.smem_len = xenfb_mem_len;
-+ strcpy(fb_info->fix.id, "xen");
-+ fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
-+ fb_info->fix.accel = FB_ACCEL_NONE;
-+
-+ fb_info->flags = FBINFO_FLAG_DEFAULT;
-+
-+ ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
-+ if (ret < 0) {
-+ framebuffer_release(fb_info);
-+ xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
-+ goto error;
-+ }
-+
-+ ret = register_framebuffer(fb_info);
-+ if (ret) {
-+ fb_dealloc_cmap(&info->fb_info->cmap);
-+ framebuffer_release(fb_info);
-+ xenbus_dev_fatal(dev, ret, "register_framebuffer");
-+ goto error;
-+ }
-+ info->fb_info = fb_info;
-+
-+ /* FIXME should this be delayed until backend XenbusStateConnected? */
-+ info->kthread = kthread_run(xenfb_thread, info, "xenfb thread");
-+ if (IS_ERR(info->kthread)) {
-+ ret = PTR_ERR(info->kthread);
-+ info->kthread = NULL;
-+ xenbus_dev_fatal(dev, ret, "register_framebuffer");
-+ goto error;
-+ }
-+
-+ ret = xenfb_connect_backend(dev, info);
-+ if (ret < 0)
-+ goto error;
-+
-+ return 0;
-+
-+ error_nomem:
-+ ret = -ENOMEM;
-+ xenbus_dev_fatal(dev, ret, "allocating device memory");
-+ error:
-+ xenfb_remove(dev);
-+ return ret;
-+}
-+
-+static int xenfb_resume(struct xenbus_device *dev)
-+{
-+ struct xenfb_info *info = dev->dev.driver_data;
-+
-+ xenfb_disconnect_backend(info);
-+ xenfb_init_shared_page(info);
-+ return xenfb_connect_backend(dev, info);
-+}
-+
-+static int xenfb_remove(struct xenbus_device *dev)
-+{
-+ struct xenfb_info *info = dev->dev.driver_data;
-+
-+ del_timer(&info->refresh);
-+ if (info->kthread)
-+ kthread_stop(info->kthread);
-+ xenfb_disconnect_backend(info);
-+ if (info->fb_info) {
-+ unregister_framebuffer(info->fb_info);
-+ fb_dealloc_cmap(&info->fb_info->cmap);
-+ framebuffer_release(info->fb_info);
-+ }
-+ free_page((unsigned long)info->page);
-+ vfree(info->mfns);
-+ kfree(info->pages);
-+ vfree(info->fb);
-+ kfree(info);
-+
-+ return 0;
-+}
-+
-+static void xenfb_init_shared_page(struct xenfb_info *info)
-+{
-+ int i;
-+
-+ for (i = 0; i < info->nr_pages; i++)
-+ info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);
-+
-+ for (i = 0; i < info->nr_pages; i++)
-+ info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
-+
-+ info->page->pd[0] = vmalloc_to_mfn(info->mfns);
-+ info->page->pd[1] = 0;
-+ info->page->width = XENFB_WIDTH;
-+ info->page->height = XENFB_HEIGHT;
-+ info->page->depth = XENFB_DEPTH;
-+ info->page->line_length = (info->page->depth / 8) * info->page->width;
-+ info->page->mem_length = xenfb_mem_len;
-+ info->page->in_cons = info->page->in_prod = 0;
-+ info->page->out_cons = info->page->out_prod = 0;
-+}
-+
-+static int xenfb_connect_backend(struct xenbus_device *dev,
-+ struct xenfb_info *info)
-+{
-+ int ret;
-+ struct xenbus_transaction xbt;
-+
-+ ret = bind_listening_port_to_irqhandler(
-+ dev->otherend_id, xenfb_event_handler, 0, "xenfb", info);
-+ if (ret < 0) {
-+ xenbus_dev_fatal(dev, ret,
-+ "bind_listening_port_to_irqhandler");
-+ return ret;
-+ }
-+ info->irq = ret;
-+
-+ again:
-+ ret = xenbus_transaction_start(&xbt);
-+ if (ret) {
-+ xenbus_dev_fatal(dev, ret, "starting transaction");
-+ return ret;
-+ }
-+ ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
-+ virt_to_mfn(info->page));
-+ if (ret)
-+ goto error_xenbus;
-+ ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
-+ irq_to_evtchn_port(info->irq));
-+ if (ret)
-+ goto error_xenbus;
-+ ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
-+ XEN_IO_PROTO_ABI_NATIVE);
-+ if (ret)
-+ goto error_xenbus;
-+ ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
-+ if (ret)
-+ goto error_xenbus;
-+ ret = xenbus_transaction_end(xbt, 0);
-+ if (ret) {
-+ if (ret == -EAGAIN)
-+ goto again;
-+ xenbus_dev_fatal(dev, ret, "completing transaction");
-+ return ret;
-+ }
-+
-+ xenbus_switch_state(dev, XenbusStateInitialised);
-+ return 0;
-+
-+ error_xenbus:
-+ xenbus_transaction_end(xbt, 1);
-+ xenbus_dev_fatal(dev, ret, "writing xenstore");
-+ return ret;
-+}
-+
-+static void xenfb_disconnect_backend(struct xenfb_info *info)
-+{
-+ if (info->irq >= 0)
-+ unbind_from_irqhandler(info->irq, info);
-+ info->irq = -1;
-+}
-+
-+static void xenfb_backend_changed(struct xenbus_device *dev,
-+ enum xenbus_state backend_state)
-+{
-+ struct xenfb_info *info = dev->dev.driver_data;
-+ int val;
-+
-+ switch (backend_state) {
-+ case XenbusStateInitialising:
-+ case XenbusStateInitialised:
-+ case XenbusStateUnknown:
-+ case XenbusStateClosed:
-+ break;
-+
-+ case XenbusStateInitWait:
-+ InitWait:
-+ xenbus_switch_state(dev, XenbusStateConnected);
-+ break;
-+
-+ case XenbusStateConnected:
-+ /*
-+ * Work around xenbus race condition: If backend goes
-+ * through InitWait to Connected fast enough, we can
-+ * get Connected twice here.
-+ */
-+ if (dev->state != XenbusStateConnected)
-+ goto InitWait; /* no InitWait seen yet, fudge it */
-+
-+ if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-+ "request-update", "%d", &val) < 0)
-+ val = 0;
-+ if (val)
-+ info->update_wanted = 1;
-+ break;
-+
-+ case XenbusStateClosing:
-+ // FIXME is this safe in any dev->state?
-+ xenbus_frontend_closed(dev);
-+ break;
-+ }
-+}
-+
-+static struct xenbus_device_id xenfb_ids[] = {
-+ { "vfb" },
-+ { "" }
-+};
-+
-+static struct xenbus_driver xenfb = {
-+ .name = "vfb",
-+ .owner = THIS_MODULE,
-+ .ids = xenfb_ids,
-+ .probe = xenfb_probe,
-+ .remove = xenfb_remove,
-+ .resume = xenfb_resume,
-+ .otherend_changed = xenfb_backend_changed,
-+};
-+
-+static int __init xenfb_init(void)
-+{
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ /* Nothing to do if running in dom0. */
-+ if (is_initial_xendomain())
-+ return -ENODEV;
-+
-+ return xenbus_register_frontend(&xenfb);
-+}
-+
-+static void __exit xenfb_cleanup(void)
-+{
-+ return xenbus_unregister_driver(&xenfb);
-+}
-+
-+module_init(xenfb_init);
-+module_exit(xenfb_cleanup);
-+
-+MODULE_LICENSE("GPL");
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/fbfront/xenkbd.c ubuntu-gutsy-xen/drivers/xen/fbfront/xenkbd.c
---- ubuntu-gutsy/drivers/xen/fbfront/xenkbd.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/fbfront/xenkbd.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,333 @@
-+/*
-+ * linux/drivers/input/keyboard/xenkbd.c -- Xen para-virtual input device
-+ *
-+ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
-+ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
-+ *
-+ * Based on linux/drivers/input/mouse/sermouse.c
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License. See the file COPYING in the main directory of this archive for
-+ * more details.
-+ */
-+
-+/*
-+ * TODO:
-+ *
-+ * Switch to grant tables together with xenfb.c.
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/module.h>
-+#include <linux/input.h>
-+#include <asm/hypervisor.h>
-+#include <xen/evtchn.h>
-+#include <xen/interface/io/fbif.h>
-+#include <xen/interface/io/kbdif.h>
-+#include <xen/xenbus.h>
-+
-+struct xenkbd_info
-+{
-+ struct input_dev *kbd;
-+ struct input_dev *ptr;
-+ struct xenkbd_page *page;
-+ int irq;
-+ struct xenbus_device *xbdev;
-+ char phys[32];
-+};
-+
-+static int xenkbd_remove(struct xenbus_device *);
-+static int xenkbd_connect_backend(struct xenbus_device *, struct xenkbd_info *);
-+static void xenkbd_disconnect_backend(struct xenkbd_info *);
-+
-+/*
-+ * Note: if you need to send out events, see xenfb_do_update() for how
-+ * to do that.
-+ */
-+
-+static irqreturn_t input_handler(int rq, void *dev_id)
-+{
-+ struct xenkbd_info *info = dev_id;
-+ struct xenkbd_page *page = info->page;
-+ __u32 cons, prod;
-+
-+ prod = page->in_prod;
-+ if (prod == page->out_cons)
-+ return IRQ_HANDLED;
-+ rmb(); /* ensure we see ring contents up to prod */
-+ for (cons = page->in_cons; cons != prod; cons++) {
-+ union xenkbd_in_event *event;
-+ struct input_dev *dev;
-+ event = &XENKBD_IN_RING_REF(page, cons);
-+
-+ dev = info->ptr;
-+ switch (event->type) {
-+ case XENKBD_TYPE_MOTION:
-+ input_report_rel(dev, REL_X, event->motion.rel_x);
-+ input_report_rel(dev, REL_Y, event->motion.rel_y);
-+ break;
-+ case XENKBD_TYPE_KEY:
-+ dev = NULL;
-+ if (test_bit(event->key.keycode, info->kbd->keybit))
-+ dev = info->kbd;
-+ if (test_bit(event->key.keycode, info->ptr->keybit))
-+ dev = info->ptr;
-+ if (dev)
-+ input_report_key(dev, event->key.keycode,
-+ event->key.pressed);
-+ else
-+ printk("xenkbd: unhandled keycode 0x%x\n",
-+ event->key.keycode);
-+ break;
-+ case XENKBD_TYPE_POS:
-+ input_report_abs(dev, ABS_X, event->pos.abs_x);
-+ input_report_abs(dev, ABS_Y, event->pos.abs_y);
-+ break;
-+ }
-+ if (dev)
-+ input_sync(dev);
-+ }
-+ mb(); /* ensure we got ring contents */
-+ page->in_cons = cons;
-+ notify_remote_via_irq(info->irq);
-+
-+ return IRQ_HANDLED;
-+}
-+
-+int __devinit xenkbd_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ int ret, i;
-+ struct xenkbd_info *info;
-+ struct input_dev *kbd, *ptr;
-+
-+ info = kzalloc(sizeof(*info), GFP_KERNEL);
-+ if (!info) {
-+ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
-+ return -ENOMEM;
-+ }
-+ dev->dev.driver_data = info;
-+ info->xbdev = dev;
-+ snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);
-+
-+ info->page = (void *)__get_free_page(GFP_KERNEL);
-+ if (!info->page)
-+ goto error_nomem;
-+ info->page->in_cons = info->page->in_prod = 0;
-+ info->page->out_cons = info->page->out_prod = 0;
-+
-+ /* keyboard */
-+ kbd = input_allocate_device();
-+ if (!kbd)
-+ goto error_nomem;
-+ kbd->name = "Xen Virtual Keyboard";
-+ kbd->phys = info->phys;
-+ kbd->id.bustype = BUS_PCI;
-+ kbd->id.vendor = 0x5853;
-+ kbd->id.product = 0xffff;
-+ kbd->evbit[0] = BIT(EV_KEY);
-+ for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
-+ set_bit(i, kbd->keybit);
-+ for (i = KEY_OK; i < KEY_MAX; i++)
-+ set_bit(i, kbd->keybit);
-+
-+ ret = input_register_device(kbd);
-+ if (ret) {
-+ input_free_device(kbd);
-+ xenbus_dev_fatal(dev, ret, "input_register_device(kbd)");
-+ goto error;
-+ }
-+ info->kbd = kbd;
-+
-+ /* pointing device */
-+ ptr = input_allocate_device();
-+ if (!ptr)
-+ goto error_nomem;
-+ ptr->name = "Xen Virtual Pointer";
-+ ptr->phys = info->phys;
-+ ptr->id.bustype = BUS_PCI;
-+ ptr->id.vendor = 0x5853;
-+ ptr->id.product = 0xfffe;
-+ ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
-+ for (i = BTN_LEFT; i <= BTN_TASK; i++)
-+ set_bit(i, ptr->keybit);
-+ ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y);
-+ input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
-+ input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
-+
-+ ret = input_register_device(ptr);
-+ if (ret) {
-+ input_free_device(ptr);
-+ xenbus_dev_fatal(dev, ret, "input_register_device(ptr)");
-+ goto error;
-+ }
-+ info->ptr = ptr;
-+
-+ ret = xenkbd_connect_backend(dev, info);
-+ if (ret < 0)
-+ goto error;
-+
-+ return 0;
-+
-+ error_nomem:
-+ ret = -ENOMEM;
-+ xenbus_dev_fatal(dev, ret, "allocating device memory");
-+ error:
-+ xenkbd_remove(dev);
-+ return ret;
-+}
-+
-+static int xenkbd_resume(struct xenbus_device *dev)
-+{
-+ struct xenkbd_info *info = dev->dev.driver_data;
-+
-+ xenkbd_disconnect_backend(info);
-+ return xenkbd_connect_backend(dev, info);
-+}
-+
-+static int xenkbd_remove(struct xenbus_device *dev)
-+{
-+ struct xenkbd_info *info = dev->dev.driver_data;
-+
-+ xenkbd_disconnect_backend(info);
-+ input_unregister_device(info->kbd);
-+ input_unregister_device(info->ptr);
-+ free_page((unsigned long)info->page);
-+ kfree(info);
-+ return 0;
-+}
-+
-+static int xenkbd_connect_backend(struct xenbus_device *dev,
-+ struct xenkbd_info *info)
-+{
-+ int ret;
-+ struct xenbus_transaction xbt;
-+
-+ ret = bind_listening_port_to_irqhandler(
-+ dev->otherend_id, input_handler, 0, "xenkbd", info);
-+ if (ret < 0) {
-+ xenbus_dev_fatal(dev, ret,
-+ "bind_listening_port_to_irqhandler");
-+ return ret;
-+ }
-+ info->irq = ret;
-+
-+ again:
-+ ret = xenbus_transaction_start(&xbt);
-+ if (ret) {
-+ xenbus_dev_fatal(dev, ret, "starting transaction");
-+ return ret;
-+ }
-+ ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
-+ virt_to_mfn(info->page));
-+ if (ret)
-+ goto error_xenbus;
-+ ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
-+ irq_to_evtchn_port(info->irq));
-+ if (ret)
-+ goto error_xenbus;
-+ ret = xenbus_transaction_end(xbt, 0);
-+ if (ret) {
-+ if (ret == -EAGAIN)
-+ goto again;
-+ xenbus_dev_fatal(dev, ret, "completing transaction");
-+ return ret;
-+ }
-+
-+ xenbus_switch_state(dev, XenbusStateInitialised);
-+ return 0;
-+
-+ error_xenbus:
-+ xenbus_transaction_end(xbt, 1);
-+ xenbus_dev_fatal(dev, ret, "writing xenstore");
-+ return ret;
-+}
-+
-+static void xenkbd_disconnect_backend(struct xenkbd_info *info)
-+{
-+ if (info->irq >= 0)
-+ unbind_from_irqhandler(info->irq, info);
-+ info->irq = -1;
-+}
-+
-+static void xenkbd_backend_changed(struct xenbus_device *dev,
-+ enum xenbus_state backend_state)
-+{
-+ struct xenkbd_info *info = dev->dev.driver_data;
-+ int ret, val;
-+
-+ switch (backend_state) {
-+ case XenbusStateInitialising:
-+ case XenbusStateInitialised:
-+ case XenbusStateUnknown:
-+ case XenbusStateClosed:
-+ break;
-+
-+ case XenbusStateInitWait:
-+ InitWait:
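-+ /* Backend is ready: request absolute pointer events if the backend
-+  * advertises feature-abs-pointer, then announce ourselves connected. */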
-+ ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-+ "feature-abs-pointer", "%d", &val);
-+ if (ret < 0)
-+ val = 0;
-+ if (val) {
-+ ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
-+ "request-abs-pointer", "1");
-+ if (ret)
-+ ; /* FIXME */
-+ }
-+ xenbus_switch_state(dev, XenbusStateConnected);
-+ break;
-+
-+ case XenbusStateConnected:
-+ /*
-+ * Work around xenbus race condition: If backend goes
-+ * through InitWait to Connected fast enough, we can
-+ * get Connected twice here.
-+ */
-+ if (dev->state != XenbusStateConnected)
-+ goto InitWait; /* no InitWait seen yet, fudge it */
-+ break;
-+
-+ case XenbusStateClosing:
-+ xenbus_frontend_closed(dev);
-+ break;
-+ }
-+}
-+
-+static struct xenbus_device_id xenkbd_ids[] = {
-+ { "vkbd" },
-+ { "" }
-+};
-+
-+static struct xenbus_driver xenkbd = {
-+ .name = "vkbd",
-+ .owner = THIS_MODULE,
-+ .ids = xenkbd_ids,
-+ .probe = xenkbd_probe,
-+ .remove = xenkbd_remove,
-+ .resume = xenkbd_resume,
-+ .otherend_changed = xenkbd_backend_changed,
-+};
-+
-+static int __init xenkbd_init(void)
-+{
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ /* Nothing to do if running in dom0. */
-+ if (is_initial_xendomain())
-+ return -ENODEV;
-+
-+ return xenbus_register_frontend(&xenkbd);
-+}
-+
-+static void __exit xenkbd_cleanup(void)
-+{
-+ return xenbus_unregister_driver(&xenkbd);
-+}
-+
-+module_init(xenkbd_init);
-+module_exit(xenkbd_cleanup);
-+
-+MODULE_LICENSE("GPL");
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/gntdev/gntdev.c ubuntu-gutsy-xen/drivers/xen/gntdev/gntdev.c
---- ubuntu-gutsy/drivers/xen/gntdev/gntdev.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/gntdev/gntdev.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,974 @@
-+/******************************************************************************
-+ * gntdev.c
-+ *
-+ * Device for accessing (in user-space) pages that have been granted by other
-+ * domains.
-+ *
-+ * Copyright (c) 2006-2007, D G Murray.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+
-+#include <asm/atomic.h>
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/fs.h>
-+#include <linux/device.h>
-+#include <linux/mm.h>
-+#include <linux/mman.h>
-+#include <asm/uaccess.h>
-+#include <asm/io.h>
-+#include <xen/gnttab.h>
-+#include <asm/hypervisor.h>
-+#include <xen/balloon.h>
-+#include <xen/evtchn.h>
-+#include <xen/driver_util.h>
-+
-+#include <linux/types.h>
-+#include <xen/public/gntdev.h>
-+
-+
-+#define DRIVER_AUTHOR "Derek G. Murray <Derek.Murray@cl.cam.ac.uk>"
-+#define DRIVER_DESC "User-space granted page access driver"
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR(DRIVER_AUTHOR);
-+MODULE_DESCRIPTION(DRIVER_DESC);
-+
-+#define MAX_GRANTS 128
-+
-+/* A slot can be in one of three states:
-+ *
-+ * 0. GNTDEV_SLOT_INVALID:
-+ * This slot is not associated with a grant reference, and is therefore free
-+ * to be overwritten by a new grant reference.
-+ *
-+ * 1. GNTDEV_SLOT_NOT_YET_MAPPED:
-+ * This slot is associated with a grant reference (via the
-+ * IOCTL_GNTDEV_MAP_GRANT_REF ioctl), but it has not yet been mmap()-ed.
-+ *
-+ * 2. GNTDEV_SLOT_MAPPED:
-+ * This slot is associated with a grant reference, and has been mmap()-ed.
-+ */
-+typedef enum gntdev_slot_state {
-+ GNTDEV_SLOT_INVALID = 0,
-+ GNTDEV_SLOT_NOT_YET_MAPPED,
-+ GNTDEV_SLOT_MAPPED
-+} gntdev_slot_state_t;
-+
-+#define GNTDEV_INVALID_HANDLE -1
-+#define GNTDEV_FREE_LIST_INVALID -1
-+/* Each opened instance of gntdev is associated with a list of grants,
-+ * represented by an array of elements of the following type,
-+ * gntdev_grant_info_t.
-+ */
-+typedef struct gntdev_grant_info {
-+ gntdev_slot_state_t state;
-+ union {
-+ uint32_t free_list_index;
-+ struct {
-+ domid_t domid;
-+ grant_ref_t ref;
-+ grant_handle_t kernel_handle;
-+ grant_handle_t user_handle;
-+ uint64_t dev_bus_addr;
-+ } valid;
-+ } u;
-+} gntdev_grant_info_t;
-+
-+/* Private data structure, which is stored in the file pointer for files
-+ * associated with this device.
-+ */
-+typedef struct gntdev_file_private_data {
-+
-+ /* Array of grant information. */
-+ gntdev_grant_info_t grants[MAX_GRANTS];
-+
-+ /* Read/write semaphore used to protect the grants array. */
-+ struct rw_semaphore grants_sem;
-+
-+ /* An array of indices of free slots in the grants array.
-+ * N.B. An entry in this list may temporarily have the value
-+ * GNTDEV_FREE_LIST_INVALID if the corresponding slot has been removed
-+ * from the list by the contiguous allocator, but the list has not yet
-+ * been compressed. However, this is not visible across invocations of
-+ * the device.
-+ */
-+ int32_t free_list[MAX_GRANTS];
-+
-+ /* The number of free slots in the grants array. */
-+ uint32_t free_list_size;
-+
-+ /* Read/write semaphore used to protect the free list. */
-+ struct rw_semaphore free_list_sem;
-+
-+ /* Index of the next slot after the most recent contiguous allocation,
-+ * for use in a next-fit allocator.
-+ */
-+ uint32_t next_fit_index;
-+
-+ /* Used to map grants into the kernel, before mapping them into user
-+ * space.
-+ */
-+ struct page **foreign_pages;
-+
-+} gntdev_file_private_data_t;
-+
-+/* Module lifecycle operations. */
-+static int __init gntdev_init(void);
-+static void __exit gntdev_exit(void);
-+
-+module_init(gntdev_init);
-+module_exit(gntdev_exit);
-+
-+/* File operations. */
-+static int gntdev_open(struct inode *inode, struct file *flip);
-+static int gntdev_release(struct inode *inode, struct file *flip);
-+static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma);
-+static int gntdev_ioctl (struct inode *inode, struct file *flip,
-+ unsigned int cmd, unsigned long arg);
-+
-+static struct file_operations gntdev_fops = {
-+ .owner = THIS_MODULE,
-+ .open = gntdev_open,
-+ .release = gntdev_release,
-+ .mmap = gntdev_mmap,
-+ .ioctl = gntdev_ioctl
-+};
-+
-+/* VM operations. */
-+static void gntdev_vma_close(struct vm_area_struct *vma);
-+static pte_t gntdev_clear_pte(struct vm_area_struct *vma, unsigned long addr,
-+ pte_t *ptep, int is_fullmm);
-+
-+static struct vm_operations_struct gntdev_vmops = {
-+ .close = gntdev_vma_close,
-+ .zap_pte = gntdev_clear_pte
-+};
-+
-+/* Global variables. */
-+
-+/* The driver major number, for use when unregistering the driver. */
-+static int gntdev_major;
-+
-+#define GNTDEV_NAME "gntdev"
-+
-+/* Memory mapping functions
-+ * ------------------------
-+ *
-+ * Every granted page is mapped into both kernel and user space, and the two
-+ * following functions return the respective virtual addresses of these pages.
-+ *
-+ * When shadow paging is disabled, the granted page is mapped directly into
-+ * user space; when it is enabled, it is mapped into the kernel and remapped
-+ * into user space using vm_insert_page() (see gntdev_mmap(), below).
-+ */
-+
-+/* Returns the virtual address (in user space) of the @page_index'th page
-+ * in the given VM area.
-+ */
-+static inline unsigned long get_user_vaddr (struct vm_area_struct *vma,
-+ int page_index)
-+{
-+ return (unsigned long) vma->vm_start + (page_index << PAGE_SHIFT);
-+}
-+
-+/* Returns the virtual address (in kernel space) of the @slot_index'th page
-+ * mapped by the gntdev instance that owns the given private data struct.
-+ */
-+static inline unsigned long get_kernel_vaddr (gntdev_file_private_data_t *priv,
-+ int slot_index)
-+{
-+ unsigned long pfn;
-+ void *kaddr;
-+ pfn = page_to_pfn(priv->foreign_pages[slot_index]);
-+ kaddr = pfn_to_kaddr(pfn);
-+ return (unsigned long) kaddr;
-+}
-+
-+/* Helper functions. */
-+
-+/* Adds information about a grant reference to the list of grants in the file's
-+ * private data structure. Returns non-zero on failure. On success, sets the
-+ * value of *offset to the offset that should be mmap()-ed in order to map the
-+ * grant reference.
-+ */
-+static int add_grant_reference(struct file *flip,
-+ struct ioctl_gntdev_grant_ref *op,
-+ uint64_t *offset)
-+{
-+ gntdev_file_private_data_t *private_data
-+ = (gntdev_file_private_data_t *) flip->private_data;
-+
-+ uint32_t slot_index;
-+
-+ if (unlikely(private_data->free_list_size == 0)) {
-+ return -ENOMEM;
-+ }
-+
-+ slot_index = private_data->free_list[--private_data->free_list_size];
-+
-+ /* Copy the grant information into file's private data. */
-+ private_data->grants[slot_index].state = GNTDEV_SLOT_NOT_YET_MAPPED;
-+ private_data->grants[slot_index].u.valid.domid = op->domid;
-+ private_data->grants[slot_index].u.valid.ref = op->ref;
-+
-+ /* The offset is calculated as the index of the chosen entry in the
-+ * file's private data's array of grant information. This is then
-+ * shifted to give an offset into the virtual "file address space".
-+ */
-+ *offset = slot_index << PAGE_SHIFT;
-+
-+ return 0;
-+}
-+
-+/* Adds the @count grant references to the contiguous range in the slot array
-+ * beginning at @first_slot. It is assumed that @first_slot was returned by a
-+ * previous invocation of find_contiguous_free_range(), during the same
-+ * invocation of the driver.
-+ */
-+static int add_grant_references(struct file *flip,
-+ int count,
-+ struct ioctl_gntdev_grant_ref *ops,
-+ uint32_t first_slot)
-+{
-+ gntdev_file_private_data_t *private_data
-+ = (gntdev_file_private_data_t *) flip->private_data;
-+ int i;
-+
-+ for (i = 0; i < count; ++i) {
-+
-+ /* First, mark the slot's entry in the free list as invalid. */
-+ int free_list_index =
-+ private_data->grants[first_slot+i].u.free_list_index;
-+ private_data->free_list[free_list_index] =
-+ GNTDEV_FREE_LIST_INVALID;
-+
-+ /* Now, update the slot. */
-+ private_data->grants[first_slot+i].state =
-+ GNTDEV_SLOT_NOT_YET_MAPPED;
-+ private_data->grants[first_slot+i].u.valid.domid =
-+ ops[i].domid;
-+ private_data->grants[first_slot+i].u.valid.ref = ops[i].ref;
-+ }
-+
-+ return 0;
-+}
-+
-+/* Scans through the free list for @flip, removing entries that are marked as
-+ * GNTDEV_SLOT_INVALID. This will reduce the recorded size of the free list to
-+ * the number of valid entries.
-+ */
-+static void compress_free_list(struct file *flip)
-+{
-+ gntdev_file_private_data_t *private_data
-+ = (gntdev_file_private_data_t *) flip->private_data;
-+ int i, j = 0, old_size;
-+
-+ old_size = private_data->free_list_size;
-+ for (i = 0; i < old_size; ++i) {
-+ if (private_data->free_list[i] != GNTDEV_FREE_LIST_INVALID) {
-+ private_data->free_list[j] =
-+ private_data->free_list[i];
-+ ++j;
-+ } else {
-+ --private_data->free_list_size;
-+ }
-+ }
-+}
-+
-+/* Searches the grant array in the private data of @flip for a range of
-+ * @num_slots contiguous slots in the GNTDEV_SLOT_INVALID state.
-+ *
-+ * Returns the index of the first slot if a range is found, otherwise -ENOMEM.
-+ */
-+static int find_contiguous_free_range(struct file *flip,
-+ uint32_t num_slots)
-+{
-+ gntdev_file_private_data_t *private_data
-+ = (gntdev_file_private_data_t *) flip->private_data;
-+
-+ int i;
-+ int start_index = private_data->next_fit_index;
-+ int range_start = 0, range_length;
-+
-+ if (private_data->free_list_size < num_slots) {
-+ return -ENOMEM;
-+ }
-+
-+ /* First search from the start_index to the end of the array. */
-+ range_length = 0;
-+ for (i = start_index; i < MAX_GRANTS; ++i) {
-+ if (private_data->grants[i].state == GNTDEV_SLOT_INVALID) {
-+ if (range_length == 0) {
-+ range_start = i;
-+ }
-+ ++range_length;
-+ if (range_length == num_slots) {
-+ return range_start;
-+ }
-+ }
-+ }
-+
-+ /* Now search from the start of the array to the start_index. */
-+ range_length = 0;
-+ for (i = 0; i < start_index; ++i) {
-+ if (private_data->grants[i].state == GNTDEV_SLOT_INVALID) {
-+ if (range_length == 0) {
-+ range_start = i;
-+ }
-+ ++range_length;
-+ if (range_length == num_slots) {
-+ return range_start;
-+ }
-+ }
-+ }
-+
-+ return -ENOMEM;
-+}
-+
-+/* Interface functions. */
-+
-+/* Initialises the driver. Called when the module is loaded. */
-+static int __init gntdev_init(void)
-+{
-+ struct class *class;
-+ struct class_device *device;
-+
-+ if (!is_running_on_xen()) {
-+ printk(KERN_ERR "You must be running Xen to use gntdev\n");
-+ return -ENODEV;
-+ }
-+
-+ gntdev_major = register_chrdev(0, GNTDEV_NAME, &gntdev_fops);
-+ if (gntdev_major < 0)
-+ {
-+ printk(KERN_ERR "Could not register gntdev device\n");
-+ return -ENOMEM;
-+ }
-+
-+ /* Note that if the sysfs code fails, we will still initialise the
-+ * device, and output the major number so that the device can be
-+ * created manually using mknod.
-+ */
-+ if ((class = get_xen_class()) == NULL) {
-+ printk(KERN_ERR "Error setting up xen_class\n");
-+ printk(KERN_ERR "gntdev created with major number = %d\n",
-+ gntdev_major);
-+ return 0;
-+ }
-+
-+ device = class_device_create(class, NULL, MKDEV(gntdev_major, 0),
-+ NULL, GNTDEV_NAME);
-+ if (IS_ERR(device)) {
-+ printk(KERN_ERR "Error creating gntdev device in xen_class\n");
-+ printk(KERN_ERR "gntdev created with major number = %d\n",
-+ gntdev_major);
-+ return 0;
-+ }
-+
-+ return 0;
-+}
-+
-+/* Cleans up and unregisters the driver. Called when the driver is unloaded.
-+ */
-+static void __exit gntdev_exit(void)
-+{
-+ struct class *class;
-+ if ((class = get_xen_class()) != NULL)
-+ class_device_destroy(class, MKDEV(gntdev_major, 0));
-+ unregister_chrdev(gntdev_major, GNTDEV_NAME);
-+}
-+
-+/* Called when the device is opened. */
-+static int gntdev_open(struct inode *inode, struct file *flip)
-+{
-+ gntdev_file_private_data_t *private_data;
-+ int i;
-+
-+ try_module_get(THIS_MODULE);
-+
-+ /* Allocate space for the per-instance private data. */
-+ private_data = kmalloc(sizeof(*private_data), GFP_KERNEL);
-+ if (!private_data)
-+ goto nomem_out;
-+
-+ /* Allocate space for the kernel-mapping of granted pages. */
-+ private_data->foreign_pages =
-+ alloc_empty_pages_and_pagevec(MAX_GRANTS);
-+ if (!private_data->foreign_pages)
-+ goto nomem_out2;
-+
-+ /* Initialise the free-list, which contains all slots at first.
-+ */
-+ for (i = 0; i < MAX_GRANTS; ++i) {
-+ private_data->free_list[MAX_GRANTS - i - 1] = i;
-+ private_data->grants[i].state = GNTDEV_SLOT_INVALID;
-+ private_data->grants[i].u.free_list_index = MAX_GRANTS - i - 1;
-+ }
-+ private_data->free_list_size = MAX_GRANTS;
-+ private_data->next_fit_index = 0;
-+
-+ init_rwsem(&private_data->grants_sem);
-+ init_rwsem(&private_data->free_list_sem);
-+
-+ flip->private_data = private_data;
-+
-+ return 0;
-+
-+nomem_out2:
-+ kfree(private_data);
-+nomem_out:
-+ return -ENOMEM;
-+}
-+
-+/* Called when the device is closed.
-+ */
-+static int gntdev_release(struct inode *inode, struct file *flip)
-+{
-+ if (flip->private_data) {
-+ gntdev_file_private_data_t *private_data =
-+ (gntdev_file_private_data_t *) flip->private_data;
-+ if (private_data->foreign_pages) {
-+ free_empty_pages_and_pagevec
-+ (private_data->foreign_pages, MAX_GRANTS);
-+ }
-+ kfree(private_data);
-+ }
-+ module_put(THIS_MODULE);
-+ return 0;
-+}
-+
-+/* Called when an attempt is made to mmap() the device. The private data from
-+ * @flip contains the list of grant references that can be mapped. The vm_pgoff
-+ * field of @vma contains the index into that list that refers to the grant
-+ * reference that will be mapped. Only mappings that are a multiple of
-+ * PAGE_SIZE are handled.
-+ */
-+static int gntdev_mmap (struct file *flip, struct vm_area_struct *vma)
-+{
-+ struct gnttab_map_grant_ref op;
-+ unsigned long slot_index = vma->vm_pgoff;
-+ unsigned long kernel_vaddr, user_vaddr;
-+ uint32_t size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-+ uint64_t ptep;
-+ int ret;
-+ int flags;
-+ int i;
-+ struct page *page;
-+ gntdev_file_private_data_t *private_data = flip->private_data;
-+
-+ if (unlikely(!private_data)) {
-+ printk(KERN_ERR "File's private data is NULL.\n");
-+ return -EINVAL;
-+ }
-+
-+ if (unlikely((size <= 0) || (size + slot_index) > MAX_GRANTS)) {
-+ printk(KERN_ERR "Invalid number of pages or offset"
-+ "(num_pages = %d, first_slot = %ld).\n",
-+ size, slot_index);
-+ return -ENXIO;
-+ }
-+
-+ if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
-+ printk(KERN_ERR "Writable mappings must be shared.\n");
-+ return -EINVAL;
-+ }
-+
-+ /* Slots must be in the NOT_YET_MAPPED state. */
-+ down_write(&private_data->grants_sem);
-+ for (i = 0; i < size; ++i) {
-+ if (private_data->grants[slot_index + i].state !=
-+ GNTDEV_SLOT_NOT_YET_MAPPED) {
-+ printk(KERN_ERR "Slot (index = %ld) is in the wrong "
-+ "state (%d).\n", slot_index + i,
-+ private_data->grants[slot_index + i].state);
-+ up_write(&private_data->grants_sem);
-+ return -EINVAL;
-+ }
-+ }
-+
-+ /* Install the hook for unmapping. */
-+ vma->vm_ops = &gntdev_vmops;
-+
-+ /* The VM area contains pages from another VM. */
-+ vma->vm_flags |= VM_FOREIGN;
-+ vma->vm_private_data = kzalloc(size * sizeof(struct page *),
-+ GFP_KERNEL);
-+ if (vma->vm_private_data == NULL) {
-+ printk(KERN_ERR "Couldn't allocate mapping structure for VM "
-+ "area.\n");
-+ return -ENOMEM;
-+ }
-+
-+ /* This flag prevents Bad PTE errors when the memory is unmapped. */
-+ vma->vm_flags |= VM_RESERVED;
-+
-+ /* This flag prevents this VM area being copied on a fork(). A better
-+ * behaviour might be to explicitly carry out the appropriate mappings
-+ * on fork(), but I don't know if there's a hook for this.
-+ */
-+ vma->vm_flags |= VM_DONTCOPY;
-+
-+#ifdef CONFIG_X86
-+ /* This flag ensures that the page tables are not unpinned before the
-+ * VM area is unmapped. Therefore Xen still recognises the PTE as
-+ * belonging to an L1 pagetable, and the grant unmap operation will
-+ * succeed, even if the process does not exit cleanly.
-+ */
-+ vma->vm_mm->context.has_foreign_mappings = 1;
-+#endif
-+
-+ for (i = 0; i < size; ++i) {
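-+ /* Each granted page is first mapped into the kernel; it is then either
-+  * mapped into user space directly via its PTE (no auto-translation) or
-+  * inserted with vm_insert_page() (shadow page tables), as described above. */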
-+
-+ flags = GNTMAP_host_map;
-+ if (!(vma->vm_flags & VM_WRITE))
-+ flags |= GNTMAP_readonly;
-+
-+ kernel_vaddr = get_kernel_vaddr(private_data, slot_index + i);
-+ user_vaddr = get_user_vaddr(vma, i);
-+ page = pfn_to_page(__pa(kernel_vaddr) >> PAGE_SHIFT);
-+
-+ gnttab_set_map_op(&op, kernel_vaddr, flags,
-+ private_data->grants[slot_index+i]
-+ .u.valid.ref,
-+ private_data->grants[slot_index+i]
-+ .u.valid.domid);
-+
-+ /* Carry out the mapping of the grant reference. */
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
-+ &op, 1);
-+ BUG_ON(ret);
-+ if (op.status) {
-+ printk(KERN_ERR "Error mapping the grant reference "
-+ "into the kernel (%d). domid = %d; ref = %d\n",
-+ op.status,
-+ private_data->grants[slot_index+i]
-+ .u.valid.domid,
-+ private_data->grants[slot_index+i]
-+ .u.valid.ref);
-+ goto undo_map_out;
-+ }
-+
-+ /* Store a reference to the page that will be mapped into user
-+ * space.
-+ */
-+ ((struct page **) vma->vm_private_data)[i] = page;
-+
-+ /* Mark mapped page as reserved. */
-+ SetPageReserved(page);
-+
-+ /* Record the grant handle, for use in the unmap operation. */
-+ private_data->grants[slot_index+i].u.valid.kernel_handle =
-+ op.handle;
-+ private_data->grants[slot_index+i].u.valid.dev_bus_addr =
-+ op.dev_bus_addr;
-+
-+ private_data->grants[slot_index+i].state = GNTDEV_SLOT_MAPPED;
-+ private_data->grants[slot_index+i].u.valid.user_handle =
-+ GNTDEV_INVALID_HANDLE;
-+
-+ /* Now perform the mapping to user space. */
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+
-+ /* NOT USING SHADOW PAGE TABLES. */
-+ /* In this case, we map the grant(s) straight into user
-+ * space.
-+ */
-+
-+ /* Get the machine address of the PTE for the user
-+ * page.
-+ */
-+ if ((ret = create_lookup_pte_addr(vma->vm_mm,
-+ vma->vm_start
-+ + (i << PAGE_SHIFT),
-+ &ptep)))
-+ {
-+ printk(KERN_ERR "Error obtaining PTE pointer "
-+ "(%d).\n", ret);
-+ goto undo_map_out;
-+ }
-+
-+ /* Configure the map operation. */
-+
-+ /* The reference is to be used by host CPUs. */
-+ flags = GNTMAP_host_map;
-+
-+ /* Specifies a user space mapping. */
-+ flags |= GNTMAP_application_map;
-+
-+ /* The map request contains the machine address of the
-+ * PTE to update.
-+ */
-+ flags |= GNTMAP_contains_pte;
-+
-+ if (!(vma->vm_flags & VM_WRITE))
-+ flags |= GNTMAP_readonly;
-+
-+ gnttab_set_map_op(&op, ptep, flags,
-+ private_data->grants[slot_index+i]
-+ .u.valid.ref,
-+ private_data->grants[slot_index+i]
-+ .u.valid.domid);
-+
-+ /* Carry out the mapping of the grant reference. */
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
-+ &op, 1);
-+ BUG_ON(ret);
-+ if (op.status) {
-+ printk(KERN_ERR "Error mapping the grant "
-+ "reference into user space (%d). domid "
-+ "= %d; ref = %d\n", op.status,
-+ private_data->grants[slot_index+i].u
-+ .valid.domid,
-+ private_data->grants[slot_index+i].u
-+ .valid.ref);
-+ goto undo_map_out;
-+ }
-+
-+ /* Record the grant handle, for use in the unmap
-+ * operation.
-+ */
-+ private_data->grants[slot_index+i].u.
-+ valid.user_handle = op.handle;
-+
-+ /* Update p2m structure with the new mapping. */
-+ set_phys_to_machine(__pa(kernel_vaddr) >> PAGE_SHIFT,
-+ FOREIGN_FRAME(private_data->
-+ grants[slot_index+i]
-+ .u.valid.dev_bus_addr
-+ >> PAGE_SHIFT));
-+ } else {
-+ /* USING SHADOW PAGE TABLES. */
-+ /* In this case, we simply insert the page into the VM
-+ * area. */
-+ ret = vm_insert_page(vma, user_vaddr, page);
-+ }
-+
-+ }
-+
-+ up_write(&private_data->grants_sem);
-+ return 0;
-+
-+undo_map_out:
-+ /* If we have a mapping failure, the unmapping will be taken care of
-+ * by do_mmap_pgoff(), which will eventually call gntdev_clear_pte().
-+ * All we need to do here is free the vma_private_data.
-+ */
-+ kfree(vma->vm_private_data);
-+
-+ /* THIS IS VERY UNPLEASANT: do_mmap_pgoff() will set the vma->vm_file
-+ * to NULL on failure. However, we need this in gntdev_clear_pte() to
-+ * unmap the grants. Therefore, we smuggle a reference to the file's
-+ * private data in the VM area's private data pointer.
-+ */
-+ vma->vm_private_data = private_data;
-+
-+ up_write(&private_data->grants_sem);
-+
-+ return -ENOMEM;
-+}
-+
-+static pte_t gntdev_clear_pte(struct vm_area_struct *vma, unsigned long addr,
-+ pte_t *ptep, int is_fullmm)
-+{
-+ int slot_index, ret;
-+ pte_t copy;
-+ struct gnttab_unmap_grant_ref op;
-+ gntdev_file_private_data_t *private_data;
-+
-+ /* THIS IS VERY UNPLEASANT: do_mmap_pgoff() will set the vma->vm_file
-+ * to NULL on failure. However, we need this in gntdev_clear_pte() to
-+ * unmap the grants. Therefore, we smuggle a reference to the file's
-+ * private data in the VM area's private data pointer.
-+ */
-+ if (vma->vm_file) {
-+ private_data = (gntdev_file_private_data_t *)
-+ vma->vm_file->private_data;
-+ } else if (vma->vm_private_data) {
-+ private_data = (gntdev_file_private_data_t *)
-+ vma->vm_private_data;
-+ } else {
-+ private_data = NULL; /* gcc warning */
-+ BUG();
-+ }
-+
-+ /* Calculate the grant relating to this PTE. */
-+ slot_index = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
-+
-+ /* Only unmap grants if the slot has been mapped. This could be being
-+ * called from a failing mmap().
-+ */
-+ if (private_data->grants[slot_index].state == GNTDEV_SLOT_MAPPED) {
-+
-+ /* First, we clear the user space mapping, if it has been made.
-+ */
-+ if (private_data->grants[slot_index].u.valid.user_handle !=
-+ GNTDEV_INVALID_HANDLE &&
-+ !xen_feature(XENFEAT_auto_translated_physmap)) {
-+ /* NOT USING SHADOW PAGE TABLES. */
-+
-+ /* Copy the existing value of the PTE for returning. */
-+ copy = *ptep;
-+
-+ gnttab_set_unmap_op(&op, virt_to_machine(ptep),
-+ GNTMAP_contains_pte,
-+ private_data->grants[slot_index]
-+ .u.valid.user_handle);
-+ ret = HYPERVISOR_grant_table_op(
-+ GNTTABOP_unmap_grant_ref, &op, 1);
-+ BUG_ON(ret);
-+ if (op.status)
-+ printk("User unmap grant status = %d\n",
-+ op.status);
-+ } else {
-+ /* USING SHADOW PAGE TABLES. */
-+ copy = ptep_get_and_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
-+ }
-+
-+ /* Finally, we unmap the grant from kernel space. */
-+ gnttab_set_unmap_op(&op,
-+ get_kernel_vaddr(private_data, slot_index),
-+ GNTMAP_host_map,
-+ private_data->grants[slot_index].u.valid
-+ .kernel_handle);
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
-+ &op, 1);
-+ BUG_ON(ret);
-+ if (op.status)
-+ printk("Kernel unmap grant status = %d\n", op.status);
-+
-+
-+ /* Return slot to the not-yet-mapped state, so that it may be
-+ * mapped again, or removed by a subsequent ioctl.
-+ */
-+ private_data->grants[slot_index].state =
-+ GNTDEV_SLOT_NOT_YET_MAPPED;
-+
-+ /* Invalidate the physical to machine mapping for this page. */
-+ set_phys_to_machine(__pa(get_kernel_vaddr(private_data,
-+ slot_index))
-+ >> PAGE_SHIFT, INVALID_P2M_ENTRY);
-+
-+ } else {
-+ copy = ptep_get_and_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
-+ }
-+
-+ return copy;
-+}
-+
-+/* "Destructor" for a VM area.
-+ */
-+static void gntdev_vma_close(struct vm_area_struct *vma) {
-+ if (vma->vm_private_data) {
-+ kfree(vma->vm_private_data);
-+ }
-+}
-+
-+/* Called when an ioctl is made on the device.
-+ */
-+static int gntdev_ioctl(struct inode *inode, struct file *flip,
-+ unsigned int cmd, unsigned long arg)
-+{
-+ int rc = 0;
-+ gntdev_file_private_data_t *private_data =
-+ (gntdev_file_private_data_t *) flip->private_data;
-+
-+ switch (cmd) {
-+ case IOCTL_GNTDEV_MAP_GRANT_REF:
-+ {
-+ struct ioctl_gntdev_map_grant_ref op;
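-+ /* A single reference reuses any free slot; multiple references need a
-+  * contiguous run of slots so the range can be mmap()-ed at one offset. */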
-+ down_write(&private_data->grants_sem);
-+ down_write(&private_data->free_list_sem);
-+
-+ if ((rc = copy_from_user(&op, (void __user *) arg,
-+ sizeof(op)))) {
-+ rc = -EFAULT;
-+ goto map_out;
-+ }
-+ if (unlikely(op.count <= 0)) {
-+ rc = -EINVAL;
-+ goto map_out;
-+ }
-+
-+ if (op.count == 1) {
-+ if ((rc = add_grant_reference(flip, &op.refs[0],
-+ &op.index)) < 0) {
-+ printk(KERN_ERR "Adding grant reference "
-+ "failed (%d).\n", rc);
-+ goto map_out;
-+ }
-+ } else {
-+ struct ioctl_gntdev_grant_ref *refs, *u;
-+ refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL);
-+ if (!refs) {
-+ rc = -ENOMEM;
-+ goto map_out;
-+ }
-+ u = ((struct ioctl_gntdev_map_grant_ref *)arg)->refs;
-+ if ((rc = copy_from_user(refs,
-+ (void __user *)u,
-+ sizeof(*refs) * op.count))) {
-+ printk(KERN_ERR "Copying refs from user failed"
-+ " (%d).\n", rc);
-+ rc = -EINVAL;
-+ goto map_out;
-+ }
-+ if ((rc = find_contiguous_free_range(flip, op.count))
-+ < 0) {
-+ printk(KERN_ERR "Finding contiguous range "
-+ "failed (%d).\n", rc);
-+ kfree(refs);
-+ goto map_out;
-+ }
-+ op.index = rc << PAGE_SHIFT;
-+ if ((rc = add_grant_references(flip, op.count,
-+ refs, rc))) {
-+ printk(KERN_ERR "Adding grant references "
-+ "failed (%d).\n", rc);
-+ kfree(refs);
-+ goto map_out;
-+ }
-+ compress_free_list(flip);
-+ kfree(refs);
-+ }
-+ if ((rc = copy_to_user((void __user *) arg,
-+ &op,
-+ sizeof(op)))) {
-+ printk(KERN_ERR "Copying result back to user failed "
-+ "(%d)\n", rc);
-+ rc = -EFAULT;
-+ goto map_out;
-+ }
-+ map_out:
-+ up_write(&private_data->grants_sem);
-+ up_write(&private_data->free_list_sem);
-+ return rc;
-+ }
-+ case IOCTL_GNTDEV_UNMAP_GRANT_REF:
-+ {
-+ struct ioctl_gntdev_unmap_grant_ref op;
-+ int i, start_index;
-+
-+ down_write(&private_data->grants_sem);
-+ down_write(&private_data->free_list_sem);
-+
-+ if ((rc = copy_from_user(&op,
-+ (void __user *) arg,
-+ sizeof(op)))) {
-+ rc = -EFAULT;
-+ goto unmap_out;
-+ }
-+
-+ start_index = op.index >> PAGE_SHIFT;
-+
-+ /* First, check that all pages are in the NOT_YET_MAPPED
-+ * state.
-+ */
-+ for (i = 0; i < op.count; ++i) {
-+ if (unlikely
-+ (private_data->grants[start_index + i].state
-+ != GNTDEV_SLOT_NOT_YET_MAPPED)) {
-+ if (private_data->grants[start_index + i].state
-+ == GNTDEV_SLOT_INVALID) {
-+ printk(KERN_ERR
-+ "Tried to remove an invalid "
-+ "grant at offset 0x%x.",
-+ (start_index + i)
-+ << PAGE_SHIFT);
-+ rc = -EINVAL;
-+ } else {
-+ printk(KERN_ERR
-+ "Tried to remove a grant which "
-+ "is currently mmap()-ed at "
-+ "offset 0x%x.",
-+ (start_index + i)
-+ << PAGE_SHIFT);
-+ rc = -EBUSY;
-+ }
-+ goto unmap_out;
-+ }
-+ }
-+
-+ /* Unmap pages and add them to the free list.
-+ */
-+ for (i = 0; i < op.count; ++i) {
-+ private_data->grants[start_index+i].state =
-+ GNTDEV_SLOT_INVALID;
-+ private_data->grants[start_index+i].u.free_list_index =
-+ private_data->free_list_size;
-+ private_data->free_list[private_data->free_list_size] =
-+ start_index + i;
-+ ++private_data->free_list_size;
-+ }
-+ compress_free_list(flip);
-+
-+ unmap_out:
-+ up_write(&private_data->grants_sem);
-+ up_write(&private_data->free_list_sem);
-+ return rc;
-+ }
-+ case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
-+ {
-+ struct ioctl_gntdev_get_offset_for_vaddr op;
-+ struct vm_area_struct *vma;
-+ unsigned long vaddr;
-+
-+ if ((rc = copy_from_user(&op,
-+ (void __user *) arg,
-+ sizeof(op)))) {
-+ rc = -EFAULT;
-+ goto get_offset_out;
-+ }
-+ vaddr = (unsigned long)op.vaddr;
-+
-+ down_read(&current->mm->mmap_sem);
-+ vma = find_vma(current->mm, vaddr);
-+ if (vma == NULL) {
-+ rc = -EFAULT;
-+ goto get_offset_unlock_out;
-+ }
-+ if ((!vma->vm_ops) || (vma->vm_ops != &gntdev_vmops)) {
-+ printk(KERN_ERR "The vaddr specified does not belong "
-+ "to a gntdev instance: %#lx\n", vaddr);
-+ rc = -EFAULT;
-+ goto get_offset_unlock_out;
-+ }
-+ if (vma->vm_start != vaddr) {
-+ printk(KERN_ERR "The vaddr specified in an "
-+ "IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR must be at "
-+ "the start of the VM area. vma->vm_start = "
-+ "%#lx; vaddr = %#lx\n",
-+ vma->vm_start, vaddr);
-+ rc = -EFAULT;
-+ goto get_offset_unlock_out;
-+ }
-+ op.offset = vma->vm_pgoff << PAGE_SHIFT;
-+ op.count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-+ up_read(&current->mm->mmap_sem);
-+ if ((rc = copy_to_user((void __user *) arg,
-+ &op,
-+ sizeof(op)))) {
-+ rc = -EFAULT;
-+ goto get_offset_out;
-+ }
-+ goto get_offset_out;
-+ get_offset_unlock_out:
-+ up_read(&current->mm->mmap_sem);
-+ get_offset_out:
-+ return rc;
-+ }
-+ default:
-+ return -ENOIOCTLCMD;
-+ }
-+
-+ return 0;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/gntdev/Makefile ubuntu-gutsy-xen/drivers/xen/gntdev/Makefile
---- ubuntu-gutsy/drivers/xen/gntdev/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/gntdev/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1 @@
-+obj-y := gntdev.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/Kconfig ubuntu-gutsy-xen/drivers/xen/Kconfig
---- ubuntu-gutsy/drivers/xen/Kconfig 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/Kconfig 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,273 @@
-+#
-+# This Kconfig describes Xen options
-+#
-+
-+mainmenu "Xen Configuration"
-+
-+config XEN
-+ bool
-+ default y if X86_XEN || X86_64_XEN
-+ help
-+ This is the Linux Xen port.
-+
-+if XEN
-+config XEN_INTERFACE_VERSION
-+ hex
-+ default 0x00030205
-+
-+menu "XEN"
-+
-+config XEN_PRIVILEGED_GUEST
-+ bool "Privileged Guest (domain 0)"
-+ depends on XEN
-+ default n
-+ help
-+ Support for privileged operation (domain 0)
-+
-+config XEN_UNPRIVILEGED_GUEST
-+ bool
-+ default !XEN_PRIVILEGED_GUEST
-+
-+config XEN_PRIVCMD
-+ bool
-+ depends on PROC_FS
-+ default y
-+
-+config XEN_XENBUS_DEV
-+ bool
-+ depends on PROC_FS
-+ default y
-+
-+config XEN_BACKEND
-+ tristate "Backend driver support"
-+ default y
-+ help
-+ Support for backend device drivers that provide I/O services
-+ to other virtual machines.
-+
-+config XEN_BLKDEV_BACKEND
-+ tristate "Block-device backend driver"
-+ depends on XEN_BACKEND
-+ default y
-+ help
-+ The block-device backend driver allows the kernel to export its
-+ block devices to other guests via a high-performance shared-memory
-+ interface.
-+
-+config XEN_BLKDEV_TAP
-+ tristate "Block-device tap backend driver"
-+ depends on XEN_BACKEND
-+ default XEN_PRIVILEGED_GUEST
-+ help
-+ The block tap driver is an alternative to the block back driver
-+ and allows VM block requests to be redirected to userspace through
-+ a device interface. The tap allows user-space development of
-+ high-performance block backends, where disk images may be implemented
-+ as files, in memory, or on other hosts across the network. This
-+ driver can safely coexist with the existing blockback driver.
-+
-+config XEN_NETDEV_BACKEND
-+ tristate "Network-device backend driver"
-+ depends on XEN_BACKEND && NET
-+ default y
-+ help
-+ The network-device backend driver allows the kernel to export its
-+ network devices to other guests via a high-performance shared-memory
-+ interface.
-+
-+config XEN_NETDEV_PIPELINED_TRANSMITTER
-+ bool "Pipelined transmitter (DANGEROUS)"
-+ depends on XEN_NETDEV_BACKEND
-+ default n
-+ help
-+ If the net backend is a dumb domain, such as a transparent Ethernet
-+ bridge with no local IP interface, it is safe to say Y here to get
-+ slightly lower network overhead.
-+ If the backend has a local IP interface; or may be doing smart things
-+ like reassembling packets to perform firewall filtering; or if you
-+ are unsure; or if you experience network hangs when this option is
-+ enabled; then you must say N here.
-+
-+config XEN_NETDEV_LOOPBACK
-+ tristate "Network-device loopback driver"
-+ depends on XEN_NETDEV_BACKEND
-+ default y
-+ help
-+ A two-interface loopback device to emulate a local netfront-netback
-+ connection.
-+
-+config XEN_PCIDEV_BACKEND
-+ tristate "PCI-device backend driver"
-+ depends on PCI && XEN_BACKEND
-+ default XEN_PRIVILEGED_GUEST
-+ help
-+ The PCI device backend driver allows the kernel to export arbitrary
-+ PCI devices to other guests. If you select this to be a module, you
-+ will need to make sure no other driver has bound to the device(s)
-+ you want to make visible to other guests.
-+
-+choice
-+ prompt "PCI Backend Mode"
-+ depends on XEN_PCIDEV_BACKEND
-+ default XEN_PCIDEV_BACKEND_VPCI
-+
-+config XEN_PCIDEV_BACKEND_VPCI
-+ bool "Virtual PCI"
-+ ---help---
-+ This PCI Backend hides the true PCI topology and makes the frontend
-+ think there is a single PCI bus with only the exported devices on it.
-+ For example, a device at 03:05.0 will be re-assigned to 00:00.0. A
-+ second device at 02:1a.1 will be re-assigned to 00:01.1.
-+
-+config XEN_PCIDEV_BACKEND_PASS
-+ bool "Passthrough"
-+ ---help---
-+ This PCI Backend provides a real view of the PCI topology to the
-+ frontend (for example, a device at 06:01.b will still appear at
-+ 06:01.b to the frontend). This is similar to how Xen 2.0.x exposed
-+ PCI devices to its driver domains. This may be required for drivers
-+ which depend on finding their hardware in certain bus/slot
-+ locations.
-+
-+config XEN_PCIDEV_BACKEND_SLOT
-+ bool "Slot"
-+ ---help---
-+ This PCI Backend hides the true PCI topology and makes the frontend
-+ think there is a single PCI bus with only the exported devices on it.
-+ Contrary to the virtual PCI backend, a function becomes a new slot.
-+ For example, a device at 03:05.2 will be re-assigned to 00:00.0. A
-+ second device at 02:1a.1 will be re-assigned to 00:01.0.
-+
-+endchoice
-+
-+config XEN_PCIDEV_BE_DEBUG
-+ bool "PCI Backend Debugging"
-+ depends on XEN_PCIDEV_BACKEND
-+ default n
-+
-+config XEN_TPMDEV_BACKEND
-+ tristate "TPM-device backend driver"
-+ depends on XEN_BACKEND
-+ default n
-+ help
-+ The TPM-device backend driver allows the kernel to export a
-+ virtual TPM device to other guests.
-+
-+config XEN_BLKDEV_FRONTEND
-+ tristate "Block-device frontend driver"
-+ depends on XEN
-+ default y
-+ help
-+ The block-device frontend driver allows the kernel to access block
-+ devices exported by another guest OS. Unless you are building a
-+ dedicated device-driver domain or the master control domain
-+ (domain 0), you almost certainly want to say Y here.
-+
-+config XEN_NETDEV_FRONTEND
-+ tristate "Network-device frontend driver"
-+ depends on XEN && NET
-+ default y
-+ help
-+ The network-device frontend driver allows the kernel to access
-+ network interfaces exported by another guest OS. Unless you are
-+ building a dedicated device-driver domain or the master control
-+ domain (domain 0), you almost certainly want to say Y here.
-+
-+config XEN_FRAMEBUFFER
-+ tristate "Framebuffer-device frontend driver"
-+ depends on XEN && FB
-+ select FB_CFB_FILLRECT
-+ select FB_CFB_COPYAREA
-+ select FB_CFB_IMAGEBLIT
-+ default y
-+ help
-+ The framebuffer-device frontend driver allows the kernel to create a
-+ virtual framebuffer. This framebuffer can be viewed in another
-+ domain. Unless this domain has access to a real video card, you
-+ probably want to say Y here.
-+
-+config XEN_KEYBOARD
-+ tristate "Keyboard-device frontend driver"
-+ depends on XEN && XEN_FRAMEBUFFER && INPUT
-+ default y
-+ help
-+ The keyboard-device frontend driver allows the kernel to create a
-+ virtual keyboard. This keyboard can then be driven by another
-+ domain. If you've said Y to CONFIG_XEN_FRAMEBUFFER, you probably
-+ want to say Y here.
-+
-+config XEN_CONSOLE
-+ bool "Xen virtual console"
-+ default y
-+ help
-+ The Xen virtual console is ...
-+
-+config XEN_SCRUB_PAGES
-+ bool "Scrub memory before freeing it to Xen"
-+ default y
-+ help
-+ Erase memory contents before freeing it back to Xen's global
-+ pool. This ensures that any secrets contained within that
-+ memory (e.g., private keys) cannot be found by other guests that
-+ may be running on the machine. Most people will want to say Y here.
-+ If security is not a concern then you may increase performance by
-+ saying N.
-+
-+config XEN_DISABLE_SERIAL
-+ bool "Disable serial port drivers"
-+ default y
-+ help
-+ Disable serial port drivers, allowing the Xen console driver
-+ to provide a serial console at ttyS0.
-+
-+config XEN_SYSFS
-+ tristate "Export Xen attributes in sysfs"
-+ depends on SYSFS
-+ select SYS_HYPERVISOR
-+ default y
-+ help
-+ Xen hypervisor attributes will show up under /sys/hypervisor/.
-+
-+choice
-+ prompt "Xen version compatibility"
-+ default XEN_COMPAT_030002_AND_LATER
-+
-+ config XEN_COMPAT_030002_AND_LATER
-+ bool "3.0.2 and later"
-+
-+ config XEN_COMPAT_030004_AND_LATER
-+ bool "3.0.4 and later"
-+
-+ config XEN_COMPAT_LATEST_ONLY
-+ bool "no compatibility code"
-+
-+endchoice
-+
-+config XEN_COMPAT
-+ hex
-+ default 0xffffff if XEN_COMPAT_LATEST_ONLY
-+ default 0x030004 if XEN_COMPAT_030004_AND_LATER
-+ default 0x030002 if XEN_COMPAT_030002_AND_LATER
-+ default 0
-+
-+endmenu
-+
-+config HAVE_IRQ_IGNORE_UNHANDLED
-+ bool
-+ default y
-+
-+config GENERIC_HARDIRQS_NO__DO_IRQ
-+ def_bool y
-+
-+config NO_IDLE_HZ
-+ bool
-+ default y
-+
-+config PM
-+ def_bool y
-+
-+config XEN_SMPBOOT
-+ bool
-+ default y
-+ depends on SMP
-+
-+endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/Makefile ubuntu-gutsy-xen/drivers/xen/Makefile
---- ubuntu-gutsy/drivers/xen/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,20 @@
-+obj-y += core/
-+obj-y += console/
-+obj-y += evtchn/
-+obj-y += privcmd/
-+obj-y += xenbus/
-+obj-y += gntdev/
-+obj-y += balloon/
-+obj-y += char/
-+
-+obj-y += util.o
-+obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/
-+obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/
-+obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/
-+obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmback/
-+obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/
-+obj-$(CONFIG_XEN_NETDEV_FRONTEND) += netfront/
-+obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback/
-+obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront/
-+obj-$(CONFIG_XEN_FRAMEBUFFER) += fbfront/
-+obj-$(CONFIG_XEN_KEYBOARD) += fbfront/
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/netback/common.h ubuntu-gutsy-xen/drivers/xen/netback/common.h
---- ubuntu-gutsy/drivers/xen/netback/common.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/netback/common.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,157 @@
-+/******************************************************************************
-+ * arch/xen/drivers/netif/backend/common.h
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __NETIF__BACKEND__COMMON_H__
-+#define __NETIF__BACKEND__COMMON_H__
-+
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <linux/ip.h>
-+#include <linux/in.h>
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/wait.h>
-+#include <xen/evtchn.h>
-+#include <xen/interface/io/netif.h>
-+#include <asm/io.h>
-+#include <asm/pgalloc.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/gnttab.h>
-+#include <xen/driver_util.h>
-+
-+#define DPRINTK(_f, _a...) \
-+ pr_debug("(file=%s, line=%d) " _f, \
-+ __FILE__ , __LINE__ , ## _a )
-+#define IPRINTK(fmt, args...) \
-+ printk(KERN_INFO "xen_net: " fmt, ##args)
-+#define WPRINTK(fmt, args...) \
-+ printk(KERN_WARNING "xen_net: " fmt, ##args)
-+
-+typedef struct netif_st {
-+ /* Unique identifier for this interface. */
-+ domid_t domid;
-+ unsigned int handle;
-+
-+ u8 fe_dev_addr[6];
-+
-+ /* Physical parameters of the comms window. */
-+ grant_handle_t tx_shmem_handle;
-+ grant_ref_t tx_shmem_ref;
-+ grant_handle_t rx_shmem_handle;
-+ grant_ref_t rx_shmem_ref;
-+ unsigned int irq;
-+
-+ /* The shared rings and indexes. */
-+ netif_tx_back_ring_t tx;
-+ netif_rx_back_ring_t rx;
-+ struct vm_struct *tx_comms_area;
-+ struct vm_struct *rx_comms_area;
-+
-+ /* Set of features that can be turned on in dev->features. */
-+ int features;
-+
-+ /* Internal feature information. */
-+ int can_queue:1; /* can queue packets for receiver? */
-+ int copying_receiver:1; /* copy packets to receiver? */
-+
-+ /* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
-+ RING_IDX rx_req_cons_peek;
-+
-+ /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
-+ unsigned long credit_bytes;
-+ unsigned long credit_usec;
-+ unsigned long remaining_credit;
-+ struct timer_list credit_timeout;
-+
-+ /* Enforce draining of the transmit queue. */
-+ struct timer_list tx_queue_timeout;
-+
-+ /* Miscellaneous private stuff. */
-+ struct list_head list; /* scheduling list */
-+ atomic_t refcnt;
-+ struct net_device *dev;
-+ struct net_device_stats stats;
-+
-+ unsigned int carrier;
-+
-+ wait_queue_head_t waiting_to_free;
-+} netif_t;
-+
-+/*
-+ * Implement our own carrier flag: the network stack's version causes delays
-+ * when the carrier is re-enabled (in particular, dev_activate() may not
-+ * immediately be called, which can cause packet loss; also the etherbridge
-+ * can be rather lazy in activating its port).
-+ */
-+#define netback_carrier_on(netif) ((netif)->carrier = 1)
-+#define netback_carrier_off(netif) ((netif)->carrier = 0)
-+#define netback_carrier_ok(netif) ((netif)->carrier)
-+
-+#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
-+#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
-+
-+void netif_disconnect(netif_t *netif);
-+
-+netif_t *netif_alloc(domid_t domid, unsigned int handle);
-+int netif_map(netif_t *netif, unsigned long tx_ring_ref,
-+ unsigned long rx_ring_ref, unsigned int evtchn);
-+
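-+/* Reference counting: the final netif_put() wakes netif_disconnect(), which
-+ * waits for the refcount to drop to zero before tearing the interface down. */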
-+#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define netif_put(_b) \
-+ do { \
-+ if ( atomic_dec_and_test(&(_b)->refcnt) ) \
-+ wake_up(&(_b)->waiting_to_free); \
-+ } while (0)
-+
-+void netif_xenbus_init(void);
-+
-+#define netif_schedulable(netif) \
-+ (netif_running((netif)->dev) && netback_carrier_ok(netif))
-+
-+void netif_schedule_work(netif_t *netif);
-+void netif_deschedule_work(netif_t *netif);
-+
-+int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
-+struct net_device_stats *netif_be_get_stats(struct net_device *dev);
-+irqreturn_t netif_be_int(int irq, void *dev_id);
-+
-+static inline int netbk_can_queue(struct net_device *dev)
-+{
-+ netif_t *netif = netdev_priv(dev);
-+ return netif->can_queue;
-+}
-+
-+static inline int netbk_can_sg(struct net_device *dev)
-+{
-+ netif_t *netif = netdev_priv(dev);
-+ return netif->features & NETIF_F_SG;
-+}
-+
-+#endif /* __NETIF__BACKEND__COMMON_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/netback/interface.c ubuntu-gutsy-xen/drivers/xen/netback/interface.c
---- ubuntu-gutsy/drivers/xen/netback/interface.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/netback/interface.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,336 @@
-+/******************************************************************************
-+ * arch/xen/drivers/netif/backend/interface.c
-+ *
-+ * Network-device interface management.
-+ *
-+ * Copyright (c) 2004-2005, Keir Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include "common.h"
-+#include <linux/ethtool.h>
-+#include <linux/rtnetlink.h>
-+
-+/*
-+ * Module parameter 'queue_length':
-+ *
-+ * Enables queuing in the network stack when a client has run out of receive
-+ * descriptors. Although this feature can improve receive bandwidth by avoiding
-+ * packet loss, it can also result in packets sitting in the 'tx_queue' for
-+ * unbounded time. This is bad if those packets hold onto foreign resources.
-+ * For example, consider a packet that holds onto resources belonging to the
-+ * guest for which it is queued (e.g., packet received on vif1.0, destined for
-+ * vif1.1 which is not activated in the guest): in this situation the guest
-+ * will never be destroyed, unless vif1.1 is taken down. To avoid this, we
-+ * run a timer (tx_queue_timeout) to drain the queue when the interface is
-+ * blocked.
-+ */
-+static unsigned long netbk_queue_length = 32;
-+module_param_named(queue_length, netbk_queue_length, ulong, 0);
-+
-+static void __netif_up(netif_t *netif)
-+{
-+ enable_irq(netif->irq);
-+ netif_schedule_work(netif);
-+}
-+
-+static void __netif_down(netif_t *netif)
-+{
-+ disable_irq(netif->irq);
-+ netif_deschedule_work(netif);
-+}
-+
-+static int net_open(struct net_device *dev)
-+{
-+ netif_t *netif = netdev_priv(dev);
-+ if (netback_carrier_ok(netif)) {
-+ __netif_up(netif);
-+ netif_start_queue(dev);
-+ }
-+ return 0;
-+}
-+
-+static int net_close(struct net_device *dev)
-+{
-+ netif_t *netif = netdev_priv(dev);
-+ if (netback_carrier_ok(netif))
-+ __netif_down(netif);
-+ netif_stop_queue(dev);
-+ return 0;
-+}
-+
-+static int netbk_change_mtu(struct net_device *dev, int mtu)
-+{
-+ int max = netbk_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
-+
-+ if (mtu > max)
-+ return -EINVAL;
-+ dev->mtu = mtu;
-+ return 0;
-+}
-+
-+static int netbk_set_sg(struct net_device *dev, u32 data)
-+{
-+ if (data) {
-+ netif_t *netif = netdev_priv(dev);
-+
-+ if (!(netif->features & NETIF_F_SG))
-+ return -ENOSYS;
-+ }
-+
-+ return ethtool_op_set_sg(dev, data);
-+}
-+
-+static int netbk_set_tso(struct net_device *dev, u32 data)
-+{
-+ if (data) {
-+ netif_t *netif = netdev_priv(dev);
-+
-+ if (!(netif->features & NETIF_F_TSO))
-+ return -ENOSYS;
-+ }
-+
-+ return ethtool_op_set_tso(dev, data);
-+}
-+
-+static struct ethtool_ops network_ethtool_ops =
-+{
-+ .get_tx_csum = ethtool_op_get_tx_csum,
-+ .set_tx_csum = ethtool_op_set_tx_csum,
-+ .get_sg = ethtool_op_get_sg,
-+ .set_sg = netbk_set_sg,
-+ .get_tso = ethtool_op_get_tso,
-+ .set_tso = netbk_set_tso,
-+ .get_link = ethtool_op_get_link,
-+};
-+
-+netif_t *netif_alloc(domid_t domid, unsigned int handle)
-+{
-+ int err = 0;
-+ struct net_device *dev;
-+ netif_t *netif;
-+ char name[IFNAMSIZ] = {};
-+
-+ snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
-+ dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
-+ if (dev == NULL) {
-+ DPRINTK("Could not create netif: out of memory\n");
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ netif = netdev_priv(dev);
-+ memset(netif, 0, sizeof(*netif));
-+ netif->domid = domid;
-+ netif->handle = handle;
-+ atomic_set(&netif->refcnt, 1);
-+ init_waitqueue_head(&netif->waiting_to_free);
-+ netif->dev = dev;
-+
-+ netback_carrier_off(netif);
-+
-+ netif->credit_bytes = netif->remaining_credit = ~0UL;
-+ netif->credit_usec = 0UL;
-+ init_timer(&netif->credit_timeout);
-+ /* Initialize 'expires' now: it's used to track the credit window. */
-+ netif->credit_timeout.expires = jiffies;
-+
-+ init_timer(&netif->tx_queue_timeout);
-+
-+ dev->hard_start_xmit = netif_be_start_xmit;
-+ dev->get_stats = netif_be_get_stats;
-+ dev->open = net_open;
-+ dev->stop = net_close;
-+ dev->change_mtu = netbk_change_mtu;
-+ dev->features = NETIF_F_IP_CSUM;
-+
-+ SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
-+
-+ dev->tx_queue_len = netbk_queue_length;
-+
-+ /*
-+ * Initialise a dummy MAC address. We choose the numerically
-+ * largest non-broadcast address to prevent the address getting
-+ * stolen by an Ethernet bridge for STP purposes.
-+ * (FE:FF:FF:FF:FF:FF)
-+ */
-+ memset(dev->dev_addr, 0xFF, ETH_ALEN);
-+ dev->dev_addr[0] &= ~0x01;
-+
-+ rtnl_lock();
-+ err = register_netdevice(dev);
-+ rtnl_unlock();
-+ if (err) {
-+ DPRINTK("Could not register new net device %s: err=%d\n",
-+ dev->name, err);
-+ free_netdev(dev);
-+ return ERR_PTR(err);
-+ }
-+
-+ DPRINTK("Successfully created netif\n");
-+ return netif;
-+}
-+
-+static int map_frontend_pages(
-+ netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
-+{
-+ struct gnttab_map_grant_ref op;
-+
-+ gnttab_set_map_op(&op, (unsigned long)netif->tx_comms_area->addr,
-+ GNTMAP_host_map, tx_ring_ref, netif->domid);
-+
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-+ BUG();
-+
-+ if (op.status) {
-+ DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
-+ return op.status;
-+ }
-+
-+ netif->tx_shmem_ref = tx_ring_ref;
-+ netif->tx_shmem_handle = op.handle;
-+
-+ gnttab_set_map_op(&op, (unsigned long)netif->rx_comms_area->addr,
-+ GNTMAP_host_map, rx_ring_ref, netif->domid);
-+
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-+ BUG();
-+
-+ if (op.status) {
-+ DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
-+ return op.status;
-+ }
-+
-+ netif->rx_shmem_ref = rx_ring_ref;
-+ netif->rx_shmem_handle = op.handle;
-+
-+ return 0;
-+}
-+
-+static void unmap_frontend_pages(netif_t *netif)
-+{
-+ struct gnttab_unmap_grant_ref op;
-+
-+ gnttab_set_unmap_op(&op, (unsigned long)netif->tx_comms_area->addr,
-+ GNTMAP_host_map, netif->tx_shmem_handle);
-+
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-+ BUG();
-+
-+ gnttab_set_unmap_op(&op, (unsigned long)netif->rx_comms_area->addr,
-+ GNTMAP_host_map, netif->rx_shmem_handle);
-+
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-+ BUG();
-+}
-+
-+int netif_map(netif_t *netif, unsigned long tx_ring_ref,
-+ unsigned long rx_ring_ref, unsigned int evtchn)
-+{
-+ int err = -ENOMEM;
-+ netif_tx_sring_t *txs;
-+ netif_rx_sring_t *rxs;
-+
-+ /* Already connected through? */
-+ if (netif->irq)
-+ return 0;
-+
-+ netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
-+ if (netif->tx_comms_area == NULL)
-+ return -ENOMEM;
-+ netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
-+ if (netif->rx_comms_area == NULL)
-+ goto err_rx;
-+
-+ err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
-+ if (err)
-+ goto err_map;
-+
-+ err = bind_interdomain_evtchn_to_irqhandler(
-+ netif->domid, evtchn, netif_be_int, 0,
-+ netif->dev->name, netif);
-+ if (err < 0)
-+ goto err_hypervisor;
-+ netif->irq = err;
-+ disable_irq(netif->irq);
-+
-+ txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
-+ BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
-+
-+ rxs = (netif_rx_sring_t *)
-+ ((char *)netif->rx_comms_area->addr);
-+ BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
-+
-+ netif->rx_req_cons_peek = 0;
-+
-+ netif_get(netif);
-+
-+ rtnl_lock();
-+ netback_carrier_on(netif);
-+ if (netif_running(netif->dev))
-+ __netif_up(netif);
-+ rtnl_unlock();
-+
-+ return 0;
-+err_hypervisor:
-+ unmap_frontend_pages(netif);
-+err_map:
-+ free_vm_area(netif->rx_comms_area);
-+err_rx:
-+ free_vm_area(netif->tx_comms_area);
-+ return err;
-+}
-+
-+void netif_disconnect(netif_t *netif)
-+{
-+ if (netback_carrier_ok(netif)) {
-+ rtnl_lock();
-+ netback_carrier_off(netif);
-+ netif_carrier_off(netif->dev); /* discard queued packets */
-+ if (netif_running(netif->dev))
-+ __netif_down(netif);
-+ rtnl_unlock();
-+ netif_put(netif);
-+ }
-+
-+ atomic_dec(&netif->refcnt);
-+ wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);
-+
-+ del_timer_sync(&netif->credit_timeout);
-+ del_timer_sync(&netif->tx_queue_timeout);
-+
-+ if (netif->irq)
-+ unbind_from_irqhandler(netif->irq, netif);
-+
-+ unregister_netdev(netif->dev);
-+
-+ if (netif->tx.sring) {
-+ unmap_frontend_pages(netif);
-+ free_vm_area(netif->tx_comms_area);
-+ free_vm_area(netif->rx_comms_area);
-+ }
-+
-+ free_netdev(netif->dev);
-+}
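The teardown path in netif_disconnect() above relies on a small reference-counting protocol: netif_alloc() starts the count at one, every in-flight user holds a reference via netif_get()/netif_put(), and netif_disconnect() drops the creator's reference and then sleeps on waiting_to_free until the count reaches zero. Below is a minimal sketch of that protocol with hypothetical names; the put side is assumed to mirror the driver's netif_put() helper, which lives in common.h and is not part of this hunk.

#include <linux/wait.h>
#include <asm/atomic.h>

struct obj {
	atomic_t refcnt;			/* set to 1 by the creator */
	wait_queue_head_t waiting_to_free;
};

static inline void obj_put(struct obj *o)
{
	/* Last user gone: wake whoever is blocked in obj_teardown(). */
	if (atomic_dec_and_test(&o->refcnt))
		wake_up(&o->waiting_to_free);
}

static void obj_teardown(struct obj *o)
{
	atomic_dec(&o->refcnt);		/* drop the creator's reference */
	wait_event(o->waiting_to_free, atomic_read(&o->refcnt) == 0);
	/* No users remain; the object can now be freed safely. */
}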
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/netback/loopback.c ubuntu-gutsy-xen/drivers/xen/netback/loopback.c
---- ubuntu-gutsy/drivers/xen/netback/loopback.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/netback/loopback.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,321 @@
-+/******************************************************************************
-+ * netback/loopback.c
-+ *
-+ * A two-interface loopback device to emulate a local netfront-netback
-+ * connection. This ensures that local packet delivery looks identical
-+ * to inter-domain delivery. Most importantly, packets delivered locally
-+ * originating from other domains will get *copied* when they traverse this
-+ * driver. This prevents unbounded delays in socket-buffer queues from
-+ * causing the netback driver to "seize up".
-+ *
-+ * This driver creates a symmetric pair of loopback interfaces with names
-+ * vif0.0 and veth0. The intention is that 'vif0.0' is bound to an Ethernet
-+ * bridge, just like a proper netback interface, while a local IP interface
-+ * is configured on 'veth0'.
-+ *
-+ * As with a real netback interface, vif0.0 is configured with a suitable
-+ * dummy MAC address. No default is provided for veth0: a reasonable strategy
-+ * is to transfer eth0's MAC address to veth0, and give eth0 a dummy address
-+ * (to avoid confusing the Etherbridge).
-+ *
-+ * Copyright (c) 2005 K A Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/netdevice.h>
-+#include <linux/inetdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/skbuff.h>
-+#include <linux/ethtool.h>
-+#include <net/dst.h>
-+#include <net/xfrm.h> /* secpath_reset() */
-+#include <asm/hypervisor.h> /* is_initial_xendomain() */
-+#include <../net/core/kmap_skb.h> /* k{,un}map_skb_frag() */
-+
-+static int nloopbacks = -1;
-+module_param(nloopbacks, int, 0);
-+MODULE_PARM_DESC(nloopbacks, "Number of netback-loopback devices to create");
-+
-+struct net_private {
-+ struct net_device *loopback_dev;
-+ struct net_device_stats stats;
-+};
-+
-+static int loopback_open(struct net_device *dev)
-+{
-+ struct net_private *np = netdev_priv(dev);
-+ memset(&np->stats, 0, sizeof(np->stats));
-+ netif_start_queue(dev);
-+ return 0;
-+}
-+
-+static int loopback_close(struct net_device *dev)
-+{
-+ netif_stop_queue(dev);
-+ return 0;
-+}
-+
-+#ifdef CONFIG_X86
-+static int is_foreign(unsigned long pfn)
-+{
-+ /* NB. Play it safe for auto-translation mode. */
-+ return (xen_feature(XENFEAT_auto_translated_physmap) ||
-+ (phys_to_machine_mapping[pfn] & FOREIGN_FRAME_BIT));
-+}
-+#else
-+/* How to detect a foreign mapping? Play it safe. */
-+#define is_foreign(pfn) (1)
-+#endif
-+
-+static int skb_remove_foreign_references(struct sk_buff *skb)
-+{
-+ struct page *page;
-+ unsigned long pfn;
-+ int i, off;
-+ char *vaddr;
-+
-+ BUG_ON(skb_shinfo(skb)->frag_list);
-+
-+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+ pfn = page_to_pfn(skb_shinfo(skb)->frags[i].page);
-+ if (!is_foreign(pfn))
-+ continue;
-+
-+ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
-+ if (unlikely(!page))
-+ return 0;
-+
-+ vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
-+ off = skb_shinfo(skb)->frags[i].page_offset;
-+ memcpy(page_address(page) + off,
-+ vaddr + off,
-+ skb_shinfo(skb)->frags[i].size);
-+ kunmap_skb_frag(vaddr);
-+
-+ put_page(skb_shinfo(skb)->frags[i].page);
-+ skb_shinfo(skb)->frags[i].page = page;
-+ }
-+
-+ return 1;
-+}
-+
-+static int loopback_start_xmit(struct sk_buff *skb, struct net_device *dev)
-+{
-+ struct net_private *np = netdev_priv(dev);
-+
-+ if (!skb_remove_foreign_references(skb)) {
-+ np->stats.tx_dropped++;
-+ dev_kfree_skb(skb);
-+ return 0;
-+ }
-+
-+ dst_release(skb->dst);
-+ skb->dst = NULL;
-+
-+ skb_orphan(skb);
-+
-+ np->stats.tx_bytes += skb->len;
-+ np->stats.tx_packets++;
-+
-+ /* Switch to loopback context. */
-+ dev = np->loopback_dev;
-+ np = netdev_priv(dev);
-+
-+ np->stats.rx_bytes += skb->len;
-+ np->stats.rx_packets++;
-+
-+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
-+ /* Defer checksum calculation. */
-+ skb->proto_csum_blank = 1;
-+ /* Must be a local packet: assert its integrity. */
-+ skb->proto_data_valid = 1;
-+ }
-+
-+ skb->ip_summed = skb->proto_data_valid ?
-+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
-+
-+ skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */
-+ skb->protocol = eth_type_trans(skb, dev);
-+ skb->dev = dev;
-+ dev->last_rx = jiffies;
-+
-+ /* Flush netfilter context: rx'ed skbuffs not expected to have any. */
-+ nf_reset(skb);
-+ secpath_reset(skb);
-+
-+ netif_rx(skb);
-+
-+ return 0;
-+}
-+
-+static struct net_device_stats *loopback_get_stats(struct net_device *dev)
-+{
-+ struct net_private *np = netdev_priv(dev);
-+ return &np->stats;
-+}
-+
-+static struct ethtool_ops network_ethtool_ops =
-+{
-+ .get_tx_csum = ethtool_op_get_tx_csum,
-+ .set_tx_csum = ethtool_op_set_tx_csum,
-+ .get_sg = ethtool_op_get_sg,
-+ .set_sg = ethtool_op_set_sg,
-+ .get_tso = ethtool_op_get_tso,
-+ .set_tso = ethtool_op_set_tso,
-+ .get_link = ethtool_op_get_link,
-+};
-+
-+/*
-+ * Nothing to do here. Virtual interface is point-to-point and the
-+ * physical interface is probably promiscuous anyway.
-+ */
-+static void loopback_set_multicast_list(struct net_device *dev)
-+{
-+}
-+
-+static void loopback_construct(struct net_device *dev, struct net_device *lo)
-+{
-+ struct net_private *np = netdev_priv(dev);
-+
-+ np->loopback_dev = lo;
-+
-+ dev->open = loopback_open;
-+ dev->stop = loopback_close;
-+ dev->hard_start_xmit = loopback_start_xmit;
-+ dev->get_stats = loopback_get_stats;
-+ dev->set_multicast_list = loopback_set_multicast_list;
-+ dev->change_mtu = NULL; /* allow arbitrary mtu */
-+
-+ dev->tx_queue_len = 0;
-+
-+ dev->features = (NETIF_F_HIGHDMA |
-+ NETIF_F_LLTX |
-+ NETIF_F_TSO |
-+ NETIF_F_SG |
-+ NETIF_F_IP_CSUM);
-+
-+ SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
-+
-+ /*
-+ * We do not set a jumbo MTU on the interface. Otherwise the network
-+ * stack will try to send large packets that will get dropped by the
-+ * Ethernet bridge (unless the physical Ethernet interface is
-+ * configured to transfer jumbo packets). If a larger MTU is desired
-+ * then the system administrator can specify it using the 'ifconfig'
-+ * command.
-+ */
-+ /*dev->mtu = 16*1024;*/
-+}
-+
-+static int __init make_loopback(int i)
-+{
-+ struct net_device *dev1, *dev2;
-+ char dev_name[IFNAMSIZ];
-+ int err = -ENOMEM;
-+
-+ sprintf(dev_name, "vif0.%d", i);
-+ dev1 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
-+ if (!dev1)
-+ return err;
-+
-+ sprintf(dev_name, "veth%d", i);
-+ dev2 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
-+ if (!dev2)
-+ goto fail_netdev2;
-+
-+ loopback_construct(dev1, dev2);
-+ loopback_construct(dev2, dev1);
-+
-+ /*
-+ * Initialise a dummy MAC address for the 'dummy backend' interface. We
-+ * choose the numerically largest non-broadcast address to prevent the
-+ * address getting stolen by an Ethernet bridge for STP purposes.
-+ */
-+ memset(dev1->dev_addr, 0xFF, ETH_ALEN);
-+ dev1->dev_addr[0] &= ~0x01;
-+
-+ if ((err = register_netdev(dev1)) != 0)
-+ goto fail;
-+
-+ if ((err = register_netdev(dev2)) != 0) {
-+ unregister_netdev(dev1);
-+ goto fail;
-+ }
-+
-+ return 0;
-+
-+ fail:
-+ free_netdev(dev2);
-+ fail_netdev2:
-+ free_netdev(dev1);
-+ return err;
-+}
-+
-+static void __exit clean_loopback(int i)
-+{
-+ struct net_device *dev1, *dev2;
-+ char dev_name[IFNAMSIZ];
-+
-+ sprintf(dev_name, "vif0.%d", i);
-+ dev1 = dev_get_by_name(dev_name);
-+ sprintf(dev_name, "veth%d", i);
-+ dev2 = dev_get_by_name(dev_name);
-+ if (dev1 && dev2) {
-+ unregister_netdev(dev2);
-+ unregister_netdev(dev1);
-+ free_netdev(dev2);
-+ free_netdev(dev1);
-+ }
-+}
-+
-+static int __init loopback_init(void)
-+{
-+ int i, err = 0;
-+
-+ if (nloopbacks == -1)
-+ nloopbacks = is_initial_xendomain() ? 4 : 0;
-+
-+ for (i = 0; i < nloopbacks; i++)
-+ if ((err = make_loopback(i)) != 0)
-+ break;
-+
-+ return err;
-+}
-+
-+module_init(loopback_init);
-+
-+static void __exit loopback_exit(void)
-+{
-+ int i;
-+
-+ for (i = nloopbacks; i-- > 0; )
-+ clean_loopback(i);
-+}
-+
-+module_exit(loopback_exit);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/netback/Makefile ubuntu-gutsy-xen/drivers/xen/netback/Makefile
---- ubuntu-gutsy/drivers/xen/netback/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/netback/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,5 @@
-+obj-$(CONFIG_XEN_NETDEV_BACKEND) := netbk.o
-+obj-$(CONFIG_XEN_NETDEV_LOOPBACK) += netloop.o
-+
-+netbk-y := netback.o xenbus.o interface.o
-+netloop-y := loopback.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/netback/netback.c ubuntu-gutsy-xen/drivers/xen/netback/netback.c
---- ubuntu-gutsy/drivers/xen/netback/netback.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/netback/netback.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,1500 @@
-+/******************************************************************************
-+ * drivers/xen/netback/netback.c
-+ *
-+ * Back-end of the driver for virtual network devices. This portion of the
-+ * driver exports a 'unified' network-device interface that can be accessed
-+ * by any operating system that implements a compatible front end. A
-+ * reference front-end implementation can be found in:
-+ * drivers/xen/netfront/netfront.c
-+ *
-+ * Copyright (c) 2002-2005, K A Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include "common.h"
-+#include <xen/balloon.h>
-+#include <xen/interface/memory.h>
-+
-+/*define NETBE_DEBUG_INTERRUPT*/
-+
-+/* extra field used in struct page */
-+#define netif_page_index(pg) (*(long *)&(pg)->mapping)
-+
-+struct netbk_rx_meta {
-+ skb_frag_t frag;
-+ int id;
-+ int copy:1;
-+};
-+
-+static void netif_idx_release(u16 pending_idx);
-+static void netif_page_release(struct page *page);
-+static void make_tx_response(netif_t *netif,
-+ netif_tx_request_t *txp,
-+ s8 st);
-+static netif_rx_response_t *make_rx_response(netif_t *netif,
-+ u16 id,
-+ s8 st,
-+ u16 offset,
-+ u16 size,
-+ u16 flags);
-+
-+static void net_tx_action(unsigned long unused);
-+static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
-+
-+static void net_rx_action(unsigned long unused);
-+static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
-+
-+static struct timer_list net_timer;
-+
-+#define MAX_PENDING_REQS 256
-+
-+static struct sk_buff_head rx_queue;
-+
-+static struct page **mmap_pages;
-+static inline unsigned long idx_to_kaddr(unsigned int idx)
-+{
-+ return (unsigned long)pfn_to_kaddr(page_to_pfn(mmap_pages[idx]));
-+}
-+
-+#define PKT_PROT_LEN 64
-+
-+static struct pending_tx_info {
-+ netif_tx_request_t req;
-+ netif_t *netif;
-+} pending_tx_info[MAX_PENDING_REQS];
-+static u16 pending_ring[MAX_PENDING_REQS];
-+typedef unsigned int PEND_RING_IDX;
-+#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
-+static PEND_RING_IDX pending_prod, pending_cons;
-+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
-+
-+/* Freed TX SKBs get batched on this ring before return to pending_ring. */
-+static u16 dealloc_ring[MAX_PENDING_REQS];
-+static PEND_RING_IDX dealloc_prod, dealloc_cons;
-+
-+static struct sk_buff_head tx_queue;
-+
-+static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
-+static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
-+static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
-+
-+static struct list_head net_schedule_list;
-+static spinlock_t net_schedule_list_lock;
-+
-+#define MAX_MFN_ALLOC 64
-+static unsigned long mfn_list[MAX_MFN_ALLOC];
-+static unsigned int alloc_index = 0;
-+
-+static inline unsigned long alloc_mfn(void)
-+{
-+ BUG_ON(alloc_index == 0);
-+ return mfn_list[--alloc_index];
-+}
-+
-+static int check_mfn(int nr)
-+{
-+ struct xen_memory_reservation reservation = {
-+ .extent_order = 0,
-+ .domid = DOMID_SELF
-+ };
-+
-+ if (likely(alloc_index >= nr))
-+ return 0;
-+
-+ set_xen_guest_handle(reservation.extent_start, mfn_list + alloc_index);
-+ reservation.nr_extents = MAX_MFN_ALLOC - alloc_index;
-+ alloc_index += HYPERVISOR_memory_op(XENMEM_increase_reservation,
-+ &reservation);
-+
-+ return alloc_index >= nr ? 0 : -ENOMEM;
-+}
-+
-+static inline void maybe_schedule_tx_action(void)
-+{
-+ smp_mb();
-+ if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
-+ !list_empty(&net_schedule_list))
-+ tasklet_schedule(&net_tx_tasklet);
-+}
-+
-+static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
-+{
-+ struct skb_shared_info *ninfo;
-+ struct sk_buff *nskb;
-+ unsigned long offset;
-+ int ret;
-+ int len;
-+ int headlen;
-+
-+ BUG_ON(skb_shinfo(skb)->frag_list != NULL);
-+
-+ nskb = alloc_skb(SKB_MAX_HEAD(0), GFP_ATOMIC | __GFP_NOWARN);
-+ if (unlikely(!nskb))
-+ goto err;
-+
-+ skb_reserve(nskb, 16 + NET_IP_ALIGN);
-+ headlen = skb_end_pointer(nskb) - nskb->data;
-+ if (headlen > skb_headlen(skb))
-+ headlen = skb_headlen(skb);
-+ ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
-+ BUG_ON(ret);
-+
-+ ninfo = skb_shinfo(nskb);
-+ ninfo->gso_size = skb_shinfo(skb)->gso_size;
-+ ninfo->gso_type = skb_shinfo(skb)->gso_type;
-+
-+ offset = headlen;
-+ len = skb->len - headlen;
-+
-+ nskb->len = skb->len;
-+ nskb->data_len = len;
-+ nskb->truesize += len;
-+
-+ while (len) {
-+ struct page *page;
-+ int copy;
-+ int zero;
-+
-+ if (unlikely(ninfo->nr_frags >= MAX_SKB_FRAGS)) {
-+ dump_stack();
-+ goto err_free;
-+ }
-+
-+ copy = len >= PAGE_SIZE ? PAGE_SIZE : len;
-+ zero = len >= PAGE_SIZE ? 0 : __GFP_ZERO;
-+
-+ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | zero);
-+ if (unlikely(!page))
-+ goto err_free;
-+
-+ ret = skb_copy_bits(skb, offset, page_address(page), copy);
-+ BUG_ON(ret);
-+
-+ ninfo->frags[ninfo->nr_frags].page = page;
-+ ninfo->frags[ninfo->nr_frags].page_offset = 0;
-+ ninfo->frags[ninfo->nr_frags].size = copy;
-+ ninfo->nr_frags++;
-+
-+ offset += copy;
-+ len -= copy;
-+ }
-+
-+#ifdef NET_SKBUFF_DATA_USES_OFFSET
-+ offset = 0;
-+#else
-+ offset = nskb->data - skb->data;
-+#endif
-+
-+ nskb->transport_header = skb->transport_header + offset;
-+ nskb->network_header = skb->network_header + offset;
-+ nskb->mac_header = skb->mac_header + offset;
-+
-+ return nskb;
-+
-+ err_free:
-+ kfree_skb(nskb);
-+ err:
-+ return NULL;
-+}
-+
-+static inline int netbk_max_required_rx_slots(netif_t *netif)
-+{
-+ if (netif->features & (NETIF_F_SG|NETIF_F_TSO))
-+ return MAX_SKB_FRAGS + 2; /* header + extra_info + frags */
-+ return 1; /* all in one */
-+}
-+
-+static inline int netbk_queue_full(netif_t *netif)
-+{
-+ RING_IDX peek = netif->rx_req_cons_peek;
-+ RING_IDX needed = netbk_max_required_rx_slots(netif);
-+
-+ return ((netif->rx.sring->req_prod - peek) < needed) ||
-+ ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) < needed);
-+}
-+
-+static void tx_queue_callback(unsigned long data)
-+{
-+ netif_t *netif = (netif_t *)data;
-+ if (netif_schedulable(netif))
-+ netif_wake_queue(netif->dev);
-+}
-+
-+int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
-+{
-+ netif_t *netif = netdev_priv(dev);
-+
-+ BUG_ON(skb->dev != dev);
-+
-+ /* Drop the packet if the target domain has no receive buffers. */
-+ if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
-+ goto drop;
-+
-+ /*
-+ * Copy the packet here if it's destined for a flipping interface
-+ * but isn't flippable (e.g. extra references to data).
-+ * XXX For now we also copy skbuffs whose head crosses a page
-+ * boundary, because netbk_gop_skb can't handle them.
-+ */
-+ if (!netif->copying_receiver ||
-+ ((skb_headlen(skb) + offset_in_page(skb->data)) >= PAGE_SIZE)) {
-+ struct sk_buff *nskb = netbk_copy_skb(skb);
-+ if ( unlikely(nskb == NULL) )
-+ goto drop;
-+ /* Copy only the header fields we use in this driver. */
-+ nskb->dev = skb->dev;
-+ nskb->ip_summed = skb->ip_summed;
-+ nskb->proto_data_valid = skb->proto_data_valid;
-+ dev_kfree_skb(skb);
-+ skb = nskb;
-+ }
-+
-+ netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
-+ !!skb_shinfo(skb)->gso_size;
-+ netif_get(netif);
-+
-+ if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
-+ netif->rx.sring->req_event = netif->rx_req_cons_peek +
-+ netbk_max_required_rx_slots(netif);
-+ mb(); /* request notification /then/ check & stop the queue */
-+ if (netbk_queue_full(netif)) {
-+ netif_stop_queue(dev);
-+ /*
-+ * Schedule 500ms timeout to restart the queue, thus
-+ * ensuring that an inactive queue will be drained.
-+ * Packets will be dropped immediately until more
-+ * receive buffers become available (see
-+ * netbk_queue_full() check above).
-+ */
-+ netif->tx_queue_timeout.data = (unsigned long)netif;
-+ netif->tx_queue_timeout.function = tx_queue_callback;
-+ __mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
-+ }
-+ }
-+
-+ skb_queue_tail(&rx_queue, skb);
-+ tasklet_schedule(&net_rx_tasklet);
-+
-+ return 0;
-+
-+ drop:
-+ netif->stats.tx_dropped++;
-+ dev_kfree_skb(skb);
-+ return 0;
-+}
-+
-+#if 0
-+static void xen_network_done_notify(void)
-+{
-+ static struct net_device *eth0_dev = NULL;
-+ if (unlikely(eth0_dev == NULL))
-+ eth0_dev = __dev_get_by_name("eth0");
-+ netif_rx_schedule(eth0_dev);
-+}
-+/*
-+ * Add following to poll() function in NAPI driver (Tigon3 is example):
-+ * if ( xen_network_done() )
-+ * tg3_enable_ints(tp);
-+ */
-+int xen_network_done(void)
-+{
-+ return skb_queue_empty(&rx_queue);
-+}
-+#endif
-+
-+struct netrx_pending_operations {
-+ unsigned trans_prod, trans_cons;
-+ unsigned mmu_prod, mmu_cons;
-+ unsigned mcl_prod, mcl_cons;
-+ unsigned copy_prod, copy_cons;
-+ unsigned meta_prod, meta_cons;
-+ mmu_update_t *mmu;
-+ gnttab_transfer_t *trans;
-+ gnttab_copy_t *copy;
-+ multicall_entry_t *mcl;
-+ struct netbk_rx_meta *meta;
-+};
-+
-+/* Set up the grant operations for this fragment. If it's a flipping
-+ interface, we also set up the unmap request from here. */
-+static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
-+ int i, struct netrx_pending_operations *npo,
-+ struct page *page, unsigned long size,
-+ unsigned long offset)
-+{
-+ mmu_update_t *mmu;
-+ gnttab_transfer_t *gop;
-+ gnttab_copy_t *copy_gop;
-+ multicall_entry_t *mcl;
-+ netif_rx_request_t *req;
-+ unsigned long old_mfn, new_mfn;
-+
-+ old_mfn = virt_to_mfn(page_address(page));
-+
-+ req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
-+ if (netif->copying_receiver) {
-+ /* The fragment needs to be copied rather than
-+ flipped. */
-+ meta->copy = 1;
-+ copy_gop = npo->copy + npo->copy_prod++;
-+ copy_gop->flags = GNTCOPY_dest_gref;
-+ if (PageForeign(page)) {
-+ struct pending_tx_info *src_pend =
-+ &pending_tx_info[netif_page_index(page)];
-+ copy_gop->source.domid = src_pend->netif->domid;
-+ copy_gop->source.u.ref = src_pend->req.gref;
-+ copy_gop->flags |= GNTCOPY_source_gref;
-+ } else {
-+ copy_gop->source.domid = DOMID_SELF;
-+ copy_gop->source.u.gmfn = old_mfn;
-+ }
-+ copy_gop->source.offset = offset;
-+ copy_gop->dest.domid = netif->domid;
-+ copy_gop->dest.offset = 0;
-+ copy_gop->dest.u.ref = req->gref;
-+ copy_gop->len = size;
-+ } else {
-+ meta->copy = 0;
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ new_mfn = alloc_mfn();
-+
-+ /*
-+ * Set the new P2M table entry before
-+ * reassigning the old data page. Heed the
-+ * comment in pgtable-2level.h:pte_page(). :-)
-+ */
-+ set_phys_to_machine(page_to_pfn(page), new_mfn);
-+
-+ mcl = npo->mcl + npo->mcl_prod++;
-+ MULTI_update_va_mapping(mcl,
-+ (unsigned long)page_address(page),
-+ pfn_pte_ma(new_mfn, PAGE_KERNEL),
-+ 0);
-+
-+ mmu = npo->mmu + npo->mmu_prod++;
-+ mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
-+ MMU_MACHPHYS_UPDATE;
-+ mmu->val = page_to_pfn(page);
-+ }
-+
-+ gop = npo->trans + npo->trans_prod++;
-+ gop->mfn = old_mfn;
-+ gop->domid = netif->domid;
-+ gop->ref = req->gref;
-+ }
-+ return req->id;
-+}
-+
-+static void netbk_gop_skb(struct sk_buff *skb,
-+ struct netrx_pending_operations *npo)
-+{
-+ netif_t *netif = netdev_priv(skb->dev);
-+ int nr_frags = skb_shinfo(skb)->nr_frags;
-+ int i;
-+ int extra;
-+ struct netbk_rx_meta *head_meta, *meta;
-+
-+ head_meta = npo->meta + npo->meta_prod++;
-+ head_meta->frag.page_offset = skb_shinfo(skb)->gso_type;
-+ head_meta->frag.size = skb_shinfo(skb)->gso_size;
-+ extra = !!head_meta->frag.size + 1;
-+
-+ for (i = 0; i < nr_frags; i++) {
-+ meta = npo->meta + npo->meta_prod++;
-+ meta->frag = skb_shinfo(skb)->frags[i];
-+ meta->id = netbk_gop_frag(netif, meta, i + extra, npo,
-+ meta->frag.page,
-+ meta->frag.size,
-+ meta->frag.page_offset);
-+ }
-+
-+ /*
-+ * This must occur at the end to ensure that we don't trash skb_shinfo
-+ * until we're done. We know that the head doesn't cross a page
-+ * boundary because such packets get copied in netif_be_start_xmit.
-+ */
-+ head_meta->id = netbk_gop_frag(netif, head_meta, 0, npo,
-+ virt_to_page(skb->data),
-+ skb_headlen(skb),
-+ offset_in_page(skb->data));
-+
-+ netif->rx.req_cons += nr_frags + extra;
-+}
-+
-+static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta)
-+{
-+ int i;
-+
-+ for (i = 0; i < nr_frags; i++)
-+ put_page(meta[i].frag.page);
-+}
-+
-+/* This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
-+ used to set up the operations on the top of
-+ netrx_pending_operations, which have since been done. Check that
-+ they didn't give any errors and advance over them. */
-+static int netbk_check_gop(int nr_frags, domid_t domid,
-+ struct netrx_pending_operations *npo)
-+{
-+ multicall_entry_t *mcl;
-+ gnttab_transfer_t *gop;
-+ gnttab_copy_t *copy_op;
-+ int status = NETIF_RSP_OKAY;
-+ int i;
-+
-+ for (i = 0; i <= nr_frags; i++) {
-+ if (npo->meta[npo->meta_cons + i].copy) {
-+ copy_op = npo->copy + npo->copy_cons++;
-+ if (copy_op->status != GNTST_okay) {
-+ DPRINTK("Bad status %d from copy to DOM%d.\n",
-+ copy_op->status, domid);
-+ status = NETIF_RSP_ERROR;
-+ }
-+ } else {
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ mcl = npo->mcl + npo->mcl_cons++;
-+ /* The update_va_mapping() must not fail. */
-+ BUG_ON(mcl->result != 0);
-+ }
-+
-+ gop = npo->trans + npo->trans_cons++;
-+ /* Check the reassignment error code. */
-+ if (gop->status != 0) {
-+ DPRINTK("Bad status %d from grant transfer to DOM%u\n",
-+ gop->status, domid);
-+ /*
-+ * Page no longer belongs to us unless
-+ * GNTST_bad_page, but that should be
-+ * a fatal error anyway.
-+ */
-+ BUG_ON(gop->status == GNTST_bad_page);
-+ status = NETIF_RSP_ERROR;
-+ }
-+ }
-+ }
-+
-+ return status;
-+}
-+
-+static void netbk_add_frag_responses(netif_t *netif, int status,
-+ struct netbk_rx_meta *meta, int nr_frags)
-+{
-+ int i;
-+ unsigned long offset;
-+
-+ for (i = 0; i < nr_frags; i++) {
-+ int id = meta[i].id;
-+ int flags = (i == nr_frags - 1) ? 0 : NETRXF_more_data;
-+
-+ if (meta[i].copy)
-+ offset = 0;
-+ else
-+ offset = meta[i].frag.page_offset;
-+ make_rx_response(netif, id, status, offset,
-+ meta[i].frag.size, flags);
-+ }
-+}
-+
-+static void net_rx_action(unsigned long unused)
-+{
-+ netif_t *netif = NULL;
-+ s8 status;
-+ u16 id, irq, flags;
-+ netif_rx_response_t *resp;
-+ multicall_entry_t *mcl;
-+ struct sk_buff_head rxq;
-+ struct sk_buff *skb;
-+ int notify_nr = 0;
-+ int ret;
-+ int nr_frags;
-+ int count;
-+ unsigned long offset;
-+
-+ /*
-+ * Putting hundreds of bytes on the stack is considered rude.
-+ * Static works because a tasklet can only be on one CPU at any time.
-+ */
-+ static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+3];
-+ static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
-+ static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE];
-+ static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE];
-+ static unsigned char rx_notify[NR_IRQS];
-+ static u16 notify_list[NET_RX_RING_SIZE];
-+ static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
-+
-+ struct netrx_pending_operations npo = {
-+ mmu: rx_mmu,
-+ trans: grant_trans_op,
-+ copy: grant_copy_op,
-+ mcl: rx_mcl,
-+ meta: meta};
-+
-+ skb_queue_head_init(&rxq);
-+
-+ count = 0;
-+
-+ while ((skb = skb_dequeue(&rx_queue)) != NULL) {
-+ nr_frags = skb_shinfo(skb)->nr_frags;
-+ *(int *)skb->cb = nr_frags;
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap) &&
-+ !((netif_t *)netdev_priv(skb->dev))->copying_receiver &&
-+ check_mfn(nr_frags + 1)) {
-+ /* Memory squeeze? Back off for an arbitrary while. */
-+ if ( net_ratelimit() )
-+ WPRINTK("Memory squeeze in netback "
-+ "driver.\n");
-+ mod_timer(&net_timer, jiffies + HZ);
-+ skb_queue_head(&rx_queue, skb);
-+ break;
-+ }
-+
-+ netbk_gop_skb(skb, &npo);
-+
-+ count += nr_frags + 1;
-+
-+ __skb_queue_tail(&rxq, skb);
-+
-+ /* Filled the batch queue? */
-+ if (count + MAX_SKB_FRAGS >= NET_RX_RING_SIZE)
-+ break;
-+ }
-+
-+ if (npo.mcl_prod &&
-+ !xen_feature(XENFEAT_auto_translated_physmap)) {
-+ mcl = npo.mcl + npo.mcl_prod++;
-+
-+ BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping);
-+ mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-+
-+ mcl->op = __HYPERVISOR_mmu_update;
-+ mcl->args[0] = (unsigned long)rx_mmu;
-+ mcl->args[1] = npo.mmu_prod;
-+ mcl->args[2] = 0;
-+ mcl->args[3] = DOMID_SELF;
-+ }
-+
-+ if (npo.trans_prod) {
-+ mcl = npo.mcl + npo.mcl_prod++;
-+ mcl->op = __HYPERVISOR_grant_table_op;
-+ mcl->args[0] = GNTTABOP_transfer;
-+ mcl->args[1] = (unsigned long)grant_trans_op;
-+ mcl->args[2] = npo.trans_prod;
-+ }
-+
-+ if (npo.copy_prod) {
-+ mcl = npo.mcl + npo.mcl_prod++;
-+ mcl->op = __HYPERVISOR_grant_table_op;
-+ mcl->args[0] = GNTTABOP_copy;
-+ mcl->args[1] = (unsigned long)grant_copy_op;
-+ mcl->args[2] = npo.copy_prod;
-+ }
-+
-+ /* Nothing to do? */
-+ if (!npo.mcl_prod)
-+ return;
-+
-+ BUG_ON(npo.copy_prod > NET_RX_RING_SIZE);
-+ BUG_ON(npo.mmu_prod > NET_RX_RING_SIZE);
-+ BUG_ON(npo.trans_prod > NET_RX_RING_SIZE);
-+ BUG_ON(npo.mcl_prod > NET_RX_RING_SIZE+3);
-+ BUG_ON(npo.meta_prod > NET_RX_RING_SIZE);
-+
-+ ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod);
-+ BUG_ON(ret != 0);
-+
-+ while ((skb = __skb_dequeue(&rxq)) != NULL) {
-+ nr_frags = *(int *)skb->cb;
-+
-+ netif = netdev_priv(skb->dev);
-+ /* We can't rely on skb_release_data to release the
-+ pages used by fragments for us, since it tries to
-+ touch the pages in the fraglist. If we're in
-+ flipping mode, that doesn't work. In copying mode,
-+ we still have access to all of the pages, and so
-+ it's safe to let release_data deal with it. */
-+ /* (Freeing the fragments is safe since we copy
-+ non-linear skbs destined for flipping interfaces) */
-+ if (!netif->copying_receiver) {
-+ atomic_set(&(skb_shinfo(skb)->dataref), 1);
-+ skb_shinfo(skb)->frag_list = NULL;
-+ skb_shinfo(skb)->nr_frags = 0;
-+ netbk_free_pages(nr_frags, meta + npo.meta_cons + 1);
-+ }
-+
-+ netif->stats.tx_bytes += skb->len;
-+ netif->stats.tx_packets++;
-+
-+ status = netbk_check_gop(nr_frags, netif->domid, &npo);
-+
-+ id = meta[npo.meta_cons].id;
-+ flags = nr_frags ? NETRXF_more_data : 0;
-+
-+ if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
-+ flags |= NETRXF_csum_blank | NETRXF_data_validated;
-+ else if (skb->proto_data_valid) /* remote but checksummed? */
-+ flags |= NETRXF_data_validated;
-+
-+ if (meta[npo.meta_cons].copy)
-+ offset = 0;
-+ else
-+ offset = offset_in_page(skb->data);
-+ resp = make_rx_response(netif, id, status, offset,
-+ skb_headlen(skb), flags);
-+
-+ if (meta[npo.meta_cons].frag.size) {
-+ struct netif_extra_info *gso =
-+ (struct netif_extra_info *)
-+ RING_GET_RESPONSE(&netif->rx,
-+ netif->rx.rsp_prod_pvt++);
-+
-+ resp->flags |= NETRXF_extra_info;
-+
-+ gso->u.gso.size = meta[npo.meta_cons].frag.size;
-+ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
-+ gso->u.gso.pad = 0;
-+ gso->u.gso.features = 0;
-+
-+ gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
-+ gso->flags = 0;
-+ }
-+
-+ netbk_add_frag_responses(netif, status,
-+ meta + npo.meta_cons + 1,
-+ nr_frags);
-+
-+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
-+ irq = netif->irq;
-+ if (ret && !rx_notify[irq]) {
-+ rx_notify[irq] = 1;
-+ notify_list[notify_nr++] = irq;
-+ }
-+
-+ if (netif_queue_stopped(netif->dev) &&
-+ netif_schedulable(netif) &&
-+ !netbk_queue_full(netif))
-+ netif_wake_queue(netif->dev);
-+
-+ netif_put(netif);
-+ dev_kfree_skb(skb);
-+ npo.meta_cons += nr_frags + 1;
-+ }
-+
-+ while (notify_nr != 0) {
-+ irq = notify_list[--notify_nr];
-+ rx_notify[irq] = 0;
-+ notify_remote_via_irq(irq);
-+ }
-+
-+ /* More work to do? */
-+ if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
-+ tasklet_schedule(&net_rx_tasklet);
-+#if 0
-+ else
-+ xen_network_done_notify();
-+#endif
-+}
-+
-+static void net_alarm(unsigned long unused)
-+{
-+ tasklet_schedule(&net_rx_tasklet);
-+}
-+
-+struct net_device_stats *netif_be_get_stats(struct net_device *dev)
-+{
-+ netif_t *netif = netdev_priv(dev);
-+ return &netif->stats;
-+}
-+
-+static int __on_net_schedule_list(netif_t *netif)
-+{
-+ return netif->list.next != NULL;
-+}
-+
-+static void remove_from_net_schedule_list(netif_t *netif)
-+{
-+ spin_lock_irq(&net_schedule_list_lock);
-+ if (likely(__on_net_schedule_list(netif))) {
-+ list_del(&netif->list);
-+ netif->list.next = NULL;
-+ netif_put(netif);
-+ }
-+ spin_unlock_irq(&net_schedule_list_lock);
-+}
-+
-+static void add_to_net_schedule_list_tail(netif_t *netif)
-+{
-+ if (__on_net_schedule_list(netif))
-+ return;
-+
-+ spin_lock_irq(&net_schedule_list_lock);
-+ if (!__on_net_schedule_list(netif) &&
-+ likely(netif_schedulable(netif))) {
-+ list_add_tail(&netif->list, &net_schedule_list);
-+ netif_get(netif);
-+ }
-+ spin_unlock_irq(&net_schedule_list_lock);
-+}
-+
-+/*
-+ * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
-+ * If this driver is pipelining transmit requests then we can be very
-+ * aggressive in avoiding new-packet notifications -- frontend only needs to
-+ * send a notification if there are no outstanding unreceived responses.
-+ * If we may be buffering transmit buffers for any reason then we must be rather
-+ * more conservative and treat this as the final check for pending work.
-+ */
-+void netif_schedule_work(netif_t *netif)
-+{
-+ int more_to_do;
-+
-+#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
-+ more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
-+#else
-+ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
-+#endif
-+
-+ if (more_to_do) {
-+ add_to_net_schedule_list_tail(netif);
-+ maybe_schedule_tx_action();
-+ }
-+}
-+
-+void netif_deschedule_work(netif_t *netif)
-+{
-+ remove_from_net_schedule_list(netif);
-+}
-+
-+
-+static void tx_add_credit(netif_t *netif)
-+{
-+ unsigned long max_burst, max_credit;
-+
-+ /*
-+ * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
-+ * Otherwise the interface can seize up due to insufficient credit.
-+ */
-+ max_burst = RING_GET_REQUEST(&netif->tx, netif->tx.req_cons)->size;
-+ max_burst = min(max_burst, 131072UL);
-+ max_burst = max(max_burst, netif->credit_bytes);
-+
-+ /* Take care that adding a new chunk of credit doesn't wrap to zero. */
-+ max_credit = netif->remaining_credit + netif->credit_bytes;
-+ if (max_credit < netif->remaining_credit)
-+ max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
-+
-+ netif->remaining_credit = min(max_credit, max_burst);
-+}
-+
-+static void tx_credit_callback(unsigned long data)
-+{
-+ netif_t *netif = (netif_t *)data;
-+ tx_add_credit(netif);
-+ netif_schedule_work(netif);
-+}
-+
-+inline static void net_tx_action_dealloc(void)
-+{
-+ gnttab_unmap_grant_ref_t *gop;
-+ u16 pending_idx;
-+ PEND_RING_IDX dc, dp;
-+ netif_t *netif;
-+ int ret;
-+
-+ dc = dealloc_cons;
-+ dp = dealloc_prod;
-+
-+ /* Ensure we see all indexes enqueued by netif_idx_release(). */
-+ smp_rmb();
-+
-+ /*
-+ * Free up any grants we have finished using
-+ */
-+ gop = tx_unmap_ops;
-+ while (dc != dp) {
-+ pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
-+ gnttab_set_unmap_op(gop, idx_to_kaddr(pending_idx),
-+ GNTMAP_host_map,
-+ grant_tx_handle[pending_idx]);
-+ gop++;
-+ }
-+ ret = HYPERVISOR_grant_table_op(
-+ GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
-+ BUG_ON(ret);
-+
-+ while (dealloc_cons != dp) {
-+ pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
-+
-+ netif = pending_tx_info[pending_idx].netif;
-+
-+ make_tx_response(netif, &pending_tx_info[pending_idx].req,
-+ NETIF_RSP_OKAY);
-+
-+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-+
-+ netif_put(netif);
-+ }
-+}
-+
-+static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
-+{
-+ RING_IDX cons = netif->tx.req_cons;
-+
-+ do {
-+ make_tx_response(netif, txp, NETIF_RSP_ERROR);
-+ if (cons >= end)
-+ break;
-+ txp = RING_GET_REQUEST(&netif->tx, cons++);
-+ } while (1);
-+ netif->tx.req_cons = cons;
-+ netif_schedule_work(netif);
-+ netif_put(netif);
-+}
-+
-+static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
-+ netif_tx_request_t *txp, int work_to_do)
-+{
-+ RING_IDX cons = netif->tx.req_cons;
-+ int frags = 0;
-+
-+ if (!(first->flags & NETTXF_more_data))
-+ return 0;
-+
-+ do {
-+ if (frags >= work_to_do) {
-+ DPRINTK("Need more frags\n");
-+ return -frags;
-+ }
-+
-+ if (unlikely(frags >= MAX_SKB_FRAGS)) {
-+ DPRINTK("Too many frags\n");
-+ return -frags;
-+ }
-+
-+ memcpy(txp, RING_GET_REQUEST(&netif->tx, cons + frags),
-+ sizeof(*txp));
-+ if (txp->size > first->size) {
-+ DPRINTK("Frags galore\n");
-+ return -frags;
-+ }
-+
-+ first->size -= txp->size;
-+ frags++;
-+
-+ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-+ DPRINTK("txp->offset: %x, size: %u\n",
-+ txp->offset, txp->size);
-+ return -frags;
-+ }
-+ } while ((txp++)->flags & NETTXF_more_data);
-+
-+ return frags;
-+}
-+
-+static gnttab_map_grant_ref_t *netbk_get_requests(netif_t *netif,
-+ struct sk_buff *skb,
-+ netif_tx_request_t *txp,
-+ gnttab_map_grant_ref_t *mop)
-+{
-+ struct skb_shared_info *shinfo = skb_shinfo(skb);
-+ skb_frag_t *frags = shinfo->frags;
-+ unsigned long pending_idx = *((u16 *)skb->data);
-+ int i, start;
-+
-+ /* Skip first skb fragment if it is on same page as header fragment. */
-+ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
-+
-+ for (i = start; i < shinfo->nr_frags; i++, txp++) {
-+ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
-+
-+ gnttab_set_map_op(mop++, idx_to_kaddr(pending_idx),
-+ GNTMAP_host_map | GNTMAP_readonly,
-+ txp->gref, netif->domid);
-+
-+ memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
-+ netif_get(netif);
-+ pending_tx_info[pending_idx].netif = netif;
-+ frags[i].page = (void *)pending_idx;
-+ }
-+
-+ return mop;
-+}
-+
-+static int netbk_tx_check_mop(struct sk_buff *skb,
-+ gnttab_map_grant_ref_t **mopp)
-+{
-+ gnttab_map_grant_ref_t *mop = *mopp;
-+ int pending_idx = *((u16 *)skb->data);
-+ netif_t *netif = pending_tx_info[pending_idx].netif;
-+ netif_tx_request_t *txp;
-+ struct skb_shared_info *shinfo = skb_shinfo(skb);
-+ int nr_frags = shinfo->nr_frags;
-+ int i, err, start;
-+
-+ /* Check status of header. */
-+ err = mop->status;
-+ if (unlikely(err)) {
-+ txp = &pending_tx_info[pending_idx].req;
-+ make_tx_response(netif, txp, NETIF_RSP_ERROR);
-+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-+ netif_put(netif);
-+ } else {
-+ set_phys_to_machine(
-+ __pa(idx_to_kaddr(pending_idx)) >> PAGE_SHIFT,
-+ FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
-+ grant_tx_handle[pending_idx] = mop->handle;
-+ }
-+
-+ /* Skip first skb fragment if it is on same page as header fragment. */
-+ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
-+
-+ for (i = start; i < nr_frags; i++) {
-+ int j, newerr;
-+
-+ pending_idx = (unsigned long)shinfo->frags[i].page;
-+
-+ /* Check error status: if okay then remember grant handle. */
-+ newerr = (++mop)->status;
-+ if (likely(!newerr)) {
-+ set_phys_to_machine(
-+ __pa(idx_to_kaddr(pending_idx))>>PAGE_SHIFT,
-+ FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
-+ grant_tx_handle[pending_idx] = mop->handle;
-+ /* Had a previous error? Invalidate this fragment. */
-+ if (unlikely(err))
-+ netif_idx_release(pending_idx);
-+ continue;
-+ }
-+
-+ /* Error on this fragment: respond to client with an error. */
-+ txp = &pending_tx_info[pending_idx].req;
-+ make_tx_response(netif, txp, NETIF_RSP_ERROR);
-+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-+ netif_put(netif);
-+
-+ /* Not the first error? Preceding frags already invalidated. */
-+ if (err)
-+ continue;
-+
-+ /* First error: invalidate header and preceding fragments. */
-+ pending_idx = *((u16 *)skb->data);
-+ netif_idx_release(pending_idx);
-+ for (j = start; j < i; j++) {
-+ pending_idx = (unsigned long)shinfo->frags[j].page;
-+ netif_idx_release(pending_idx);
-+ }
-+
-+ /* Remember the error: invalidate all subsequent fragments. */
-+ err = newerr;
-+ }
-+
-+ *mopp = mop + 1;
-+ return err;
-+}
-+
-+static void netbk_fill_frags(struct sk_buff *skb)
-+{
-+ struct skb_shared_info *shinfo = skb_shinfo(skb);
-+ int nr_frags = shinfo->nr_frags;
-+ int i;
-+
-+ for (i = 0; i < nr_frags; i++) {
-+ skb_frag_t *frag = shinfo->frags + i;
-+ netif_tx_request_t *txp;
-+ unsigned long pending_idx;
-+
-+ pending_idx = (unsigned long)frag->page;
-+ txp = &pending_tx_info[pending_idx].req;
-+ frag->page = virt_to_page(idx_to_kaddr(pending_idx));
-+ frag->size = txp->size;
-+ frag->page_offset = txp->offset;
-+
-+ skb->len += txp->size;
-+ skb->data_len += txp->size;
-+ skb->truesize += txp->size;
-+ }
-+}
-+
-+int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
-+ int work_to_do)
-+{
-+ struct netif_extra_info extra;
-+ RING_IDX cons = netif->tx.req_cons;
-+
-+ do {
-+ if (unlikely(work_to_do-- <= 0)) {
-+ DPRINTK("Missing extra info\n");
-+ return -EBADR;
-+ }
-+
-+ memcpy(&extra, RING_GET_REQUEST(&netif->tx, cons),
-+ sizeof(extra));
-+ if (unlikely(!extra.type ||
-+ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
-+ netif->tx.req_cons = ++cons;
-+ DPRINTK("Invalid extra type: %d\n", extra.type);
-+ return -EINVAL;
-+ }
-+
-+ memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
-+ netif->tx.req_cons = ++cons;
-+ } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
-+
-+ return work_to_do;
-+}
-+
-+static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
-+{
-+ if (!gso->u.gso.size) {
-+ DPRINTK("GSO size must not be zero.\n");
-+ return -EINVAL;
-+ }
-+
-+ /* Currently only TCPv4 S.O. is supported. */
-+ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-+ DPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
-+ return -EINVAL;
-+ }
-+
-+ skb_shinfo(skb)->gso_size = gso->u.gso.size;
-+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
-+
-+ /* Header must be checked, and gso_segs computed. */
-+ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-+ skb_shinfo(skb)->gso_segs = 0;
-+
-+ return 0;
-+}
-+
-+/* Called after netfront has transmitted */
-+static void net_tx_action(unsigned long unused)
-+{
-+ struct list_head *ent;
-+ struct sk_buff *skb;
-+ netif_t *netif;
-+ netif_tx_request_t txreq;
-+ netif_tx_request_t txfrags[MAX_SKB_FRAGS];
-+ struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
-+ u16 pending_idx;
-+ RING_IDX i;
-+ gnttab_map_grant_ref_t *mop;
-+ unsigned int data_len;
-+ int ret, work_to_do;
-+
-+ if (dealloc_cons != dealloc_prod)
-+ net_tx_action_dealloc();
-+
-+ mop = tx_map_ops;
-+ while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
-+ !list_empty(&net_schedule_list)) {
-+ /* Get a netif from the list with work to do. */
-+ ent = net_schedule_list.next;
-+ netif = list_entry(ent, netif_t, list);
-+ netif_get(netif);
-+ remove_from_net_schedule_list(netif);
-+
-+ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
-+ if (!work_to_do) {
-+ netif_put(netif);
-+ continue;
-+ }
-+
-+ i = netif->tx.req_cons;
-+ rmb(); /* Ensure that we see the request before we copy it. */
-+ memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
-+
-+ /* Credit-based scheduling. */
-+ if (txreq.size > netif->remaining_credit) {
-+ unsigned long now = jiffies;
-+ unsigned long next_credit =
-+ netif->credit_timeout.expires +
-+ msecs_to_jiffies(netif->credit_usec / 1000);
-+
-+ /* Timer could already be pending in rare cases. */
-+ if (timer_pending(&netif->credit_timeout)) {
-+ netif_put(netif);
-+ continue;
-+ }
-+
-+ /* Passed the point where we can replenish credit? */
-+ if (time_after_eq(now, next_credit)) {
-+ netif->credit_timeout.expires = now;
-+ tx_add_credit(netif);
-+ }
-+
-+ /* Still too big to send right now? Set a callback. */
-+ if (txreq.size > netif->remaining_credit) {
-+ netif->credit_timeout.data =
-+ (unsigned long)netif;
-+ netif->credit_timeout.function =
-+ tx_credit_callback;
-+ __mod_timer(&netif->credit_timeout,
-+ next_credit);
-+ netif_put(netif);
-+ continue;
-+ }
-+ }
-+ netif->remaining_credit -= txreq.size;
-+
-+ work_to_do--;
-+ netif->tx.req_cons = ++i;
-+
-+ memset(extras, 0, sizeof(extras));
-+ if (txreq.flags & NETTXF_extra_info) {
-+ work_to_do = netbk_get_extras(netif, extras,
-+ work_to_do);
-+ i = netif->tx.req_cons;
-+ if (unlikely(work_to_do < 0)) {
-+ netbk_tx_err(netif, &txreq, i);
-+ continue;
-+ }
-+ }
-+
-+ ret = netbk_count_requests(netif, &txreq, txfrags, work_to_do);
-+ if (unlikely(ret < 0)) {
-+ netbk_tx_err(netif, &txreq, i - ret);
-+ continue;
-+ }
-+ i += ret;
-+
-+ if (unlikely(txreq.size < ETH_HLEN)) {
-+ DPRINTK("Bad packet size: %d\n", txreq.size);
-+ netbk_tx_err(netif, &txreq, i);
-+ continue;
-+ }
-+
-+ /* The payload must not cross a page boundary, as it must not be fragmented. */
-+ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-+ DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
-+ txreq.offset, txreq.size,
-+ (txreq.offset &~PAGE_MASK) + txreq.size);
-+ netbk_tx_err(netif, &txreq, i);
-+ continue;
-+ }
-+
-+ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
-+
-+ data_len = (txreq.size > PKT_PROT_LEN &&
-+ ret < MAX_SKB_FRAGS) ?
-+ PKT_PROT_LEN : txreq.size;
-+
-+ skb = alloc_skb(data_len + 16 + NET_IP_ALIGN,
-+ GFP_ATOMIC | __GFP_NOWARN);
-+ if (unlikely(skb == NULL)) {
-+ DPRINTK("Can't allocate a skb in start_xmit.\n");
-+ netbk_tx_err(netif, &txreq, i);
-+ break;
-+ }
-+
-+ /* Packets passed to netif_rx() must have some headroom. */
-+ skb_reserve(skb, 16 + NET_IP_ALIGN);
-+
-+ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
-+ struct netif_extra_info *gso;
-+ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
-+
-+ if (netbk_set_skb_gso(skb, gso)) {
-+ kfree_skb(skb);
-+ netbk_tx_err(netif, &txreq, i);
-+ continue;
-+ }
-+ }
-+
-+ gnttab_set_map_op(mop, idx_to_kaddr(pending_idx),
-+ GNTMAP_host_map | GNTMAP_readonly,
-+ txreq.gref, netif->domid);
-+ mop++;
-+
-+ memcpy(&pending_tx_info[pending_idx].req,
-+ &txreq, sizeof(txreq));
-+ pending_tx_info[pending_idx].netif = netif;
-+ *((u16 *)skb->data) = pending_idx;
-+
-+ __skb_put(skb, data_len);
-+
-+ skb_shinfo(skb)->nr_frags = ret;
-+ if (data_len < txreq.size) {
-+ skb_shinfo(skb)->nr_frags++;
-+ skb_shinfo(skb)->frags[0].page =
-+ (void *)(unsigned long)pending_idx;
-+ } else {
-+ /* Discriminate from any valid pending_idx value. */
-+ skb_shinfo(skb)->frags[0].page = (void *)~0UL;
-+ }
-+
-+ __skb_queue_tail(&tx_queue, skb);
-+
-+ pending_cons++;
-+
-+ mop = netbk_get_requests(netif, skb, txfrags, mop);
-+
-+ netif->tx.req_cons = i;
-+ netif_schedule_work(netif);
-+
-+ if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
-+ break;
-+ }
-+
-+ if (mop == tx_map_ops)
-+ return;
-+
-+ ret = HYPERVISOR_grant_table_op(
-+ GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
-+ BUG_ON(ret);
-+
-+ mop = tx_map_ops;
-+ while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
-+ netif_tx_request_t *txp;
-+
-+ pending_idx = *((u16 *)skb->data);
-+ netif = pending_tx_info[pending_idx].netif;
-+ txp = &pending_tx_info[pending_idx].req;
-+
-+ /* Check the remap error code. */
-+ if (unlikely(netbk_tx_check_mop(skb, &mop))) {
-+ DPRINTK("netback grant failed.\n");
-+ skb_shinfo(skb)->nr_frags = 0;
-+ kfree_skb(skb);
-+ continue;
-+ }
-+
-+ data_len = skb->len;
-+ memcpy(skb->data,
-+ (void *)(idx_to_kaddr(pending_idx)|txp->offset),
-+ data_len);
-+ if (data_len < txp->size) {
-+ /* Append the packet payload as a fragment. */
-+ txp->offset += data_len;
-+ txp->size -= data_len;
-+ } else {
-+ /* Schedule a response immediately. */
-+ netif_idx_release(pending_idx);
-+ }
-+
-+ /*
-+ * Old frontends do not assert data_validated but we
-+ * can infer it from csum_blank so test both flags.
-+ */
-+ if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
-+ skb->ip_summed = CHECKSUM_UNNECESSARY;
-+ skb->proto_data_valid = 1;
-+ } else {
-+ skb->ip_summed = CHECKSUM_NONE;
-+ skb->proto_data_valid = 0;
-+ }
-+ skb->proto_csum_blank = !!(txp->flags & NETTXF_csum_blank);
-+
-+ netbk_fill_frags(skb);
-+
-+ skb->dev = netif->dev;
-+ skb->protocol = eth_type_trans(skb, skb->dev);
-+
-+ netif->stats.rx_bytes += skb->len;
-+ netif->stats.rx_packets++;
-+
-+ netif_rx(skb);
-+ netif->dev->last_rx = jiffies;
-+ }
-+}
-+
-+static void netif_idx_release(u16 pending_idx)
-+{
-+ static DEFINE_SPINLOCK(_lock);
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&_lock, flags);
-+ dealloc_ring[MASK_PEND_IDX(dealloc_prod)] = pending_idx;
-+ /* Sync with net_tx_action_dealloc: insert idx /then/ incr producer. */
-+ smp_wmb();
-+ dealloc_prod++;
-+ spin_unlock_irqrestore(&_lock, flags);
-+
-+ tasklet_schedule(&net_tx_tasklet);
-+}
-+
-+static void netif_page_release(struct page *page)
-+{
-+ /* Ready for next use. */
-+ init_page_count(page);
-+
-+ netif_idx_release(netif_page_index(page));
-+}
-+
-+irqreturn_t netif_be_int(int irq, void *dev_id)
-+{
-+ netif_t *netif = dev_id;
-+
-+ add_to_net_schedule_list_tail(netif);
-+ maybe_schedule_tx_action();
-+
-+ if (netif_schedulable(netif) && !netbk_queue_full(netif))
-+ netif_wake_queue(netif->dev);
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static void make_tx_response(netif_t *netif,
-+ netif_tx_request_t *txp,
-+ s8 st)
-+{
-+ RING_IDX i = netif->tx.rsp_prod_pvt;
-+ netif_tx_response_t *resp;
-+ int notify;
-+
-+ resp = RING_GET_RESPONSE(&netif->tx, i);
-+ resp->id = txp->id;
-+ resp->status = st;
-+
-+ if (txp->flags & NETTXF_extra_info)
-+ RING_GET_RESPONSE(&netif->tx, ++i)->status = NETIF_RSP_NULL;
-+
-+ netif->tx.rsp_prod_pvt = ++i;
-+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
-+ if (notify)
-+ notify_remote_via_irq(netif->irq);
-+
-+#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
-+ if (i == netif->tx.req_cons) {
-+ int more_to_do;
-+ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
-+ if (more_to_do)
-+ add_to_net_schedule_list_tail(netif);
-+ }
-+#endif
-+}
-+
-+static netif_rx_response_t *make_rx_response(netif_t *netif,
-+ u16 id,
-+ s8 st,
-+ u16 offset,
-+ u16 size,
-+ u16 flags)
-+{
-+ RING_IDX i = netif->rx.rsp_prod_pvt;
-+ netif_rx_response_t *resp;
-+
-+ resp = RING_GET_RESPONSE(&netif->rx, i);
-+ resp->offset = offset;
-+ resp->flags = flags;
-+ resp->id = id;
-+ resp->status = (s16)size;
-+ if (st < 0)
-+ resp->status = (s16)st;
-+
-+ netif->rx.rsp_prod_pvt = ++i;
-+
-+ return resp;
-+}
-+
-+#ifdef NETBE_DEBUG_INTERRUPT
-+static irqreturn_t netif_be_dbg(int irq, void *dev_id)
-+{
-+ struct list_head *ent;
-+ netif_t *netif;
-+ int i = 0;
-+
-+ printk(KERN_ALERT "netif_schedule_list:\n");
-+ spin_lock_irq(&net_schedule_list_lock);
-+
-+ list_for_each (ent, &net_schedule_list) {
-+ netif = list_entry(ent, netif_t, list);
-+ printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
-+ "rx_resp_prod=%08x\n",
-+ i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
-+ printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
-+ netif->tx.req_cons, netif->tx.rsp_prod_pvt);
-+ printk(KERN_ALERT " shared(rx_req_prod=%08x "
-+ "rx_resp_prod=%08x\n",
-+ netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
-+ printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
-+ netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
-+ printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
-+ netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
-+ i++;
-+ }
-+
-+ spin_unlock_irq(&net_schedule_list_lock);
-+ printk(KERN_ALERT " ** End of netif_schedule_list **\n");
-+
-+ return IRQ_HANDLED;
-+}
-+#endif
-+
-+static int __init netback_init(void)
-+{
-+ int i;
-+ struct page *page;
-+
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ /* We can increase reservation by this much in net_rx_action(). */
-+ balloon_update_driver_allowance(NET_RX_RING_SIZE);
-+
-+ skb_queue_head_init(&rx_queue);
-+ skb_queue_head_init(&tx_queue);
-+
-+ init_timer(&net_timer);
-+ net_timer.data = 0;
-+ net_timer.function = net_alarm;
-+
-+ mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
-+ if (mmap_pages == NULL) {
-+ printk("%s: out of memory\n", __FUNCTION__);
-+ return -ENOMEM;
-+ }
-+
-+ for (i = 0; i < MAX_PENDING_REQS; i++) {
-+ page = mmap_pages[i];
-+ SetPageForeign(page, netif_page_release);
-+ netif_page_index(page) = i;
-+ }
-+
-+ pending_cons = 0;
-+ pending_prod = MAX_PENDING_REQS;
-+ for (i = 0; i < MAX_PENDING_REQS; i++)
-+ pending_ring[i] = i;
-+
-+ spin_lock_init(&net_schedule_list_lock);
-+ INIT_LIST_HEAD(&net_schedule_list);
-+
-+ netif_xenbus_init();
-+
-+#ifdef NETBE_DEBUG_INTERRUPT
-+ (void)bind_virq_to_irqhandler(VIRQ_DEBUG,
-+ 0,
-+ netif_be_dbg,
-+ IRQF_SHARED,
-+ "net-be-dbg",
-+ &netif_be_dbg);
-+#endif
-+
-+ return 0;
-+}
-+
-+module_init(netback_init);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
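The credit-based scheduler in net_tx_action() above is essentially a token bucket: tx_add_credit() tops the bucket up once per credit_usec window (either inline or from the credit_timeout callback), each transmit request drains txreq.size bytes of credit, and oversized requests are deferred until the timer replenishes the bucket. A simplified sketch of the same arithmetic follows; the names are hypothetical and the jiffies/timer plumbing is left out.

struct credit_bucket {
	unsigned long bytes_per_period;	/* netif->credit_bytes */
	unsigned long remaining;	/* netif->remaining_credit */
};

/* Mirrors tx_add_credit(): burst_hint is the size of the next request. */
static void credit_replenish(struct credit_bucket *c, unsigned long burst_hint)
{
	unsigned long max_burst = burst_hint;
	unsigned long max_credit;

	/* Allow a burst big enough for one large packet, capped at 128kB. */
	if (max_burst > 131072UL)
		max_burst = 131072UL;
	if (max_burst < c->bytes_per_period)
		max_burst = c->bytes_per_period;

	/* Clamp instead of wrapping when adding a fresh chunk of credit. */
	max_credit = c->remaining + c->bytes_per_period;
	if (max_credit < c->remaining)
		max_credit = ~0UL;

	c->remaining = (max_credit < max_burst) ? max_credit : max_burst;
}

/* Mirrors the check in net_tx_action(): non-zero means "defer this request". */
static int credit_consume(struct credit_bucket *c, unsigned long size)
{
	if (size > c->remaining)
		return -1;
	c->remaining -= size;
	return 0;
}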
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/netback/xenbus.c ubuntu-gutsy-xen/drivers/xen/netback/xenbus.c
---- ubuntu-gutsy/drivers/xen/netback/xenbus.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/netback/xenbus.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,448 @@
-+/* Xenbus code for netif backend
-+ Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
-+ Copyright (C) 2005 XenSource Ltd
-+
-+ This program is free software; you can redistribute it and/or modify
-+ it under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 2 of the License, or
-+ (at your option) any later version.
-+
-+ This program is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ GNU General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with this program; if not, write to the Free Software
-+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+*/
-+
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <xen/xenbus.h>
-+#include "common.h"
-+
-+#if 0
-+#undef DPRINTK
-+#define DPRINTK(fmt, args...) \
-+ printk("netback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
-+#endif
-+
-+struct backend_info {
-+ struct xenbus_device *dev;
-+ netif_t *netif;
-+ enum xenbus_state frontend_state;
-+};
-+
-+static int connect_rings(struct backend_info *);
-+static void connect(struct backend_info *);
-+static void backend_create_netif(struct backend_info *be);
-+
-+static int netback_remove(struct xenbus_device *dev)
-+{
-+ struct backend_info *be = dev->dev.driver_data;
-+
-+ if (be->netif) {
-+ netif_disconnect(be->netif);
-+ be->netif = NULL;
-+ }
-+ kfree(be);
-+ dev->dev.driver_data = NULL;
-+ return 0;
-+}
-+
-+
-+/**
-+ * Entry point to this code when a new device is created. Allocate the basic
-+ * structures and switch to InitWait.
-+ */
-+static int netback_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ const char *message;
-+ struct xenbus_transaction xbt;
-+ int err;
-+ struct backend_info *be = kzalloc(sizeof(struct backend_info),
-+ GFP_KERNEL);
-+ if (!be) {
-+ xenbus_dev_fatal(dev, -ENOMEM,
-+ "allocating backend structure");
-+ return -ENOMEM;
-+ }
-+
-+ be->dev = dev;
-+ dev->dev.driver_data = be;
-+
-+ do {
-+ err = xenbus_transaction_start(&xbt);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "starting transaction");
-+ goto fail;
-+ }
-+
-+ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
-+ if (err) {
-+ message = "writing feature-sg";
-+ goto abort_transaction;
-+ }
-+
-+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
-+ "%d", 1);
-+ if (err) {
-+ message = "writing feature-gso-tcpv4";
-+ goto abort_transaction;
-+ }
-+
-+ /* We support rx-copy path. */
-+ err = xenbus_printf(xbt, dev->nodename,
-+ "feature-rx-copy", "%d", 1);
-+ if (err) {
-+ message = "writing feature-rx-copy";
-+ goto abort_transaction;
-+ }
-+
-+ /*
-+ * We don't support rx-flip path (except old guests who don't
-+ * grok this feature flag).
-+ */
-+ err = xenbus_printf(xbt, dev->nodename,
-+ "feature-rx-flip", "%d", 0);
-+ if (err) {
-+ message = "writing feature-rx-flip";
-+ goto abort_transaction;
-+ }
-+
-+ err = xenbus_transaction_end(xbt, 0);
-+ } while (err == -EAGAIN);
-+
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "completing transaction");
-+ goto fail;
-+ }
-+
-+ err = xenbus_switch_state(dev, XenbusStateInitWait);
-+ if (err)
-+ goto fail;
-+
-+ /* This kicks hotplug scripts, so do it immediately. */
-+ backend_create_netif(be);
-+
-+ return 0;
-+
-+abort_transaction:
-+ xenbus_transaction_end(xbt, 1);
-+ xenbus_dev_fatal(dev, err, "%s", message);
-+fail:
-+ DPRINTK("failed");
-+ netback_remove(dev);
-+ return err;
-+}
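-+
-+/*
-+ * After a successful probe the backend's xenstore directory therefore
-+ * advertises feature-sg=1, feature-gso-tcpv4=1, feature-rx-copy=1 and
-+ * feature-rx-flip=0; netfront reads feature-rx-copy/feature-rx-flip in
-+ * network_connect() to choose its receive path.
-+ */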
-+
-+
-+/**
-+ * Handle the creation of the hotplug script environment. We add the script
-+ * and vif variables to the environment, for the benefit of the vif-* hotplug
-+ * scripts.
-+ */
-+static int netback_uevent(struct xenbus_device *xdev, char **envp,
-+ int num_envp, char *buffer, int buffer_size)
-+{
-+ struct backend_info *be = xdev->dev.driver_data;
-+ netif_t *netif = be->netif;
-+ int i = 0, length = 0;
-+ char *val;
-+
-+ DPRINTK("netback_uevent");
-+
-+ val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
-+ if (IS_ERR(val)) {
-+ int err = PTR_ERR(val);
-+ xenbus_dev_fatal(xdev, err, "reading script");
-+ return err;
-+ }
-+ else {
-+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
-+ &length, "script=%s", val);
-+ kfree(val);
-+ }
-+
-+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-+ "vif=%s", netif->dev->name);
-+
-+ envp[i] = NULL;
-+
-+ return 0;
-+}
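-+
-+/*
-+ * The resulting hotplug environment typically ends up with entries such
-+ * as (for example) script=/etc/xen/scripts/vif-bridge and vif=vif1.0,
-+ * which the vif-* scripts use to attach the backend interface.
-+ */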
-+
-+
-+static void backend_create_netif(struct backend_info *be)
-+{
-+ int err;
-+ long handle;
-+ struct xenbus_device *dev = be->dev;
-+
-+ if (be->netif != NULL)
-+ return;
-+
-+ err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
-+ if (err != 1) {
-+ xenbus_dev_fatal(dev, err, "reading handle");
-+ return;
-+ }
-+
-+ be->netif = netif_alloc(dev->otherend_id, handle);
-+ if (IS_ERR(be->netif)) {
-+ err = PTR_ERR(be->netif);
-+ be->netif = NULL;
-+ xenbus_dev_fatal(dev, err, "creating interface");
-+ return;
-+ }
-+
-+ kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
-+}
-+
-+
-+/**
-+ * Callback received when the frontend's state changes.
-+ */
-+static void frontend_changed(struct xenbus_device *dev,
-+ enum xenbus_state frontend_state)
-+{
-+ struct backend_info *be = dev->dev.driver_data;
-+
-+ DPRINTK("%s", xenbus_strstate(frontend_state));
-+
-+ be->frontend_state = frontend_state;
-+
-+ switch (frontend_state) {
-+ case XenbusStateInitialising:
-+ if (dev->state == XenbusStateClosed) {
-+ printk(KERN_INFO "%s: %s: prepare for reconnect\n",
-+ __FUNCTION__, dev->nodename);
-+ if (be->netif) {
-+ netif_disconnect(be->netif);
-+ be->netif = NULL;
-+ }
-+ xenbus_switch_state(dev, XenbusStateInitWait);
-+ }
-+ break;
-+
-+ case XenbusStateInitialised:
-+ break;
-+
-+ case XenbusStateConnected:
-+ backend_create_netif(be);
-+ if (be->netif)
-+ connect(be);
-+ break;
-+
-+ case XenbusStateClosing:
-+ xenbus_switch_state(dev, XenbusStateClosing);
-+ break;
-+
-+ case XenbusStateClosed:
-+ xenbus_switch_state(dev, XenbusStateClosed);
-+ if (xenbus_dev_is_online(dev))
-+ break;
-+ /* fall through if not online */
-+ case XenbusStateUnknown:
-+ if (be->netif != NULL)
-+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
-+ device_unregister(&dev->dev);
-+ break;
-+
-+ default:
-+ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
-+ frontend_state);
-+ break;
-+ }
-+}
-+
-+
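-+/*
-+ * The "rate" node parsed below holds "<bytes>,<usec>"; for example
-+ * "10000000,50000" grants the interface a credit of 10000000 bytes per
-+ * 50000 microsecond interval, while a missing or malformed value leaves
-+ * the bandwidth unlimited.
-+ */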
-+static void xen_net_read_rate(struct xenbus_device *dev,
-+ unsigned long *bytes, unsigned long *usec)
-+{
-+ char *s, *e;
-+ unsigned long b, u;
-+ char *ratestr;
-+
-+ /* Default to unlimited bandwidth. */
-+ *bytes = ~0UL;
-+ *usec = 0;
-+
-+ ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
-+ if (IS_ERR(ratestr))
-+ return;
-+
-+ s = ratestr;
-+ b = simple_strtoul(s, &e, 10);
-+ if ((s == e) || (*e != ','))
-+ goto fail;
-+
-+ s = e + 1;
-+ u = simple_strtoul(s, &e, 10);
-+ if ((s == e) || (*e != '\0'))
-+ goto fail;
-+
-+ *bytes = b;
-+ *usec = u;
-+
-+ kfree(ratestr);
-+ return;
-+
-+ fail:
-+ WPRINTK("Failed to parse network rate limit. Traffic unlimited.\n");
-+ kfree(ratestr);
-+}
-+
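-+/*
-+ * The "mac" node must be a colon-separated Ethernet address, e.g.
-+ * "00:16:3e:00:00:01"; anything else makes the parse below fail with
-+ * -ENOENT.
-+ */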
-+static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
-+{
-+ char *s, *e, *macstr;
-+ int i;
-+
-+ macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
-+ if (IS_ERR(macstr))
-+ return PTR_ERR(macstr);
-+
-+ for (i = 0; i < ETH_ALEN; i++) {
-+ mac[i] = simple_strtoul(s, &e, 16);
-+ if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
-+ kfree(macstr);
-+ return -ENOENT;
-+ }
-+ s = e+1;
-+ }
-+
-+ kfree(macstr);
-+ return 0;
-+}
-+
-+static void connect(struct backend_info *be)
-+{
-+ int err;
-+ struct xenbus_device *dev = be->dev;
-+
-+ err = connect_rings(be);
-+ if (err)
-+ return;
-+
-+ err = xen_net_read_mac(dev, be->netif->fe_dev_addr);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-+ return;
-+ }
-+
-+ xen_net_read_rate(dev, &be->netif->credit_bytes,
-+ &be->netif->credit_usec);
-+ be->netif->remaining_credit = be->netif->credit_bytes;
-+
-+ xenbus_switch_state(dev, XenbusStateConnected);
-+
-+ netif_wake_queue(be->netif->dev);
-+}
-+
-+
-+static int connect_rings(struct backend_info *be)
-+{
-+ struct xenbus_device *dev = be->dev;
-+ unsigned long tx_ring_ref, rx_ring_ref;
-+ unsigned int evtchn, rx_copy;
-+ int err;
-+ int val;
-+
-+ DPRINTK("");
-+
-+ err = xenbus_gather(XBT_NIL, dev->otherend,
-+ "tx-ring-ref", "%lu", &tx_ring_ref,
-+ "rx-ring-ref", "%lu", &rx_ring_ref,
-+ "event-channel", "%u", &evtchn, NULL);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err,
-+ "reading %s/ring-ref and event-channel",
-+ dev->otherend);
-+ return err;
-+ }
-+
-+ err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
-+ &rx_copy);
-+ if (err == -ENOENT) {
-+ err = 0;
-+ rx_copy = 0;
-+ }
-+ if (err < 0) {
-+ xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
-+ dev->otherend);
-+ return err;
-+ }
-+ be->netif->copying_receiver = !!rx_copy;
-+
-+ if (be->netif->dev->tx_queue_len != 0) {
-+ if (xenbus_scanf(XBT_NIL, dev->otherend,
-+ "feature-rx-notify", "%d", &val) < 0)
-+ val = 0;
-+ if (val)
-+ be->netif->can_queue = 1;
-+ else
-+ /* Must be non-zero for pfifo_fast to work. */
-+ be->netif->dev->tx_queue_len = 1;
-+ }
-+
-+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", "%d", &val) < 0)
-+ val = 0;
-+ if (val) {
-+ be->netif->features |= NETIF_F_SG;
-+ be->netif->dev->features |= NETIF_F_SG;
-+ }
-+
-+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", "%d",
-+ &val) < 0)
-+ val = 0;
-+ if (val) {
-+ be->netif->features |= NETIF_F_TSO;
-+ be->netif->dev->features |= NETIF_F_TSO;
-+ }
-+
-+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
-+ "%d", &val) < 0)
-+ val = 0;
-+ if (val) {
-+ be->netif->features &= ~NETIF_F_IP_CSUM;
-+ be->netif->dev->features &= ~NETIF_F_IP_CSUM;
-+ }
-+
-+ /* Map the shared frame, irq etc. */
-+ err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err,
-+ "mapping shared-frames %lu/%lu port %u",
-+ tx_ring_ref, rx_ring_ref, evtchn);
-+ return err;
-+ }
-+ return 0;
-+}
-+
-+
-+/* ** Driver Registration ** */
-+
-+
-+static struct xenbus_device_id netback_ids[] = {
-+ { "vif" },
-+ { "" }
-+};
-+
-+
-+static struct xenbus_driver netback = {
-+ .name = "vif",
-+ .owner = THIS_MODULE,
-+ .ids = netback_ids,
-+ .probe = netback_probe,
-+ .remove = netback_remove,
-+ .uevent = netback_uevent,
-+ .otherend_changed = frontend_changed,
-+};
-+
-+
-+void netif_xenbus_init(void)
-+{
-+ xenbus_register_backend(&netback);
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/netfront/Makefile ubuntu-gutsy-xen/drivers/xen/netfront/Makefile
---- ubuntu-gutsy/drivers/xen/netfront/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/netfront/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,4 @@
-+
-+obj-$(CONFIG_XEN_NETDEV_FRONTEND) := xennet.o
-+
-+xennet-objs := netfront.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/netfront/netfront.c ubuntu-gutsy-xen/drivers/xen/netfront/netfront.c
---- ubuntu-gutsy/drivers/xen/netfront/netfront.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/netfront/netfront.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,2127 @@
-+/******************************************************************************
-+ * Virtual network driver for conversing with remote driver backends.
-+ *
-+ * Copyright (c) 2002-2005, K A Fraser
-+ * Copyright (c) 2005, XenSource Ltd
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/netdevice.h>
-+#include <linux/inetdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/skbuff.h>
-+#include <linux/init.h>
-+#include <linux/bitops.h>
-+#include <linux/ethtool.h>
-+#include <linux/in.h>
-+#include <linux/if_ether.h>
-+#include <linux/io.h>
-+#include <linux/moduleparam.h>
-+#include <net/sock.h>
-+#include <net/pkt_sched.h>
-+#include <net/arp.h>
-+#include <net/route.h>
-+#include <asm/uaccess.h>
-+#include <xen/evtchn.h>
-+#include <xen/xenbus.h>
-+#include <xen/interface/io/netif.h>
-+#include <xen/interface/memory.h>
-+#include <xen/balloon.h>
-+#include <asm/page.h>
-+#include <asm/maddr.h>
-+#include <asm/uaccess.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/gnttab.h>
-+
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
-+struct netfront_cb {
-+ struct page *page;
-+ unsigned offset;
-+};
-+
-+#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
-+
-+/*
-+ * Mutually-exclusive module options to select receive data path:
-+ * rx_copy : Packets are copied by network backend into local memory
-+ * rx_flip : Page containing packet data is transferred to our ownership
-+ * For fully-virtualised guests there is no option - copying must be used.
-+ * For paravirtualised guests, flipping is the default.
-+ */
-+#ifdef CONFIG_XEN
-+static int MODPARM_rx_copy = 0;
-+module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
-+MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
-+static int MODPARM_rx_flip = 0;
-+module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
-+MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
-+#else
-+static const int MODPARM_rx_copy = 1;
-+static const int MODPARM_rx_flip = 0;
-+#endif
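-+
-+/*
-+ * For example, loading the paravirtualised frontend with rx_copy=1
-+ * selects the copying receive path whenever the backend advertises
-+ * feature-rx-copy; with neither option given, page flipping remains the
-+ * default (see network_connect()).
-+ */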
-+
-+#define RX_COPY_THRESHOLD 256
-+
-+/* If we don't have GSO, fake things up so that we never try to use it. */
-+#if defined(NETIF_F_GSO)
-+#define HAVE_GSO 1
-+#define HAVE_TSO 1 /* TSO is a subset of GSO */
-+static inline void dev_disable_gso_features(struct net_device *dev)
-+{
-+ /* Turn off all GSO bits except ROBUST. */
-+ dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
-+ dev->features |= NETIF_F_GSO_ROBUST;
-+}
-+#elif defined(NETIF_F_TSO)
-+#define HAVE_TSO 1
-+
-+/* Some older kernels cannot cope with incorrect checksums,
-+ * particularly in netfilter. I'm not sure there is 100% correlation
-+ * with the presence of NETIF_F_TSO but it appears to be a good first
-+ * approximation.
-+ */
-+#define HAVE_NO_CSUM_OFFLOAD 1
-+
-+#define gso_size tso_size
-+#define gso_segs tso_segs
-+static inline void dev_disable_gso_features(struct net_device *dev)
-+{
-+ /* Turn off all TSO bits. */
-+ dev->features &= ~NETIF_F_TSO;
-+}
-+static inline int skb_is_gso(const struct sk_buff *skb)
-+{
-+ return skb_shinfo(skb)->tso_size;
-+}
-+static inline int skb_gso_ok(struct sk_buff *skb, int features)
-+{
-+ return (features & NETIF_F_TSO);
-+}
-+
-+static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
-+{
-+ return skb_is_gso(skb) &&
-+ (!skb_gso_ok(skb, dev->features) ||
-+ unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
-+}
-+#else
-+#define netif_needs_gso(dev, skb) 0
-+#define dev_disable_gso_features(dev) ((void)0)
-+#endif
-+
-+#define GRANT_INVALID_REF 0
-+
-+#define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
-+#define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE)
-+
-+struct netfront_info {
-+ struct list_head list;
-+ struct net_device *netdev;
-+
-+ struct net_device_stats stats;
-+
-+ struct netif_tx_front_ring tx;
-+ struct netif_rx_front_ring rx;
-+
-+ spinlock_t tx_lock;
-+ spinlock_t rx_lock;
-+
-+ unsigned int irq;
-+ unsigned int copying_receiver;
-+ unsigned int carrier;
-+
-+ /* Receive-ring batched refills. */
-+#define RX_MIN_TARGET 8
-+#define RX_DFL_MIN_TARGET 64
-+#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
-+ unsigned rx_min_target, rx_max_target, rx_target;
-+ struct sk_buff_head rx_batch;
-+
-+ struct timer_list rx_refill_timer;
-+
-+ /*
-+ * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs
-+ * is an index into a chain of free entries.
-+ */
-+ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
-+ struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
-+
-+#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
-+ grant_ref_t gref_tx_head;
-+ grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
-+ grant_ref_t gref_rx_head;
-+ grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
-+
-+ struct xenbus_device *xbdev;
-+ int tx_ring_ref;
-+ int rx_ring_ref;
-+ u8 mac[ETH_ALEN];
-+
-+ unsigned long rx_pfn_array[NET_RX_RING_SIZE];
-+ struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
-+ struct mmu_update rx_mmu[NET_RX_RING_SIZE];
-+};
-+
-+struct netfront_rx_info {
-+ struct netif_rx_response rx;
-+ struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
-+};
-+
-+/*
-+ * Implement our own carrier flag: the network stack's version causes delays
-+ * when the carrier is re-enabled (in particular, dev_activate() may not
-+ * immediately be called, which can cause packet loss).
-+ */
-+#define netfront_carrier_on(netif) ((netif)->carrier = 1)
-+#define netfront_carrier_off(netif) ((netif)->carrier = 0)
-+#define netfront_carrier_ok(netif) ((netif)->carrier)
-+
-+/*
-+ * Access macros for acquiring and freeing slots in tx_skbs[].
-+ */
-+
-+static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
-+{
-+ list[id] = list[0];
-+ list[0] = (void *)(unsigned long)id;
-+}
-+
-+static inline unsigned short get_id_from_freelist(struct sk_buff **list)
-+{
-+ unsigned int id = (unsigned int)(unsigned long)list[0];
-+ list[0] = list[id];
-+ return id;
-+}
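-+
-+/*
-+ * Freelist encoding: tx_skbs[0] holds the index of the first free slot,
-+ * cast to a pointer, and each free slot chains to the next free index in
-+ * the same way. Indices are always below PAGE_OFFSET while genuine skb
-+ * pointers are not, which is how network_connect() tells them apart.
-+ */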
-+
-+static inline int xennet_rxidx(RING_IDX idx)
-+{
-+ return idx & (NET_RX_RING_SIZE - 1);
-+}
-+
-+static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
-+ RING_IDX ri)
-+{
-+ int i = xennet_rxidx(ri);
-+ struct sk_buff *skb = np->rx_skbs[i];
-+ np->rx_skbs[i] = NULL;
-+ return skb;
-+}
-+
-+static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
-+ RING_IDX ri)
-+{
-+ int i = xennet_rxidx(ri);
-+ grant_ref_t ref = np->grant_rx_ref[i];
-+ np->grant_rx_ref[i] = GRANT_INVALID_REF;
-+ return ref;
-+}
-+
-+#define DPRINTK(fmt, args...) \
-+ pr_debug("netfront (%s:%d) " fmt, \
-+ __FUNCTION__, __LINE__, ##args)
-+#define IPRINTK(fmt, args...) \
-+ printk(KERN_INFO "netfront: " fmt, ##args)
-+#define WPRINTK(fmt, args...) \
-+ printk(KERN_WARNING "netfront: " fmt, ##args)
-+
-+static int setup_device(struct xenbus_device *, struct netfront_info *);
-+static struct net_device *create_netdev(struct xenbus_device *);
-+
-+static void end_access(int, void *);
-+static void netif_disconnect_backend(struct netfront_info *);
-+
-+static int network_connect(struct net_device *);
-+static void network_tx_buf_gc(struct net_device *);
-+static void network_alloc_rx_buffers(struct net_device *);
-+static int send_fake_arp(struct net_device *);
-+
-+static irqreturn_t netif_int(int irq, void *dev_id);
-+
-+#ifdef CONFIG_SYSFS
-+static int xennet_sysfs_addif(struct net_device *netdev);
-+static void xennet_sysfs_delif(struct net_device *netdev);
-+#else /* !CONFIG_SYSFS */
-+#define xennet_sysfs_addif(dev) (0)
-+#define xennet_sysfs_delif(dev) do { } while(0)
-+#endif
-+
-+static inline int xennet_can_sg(struct net_device *dev)
-+{
-+ return dev->features & NETIF_F_SG;
-+}
-+
-+/**
-+ * Entry point to this code when a new device is created. Allocate the basic
-+ * structures and the ring buffers for communication with the backend, and
-+ * inform the backend of the appropriate details for those.
-+ */
-+static int __devinit netfront_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err;
-+ struct net_device *netdev;
-+ struct netfront_info *info;
-+
-+ netdev = create_netdev(dev);
-+ if (IS_ERR(netdev)) {
-+ err = PTR_ERR(netdev);
-+ xenbus_dev_fatal(dev, err, "creating netdev");
-+ return err;
-+ }
-+
-+ info = netdev_priv(netdev);
-+ dev->dev.driver_data = info;
-+
-+ err = register_netdev(info->netdev);
-+ if (err) {
-+ printk(KERN_WARNING "%s: register_netdev err=%d\n",
-+ __FUNCTION__, err);
-+ goto fail;
-+ }
-+
-+ err = xennet_sysfs_addif(info->netdev);
-+ if (err) {
-+ unregister_netdev(info->netdev);
-+ printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
-+ __FUNCTION__, err);
-+ goto fail;
-+ }
-+
-+ return 0;
-+
-+ fail:
-+ free_netdev(netdev);
-+ dev->dev.driver_data = NULL;
-+ return err;
-+}
-+
-+static int __devexit netfront_remove(struct xenbus_device *dev)
-+{
-+ struct netfront_info *info = dev->dev.driver_data;
-+
-+ DPRINTK("%s\n", dev->nodename);
-+
-+ netif_disconnect_backend(info);
-+
-+ del_timer_sync(&info->rx_refill_timer);
-+
-+ xennet_sysfs_delif(info->netdev);
-+
-+ unregister_netdev(info->netdev);
-+
-+ free_netdev(info->netdev);
-+
-+ return 0;
-+}
-+
-+/**
-+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
-+ * driver restart. We tear down our netif structure and recreate it, but
-+ * leave the device-layer structures intact so that this is transparent to the
-+ * rest of the kernel.
-+ */
-+static int netfront_resume(struct xenbus_device *dev)
-+{
-+ struct netfront_info *info = dev->dev.driver_data;
-+
-+ DPRINTK("%s\n", dev->nodename);
-+
-+ netif_disconnect_backend(info);
-+ return 0;
-+}
-+
-+static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
-+{
-+ char *s, *e, *macstr;
-+ int i;
-+
-+ macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
-+ if (IS_ERR(macstr))
-+ return PTR_ERR(macstr);
-+
-+ for (i = 0; i < ETH_ALEN; i++) {
-+ mac[i] = simple_strtoul(s, &e, 16);
-+ if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
-+ kfree(macstr);
-+ return -ENOENT;
-+ }
-+ s = e+1;
-+ }
-+
-+ kfree(macstr);
-+ return 0;
-+}
-+
-+/* Common code used when first setting up, and when resuming. */
-+static int talk_to_backend(struct xenbus_device *dev,
-+ struct netfront_info *info)
-+{
-+ const char *message;
-+ struct xenbus_transaction xbt;
-+ int err;
-+
-+ err = xen_net_read_mac(dev, info->mac);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-+ goto out;
-+ }
-+
-+ /* Create shared ring, alloc event channel. */
-+ err = setup_device(dev, info);
-+ if (err)
-+ goto out;
-+
-+again:
-+ err = xenbus_transaction_start(&xbt);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "starting transaction");
-+ goto destroy_ring;
-+ }
-+
-+ err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
-+ info->tx_ring_ref);
-+ if (err) {
-+ message = "writing tx ring-ref";
-+ goto abort_transaction;
-+ }
-+ err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
-+ info->rx_ring_ref);
-+ if (err) {
-+ message = "writing rx ring-ref";
-+ goto abort_transaction;
-+ }
-+ err = xenbus_printf(xbt, dev->nodename,
-+ "event-channel", "%u",
-+ irq_to_evtchn_port(info->irq));
-+ if (err) {
-+ message = "writing event-channel";
-+ goto abort_transaction;
-+ }
-+
-+ err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
-+ info->copying_receiver);
-+ if (err) {
-+ message = "writing request-rx-copy";
-+ goto abort_transaction;
-+ }
-+
-+ err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
-+ if (err) {
-+ message = "writing feature-rx-notify";
-+ goto abort_transaction;
-+ }
-+
-+#ifdef HAVE_NO_CSUM_OFFLOAD
-+ err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload", "%d", 1);
-+ if (err) {
-+ message = "writing feature-no-csum-offload";
-+ goto abort_transaction;
-+ }
-+#endif
-+
-+ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
-+ if (err) {
-+ message = "writing feature-sg";
-+ goto abort_transaction;
-+ }
-+
-+#ifdef HAVE_TSO
-+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
-+ if (err) {
-+ message = "writing feature-gso-tcpv4";
-+ goto abort_transaction;
-+ }
-+#endif
-+
-+ err = xenbus_transaction_end(xbt, 0);
-+ if (err) {
-+ if (err == -EAGAIN)
-+ goto again;
-+ xenbus_dev_fatal(dev, err, "completing transaction");
-+ goto destroy_ring;
-+ }
-+
-+ return 0;
-+
-+ abort_transaction:
-+ xenbus_transaction_end(xbt, 1);
-+ xenbus_dev_fatal(dev, err, "%s", message);
-+ destroy_ring:
-+ netif_disconnect_backend(info);
-+ out:
-+ return err;
-+}
-+
-+static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
-+{
-+ struct netif_tx_sring *txs;
-+ struct netif_rx_sring *rxs;
-+ int err;
-+ struct net_device *netdev = info->netdev;
-+
-+ info->tx_ring_ref = GRANT_INVALID_REF;
-+ info->rx_ring_ref = GRANT_INVALID_REF;
-+ info->rx.sring = NULL;
-+ info->tx.sring = NULL;
-+ info->irq = 0;
-+
-+ txs = (struct netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
-+ if (!txs) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(dev, err, "allocating tx ring page");
-+ goto fail;
-+ }
-+ SHARED_RING_INIT(txs);
-+ FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
-+
-+ err = xenbus_grant_ring(dev, virt_to_mfn(txs));
-+ if (err < 0) {
-+ free_page((unsigned long)txs);
-+ goto fail;
-+ }
-+ info->tx_ring_ref = err;
-+
-+ rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
-+ if (!rxs) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(dev, err, "allocating rx ring page");
-+ goto fail;
-+ }
-+ SHARED_RING_INIT(rxs);
-+ FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
-+
-+ err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
-+ if (err < 0) {
-+ free_page((unsigned long)rxs);
-+ goto fail;
-+ }
-+ info->rx_ring_ref = err;
-+
-+ memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
-+
-+ err = bind_listening_port_to_irqhandler(
-+ dev->otherend_id, netif_int, IRQF_SAMPLE_RANDOM, netdev->name,
-+ netdev);
-+ if (err < 0)
-+ goto fail;
-+ info->irq = err;
-+
-+ return 0;
-+
-+ fail:
-+ return err;
-+}
-+
-+/**
-+ * Callback received when the backend's state changes.
-+ */
-+static void backend_changed(struct xenbus_device *dev,
-+ enum xenbus_state backend_state)
-+{
-+ struct netfront_info *np = dev->dev.driver_data;
-+ struct net_device *netdev = np->netdev;
-+
-+ DPRINTK("%s\n", xenbus_strstate(backend_state));
-+
-+ switch (backend_state) {
-+ case XenbusStateInitialising:
-+ case XenbusStateInitialised:
-+ case XenbusStateConnected:
-+ case XenbusStateUnknown:
-+ case XenbusStateClosed:
-+ break;
-+
-+ case XenbusStateInitWait:
-+ if (dev->state != XenbusStateInitialising)
-+ break;
-+ if (network_connect(netdev) != 0)
-+ break;
-+ xenbus_switch_state(dev, XenbusStateConnected);
-+ (void)send_fake_arp(netdev);
-+ break;
-+
-+ case XenbusStateClosing:
-+ xenbus_frontend_closed(dev);
-+ break;
-+ }
-+}
-+
-+/** Send a packet on a net device to encourage switches to learn the
-+ * MAC. We send a fake broadcast ARP reply.
-+ *
-+ * @param dev device
-+ * @return 0 on success, error code otherwise
-+ */
-+static int send_fake_arp(struct net_device *dev)
-+{
-+ struct sk_buff *skb;
-+ u32 src_ip, dst_ip;
-+
-+ dst_ip = INADDR_BROADCAST;
-+ src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
-+
-+ /* No IP? Then nothing to do. */
-+ if (src_ip == 0)
-+ return 0;
-+
-+ skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
-+ dst_ip, dev, src_ip,
-+ /*dst_hw*/ NULL, /*src_hw*/ NULL,
-+ /*target_hw*/ dev->dev_addr);
-+ if (skb == NULL)
-+ return -ENOMEM;
-+
-+ return dev_queue_xmit(skb);
-+}
-+
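-+/*
-+ * Roughly: allow transmission only while more than MAX_SKB_FRAGS + 2 of
-+ * the TX_MAX_TARGET slots are unused, leaving room for a maximally
-+ * fragmented packet, its (possibly page-crossing) head and a GSO
-+ * extra-info slot.
-+ */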
-+static inline int netfront_tx_slot_available(struct netfront_info *np)
-+{
-+ return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
-+ (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
-+}
-+
-+static inline void network_maybe_wake_tx(struct net_device *dev)
-+{
-+ struct netfront_info *np = netdev_priv(dev);
-+
-+ if (unlikely(netif_queue_stopped(dev)) &&
-+ netfront_tx_slot_available(np) &&
-+ likely(netif_running(dev)))
-+ netif_wake_queue(dev);
-+}
-+
-+static int network_open(struct net_device *dev)
-+{
-+ struct netfront_info *np = netdev_priv(dev);
-+
-+ memset(&np->stats, 0, sizeof(np->stats));
-+
-+ spin_lock_bh(&np->rx_lock);
-+ if (netfront_carrier_ok(np)) {
-+ network_alloc_rx_buffers(dev);
-+ np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
-+ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
-+ netif_rx_schedule(dev);
-+ }
-+ spin_unlock_bh(&np->rx_lock);
-+
-+ network_maybe_wake_tx(dev);
-+
-+ return 0;
-+}
-+
-+static void network_tx_buf_gc(struct net_device *dev)
-+{
-+ RING_IDX cons, prod;
-+ unsigned short id;
-+ struct netfront_info *np = netdev_priv(dev);
-+ struct sk_buff *skb;
-+
-+ BUG_ON(!netfront_carrier_ok(np));
-+
-+ do {
-+ prod = np->tx.sring->rsp_prod;
-+		rmb(); /* Ensure we see responses up to 'prod'. */
-+
-+ for (cons = np->tx.rsp_cons; cons != prod; cons++) {
-+ struct netif_tx_response *txrsp;
-+
-+ txrsp = RING_GET_RESPONSE(&np->tx, cons);
-+ if (txrsp->status == NETIF_RSP_NULL)
-+ continue;
-+
-+ id = txrsp->id;
-+ skb = np->tx_skbs[id];
-+ if (unlikely(gnttab_query_foreign_access(
-+ np->grant_tx_ref[id]) != 0)) {
-+ printk(KERN_ALERT "network_tx_buf_gc: warning "
-+ "-- grant still in use by backend "
-+ "domain.\n");
-+ BUG();
-+ }
-+ gnttab_end_foreign_access_ref(
-+ np->grant_tx_ref[id], GNTMAP_readonly);
-+ gnttab_release_grant_reference(
-+ &np->gref_tx_head, np->grant_tx_ref[id]);
-+ np->grant_tx_ref[id] = GRANT_INVALID_REF;
-+ add_id_to_freelist(np->tx_skbs, id);
-+ dev_kfree_skb_irq(skb);
-+ }
-+
-+ np->tx.rsp_cons = prod;
-+
-+ /*
-+ * Set a new event, then check for race with update of tx_cons.
-+ * Note that it is essential to schedule a callback, no matter
-+ * how few buffers are pending. Even if there is space in the
-+ * transmit ring, higher layers may be blocked because too much
-+ * data is outstanding: in such cases notification from Xen is
-+ * likely to be the only kick that we'll get.
-+ */
-+ np->tx.sring->rsp_event =
-+ prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
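-+		/*
-+		 * For example, with prod == 10 and req_prod == 18 this sets
-+		 * rsp_event to 10 + (8 >> 1) + 1 = 15, i.e. ask for another
-+		 * notification once half of the still-outstanding requests
-+		 * have completed.
-+		 */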
-+ mb();
-+ } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
-+
-+ network_maybe_wake_tx(dev);
-+}
-+
-+static void rx_refill_timeout(unsigned long data)
-+{
-+ struct net_device *dev = (struct net_device *)data;
-+ netif_rx_schedule(dev);
-+}
-+
-+static void network_alloc_rx_buffers(struct net_device *dev)
-+{
-+ unsigned short id;
-+ struct netfront_info *np = netdev_priv(dev);
-+ struct sk_buff *skb;
-+ struct page *page;
-+ int i, batch_target, notify;
-+ RING_IDX req_prod = np->rx.req_prod_pvt;
-+ struct xen_memory_reservation reservation;
-+ grant_ref_t ref;
-+ unsigned long pfn;
-+ void *vaddr;
-+ int nr_flips;
-+ netif_rx_request_t *req;
-+
-+ if (unlikely(!netfront_carrier_ok(np)))
-+ return;
-+
-+ /*
-+ * Allocate skbuffs greedily, even though we batch updates to the
-+ * receive ring. This creates a less bursty demand on the memory
-+ * allocator, so should reduce the chance of failed allocation requests
-+ * both for ourself and for other kernel subsystems.
-+ */
-+ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
-+ for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
-+ /*
-+ * Allocate an skb and a page. Do not use __dev_alloc_skb as
-+ * that will allocate page-sized buffers which is not
-+ * necessary here.
-+ * 16 bytes added as necessary headroom for netif_receive_skb.
-+ */
-+ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN,
-+ GFP_ATOMIC | __GFP_NOWARN);
-+ if (unlikely(!skb))
-+ goto no_skb;
-+
-+ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
-+ if (!page) {
-+ kfree_skb(skb);
-+no_skb:
-+ /* Any skbuffs queued for refill? Force them out. */
-+ if (i != 0)
-+ goto refill;
-+ /* Could not allocate any skbuffs. Try again later. */
-+ mod_timer(&np->rx_refill_timer,
-+ jiffies + (HZ/10));
-+ break;
-+ }
-+
-+ skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */
-+ skb_shinfo(skb)->frags[0].page = page;
-+ skb_shinfo(skb)->nr_frags = 1;
-+ __skb_queue_tail(&np->rx_batch, skb);
-+ }
-+
-+ /* Is the batch large enough to be worthwhile? */
-+ if (i < (np->rx_target/2)) {
-+ if (req_prod > np->rx.sring->req_prod)
-+ goto push;
-+ return;
-+ }
-+
-+ /* Adjust our fill target if we risked running out of buffers. */
-+ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
-+ ((np->rx_target *= 2) > np->rx_max_target))
-+ np->rx_target = np->rx_max_target;
-+
-+ refill:
-+ for (nr_flips = i = 0; ; i++) {
-+ if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
-+ break;
-+
-+ skb->dev = dev;
-+
-+ id = xennet_rxidx(req_prod + i);
-+
-+ BUG_ON(np->rx_skbs[id]);
-+ np->rx_skbs[id] = skb;
-+
-+ ref = gnttab_claim_grant_reference(&np->gref_rx_head);
-+ BUG_ON((signed short)ref < 0);
-+ np->grant_rx_ref[id] = ref;
-+
-+ pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
-+ vaddr = page_address(skb_shinfo(skb)->frags[0].page);
-+
-+ req = RING_GET_REQUEST(&np->rx, req_prod + i);
-+ if (!np->copying_receiver) {
-+ gnttab_grant_foreign_transfer_ref(ref,
-+ np->xbdev->otherend_id,
-+ pfn);
-+ np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn);
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ /* Remove this page before passing
-+ * back to Xen. */
-+ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
-+ MULTI_update_va_mapping(np->rx_mcl+i,
-+ (unsigned long)vaddr,
-+ __pte(0), 0);
-+ }
-+ nr_flips++;
-+ } else {
-+ gnttab_grant_foreign_access_ref(ref,
-+ np->xbdev->otherend_id,
-+ pfn_to_mfn(pfn),
-+ 0);
-+ }
-+
-+ req->id = id;
-+ req->gref = ref;
-+ }
-+
-+ if ( nr_flips != 0 ) {
-+		/* Tell the balloon driver what is going on. */
-+ balloon_update_driver_allowance(i);
-+
-+ set_xen_guest_handle(reservation.extent_start,
-+ np->rx_pfn_array);
-+ reservation.nr_extents = nr_flips;
-+ reservation.extent_order = 0;
-+ reservation.address_bits = 0;
-+ reservation.domid = DOMID_SELF;
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ /* After all PTEs have been zapped, flush the TLB. */
-+ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
-+ UVMF_TLB_FLUSH|UVMF_ALL;
-+
-+ /* Give away a batch of pages. */
-+ np->rx_mcl[i].op = __HYPERVISOR_memory_op;
-+ np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
-+ np->rx_mcl[i].args[1] = (unsigned long)&reservation;
-+
-+ /* Zap PTEs and give away pages in one big
-+ * multicall. */
-+ (void)HYPERVISOR_multicall(np->rx_mcl, i+1);
-+
-+ /* Check return status of HYPERVISOR_memory_op(). */
-+ if (unlikely(np->rx_mcl[i].result != i))
-+ panic("Unable to reduce memory reservation\n");
-+ } else {
-+ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+ &reservation) != i)
-+ panic("Unable to reduce memory reservation\n");
-+ }
-+ } else {
-+ wmb();
-+ }
-+
-+ /* Above is a suitable barrier to ensure backend will see requests. */
-+ np->rx.req_prod_pvt = req_prod + i;
-+ push:
-+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
-+ if (notify)
-+ notify_remote_via_irq(np->irq);
-+}
-+
-+static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
-+ struct netif_tx_request *tx)
-+{
-+ struct netfront_info *np = netdev_priv(dev);
-+ char *data = skb->data;
-+ unsigned long mfn;
-+ RING_IDX prod = np->tx.req_prod_pvt;
-+ int frags = skb_shinfo(skb)->nr_frags;
-+ unsigned int offset = offset_in_page(data);
-+ unsigned int len = skb_headlen(skb);
-+ unsigned int id;
-+ grant_ref_t ref;
-+ int i;
-+
-+ while (len > PAGE_SIZE - offset) {
-+ tx->size = PAGE_SIZE - offset;
-+ tx->flags |= NETTXF_more_data;
-+ len -= tx->size;
-+ data += tx->size;
-+ offset = 0;
-+
-+ id = get_id_from_freelist(np->tx_skbs);
-+ np->tx_skbs[id] = skb_get(skb);
-+ tx = RING_GET_REQUEST(&np->tx, prod++);
-+ tx->id = id;
-+ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
-+ BUG_ON((signed short)ref < 0);
-+
-+ mfn = virt_to_mfn(data);
-+ gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
-+ mfn, GNTMAP_readonly);
-+
-+ tx->gref = np->grant_tx_ref[id] = ref;
-+ tx->offset = offset;
-+ tx->size = len;
-+ tx->flags = 0;
-+ }
-+
-+ for (i = 0; i < frags; i++) {
-+ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
-+
-+ tx->flags |= NETTXF_more_data;
-+
-+ id = get_id_from_freelist(np->tx_skbs);
-+ np->tx_skbs[id] = skb_get(skb);
-+ tx = RING_GET_REQUEST(&np->tx, prod++);
-+ tx->id = id;
-+ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
-+ BUG_ON((signed short)ref < 0);
-+
-+ mfn = pfn_to_mfn(page_to_pfn(frag->page));
-+ gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
-+ mfn, GNTMAP_readonly);
-+
-+ tx->gref = np->grant_tx_ref[id] = ref;
-+ tx->offset = frag->page_offset;
-+ tx->size = frag->size;
-+ tx->flags = 0;
-+ }
-+
-+ np->tx.req_prod_pvt = prod;
-+}
-+
-+static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
-+{
-+ unsigned short id;
-+ struct netfront_info *np = netdev_priv(dev);
-+ struct netif_tx_request *tx;
-+ struct netif_extra_info *extra;
-+ char *data = skb->data;
-+ RING_IDX i;
-+ grant_ref_t ref;
-+ unsigned long mfn;
-+ int notify;
-+ int frags = skb_shinfo(skb)->nr_frags;
-+ unsigned int offset = offset_in_page(data);
-+ unsigned int len = skb_headlen(skb);
-+
-+ frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
-+ if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
-+ printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
-+ frags);
-+ dump_stack();
-+ goto drop;
-+ }
-+
-+ spin_lock_irq(&np->tx_lock);
-+
-+ if (unlikely(!netfront_carrier_ok(np) ||
-+ (frags > 1 && !xennet_can_sg(dev)) ||
-+ netif_needs_gso(dev, skb))) {
-+ spin_unlock_irq(&np->tx_lock);
-+ goto drop;
-+ }
-+
-+ i = np->tx.req_prod_pvt;
-+
-+ id = get_id_from_freelist(np->tx_skbs);
-+ np->tx_skbs[id] = skb;
-+
-+ tx = RING_GET_REQUEST(&np->tx, i);
-+
-+ tx->id = id;
-+ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
-+ BUG_ON((signed short)ref < 0);
-+ mfn = virt_to_mfn(data);
-+ gnttab_grant_foreign_access_ref(
-+ ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
-+ tx->gref = np->grant_tx_ref[id] = ref;
-+ tx->offset = offset;
-+ tx->size = len;
-+
-+ tx->flags = 0;
-+ extra = NULL;
-+
-+ if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
-+ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
-+#ifdef CONFIG_XEN
-+ if (skb->proto_data_valid) /* remote but checksummed? */
-+ tx->flags |= NETTXF_data_validated;
-+#endif
-+
-+#ifdef HAVE_TSO
-+ if (skb_shinfo(skb)->gso_size) {
-+ struct netif_extra_info *gso = (struct netif_extra_info *)
-+ RING_GET_REQUEST(&np->tx, ++i);
-+
-+ if (extra)
-+ extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
-+ else
-+ tx->flags |= NETTXF_extra_info;
-+
-+ gso->u.gso.size = skb_shinfo(skb)->gso_size;
-+ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
-+ gso->u.gso.pad = 0;
-+ gso->u.gso.features = 0;
-+
-+ gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
-+ gso->flags = 0;
-+ extra = gso;
-+ }
-+#endif
-+
-+ np->tx.req_prod_pvt = i + 1;
-+
-+ xennet_make_frags(skb, dev, tx);
-+ tx->size = skb->len;
-+
-+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
-+ if (notify)
-+ notify_remote_via_irq(np->irq);
-+
-+ network_tx_buf_gc(dev);
-+
-+ if (!netfront_tx_slot_available(np))
-+ netif_stop_queue(dev);
-+
-+ spin_unlock_irq(&np->tx_lock);
-+
-+ np->stats.tx_bytes += skb->len;
-+ np->stats.tx_packets++;
-+
-+ return 0;
-+
-+ drop:
-+ np->stats.tx_dropped++;
-+ dev_kfree_skb(skb);
-+ return 0;
-+}
-+
-+static irqreturn_t netif_int(int irq, void *dev_id)
-+{
-+ struct net_device *dev = dev_id;
-+ struct netfront_info *np = netdev_priv(dev);
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&np->tx_lock, flags);
-+
-+ if (likely(netfront_carrier_ok(np))) {
-+ network_tx_buf_gc(dev);
-+ /* Under tx_lock: protects access to rx shared-ring indexes. */
-+ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
-+ netif_rx_schedule(dev);
-+ }
-+
-+ spin_unlock_irqrestore(&np->tx_lock, flags);
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
-+ grant_ref_t ref)
-+{
-+ int new = xennet_rxidx(np->rx.req_prod_pvt);
-+
-+ BUG_ON(np->rx_skbs[new]);
-+ np->rx_skbs[new] = skb;
-+ np->grant_rx_ref[new] = ref;
-+ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
-+ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
-+ np->rx.req_prod_pvt++;
-+}
-+
-+int xennet_get_extras(struct netfront_info *np,
-+ struct netif_extra_info *extras, RING_IDX rp)
-+
-+{
-+ struct netif_extra_info *extra;
-+ RING_IDX cons = np->rx.rsp_cons;
-+ int err = 0;
-+
-+ do {
-+ struct sk_buff *skb;
-+ grant_ref_t ref;
-+
-+ if (unlikely(cons + 1 == rp)) {
-+ if (net_ratelimit())
-+ WPRINTK("Missing extra info\n");
-+ err = -EBADR;
-+ break;
-+ }
-+
-+ extra = (struct netif_extra_info *)
-+ RING_GET_RESPONSE(&np->rx, ++cons);
-+
-+ if (unlikely(!extra->type ||
-+ extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
-+ if (net_ratelimit())
-+ WPRINTK("Invalid extra type: %d\n",
-+ extra->type);
-+ err = -EINVAL;
-+ } else {
-+ memcpy(&extras[extra->type - 1], extra,
-+ sizeof(*extra));
-+ }
-+
-+ skb = xennet_get_rx_skb(np, cons);
-+ ref = xennet_get_rx_ref(np, cons);
-+ xennet_move_rx_slot(np, skb, ref);
-+ } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
-+
-+ np->rx.rsp_cons = cons;
-+ return err;
-+}
-+
-+static int xennet_get_responses(struct netfront_info *np,
-+ struct netfront_rx_info *rinfo, RING_IDX rp,
-+ struct sk_buff_head *list,
-+ int *pages_flipped_p)
-+{
-+ int pages_flipped = *pages_flipped_p;
-+ struct mmu_update *mmu;
-+ struct multicall_entry *mcl;
-+ struct netif_rx_response *rx = &rinfo->rx;
-+ struct netif_extra_info *extras = rinfo->extras;
-+ RING_IDX cons = np->rx.rsp_cons;
-+ struct sk_buff *skb = xennet_get_rx_skb(np, cons);
-+ grant_ref_t ref = xennet_get_rx_ref(np, cons);
-+ int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
-+ int frags = 1;
-+ int err = 0;
-+ unsigned long ret;
-+
-+ if (rx->flags & NETRXF_extra_info) {
-+ err = xennet_get_extras(np, extras, rp);
-+ cons = np->rx.rsp_cons;
-+ }
-+
-+ for (;;) {
-+ unsigned long mfn;
-+
-+ if (unlikely(rx->status < 0 ||
-+ rx->offset + rx->status > PAGE_SIZE)) {
-+ if (net_ratelimit())
-+ WPRINTK("rx->offset: %x, size: %u\n",
-+ rx->offset, rx->status);
-+ xennet_move_rx_slot(np, skb, ref);
-+ err = -EINVAL;
-+ goto next;
-+ }
-+
-+ /*
-+ * This definitely indicates a bug, either in this driver or in
-+ * the backend driver. In future this should flag the bad
-+		 * situation to the system controller to reboot the backend.
-+ */
-+ if (ref == GRANT_INVALID_REF) {
-+ if (net_ratelimit())
-+ WPRINTK("Bad rx response id %d.\n", rx->id);
-+ err = -EINVAL;
-+ goto next;
-+ }
-+
-+ if (!np->copying_receiver) {
-+ /* Memory pressure, insufficient buffer
-+ * headroom, ... */
-+ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
-+ if (net_ratelimit())
-+ WPRINTK("Unfulfilled rx req "
-+ "(id=%d, st=%d).\n",
-+ rx->id, rx->status);
-+ xennet_move_rx_slot(np, skb, ref);
-+ err = -ENOMEM;
-+ goto next;
-+ }
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ /* Remap the page. */
-+ struct page *page =
-+ skb_shinfo(skb)->frags[0].page;
-+ unsigned long pfn = page_to_pfn(page);
-+ void *vaddr = page_address(page);
-+
-+ mcl = np->rx_mcl + pages_flipped;
-+ mmu = np->rx_mmu + pages_flipped;
-+
-+ MULTI_update_va_mapping(mcl,
-+ (unsigned long)vaddr,
-+ pfn_pte_ma(mfn,
-+ PAGE_KERNEL),
-+ 0);
-+ mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
-+ | MMU_MACHPHYS_UPDATE;
-+ mmu->val = pfn;
-+
-+ set_phys_to_machine(pfn, mfn);
-+ }
-+ pages_flipped++;
-+ } else {
-+ ret = gnttab_end_foreign_access_ref(ref, 0);
-+ BUG_ON(!ret);
-+ }
-+
-+ gnttab_release_grant_reference(&np->gref_rx_head, ref);
-+
-+ __skb_queue_tail(list, skb);
-+
-+next:
-+ if (!(rx->flags & NETRXF_more_data))
-+ break;
-+
-+ if (cons + frags == rp) {
-+ if (net_ratelimit())
-+ WPRINTK("Need more frags\n");
-+ err = -ENOENT;
-+ break;
-+ }
-+
-+ rx = RING_GET_RESPONSE(&np->rx, cons + frags);
-+ skb = xennet_get_rx_skb(np, cons + frags);
-+ ref = xennet_get_rx_ref(np, cons + frags);
-+ frags++;
-+ }
-+
-+ if (unlikely(frags > max)) {
-+ if (net_ratelimit())
-+ WPRINTK("Too many frags\n");
-+ err = -E2BIG;
-+ }
-+
-+ if (unlikely(err))
-+ np->rx.rsp_cons = cons + frags;
-+
-+ *pages_flipped_p = pages_flipped;
-+
-+ return err;
-+}
-+
-+static RING_IDX xennet_fill_frags(struct netfront_info *np,
-+ struct sk_buff *skb,
-+ struct sk_buff_head *list)
-+{
-+ struct skb_shared_info *shinfo = skb_shinfo(skb);
-+ int nr_frags = shinfo->nr_frags;
-+ RING_IDX cons = np->rx.rsp_cons;
-+ skb_frag_t *frag = shinfo->frags + nr_frags;
-+ struct sk_buff *nskb;
-+
-+ while ((nskb = __skb_dequeue(list))) {
-+ struct netif_rx_response *rx =
-+ RING_GET_RESPONSE(&np->rx, ++cons);
-+
-+ frag->page = skb_shinfo(nskb)->frags[0].page;
-+ frag->page_offset = rx->offset;
-+ frag->size = rx->status;
-+
-+ skb->data_len += rx->status;
-+
-+ skb_shinfo(nskb)->nr_frags = 0;
-+ kfree_skb(nskb);
-+
-+ frag++;
-+ nr_frags++;
-+ }
-+
-+ shinfo->nr_frags = nr_frags;
-+ return cons;
-+}
-+
-+static int xennet_set_skb_gso(struct sk_buff *skb,
-+ struct netif_extra_info *gso)
-+{
-+ if (!gso->u.gso.size) {
-+ if (net_ratelimit())
-+ WPRINTK("GSO size must not be zero.\n");
-+ return -EINVAL;
-+ }
-+
-+	/* Currently only TCPv4 segmentation offload is supported. */
-+ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-+ if (net_ratelimit())
-+ WPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
-+ return -EINVAL;
-+ }
-+
-+#ifdef HAVE_TSO
-+ skb_shinfo(skb)->gso_size = gso->u.gso.size;
-+#ifdef HAVE_GSO
-+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
-+
-+ /* Header must be checked, and gso_segs computed. */
-+ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-+#endif
-+ skb_shinfo(skb)->gso_segs = 0;
-+
-+ return 0;
-+#else
-+ if (net_ratelimit())
-+ WPRINTK("GSO unsupported by this kernel.\n");
-+ return -EINVAL;
-+#endif
-+}
-+
-+static int netif_poll(struct net_device *dev, int *pbudget)
-+{
-+ struct netfront_info *np = netdev_priv(dev);
-+ struct sk_buff *skb;
-+ struct netfront_rx_info rinfo;
-+ struct netif_rx_response *rx = &rinfo.rx;
-+ struct netif_extra_info *extras = rinfo.extras;
-+ RING_IDX i, rp;
-+ struct multicall_entry *mcl;
-+ int work_done, budget, more_to_do = 1;
-+ struct sk_buff_head rxq;
-+ struct sk_buff_head errq;
-+ struct sk_buff_head tmpq;
-+ unsigned long flags;
-+ unsigned int len;
-+ int pages_flipped = 0;
-+ int err;
-+
-+ spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */
-+
-+ if (unlikely(!netfront_carrier_ok(np))) {
-+ spin_unlock(&np->rx_lock);
-+ return 0;
-+ }
-+
-+ skb_queue_head_init(&rxq);
-+ skb_queue_head_init(&errq);
-+ skb_queue_head_init(&tmpq);
-+
-+ if ((budget = *pbudget) > dev->quota)
-+ budget = dev->quota;
-+ rp = np->rx.sring->rsp_prod;
-+ rmb(); /* Ensure we see queued responses up to 'rp'. */
-+
-+ i = np->rx.rsp_cons;
-+ work_done = 0;
-+ while ((i != rp) && (work_done < budget)) {
-+ memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
-+ memset(extras, 0, sizeof(rinfo.extras));
-+
-+ err = xennet_get_responses(np, &rinfo, rp, &tmpq,
-+ &pages_flipped);
-+
-+ if (unlikely(err)) {
-+err:
-+ while ((skb = __skb_dequeue(&tmpq)))
-+ __skb_queue_tail(&errq, skb);
-+ np->stats.rx_errors++;
-+ i = np->rx.rsp_cons;
-+ continue;
-+ }
-+
-+ skb = __skb_dequeue(&tmpq);
-+
-+ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
-+ struct netif_extra_info *gso;
-+ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
-+
-+ if (unlikely(xennet_set_skb_gso(skb, gso))) {
-+ __skb_queue_head(&tmpq, skb);
-+ np->rx.rsp_cons += skb_queue_len(&tmpq);
-+ goto err;
-+ }
-+ }
-+
-+ NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
-+ NETFRONT_SKB_CB(skb)->offset = rx->offset;
-+
-+ len = rx->status;
-+ if (len > RX_COPY_THRESHOLD)
-+ len = RX_COPY_THRESHOLD;
-+ skb_put(skb, len);
-+
-+ if (rx->status > len) {
-+ skb_shinfo(skb)->frags[0].page_offset =
-+ rx->offset + len;
-+ skb_shinfo(skb)->frags[0].size = rx->status - len;
-+ skb->data_len = rx->status - len;
-+ } else {
-+ skb_shinfo(skb)->frags[0].page = NULL;
-+ skb_shinfo(skb)->nr_frags = 0;
-+ }
-+
-+ i = xennet_fill_frags(np, skb, &tmpq);
-+
-+ /*
-+		 * Truesize must approximate the size of true data plus
-+ * any supervisor overheads. Adding hypervisor overheads
-+ * has been shown to significantly reduce achievable
-+ * bandwidth with the default receive buffer size. It is
-+ * therefore not wise to account for it here.
-+ *
-+ * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to
-+ * RX_COPY_THRESHOLD + the supervisor overheads. Here, we
-+ * add the size of the data pulled in xennet_fill_frags().
-+ *
-+ * We also adjust for any unused space in the main data
-+ * area by subtracting (RX_COPY_THRESHOLD - len). This is
-+ * especially important with drivers which split incoming
-+ * packets into header and data, using only 66 bytes of
-+ * the main data area (see the e1000 driver for example.)
-+		 * On such systems, without this last adjustment, our
-+		 * achievable receive throughput using the standard receive
-+ * buffer size was cut by 25%(!!!).
-+ */
-+ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
-+ skb->len += skb->data_len;
-+
-+ /*
-+ * Old backends do not assert data_validated but we
-+ * can infer it from csum_blank so test both flags.
-+ */
-+ if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank))
-+ skb->ip_summed = CHECKSUM_UNNECESSARY;
-+ else
-+ skb->ip_summed = CHECKSUM_NONE;
-+#ifdef CONFIG_XEN
-+ skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE);
-+ skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank);
-+#endif
-+ np->stats.rx_packets++;
-+ np->stats.rx_bytes += skb->len;
-+
-+ __skb_queue_tail(&rxq, skb);
-+
-+ np->rx.rsp_cons = ++i;
-+ work_done++;
-+ }
-+
-+ if (pages_flipped) {
-+ /* Some pages are no longer absent... */
-+ balloon_update_driver_allowance(-pages_flipped);
-+
-+ /* Do all the remapping work and M2P updates. */
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ mcl = np->rx_mcl + pages_flipped;
-+ mcl->op = __HYPERVISOR_mmu_update;
-+ mcl->args[0] = (unsigned long)np->rx_mmu;
-+ mcl->args[1] = pages_flipped;
-+ mcl->args[2] = 0;
-+ mcl->args[3] = DOMID_SELF;
-+ (void)HYPERVISOR_multicall(np->rx_mcl,
-+ pages_flipped + 1);
-+ }
-+ }
-+
-+ while ((skb = __skb_dequeue(&errq)))
-+ kfree_skb(skb);
-+
-+ while ((skb = __skb_dequeue(&rxq)) != NULL) {
-+ struct page *page = NETFRONT_SKB_CB(skb)->page;
-+ void *vaddr = page_address(page);
-+ unsigned offset = NETFRONT_SKB_CB(skb)->offset;
-+
-+ memcpy(skb->data, vaddr + offset, skb_headlen(skb));
-+
-+ if (page != skb_shinfo(skb)->frags[0].page)
-+ __free_page(page);
-+
-+ /* Ethernet work: Delayed to here as it peeks the header. */
-+ skb->protocol = eth_type_trans(skb, dev);
-+
-+ /* Pass it up. */
-+ netif_receive_skb(skb);
-+ dev->last_rx = jiffies;
-+ }
-+
-+ /* If we get a callback with very few responses, reduce fill target. */
-+ /* NB. Note exponential increase, linear decrease. */
-+ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
-+ ((3*np->rx_target) / 4)) &&
-+ (--np->rx_target < np->rx_min_target))
-+ np->rx_target = np->rx_min_target;
-+
-+ network_alloc_rx_buffers(dev);
-+
-+ *pbudget -= work_done;
-+ dev->quota -= work_done;
-+
-+ if (work_done < budget) {
-+ local_irq_save(flags);
-+
-+ RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
-+ if (!more_to_do)
-+ __netif_rx_complete(dev);
-+
-+ local_irq_restore(flags);
-+ }
-+
-+ spin_unlock(&np->rx_lock);
-+
-+ return more_to_do;
-+}
-+
-+static void netif_release_tx_bufs(struct netfront_info *np)
-+{
-+ struct sk_buff *skb;
-+ int i;
-+
-+ for (i = 1; i <= NET_TX_RING_SIZE; i++) {
-+ if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET)
-+ continue;
-+
-+ skb = np->tx_skbs[i];
-+ gnttab_end_foreign_access_ref(
-+ np->grant_tx_ref[i], GNTMAP_readonly);
-+ gnttab_release_grant_reference(
-+ &np->gref_tx_head, np->grant_tx_ref[i]);
-+ np->grant_tx_ref[i] = GRANT_INVALID_REF;
-+ add_id_to_freelist(np->tx_skbs, i);
-+ dev_kfree_skb_irq(skb);
-+ }
-+}
-+
-+static void netif_release_rx_bufs(struct netfront_info *np)
-+{
-+ struct mmu_update *mmu = np->rx_mmu;
-+ struct multicall_entry *mcl = np->rx_mcl;
-+ struct sk_buff_head free_list;
-+ struct sk_buff *skb;
-+ unsigned long mfn;
-+ int xfer = 0, noxfer = 0, unused = 0;
-+ int id, ref, rc;
-+
-+ if (np->copying_receiver) {
-+ WPRINTK("%s: fix me for copying receiver.\n", __FUNCTION__);
-+ return;
-+ }
-+
-+ skb_queue_head_init(&free_list);
-+
-+ spin_lock_bh(&np->rx_lock);
-+
-+ for (id = 0; id < NET_RX_RING_SIZE; id++) {
-+ if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) {
-+ unused++;
-+ continue;
-+ }
-+
-+ skb = np->rx_skbs[id];
-+ mfn = gnttab_end_foreign_transfer_ref(ref);
-+ gnttab_release_grant_reference(&np->gref_rx_head, ref);
-+ np->grant_rx_ref[id] = GRANT_INVALID_REF;
-+ add_id_to_freelist(np->rx_skbs, id);
-+
-+ if (0 == mfn) {
-+ struct page *page = skb_shinfo(skb)->frags[0].page;
-+ balloon_release_driver_page(page);
-+ skb_shinfo(skb)->nr_frags = 0;
-+ dev_kfree_skb(skb);
-+ noxfer++;
-+ continue;
-+ }
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ /* Remap the page. */
-+ struct page *page = skb_shinfo(skb)->frags[0].page;
-+ unsigned long pfn = page_to_pfn(page);
-+ void *vaddr = page_address(page);
-+
-+ MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
-+ pfn_pte_ma(mfn, PAGE_KERNEL),
-+ 0);
-+ mcl++;
-+ mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
-+ | MMU_MACHPHYS_UPDATE;
-+ mmu->val = pfn;
-+ mmu++;
-+
-+ set_phys_to_machine(pfn, mfn);
-+ }
-+ __skb_queue_tail(&free_list, skb);
-+ xfer++;
-+ }
-+
-+ IPRINTK("%s: %d xfer, %d noxfer, %d unused\n",
-+ __FUNCTION__, xfer, noxfer, unused);
-+
-+ if (xfer) {
-+ /* Some pages are no longer absent... */
-+ balloon_update_driver_allowance(-xfer);
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ /* Do all the remapping work and M2P updates. */
-+ mcl->op = __HYPERVISOR_mmu_update;
-+ mcl->args[0] = (unsigned long)np->rx_mmu;
-+ mcl->args[1] = mmu - np->rx_mmu;
-+ mcl->args[2] = 0;
-+ mcl->args[3] = DOMID_SELF;
-+ mcl++;
-+ rc = HYPERVISOR_multicall_check(
-+ np->rx_mcl, mcl - np->rx_mcl, NULL);
-+ BUG_ON(rc);
-+ }
-+ }
-+
-+ while ((skb = __skb_dequeue(&free_list)) != NULL)
-+ dev_kfree_skb(skb);
-+
-+ spin_unlock_bh(&np->rx_lock);
-+}
-+
-+static int network_close(struct net_device *dev)
-+{
-+ struct netfront_info *np = netdev_priv(dev);
-+ netif_stop_queue(np->netdev);
-+ return 0;
-+}
-+
-+
-+static struct net_device_stats *network_get_stats(struct net_device *dev)
-+{
-+ struct netfront_info *np = netdev_priv(dev);
-+ return &np->stats;
-+}
-+
-+static int xennet_change_mtu(struct net_device *dev, int mtu)
-+{
-+ int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
-+
-+ if (mtu > max)
-+ return -EINVAL;
-+ dev->mtu = mtu;
-+ return 0;
-+}
-+
-+static int xennet_set_sg(struct net_device *dev, u32 data)
-+{
-+ if (data) {
-+ struct netfront_info *np = netdev_priv(dev);
-+ int val;
-+
-+ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
-+ "%d", &val) < 0)
-+ val = 0;
-+ if (!val)
-+ return -ENOSYS;
-+ } else if (dev->mtu > ETH_DATA_LEN)
-+ dev->mtu = ETH_DATA_LEN;
-+
-+ return ethtool_op_set_sg(dev, data);
-+}
-+
-+static int xennet_set_tso(struct net_device *dev, u32 data)
-+{
-+#ifdef HAVE_TSO
-+ if (data) {
-+ struct netfront_info *np = netdev_priv(dev);
-+ int val;
-+
-+ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-+ "feature-gso-tcpv4", "%d", &val) < 0)
-+ val = 0;
-+ if (!val)
-+ return -ENOSYS;
-+ }
-+
-+ return ethtool_op_set_tso(dev, data);
-+#else
-+ return -ENOSYS;
-+#endif
-+}
-+
-+static void xennet_set_features(struct net_device *dev)
-+{
-+ dev_disable_gso_features(dev);
-+ xennet_set_sg(dev, 0);
-+
-+ /* We need checksum offload to enable scatter/gather and TSO. */
-+ if (!(dev->features & NETIF_F_IP_CSUM))
-+ return;
-+
-+ if (xennet_set_sg(dev, 1))
-+ return;
-+
-+ /* Before 2.6.9 TSO seems to be unreliable so do not enable it
-+ * on older kernels.
-+ */
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
-+ xennet_set_tso(dev, 1);
-+#endif
-+
-+}
-+
-+static int network_connect(struct net_device *dev)
-+{
-+ struct netfront_info *np = netdev_priv(dev);
-+ int i, requeue_idx, err;
-+ struct sk_buff *skb;
-+ grant_ref_t ref;
-+ netif_rx_request_t *req;
-+ unsigned int feature_rx_copy, feature_rx_flip;
-+
-+ err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-+ "feature-rx-copy", "%u", &feature_rx_copy);
-+ if (err != 1)
-+ feature_rx_copy = 0;
-+ err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-+ "feature-rx-flip", "%u", &feature_rx_flip);
-+ if (err != 1)
-+ feature_rx_flip = 1;
-+
-+ /*
-+ * Copy packets on receive path if:
-+ * (a) This was requested by user, and the backend supports it; or
-+ * (b) Flipping was requested, but this is unsupported by the backend.
-+ */
-+ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
-+ (MODPARM_rx_flip && !feature_rx_flip));
-+
-+ err = talk_to_backend(np->xbdev, np);
-+ if (err)
-+ return err;
-+
-+ xennet_set_features(dev);
-+
-+ IPRINTK("device %s has %sing receive path.\n",
-+ dev->name, np->copying_receiver ? "copy" : "flipp");
-+
-+ spin_lock_bh(&np->rx_lock);
-+ spin_lock_irq(&np->tx_lock);
-+
-+ /*
-+ * Recovery procedure:
-+ * NB. Freelist index entries are always going to be less than
-+	 * PAGE_OFFSET, whereas pointers to skbs will always be equal to or
-+ * greater than PAGE_OFFSET: we use this property to distinguish
-+ * them.
-+ */
-+
-+ /* Step 1: Discard all pending TX packet fragments. */
-+ netif_release_tx_bufs(np);
-+
-+ /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
-+ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
-+ if (!np->rx_skbs[i])
-+ continue;
-+
-+ skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
-+ ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
-+ req = RING_GET_REQUEST(&np->rx, requeue_idx);
-+
-+ if (!np->copying_receiver) {
-+ gnttab_grant_foreign_transfer_ref(
-+ ref, np->xbdev->otherend_id,
-+ page_to_pfn(skb_shinfo(skb)->frags->page));
-+ } else {
-+ gnttab_grant_foreign_access_ref(
-+ ref, np->xbdev->otherend_id,
-+ pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
-+ frags->page)),
-+ 0);
-+ }
-+ req->gref = ref;
-+ req->id = requeue_idx;
-+
-+ requeue_idx++;
-+ }
-+
-+ np->rx.req_prod_pvt = requeue_idx;
-+
-+ /*
-+ * Step 3: All public and private state should now be sane. Get
-+ * ready to start sending and receiving packets and give the driver
-+ * domain a kick because we've probably just requeued some
-+ * packets.
-+ */
-+ netfront_carrier_on(np);
-+ notify_remote_via_irq(np->irq);
-+ network_tx_buf_gc(dev);
-+ network_alloc_rx_buffers(dev);
-+
-+ spin_unlock_irq(&np->tx_lock);
-+ spin_unlock_bh(&np->rx_lock);
-+
-+ return 0;
-+}
-+
-+static void netif_uninit(struct net_device *dev)
-+{
-+ struct netfront_info *np = netdev_priv(dev);
-+ netif_release_tx_bufs(np);
-+ netif_release_rx_bufs(np);
-+ gnttab_free_grant_references(np->gref_tx_head);
-+ gnttab_free_grant_references(np->gref_rx_head);
-+}
-+
-+static struct ethtool_ops network_ethtool_ops =
-+{
-+ .get_tx_csum = ethtool_op_get_tx_csum,
-+ .set_tx_csum = ethtool_op_set_tx_csum,
-+ .get_sg = ethtool_op_get_sg,
-+ .set_sg = xennet_set_sg,
-+ .get_tso = ethtool_op_get_tso,
-+ .set_tso = xennet_set_tso,
-+ .get_link = ethtool_op_get_link,
-+};
-+
-+#ifdef CONFIG_SYSFS
-+static ssize_t show_rxbuf_min(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct netfront_info *info = netdev_priv(to_net_dev(dev));
-+
-+ return sprintf(buf, "%u\n", info->rx_min_target);
-+}
-+
-+static ssize_t store_rxbuf_min(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t len)
-+{
-+ struct net_device *netdev = to_net_dev(dev);
-+ struct netfront_info *np = netdev_priv(netdev);
-+ char *endp;
-+ unsigned long target;
-+
-+ if (!capable(CAP_NET_ADMIN))
-+ return -EPERM;
-+
-+ target = simple_strtoul(buf, &endp, 0);
-+ if (endp == buf)
-+ return -EBADMSG;
-+
-+ if (target < RX_MIN_TARGET)
-+ target = RX_MIN_TARGET;
-+ if (target > RX_MAX_TARGET)
-+ target = RX_MAX_TARGET;
-+
-+ spin_lock_bh(&np->rx_lock);
-+ if (target > np->rx_max_target)
-+ np->rx_max_target = target;
-+ np->rx_min_target = target;
-+ if (target > np->rx_target)
-+ np->rx_target = target;
-+
-+ network_alloc_rx_buffers(netdev);
-+
-+ spin_unlock_bh(&np->rx_lock);
-+ return len;
-+}
-+
-+static ssize_t show_rxbuf_max(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct netfront_info *info = netdev_priv(to_net_dev(dev));
-+
-+ return sprintf(buf, "%u\n", info->rx_max_target);
-+}
-+
-+static ssize_t store_rxbuf_max(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t len)
-+{
-+ struct net_device *netdev = to_net_dev(dev);
-+ struct netfront_info *np = netdev_priv(netdev);
-+ char *endp;
-+ unsigned long target;
-+
-+ if (!capable(CAP_NET_ADMIN))
-+ return -EPERM;
-+
-+ target = simple_strtoul(buf, &endp, 0);
-+ if (endp == buf)
-+ return -EBADMSG;
-+
-+ if (target < RX_MIN_TARGET)
-+ target = RX_MIN_TARGET;
-+ if (target > RX_MAX_TARGET)
-+ target = RX_MAX_TARGET;
-+
-+ spin_lock_bh(&np->rx_lock);
-+ if (target < np->rx_min_target)
-+ np->rx_min_target = target;
-+ np->rx_max_target = target;
-+ if (target < np->rx_target)
-+ np->rx_target = target;
-+
-+ network_alloc_rx_buffers(netdev);
-+
-+ spin_unlock_bh(&np->rx_lock);
-+ return len;
-+}
-+
-+static ssize_t show_rxbuf_cur(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct netfront_info *info = netdev_priv(to_net_dev(dev));
-+
-+ return sprintf(buf, "%u\n", info->rx_target);
-+}
-+
-+static struct device_attribute xennet_attrs[] = {
-+ __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
-+ __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
-+ __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
-+};
-+
-+static int xennet_sysfs_addif(struct net_device *netdev)
-+{
-+ int i;
-+ int error = 0;
-+
-+ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
-+ error = device_create_file(&netdev->dev,
-+ &xennet_attrs[i]);
-+ if (error)
-+ goto fail;
-+ }
-+ return 0;
-+
-+ fail:
-+ while (--i >= 0)
-+ device_remove_file(&netdev->dev, &xennet_attrs[i]);
-+ return error;
-+}
-+
-+static void xennet_sysfs_delif(struct net_device *netdev)
-+{
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
-+ device_remove_file(&netdev->dev, &xennet_attrs[i]);
-+}
-+
-+#endif /* CONFIG_SYSFS */
-+
-+
-+/*
-+ * Nothing to do here. Virtual interface is point-to-point and the
-+ * physical interface is probably promiscuous anyway.
-+ */
-+static void network_set_multicast_list(struct net_device *dev)
-+{
-+}
-+
-+static struct net_device * __devinit create_netdev(struct xenbus_device *dev)
-+{
-+ int i, err = 0;
-+ struct net_device *netdev = NULL;
-+ struct netfront_info *np = NULL;
-+
-+ netdev = alloc_etherdev(sizeof(struct netfront_info));
-+ if (!netdev) {
-+ printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
-+ __FUNCTION__);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ np = netdev_priv(netdev);
-+ np->xbdev = dev;
-+
-+ spin_lock_init(&np->tx_lock);
-+ spin_lock_init(&np->rx_lock);
-+
-+ skb_queue_head_init(&np->rx_batch);
-+ np->rx_target = RX_DFL_MIN_TARGET;
-+ np->rx_min_target = RX_DFL_MIN_TARGET;
-+ np->rx_max_target = RX_MAX_TARGET;
-+
-+ init_timer(&np->rx_refill_timer);
-+ np->rx_refill_timer.data = (unsigned long)netdev;
-+ np->rx_refill_timer.function = rx_refill_timeout;
-+
-+ /* Initialise {tx,rx}_skbs as a free chain containing every entry. */
-+ for (i = 0; i <= NET_TX_RING_SIZE; i++) {
-+ np->tx_skbs[i] = (void *)((unsigned long) i+1);
-+ np->grant_tx_ref[i] = GRANT_INVALID_REF;
-+ }
-+
-+ for (i = 0; i < NET_RX_RING_SIZE; i++) {
-+ np->rx_skbs[i] = NULL;
-+ np->grant_rx_ref[i] = GRANT_INVALID_REF;
-+ }
-+
-+ /* A grant for every tx ring slot */
-+ if (gnttab_alloc_grant_references(TX_MAX_TARGET,
-+ &np->gref_tx_head) < 0) {
-+ printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
-+ err = -ENOMEM;
-+ goto exit;
-+ }
-+ /* A grant for every rx ring slot */
-+ if (gnttab_alloc_grant_references(RX_MAX_TARGET,
-+ &np->gref_rx_head) < 0) {
-+ printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
-+ err = -ENOMEM;
-+ goto exit_free_tx;
-+ }
-+
-+ netdev->open = network_open;
-+ netdev->hard_start_xmit = network_start_xmit;
-+ netdev->stop = network_close;
-+ netdev->get_stats = network_get_stats;
-+ netdev->poll = netif_poll;
-+ netdev->set_multicast_list = network_set_multicast_list;
-+ netdev->uninit = netif_uninit;
-+ netdev->change_mtu = xennet_change_mtu;
-+ netdev->weight = 64;
-+ netdev->features = NETIF_F_IP_CSUM;
-+
-+ SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
-+ SET_MODULE_OWNER(netdev);
-+ SET_NETDEV_DEV(netdev, &dev->dev);
-+
-+ np->netdev = netdev;
-+
-+ netfront_carrier_off(np);
-+
-+ return netdev;
-+
-+ exit_free_tx:
-+ gnttab_free_grant_references(np->gref_tx_head);
-+ exit:
-+ free_netdev(netdev);
-+ return ERR_PTR(err);
-+}
-+
-+/*
-+ * We use this notifier to send out a fake ARP reply so that switches and
-+ * routers refresh their ARP caches when an IP interface is brought up on
-+ * a VIF.
-+ */
-+static int
-+inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
-+{
-+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
-+ struct net_device *dev = ifa->ifa_dev->dev;
-+
-+ /* UP event and is it one of our devices? */
-+ if (event == NETDEV_UP && dev->open == network_open)
-+ (void)send_fake_arp(dev);
-+
-+ return NOTIFY_DONE;
-+}
-+
-+
-+static void netif_disconnect_backend(struct netfront_info *info)
-+{
-+ /* Stop old i/f to prevent errors whilst we rebuild the state. */
-+ spin_lock_bh(&info->rx_lock);
-+ spin_lock_irq(&info->tx_lock);
-+ netfront_carrier_off(info);
-+ spin_unlock_irq(&info->tx_lock);
-+ spin_unlock_bh(&info->rx_lock);
-+
-+ if (info->irq)
-+ unbind_from_irqhandler(info->irq, info->netdev);
-+ info->irq = 0;
-+
-+ end_access(info->tx_ring_ref, info->tx.sring);
-+ end_access(info->rx_ring_ref, info->rx.sring);
-+ info->tx_ring_ref = GRANT_INVALID_REF;
-+ info->rx_ring_ref = GRANT_INVALID_REF;
-+ info->tx.sring = NULL;
-+ info->rx.sring = NULL;
-+}
-+
-+
-+static void end_access(int ref, void *page)
-+{
-+ if (ref != GRANT_INVALID_REF)
-+ gnttab_end_foreign_access(ref, 0, (unsigned long)page);
-+}
-+
-+
-+/* ** Driver registration ** */
-+
-+
-+static struct xenbus_device_id netfront_ids[] = {
-+ { "vif" },
-+ { "" }
-+};
-+
-+
-+static struct xenbus_driver netfront = {
-+ .name = "vif",
-+ .owner = THIS_MODULE,
-+ .ids = netfront_ids,
-+ .probe = netfront_probe,
-+ .remove = __devexit_p(netfront_remove),
-+ .resume = netfront_resume,
-+ .otherend_changed = backend_changed,
-+};
-+
-+
-+static struct notifier_block notifier_inetdev = {
-+ .notifier_call = inetdev_notify,
-+ .next = NULL,
-+ .priority = 0
-+};
-+
-+static int __init netif_init(void)
-+{
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+#ifdef CONFIG_XEN
-+ if (MODPARM_rx_flip && MODPARM_rx_copy) {
-+ WPRINTK("Cannot specify both rx_copy and rx_flip.\n");
-+ return -EINVAL;
-+ }
-+
-+ if (!MODPARM_rx_flip && !MODPARM_rx_copy)
-+ MODPARM_rx_flip = 1; /* Default is to flip. */
-+#endif
-+
-+ if (is_initial_xendomain())
-+ return 0;
-+
-+ IPRINTK("Initialising virtual ethernet driver.\n");
-+
-+ (void)register_inetaddr_notifier(&notifier_inetdev);
-+
-+ return xenbus_register_frontend(&netfront);
-+}
-+module_init(netif_init);
-+
-+
-+static void __exit netif_exit(void)
-+{
-+ if (is_initial_xendomain())
-+ return;
-+
-+ unregister_inetaddr_notifier(&notifier_inetdev);
-+
-+ return xenbus_unregister_driver(&netfront);
-+}
-+module_exit(netif_exit);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/conf_space.c ubuntu-gutsy-xen/drivers/xen/pciback/conf_space.c
---- ubuntu-gutsy/drivers/xen/pciback/conf_space.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/conf_space.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,426 @@
-+/*
-+ * PCI Backend - Functions for creating a virtual configuration space for
-+ * exported PCI Devices.
-+ * It's dangerous to allow PCI Driver Domains to change their
-+ * device's resources (memory, i/o ports, interrupts). We need to
-+ * restrict changes to certain PCI Configuration registers:
-+ * BARs, INTERRUPT_PIN, most registers in the header...
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include "pciback.h"
-+#include "conf_space.h"
-+#include "conf_space_quirks.h"
-+
-+#define DEFINE_PCI_CONFIG(op,size,type) \
-+int pciback_##op##_config_##size \
-+(struct pci_dev *dev, int offset, type value, void *data) \
-+{ \
-+ return pci_##op##_config_##size (dev, offset, value); \
-+}
-+
-+DEFINE_PCI_CONFIG(read, byte, u8 *)
-+DEFINE_PCI_CONFIG(read, word, u16 *)
-+DEFINE_PCI_CONFIG(read, dword, u32 *)
-+
-+DEFINE_PCI_CONFIG(write, byte, u8)
-+DEFINE_PCI_CONFIG(write, word, u16)
-+DEFINE_PCI_CONFIG(write, dword, u32)
-+
-+static int conf_space_read(struct pci_dev *dev,
-+ struct config_field_entry *entry, int offset,
-+ u32 * value)
-+{
-+ int ret = 0;
-+ struct config_field *field = entry->field;
-+
-+ *value = 0;
-+
-+ switch (field->size) {
-+ case 1:
-+ if (field->u.b.read)
-+ ret = field->u.b.read(dev, offset, (u8 *) value,
-+ entry->data);
-+ break;
-+ case 2:
-+ if (field->u.w.read)
-+ ret = field->u.w.read(dev, offset, (u16 *) value,
-+ entry->data);
-+ break;
-+ case 4:
-+ if (field->u.dw.read)
-+ ret = field->u.dw.read(dev, offset, value, entry->data);
-+ break;
-+ }
-+ return ret;
-+}
-+
-+static int conf_space_write(struct pci_dev *dev,
-+ struct config_field_entry *entry, int offset,
-+ u32 value)
-+{
-+ int ret = 0;
-+ struct config_field *field = entry->field;
-+
-+ switch (field->size) {
-+ case 1:
-+ if (field->u.b.write)
-+ ret = field->u.b.write(dev, offset, (u8) value,
-+ entry->data);
-+ break;
-+ case 2:
-+ if (field->u.w.write)
-+ ret = field->u.w.write(dev, offset, (u16) value,
-+ entry->data);
-+ break;
-+ case 4:
-+ if (field->u.dw.write)
-+ ret = field->u.dw.write(dev, offset, value,
-+ entry->data);
-+ break;
-+ }
-+ return ret;
-+}
-+
-+static inline u32 get_mask(int size)
-+{
-+ if (size == 1)
-+ return 0xff;
-+ else if (size == 2)
-+ return 0xffff;
-+ else
-+ return 0xffffffff;
-+}
-+
-+static inline int valid_request(int offset, int size)
-+{
-+ /* Validate request (no un-aligned requests) */
-+ if ((size == 1 || size == 2 || size == 4) && (offset % size) == 0)
-+ return 1;
-+ return 0;
-+}
-+
-+static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask,
-+ int offset)
-+{
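-+ /* Place new_val at byte position 'offset' within val; a negative offset
-+  * means new_val starts before val, so its low-order bytes are dropped. */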
-+ if (offset >= 0) {
-+ new_val_mask <<= (offset * 8);
-+ new_val <<= (offset * 8);
-+ } else {
-+ new_val_mask >>= (offset * -8);
-+ new_val >>= (offset * -8);
-+ }
-+ val = (val & ~new_val_mask) | (new_val & new_val_mask);
-+
-+ return val;
-+}
-+
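-+/* Map PCIBIOS_* return codes onto the XEN_PCI_ERR_* values shared with the
-+ * frontend; unrecognised codes are passed through unchanged. */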
-+static int pcibios_err_to_errno(int err)
-+{
-+ switch (err) {
-+ case PCIBIOS_SUCCESSFUL:
-+ return XEN_PCI_ERR_success;
-+ case PCIBIOS_DEVICE_NOT_FOUND:
-+ return XEN_PCI_ERR_dev_not_found;
-+ case PCIBIOS_BAD_REGISTER_NUMBER:
-+ return XEN_PCI_ERR_invalid_offset;
-+ case PCIBIOS_FUNC_NOT_SUPPORTED:
-+ return XEN_PCI_ERR_not_implemented;
-+ case PCIBIOS_SET_FAILED:
-+ return XEN_PCI_ERR_access_denied;
-+ }
-+ return err;
-+}
-+
-+int pciback_config_read(struct pci_dev *dev, int offset, int size,
-+ u32 * ret_val)
-+{
-+ int err = 0;
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ struct config_field_entry *cfg_entry;
-+ struct config_field *field;
-+ int req_start, req_end, field_start, field_end;
-+ /* if read fails for any reason, return 0 (as if device didn't respond) */
-+ u32 value = 0, tmp_val;
-+
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x\n",
-+ pci_name(dev), size, offset);
-+
-+ if (!valid_request(offset, size)) {
-+ err = XEN_PCI_ERR_invalid_offset;
-+ goto out;
-+ }
-+
-+ /* Get the real value first, then modify as appropriate */
-+ switch (size) {
-+ case 1:
-+ err = pci_read_config_byte(dev, offset, (u8 *) & value);
-+ break;
-+ case 2:
-+ err = pci_read_config_word(dev, offset, (u16 *) & value);
-+ break;
-+ case 4:
-+ err = pci_read_config_dword(dev, offset, &value);
-+ break;
-+ }
-+
-+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+ field = cfg_entry->field;
-+
-+ req_start = offset;
-+ req_end = offset + size;
-+ field_start = OFFSET(cfg_entry);
-+ field_end = OFFSET(cfg_entry) + field->size;
-+
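-+ /* Overlay the virtual value only where this field overlaps the request. */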
-+ if ((req_start >= field_start && req_start < field_end)
-+ || (req_end > field_start && req_end <= field_end)) {
-+ err = conf_space_read(dev, cfg_entry, field_start,
-+ &tmp_val);
-+ if (err)
-+ goto out;
-+
-+ value = merge_value(value, tmp_val,
-+ get_mask(field->size),
-+ field_start - req_start);
-+ }
-+ }
-+
-+ out:
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x = %x\n",
-+ pci_name(dev), size, offset, value);
-+
-+ *ret_val = value;
-+ return pcibios_err_to_errno(err);
-+}
-+
-+int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
-+{
-+ int err = 0, handled = 0;
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ struct config_field_entry *cfg_entry;
-+ struct config_field *field;
-+ u32 tmp_val;
-+ int req_start, req_end, field_start, field_end;
-+
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG
-+ "pciback: %s: write request %d bytes at 0x%x = %x\n",
-+ pci_name(dev), size, offset, value);
-+
-+ if (!valid_request(offset, size))
-+ return XEN_PCI_ERR_invalid_offset;
-+
-+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+ field = cfg_entry->field;
-+
-+ req_start = offset;
-+ req_end = offset + size;
-+ field_start = OFFSET(cfg_entry);
-+ field_end = OFFSET(cfg_entry) + field->size;
-+
-+ if ((req_start >= field_start && req_start < field_end)
-+ || (req_end > field_start && req_end <= field_end)) {
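-+ /* Read-modify-write: fetch the whole virtual field, merge in the
-+  * bytes being written, then pass the result to the field handler. */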
-+ tmp_val = 0;
-+
-+ err = pciback_config_read(dev, field_start,
-+ field->size, &tmp_val);
-+ if (err)
-+ break;
-+
-+ tmp_val = merge_value(tmp_val, value, get_mask(size),
-+ req_start - field_start);
-+
-+ err = conf_space_write(dev, cfg_entry, field_start,
-+ tmp_val);
-+
-+ /* handled is set true here, but not every byte
-+ * may have been written! Properly detecting if
-+ * every byte is handled is unnecessary as the
-+ * flag is used to detect devices that need
-+ * special helpers to work correctly.
-+ */
-+ handled = 1;
-+ }
-+ }
-+
-+ if (!handled && !err) {
-+ /* By default, anything not specifically handled above is
-+ * read-only. The permissive flag changes this behavior so
-+ * that anything not specifically handled above is writable.
-+ * This means that some fields may still be read-only because
-+ * they have entries in the config_field list that intercept
-+ * the write and do nothing. */
-+ if (dev_data->permissive) {
-+ switch (size) {
-+ case 1:
-+ err = pci_write_config_byte(dev, offset,
-+ (u8) value);
-+ break;
-+ case 2:
-+ err = pci_write_config_word(dev, offset,
-+ (u16) value);
-+ break;
-+ case 4:
-+ err = pci_write_config_dword(dev, offset,
-+ (u32) value);
-+ break;
-+ }
-+ } else if (!dev_data->warned_on_write) {
-+ dev_data->warned_on_write = 1;
-+ dev_warn(&dev->dev, "Driver tried to write to a "
-+ "read-only configuration space field at offset "
-+ "0x%x, size %d. This may be harmless, but if "
-+ "you have problems with your device:\n"
-+ "1) see permissive attribute in sysfs\n"
-+ "2) report problems to the xen-devel "
-+ "mailing list along with details of your "
-+ "device obtained from lspci.\n", offset, size);
-+ }
-+ }
-+
-+ return pcibios_err_to_errno(err);
-+}
-+
-+void pciback_config_free_dyn_fields(struct pci_dev *dev)
-+{
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ struct config_field_entry *cfg_entry, *t;
-+ struct config_field *field;
-+
-+ dev_dbg(&dev->dev,
-+ "freeing dynamically allocated virtual configuration space fields\n");
-+
-+ list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
-+ field = cfg_entry->field;
-+
-+ if (field->clean) {
-+ field->clean(field);
-+
-+ if (cfg_entry->data)
-+ kfree(cfg_entry->data);
-+
-+ list_del(&cfg_entry->list);
-+ kfree(cfg_entry);
-+ }
-+
-+ }
-+}
-+
-+void pciback_config_reset_dev(struct pci_dev *dev)
-+{
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ struct config_field_entry *cfg_entry;
-+ struct config_field *field;
-+
-+ dev_dbg(&dev->dev, "resetting virtual configuration space\n");
-+
-+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+ field = cfg_entry->field;
-+
-+ if (field->reset)
-+ field->reset(dev, OFFSET(cfg_entry), cfg_entry->data);
-+ }
-+}
-+
-+void pciback_config_free_dev(struct pci_dev *dev)
-+{
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ struct config_field_entry *cfg_entry, *t;
-+ struct config_field *field;
-+
-+ dev_dbg(&dev->dev, "freeing virtual configuration space fields\n");
-+
-+ list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
-+ list_del(&cfg_entry->list);
-+
-+ field = cfg_entry->field;
-+
-+ if (field->release)
-+ field->release(dev, OFFSET(cfg_entry), cfg_entry->data);
-+
-+ kfree(cfg_entry);
-+ }
-+}
-+
-+int pciback_config_add_field_offset(struct pci_dev *dev,
-+ struct config_field *field,
-+ unsigned int base_offset)
-+{
-+ int err = 0;
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ struct config_field_entry *cfg_entry;
-+ void *tmp;
-+
-+ cfg_entry = kmalloc(sizeof(*cfg_entry), GFP_KERNEL);
-+ if (!cfg_entry) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ cfg_entry->data = NULL;
-+ cfg_entry->field = field;
-+ cfg_entry->base_offset = base_offset;
-+
-+ /* silently ignore duplicate fields */
-+ err = pciback_field_is_dup(dev, OFFSET(cfg_entry));
-+ if (err)
-+ goto out;
-+
-+ if (field->init) {
-+ tmp = field->init(dev, OFFSET(cfg_entry));
-+
-+ if (IS_ERR(tmp)) {
-+ err = PTR_ERR(tmp);
-+ goto out;
-+ }
-+
-+ cfg_entry->data = tmp;
-+ }
-+
-+ dev_dbg(&dev->dev, "added config field at offset 0x%02x\n",
-+ OFFSET(cfg_entry));
-+ list_add_tail(&cfg_entry->list, &dev_data->config_fields);
-+
-+ out:
-+ if (err)
-+ kfree(cfg_entry);
-+
-+ return err;
-+}
-+
-+/* This sets up the device's virtual configuration space to keep track of
-+ * certain registers (like the base address registers (BARs)) so that we can
-+ * keep the client from manipulating them directly.
-+ */
-+int pciback_config_init_dev(struct pci_dev *dev)
-+{
-+ int err = 0;
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+
-+ dev_dbg(&dev->dev, "initializing virtual configuration space\n");
-+
-+ INIT_LIST_HEAD(&dev_data->config_fields);
-+
-+ err = pciback_config_header_add_fields(dev);
-+ if (err)
-+ goto out;
-+
-+ err = pciback_config_capability_add_fields(dev);
-+ if (err)
-+ goto out;
-+
-+ err = pciback_config_quirks_init(dev);
-+
-+ out:
-+ return err;
-+}
-+
-+int pciback_config_init(void)
-+{
-+ return pciback_config_capability_init();
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/conf_space_capability.c ubuntu-gutsy-xen/drivers/xen/pciback/conf_space_capability.c
---- ubuntu-gutsy/drivers/xen/pciback/conf_space_capability.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/conf_space_capability.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,71 @@
-+/*
-+ * PCI Backend - Handles the virtual fields found on the capability lists
-+ * in the configuration space.
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include "pciback.h"
-+#include "conf_space.h"
-+#include "conf_space_capability.h"
-+
-+static LIST_HEAD(capabilities);
-+
-+static struct config_field caplist_header[] = {
-+ {
-+ .offset = PCI_CAP_LIST_ID,
-+ .size = 2, /* encompass PCI_CAP_LIST_ID & PCI_CAP_LIST_NEXT */
-+ .u.w.read = pciback_read_config_word,
-+ .u.w.write = NULL,
-+ },
-+ {
-+ .size = 0,
-+ },
-+};
-+
-+static inline void register_capability(struct pciback_config_capability *cap)
-+{
-+ list_add_tail(&cap->cap_list, &capabilities);
-+}
-+
-+int pciback_config_capability_add_fields(struct pci_dev *dev)
-+{
-+ int err = 0;
-+ struct pciback_config_capability *cap;
-+ int cap_offset;
-+
-+ list_for_each_entry(cap, &capabilities, cap_list) {
-+ cap_offset = pci_find_capability(dev, cap->capability);
-+ if (cap_offset) {
-+ dev_dbg(&dev->dev, "Found capability 0x%x at 0x%x\n",
-+ cap->capability, cap_offset);
-+
-+ err = pciback_config_add_fields_offset(dev,
-+ caplist_header,
-+ cap_offset);
-+ if (err)
-+ goto out;
-+ err = pciback_config_add_fields_offset(dev,
-+ cap->fields,
-+ cap_offset);
-+ if (err)
-+ goto out;
-+ }
-+ }
-+
-+ out:
-+ return err;
-+}
-+
-+extern struct pciback_config_capability pciback_config_capability_vpd;
-+extern struct pciback_config_capability pciback_config_capability_pm;
-+
-+int pciback_config_capability_init(void)
-+{
-+ register_capability(&pciback_config_capability_vpd);
-+ register_capability(&pciback_config_capability_pm);
-+
-+ return 0;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/conf_space_capability.h ubuntu-gutsy-xen/drivers/xen/pciback/conf_space_capability.h
---- ubuntu-gutsy/drivers/xen/pciback/conf_space_capability.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/conf_space_capability.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,23 @@
-+/*
-+ * PCI Backend - Data structures for special overlays for structures on
-+ * the capability list.
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+
-+#ifndef __PCIBACK_CONFIG_CAPABILITY_H__
-+#define __PCIBACK_CONFIG_CAPABILITY_H__
-+
-+#include <linux/pci.h>
-+#include <linux/list.h>
-+
-+struct pciback_config_capability {
-+ struct list_head cap_list;
-+
-+ int capability;
-+
-+ /* If the device has the capability found above, add these fields */
-+ struct config_field *fields;
-+};
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/conf_space_capability_pm.c ubuntu-gutsy-xen/drivers/xen/pciback/conf_space_capability_pm.c
---- ubuntu-gutsy/drivers/xen/pciback/conf_space_capability_pm.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/conf_space_capability_pm.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,128 @@
-+/*
-+ * PCI Backend - Configuration space overlay for power management
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+
-+#include <linux/pci.h>
-+#include "conf_space.h"
-+#include "conf_space_capability.h"
-+
-+static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value,
-+ void *data)
-+{
-+ int err;
-+ u16 real_value;
-+
-+ err = pci_read_config_word(dev, offset, &real_value);
-+ if (err)
-+ goto out;
-+
-+ *value = real_value & ~PCI_PM_CAP_PME_MASK;
-+
-+ out:
-+ return err;
-+}
-+
-+/* PM_OK_BITS specifies the bits that the driver domain is allowed to change.
-+ * Can't allow driver domain to enable PMEs - they're shared */
-+#define PM_OK_BITS (PCI_PM_CTRL_PME_STATUS|PCI_PM_CTRL_DATA_SEL_MASK)
-+
-+static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
-+ void *data)
-+{
-+ int err;
-+ u16 old_value;
-+ pci_power_t new_state, old_state;
-+
-+ err = pci_read_config_word(dev, offset, &old_value);
-+ if (err)
-+ goto out;
-+
-+ old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
-+ new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
-+
-+ new_value &= PM_OK_BITS;
-+ if ((old_value & PM_OK_BITS) != new_value) {
-+ new_value = (old_value & ~PM_OK_BITS) | new_value;
-+ err = pci_write_config_word(dev, offset, new_value);
-+ if (err)
-+ goto out;
-+ }
-+
-+ /* Let pci core handle the power management change */
-+ dev_dbg(&dev->dev, "set power state to %x\n", new_state);
-+ err = pci_set_power_state(dev, new_state);
-+ if (err) {
-+ err = PCIBIOS_SET_FAILED;
-+ goto out;
-+ }
-+
-+ /*
-+ * Device may lose PCI config info on D3->D0 transition. This
-+ * is a problem for some guests which will not reset BARs. Even
-+ * those that have a go will be foiled by our BAR-write handler
-+ * which will discard the write! Since Linux won't re-init
-+ * the config space automatically in all cases, we do it here.
-+ * Future: Should we re-initialise the first 64 bytes of config space?
-+ */
-+ if (new_state == PCI_D0 &&
-+ (old_state == PCI_D3hot || old_state == PCI_D3cold) &&
-+ !(old_value & PCI_PM_CTRL_NO_SOFT_RESET))
-+ pci_restore_bars(dev);
-+
-+ out:
-+ return err;
-+}
-+
-+/* Ensure PMEs are disabled */
-+static void *pm_ctrl_init(struct pci_dev *dev, int offset)
-+{
-+ int err;
-+ u16 value;
-+
-+ err = pci_read_config_word(dev, offset, &value);
-+ if (err)
-+ goto out;
-+
-+ if (value & PCI_PM_CTRL_PME_ENABLE) {
-+ value &= ~PCI_PM_CTRL_PME_ENABLE;
-+ err = pci_write_config_word(dev, offset, value);
-+ }
-+
-+ out:
-+ return ERR_PTR(err);
-+}
-+
-+static struct config_field caplist_pm[] = {
-+ {
-+ .offset = PCI_PM_PMC,
-+ .size = 2,
-+ .u.w.read = pm_caps_read,
-+ },
-+ {
-+ .offset = PCI_PM_CTRL,
-+ .size = 2,
-+ .init = pm_ctrl_init,
-+ .u.w.read = pciback_read_config_word,
-+ .u.w.write = pm_ctrl_write,
-+ },
-+ {
-+ .offset = PCI_PM_PPB_EXTENSIONS,
-+ .size = 1,
-+ .u.b.read = pciback_read_config_byte,
-+ },
-+ {
-+ .offset = PCI_PM_DATA_REGISTER,
-+ .size = 1,
-+ .u.b.read = pciback_read_config_byte,
-+ },
-+ {
-+ .size = 0,
-+ },
-+};
-+
-+struct pciback_config_capability pciback_config_capability_pm = {
-+ .capability = PCI_CAP_ID_PM,
-+ .fields = caplist_pm,
-+};
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/conf_space_capability_vpd.c ubuntu-gutsy-xen/drivers/xen/pciback/conf_space_capability_vpd.c
---- ubuntu-gutsy/drivers/xen/pciback/conf_space_capability_vpd.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/conf_space_capability_vpd.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,42 @@
-+/*
-+ * PCI Backend - Configuration space overlay for Vital Product Data
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+
-+#include <linux/pci.h>
-+#include "conf_space.h"
-+#include "conf_space_capability.h"
-+
-+static int vpd_address_write(struct pci_dev *dev, int offset, u16 value,
-+ void *data)
-+{
-+ /* Disallow writes to the vital product data */
-+ if (value & PCI_VPD_ADDR_F)
-+ return PCIBIOS_SET_FAILED;
-+ else
-+ return pci_write_config_word(dev, offset, value);
-+}
-+
-+static struct config_field caplist_vpd[] = {
-+ {
-+ .offset = PCI_VPD_ADDR,
-+ .size = 2,
-+ .u.w.read = pciback_read_config_word,
-+ .u.w.write = vpd_address_write,
-+ },
-+ {
-+ .offset = PCI_VPD_DATA,
-+ .size = 4,
-+ .u.dw.read = pciback_read_config_dword,
-+ .u.dw.write = NULL,
-+ },
-+ {
-+ .size = 0,
-+ },
-+};
-+
-+struct pciback_config_capability pciback_config_capability_vpd = {
-+ .capability = PCI_CAP_ID_VPD,
-+ .fields = caplist_vpd,
-+};
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/conf_space.h ubuntu-gutsy-xen/drivers/xen/pciback/conf_space.h
---- ubuntu-gutsy/drivers/xen/pciback/conf_space.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/conf_space.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,126 @@
-+/*
-+ * PCI Backend - Common data structures for overriding the configuration space
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+
-+#ifndef __XEN_PCIBACK_CONF_SPACE_H__
-+#define __XEN_PCIBACK_CONF_SPACE_H__
-+
-+#include <linux/list.h>
-+#include <linux/err.h>
-+
-+/* conf_field_init can return an errno in a ptr with ERR_PTR() */
-+typedef void *(*conf_field_init) (struct pci_dev * dev, int offset);
-+typedef void (*conf_field_reset) (struct pci_dev * dev, int offset, void *data);
-+typedef void (*conf_field_free) (struct pci_dev * dev, int offset, void *data);
-+
-+typedef int (*conf_dword_write) (struct pci_dev * dev, int offset, u32 value,
-+ void *data);
-+typedef int (*conf_word_write) (struct pci_dev * dev, int offset, u16 value,
-+ void *data);
-+typedef int (*conf_byte_write) (struct pci_dev * dev, int offset, u8 value,
-+ void *data);
-+typedef int (*conf_dword_read) (struct pci_dev * dev, int offset, u32 * value,
-+ void *data);
-+typedef int (*conf_word_read) (struct pci_dev * dev, int offset, u16 * value,
-+ void *data);
-+typedef int (*conf_byte_read) (struct pci_dev * dev, int offset, u8 * value,
-+ void *data);
-+
-+/* These are the fields within the configuration space which we
-+ * are interested in intercepting reads/writes to and changing their
-+ * values.
-+ */
-+struct config_field {
-+ unsigned int offset;
-+ unsigned int size;
-+ unsigned int mask;
-+ conf_field_init init;
-+ conf_field_reset reset;
-+ conf_field_free release;
-+ void (*clean) (struct config_field * field);
-+ union {
-+ struct {
-+ conf_dword_write write;
-+ conf_dword_read read;
-+ } dw;
-+ struct {
-+ conf_word_write write;
-+ conf_word_read read;
-+ } w;
-+ struct {
-+ conf_byte_write write;
-+ conf_byte_read read;
-+ } b;
-+ } u;
-+ struct list_head list;
-+};
-+
-+struct config_field_entry {
-+ struct list_head list;
-+ struct config_field *field;
-+ unsigned int base_offset;
-+ void *data;
-+};
-+
-+#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
-+
-+/* Add fields to a device - the add_fields helpers expect a pointer to the
-+ * first entry in an array (whose end is marked by an entry with size==0)
-+ */
-+int pciback_config_add_field_offset(struct pci_dev *dev,
-+ struct config_field *field,
-+ unsigned int offset);
-+
-+static inline int pciback_config_add_field(struct pci_dev *dev,
-+ struct config_field *field)
-+{
-+ return pciback_config_add_field_offset(dev, field, 0);
-+}
-+
-+static inline int pciback_config_add_fields(struct pci_dev *dev,
-+ struct config_field *field)
-+{
-+ int i, err = 0;
-+ for (i = 0; field[i].size != 0; i++) {
-+ err = pciback_config_add_field(dev, &field[i]);
-+ if (err)
-+ break;
-+ }
-+ return err;
-+}
-+
-+static inline int pciback_config_add_fields_offset(struct pci_dev *dev,
-+ struct config_field *field,
-+ unsigned int offset)
-+{
-+ int i, err = 0;
-+ for (i = 0; field[i].size != 0; i++) {
-+ err = pciback_config_add_field_offset(dev, &field[i], offset);
-+ if (err)
-+ break;
-+ }
-+ return err;
-+}
-+
-+/* Read/Write the real configuration space */
-+int pciback_read_config_byte(struct pci_dev *dev, int offset, u8 * value,
-+ void *data);
-+int pciback_read_config_word(struct pci_dev *dev, int offset, u16 * value,
-+ void *data);
-+int pciback_read_config_dword(struct pci_dev *dev, int offset, u32 * value,
-+ void *data);
-+int pciback_write_config_byte(struct pci_dev *dev, int offset, u8 value,
-+ void *data);
-+int pciback_write_config_word(struct pci_dev *dev, int offset, u16 value,
-+ void *data);
-+int pciback_write_config_dword(struct pci_dev *dev, int offset, u32 value,
-+ void *data);
-+
-+int pciback_config_capability_init(void);
-+
-+int pciback_config_header_add_fields(struct pci_dev *dev);
-+int pciback_config_capability_add_fields(struct pci_dev *dev);
-+
-+#endif /* __XEN_PCIBACK_CONF_SPACE_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/conf_space_header.c ubuntu-gutsy-xen/drivers/xen/pciback/conf_space_header.c
---- ubuntu-gutsy/drivers/xen/pciback/conf_space_header.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/conf_space_header.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,309 @@
-+/*
-+ * PCI Backend - Handles the virtual fields in the configuration space headers.
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include "pciback.h"
-+#include "conf_space.h"
-+
-+struct pci_bar_info {
-+ u32 val;
-+ u32 len_val;
-+ int which;
-+};
-+
-+#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
-+#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
-+
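-+/* Virtualised PCI_COMMAND write: apply enable/disable, bus-master and MWI
-+ * side effects through the PCI core before committing the value. */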
-+static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
-+{
-+ int err;
-+
-+ if (!atomic_read(&dev->enable_cnt) && is_enable_cmd(value)) {
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: enable\n",
-+ pci_name(dev));
-+ err = pci_enable_device(dev);
-+ if (err)
-+ return err;
-+ } else if (atomic_read(&dev->enable_cnt) && !is_enable_cmd(value)) {
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: disable\n",
-+ pci_name(dev));
-+ pci_disable_device(dev);
-+ }
-+
-+ if (!dev->is_busmaster && is_master_cmd(value)) {
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: set bus master\n",
-+ pci_name(dev));
-+ pci_set_master(dev);
-+ }
-+
-+ if (value & PCI_COMMAND_INVALIDATE) {
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG
-+ "pciback: %s: enable memory-write-invalidate\n",
-+ pci_name(dev));
-+ err = pci_set_mwi(dev);
-+ if (err) {
-+ printk(KERN_WARNING
-+ "pciback: %s: cannot enable memory-write-invalidate (%d)\n",
-+ pci_name(dev), err);
-+ value &= ~PCI_COMMAND_INVALIDATE;
-+ }
-+ }
-+
-+ return pci_write_config_word(dev, offset, value);
-+}
-+
-+static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
-+{
-+ struct pci_bar_info *bar = data;
-+
-+ if (unlikely(!bar)) {
-+ printk(KERN_WARNING "pciback: driver data not found for %s\n",
-+ pci_name(dev));
-+ return XEN_PCI_ERR_op_failed;
-+ }
-+
-+ /* A write to obtain the length must happen as a 32-bit write.
-+ * This does not (yet) support writing individual bytes
-+ */
-+ if (value == ~PCI_ROM_ADDRESS_ENABLE)
-+ bar->which = 1;
-+ else
-+ bar->which = 0;
-+
-+ /* Do we need to support enabling/disabling the rom address here? */
-+
-+ return 0;
-+}
-+
-+/* For the BARs, only allow writes which write ~0 or
-+ * the correct resource information
-+ * (Needed for when the driver probes the resource usage)
-+ */
-+static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
-+{
-+ struct pci_bar_info *bar = data;
-+
-+ if (unlikely(!bar)) {
-+ printk(KERN_WARNING "pciback: driver data not found for %s\n",
-+ pci_name(dev));
-+ return XEN_PCI_ERR_op_failed;
-+ }
-+
-+ /* A write to obtain the length must happen as a 32-bit write.
-+ * This does not (yet) support writing individual bytes
-+ */
-+ if (value == ~0)
-+ bar->which = 1;
-+ else
-+ bar->which = 0;
-+
-+ return 0;
-+}
-+
-+static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data)
-+{
-+ struct pci_bar_info *bar = data;
-+
-+ if (unlikely(!bar)) {
-+ printk(KERN_WARNING "pciback: driver data not found for %s\n",
-+ pci_name(dev));
-+ return XEN_PCI_ERR_op_failed;
-+ }
-+
-+ *value = bar->which ? bar->len_val : bar->val;
-+
-+ return 0;
-+}
-+
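-+/* Standard BAR sizing sequence: save the BAR, write the all-ones mask,
-+ * read back the size-probe value, then restore the original contents. */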
-+static inline void read_dev_bar(struct pci_dev *dev,
-+ struct pci_bar_info *bar_info, int offset,
-+ u32 len_mask)
-+{
-+ pci_read_config_dword(dev, offset, &bar_info->val);
-+ pci_write_config_dword(dev, offset, len_mask);
-+ pci_read_config_dword(dev, offset, &bar_info->len_val);
-+ pci_write_config_dword(dev, offset, bar_info->val);
-+}
-+
-+static void *bar_init(struct pci_dev *dev, int offset)
-+{
-+ struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
-+
-+ if (!bar)
-+ return ERR_PTR(-ENOMEM);
-+
-+ read_dev_bar(dev, bar, offset, ~0);
-+ bar->which = 0;
-+
-+ return bar;
-+}
-+
-+static void *rom_init(struct pci_dev *dev, int offset)
-+{
-+ struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
-+
-+ if (!bar)
-+ return ERR_PTR(-ENOMEM);
-+
-+ read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
-+ bar->which = 0;
-+
-+ return bar;
-+}
-+
-+static void bar_reset(struct pci_dev *dev, int offset, void *data)
-+{
-+ struct pci_bar_info *bar = data;
-+
-+ bar->which = 0;
-+}
-+
-+static void bar_release(struct pci_dev *dev, int offset, void *data)
-+{
-+ kfree(data);
-+}
-+
-+static int interrupt_read(struct pci_dev *dev, int offset, u8 * value,
-+ void *data)
-+{
-+ *value = (u8) dev->irq;
-+
-+ return 0;
-+}
-+
-+static int bist_write(struct pci_dev *dev, int offset, u8 value, void *data)
-+{
-+ u8 cur_value;
-+ int err;
-+
-+ err = pci_read_config_byte(dev, offset, &cur_value);
-+ if (err)
-+ goto out;
-+
-+ if ((cur_value & ~PCI_BIST_START) == (value & ~PCI_BIST_START)
-+ || value == PCI_BIST_START)
-+ err = pci_write_config_byte(dev, offset, value);
-+
-+ out:
-+ return err;
-+}
-+
-+static struct config_field header_common[] = {
-+ {
-+ .offset = PCI_COMMAND,
-+ .size = 2,
-+ .u.w.read = pciback_read_config_word,
-+ .u.w.write = command_write,
-+ },
-+ {
-+ .offset = PCI_INTERRUPT_LINE,
-+ .size = 1,
-+ .u.b.read = interrupt_read,
-+ },
-+ {
-+ .offset = PCI_INTERRUPT_PIN,
-+ .size = 1,
-+ .u.b.read = pciback_read_config_byte,
-+ },
-+ {
-+ /* Any side effects of letting driver domain control cache line? */
-+ .offset = PCI_CACHE_LINE_SIZE,
-+ .size = 1,
-+ .u.b.read = pciback_read_config_byte,
-+ .u.b.write = pciback_write_config_byte,
-+ },
-+ {
-+ .offset = PCI_LATENCY_TIMER,
-+ .size = 1,
-+ .u.b.read = pciback_read_config_byte,
-+ },
-+ {
-+ .offset = PCI_BIST,
-+ .size = 1,
-+ .u.b.read = pciback_read_config_byte,
-+ .u.b.write = bist_write,
-+ },
-+ {
-+ .size = 0,
-+ },
-+};
-+
-+#define CFG_FIELD_BAR(reg_offset) \
-+ { \
-+ .offset = reg_offset, \
-+ .size = 4, \
-+ .init = bar_init, \
-+ .reset = bar_reset, \
-+ .release = bar_release, \
-+ .u.dw.read = bar_read, \
-+ .u.dw.write = bar_write, \
-+ }
-+
-+#define CFG_FIELD_ROM(reg_offset) \
-+ { \
-+ .offset = reg_offset, \
-+ .size = 4, \
-+ .init = rom_init, \
-+ .reset = bar_reset, \
-+ .release = bar_release, \
-+ .u.dw.read = bar_read, \
-+ .u.dw.write = rom_write, \
-+ }
-+
-+static struct config_field header_0[] = {
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_2),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_3),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_4),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_5),
-+ CFG_FIELD_ROM(PCI_ROM_ADDRESS),
-+ {
-+ .size = 0,
-+ },
-+};
-+
-+static struct config_field header_1[] = {
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
-+ CFG_FIELD_ROM(PCI_ROM_ADDRESS1),
-+ {
-+ .size = 0,
-+ },
-+};
-+
-+int pciback_config_header_add_fields(struct pci_dev *dev)
-+{
-+ int err;
-+
-+ err = pciback_config_add_fields(dev, header_common);
-+ if (err)
-+ goto out;
-+
-+ switch (dev->hdr_type) {
-+ case PCI_HEADER_TYPE_NORMAL:
-+ err = pciback_config_add_fields(dev, header_0);
-+ break;
-+
-+ case PCI_HEADER_TYPE_BRIDGE:
-+ err = pciback_config_add_fields(dev, header_1);
-+ break;
-+
-+ default:
-+ err = -EINVAL;
-+ printk(KERN_ERR "pciback: %s: Unsupported header type %d!\n",
-+ pci_name(dev), dev->hdr_type);
-+ break;
-+ }
-+
-+ out:
-+ return err;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/conf_space_quirks.c ubuntu-gutsy-xen/drivers/xen/pciback/conf_space_quirks.c
---- ubuntu-gutsy/drivers/xen/pciback/conf_space_quirks.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/conf_space_quirks.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,126 @@
-+/*
-+ * PCI Backend - Handle special overlays for broken devices.
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ * Author: Chris Bookholt <hap10@epoch.ncsc.mil>
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include "pciback.h"
-+#include "conf_space.h"
-+#include "conf_space_quirks.h"
-+
-+LIST_HEAD(pciback_quirks);
-+
-+struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev)
-+{
-+ struct pciback_config_quirk *tmp_quirk;
-+
-+ list_for_each_entry(tmp_quirk, &pciback_quirks, quirks_list)
-+ if (pci_match_id(&tmp_quirk->devid, dev))
-+ goto out;
-+ tmp_quirk = NULL;
-+ printk(KERN_DEBUG
-+ "quirk didn't match any device pciback knows about\n");
-+ out:
-+ return tmp_quirk;
-+}
-+
-+static inline void register_quirk(struct pciback_config_quirk *quirk)
-+{
-+ list_add_tail(&quirk->quirks_list, &pciback_quirks);
-+}
-+
-+int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg)
-+{
-+ int ret = 0;
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ struct config_field_entry *cfg_entry;
-+
-+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+ if (OFFSET(cfg_entry) == reg) {
-+ ret = 1;
-+ break;
-+ }
-+ }
-+ return ret;
-+}
-+
-+int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
-+ *field)
-+{
-+ int err = 0;
-+
-+ switch (field->size) {
-+ case 1:
-+ field->u.b.read = pciback_read_config_byte;
-+ field->u.b.write = pciback_write_config_byte;
-+ break;
-+ case 2:
-+ field->u.w.read = pciback_read_config_word;
-+ field->u.w.write = pciback_write_config_word;
-+ break;
-+ case 4:
-+ field->u.dw.read = pciback_read_config_dword;
-+ field->u.dw.write = pciback_write_config_dword;
-+ break;
-+ default:
-+ err = -EINVAL;
-+ goto out;
-+ }
-+
-+ pciback_config_add_field(dev, field);
-+
-+ out:
-+ return err;
-+}
-+
-+int pciback_config_quirks_init(struct pci_dev *dev)
-+{
-+ struct pciback_config_quirk *quirk;
-+ int ret = 0;
-+
-+ quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC);
-+ if (!quirk) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ quirk->devid.vendor = dev->vendor;
-+ quirk->devid.device = dev->device;
-+ quirk->devid.subvendor = dev->subsystem_vendor;
-+ quirk->devid.subdevice = dev->subsystem_device;
-+ quirk->devid.class = 0;
-+ quirk->devid.class_mask = 0;
-+ quirk->devid.driver_data = 0UL;
-+
-+ quirk->pdev = dev;
-+
-+ register_quirk(quirk);
-+ out:
-+ return ret;
-+}
-+
-+void pciback_config_field_free(struct config_field *field)
-+{
-+ kfree(field);
-+}
-+
-+int pciback_config_quirk_release(struct pci_dev *dev)
-+{
-+ struct pciback_config_quirk *quirk;
-+ int ret = 0;
-+
-+ quirk = pciback_find_quirk(dev);
-+ if (!quirk) {
-+ ret = -ENXIO;
-+ goto out;
-+ }
-+
-+ list_del(&quirk->quirks_list);
-+ kfree(quirk);
-+
-+ out:
-+ return ret;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/conf_space_quirks.h ubuntu-gutsy-xen/drivers/xen/pciback/conf_space_quirks.h
---- ubuntu-gutsy/drivers/xen/pciback/conf_space_quirks.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/conf_space_quirks.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,35 @@
-+/*
-+ * PCI Backend - Data structures for special overlays for broken devices.
-+ *
-+ * Ryan Wilson <hap9@epoch.ncsc.mil>
-+ * Chris Bookholt <hap10@epoch.ncsc.mil>
-+ */
-+
-+#ifndef __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
-+#define __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
-+
-+#include <linux/pci.h>
-+#include <linux/list.h>
-+
-+struct pciback_config_quirk {
-+ struct list_head quirks_list;
-+ struct pci_device_id devid;
-+ struct pci_dev *pdev;
-+};
-+
-+struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev);
-+
-+int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
-+ *field);
-+
-+int pciback_config_quirks_remove_field(struct pci_dev *dev, int reg);
-+
-+int pciback_config_quirks_init(struct pci_dev *dev);
-+
-+void pciback_config_field_free(struct config_field *field);
-+
-+int pciback_config_quirk_release(struct pci_dev *dev);
-+
-+int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg);
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/Makefile ubuntu-gutsy-xen/drivers/xen/pciback/Makefile
---- ubuntu-gutsy/drivers/xen/pciback/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,15 @@
-+obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback.o
-+
-+pciback-y := pci_stub.o pciback_ops.o xenbus.o
-+pciback-y += conf_space.o conf_space_header.o \
-+ conf_space_capability.o \
-+ conf_space_capability_vpd.o \
-+ conf_space_capability_pm.o \
-+ conf_space_quirks.o
-+pciback-$(CONFIG_XEN_PCIDEV_BACKEND_VPCI) += vpci.o
-+pciback-$(CONFIG_XEN_PCIDEV_BACKEND_SLOT) += slot.o
-+pciback-$(CONFIG_XEN_PCIDEV_BACKEND_PASS) += passthrough.o
-+
-+ifeq ($(CONFIG_XEN_PCIDEV_BE_DEBUG),y)
-+EXTRA_CFLAGS += -DDEBUG
-+endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/passthrough.c ubuntu-gutsy-xen/drivers/xen/pciback/passthrough.c
---- ubuntu-gutsy/drivers/xen/pciback/passthrough.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/passthrough.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,157 @@
-+/*
-+ * PCI Backend - Provides restricted access to the real PCI bus topology
-+ * to the frontend
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+
-+#include <linux/list.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include "pciback.h"
-+
-+struct passthrough_dev_data {
-+ /* Access to dev_list must be protected by lock */
-+ struct list_head dev_list;
-+ spinlock_t lock;
-+};
-+
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus,
-+ unsigned int devfn)
-+{
-+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+ struct pci_dev_entry *dev_entry;
-+ struct pci_dev *dev = NULL;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&dev_data->lock, flags);
-+
-+ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
-+ if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
-+ && bus == (unsigned int)dev_entry->dev->bus->number
-+ && devfn == dev_entry->dev->devfn) {
-+ dev = dev_entry->dev;
-+ break;
-+ }
-+ }
-+
-+ spin_unlock_irqrestore(&dev_data->lock, flags);
-+
-+ return dev;
-+}
-+
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
-+{
-+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+ struct pci_dev_entry *dev_entry;
-+ unsigned long flags;
-+
-+ dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
-+ if (!dev_entry)
-+ return -ENOMEM;
-+ dev_entry->dev = dev;
-+
-+ spin_lock_irqsave(&dev_data->lock, flags);
-+ list_add_tail(&dev_entry->list, &dev_data->dev_list);
-+ spin_unlock_irqrestore(&dev_data->lock, flags);
-+
-+ return 0;
-+}
-+
-+void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
-+{
-+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+ struct pci_dev_entry *dev_entry, *t;
-+ struct pci_dev *found_dev = NULL;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&dev_data->lock, flags);
-+
-+ list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
-+ if (dev_entry->dev == dev) {
-+ list_del(&dev_entry->list);
-+ found_dev = dev_entry->dev;
-+ kfree(dev_entry);
-+ }
-+ }
-+
-+ spin_unlock_irqrestore(&dev_data->lock, flags);
-+
-+ if (found_dev)
-+ pcistub_put_pci_dev(found_dev);
-+}
-+
-+int pciback_init_devices(struct pciback_device *pdev)
-+{
-+ struct passthrough_dev_data *dev_data;
-+
-+ dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
-+ if (!dev_data)
-+ return -ENOMEM;
-+
-+ spin_lock_init(&dev_data->lock);
-+
-+ INIT_LIST_HEAD(&dev_data->dev_list);
-+
-+ pdev->pci_dev_data = dev_data;
-+
-+ return 0;
-+}
-+
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+ publish_pci_root_cb publish_root_cb)
-+{
-+ int err = 0;
-+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+ struct pci_dev_entry *dev_entry, *e;
-+ struct pci_dev *dev;
-+ int found;
-+ unsigned int domain, bus;
-+
-+ spin_lock(&dev_data->lock);
-+
-+ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
-+ /* Only publish this device as a root if none of its
-+ * parent bridges are exported
-+ */
-+ found = 0;
-+ dev = dev_entry->dev->bus->self;
-+ for (; !found && dev != NULL; dev = dev->bus->self) {
-+ list_for_each_entry(e, &dev_data->dev_list, list) {
-+ if (dev == e->dev) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+ }
-+
-+ domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
-+ bus = (unsigned int)dev_entry->dev->bus->number;
-+
-+ if (!found) {
-+ err = publish_root_cb(pdev, domain, bus);
-+ if (err)
-+ break;
-+ }
-+ }
-+
-+ spin_unlock(&dev_data->lock);
-+
-+ return err;
-+}
-+
-+void pciback_release_devices(struct pciback_device *pdev)
-+{
-+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+ struct pci_dev_entry *dev_entry, *t;
-+
-+ list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
-+ list_del(&dev_entry->list);
-+ pcistub_put_pci_dev(dev_entry->dev);
-+ kfree(dev_entry);
-+ }
-+
-+ kfree(dev_data);
-+ pdev->pci_dev_data = NULL;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/pciback.h ubuntu-gutsy-xen/drivers/xen/pciback/pciback.h
---- ubuntu-gutsy/drivers/xen/pciback/pciback.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/pciback.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,93 @@
-+/*
-+ * PCI Backend Common Data Structures & Function Declarations
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#ifndef __XEN_PCIBACK_H__
-+#define __XEN_PCIBACK_H__
-+
-+#include <linux/pci.h>
-+#include <linux/interrupt.h>
-+#include <xen/xenbus.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/workqueue.h>
-+#include <asm/atomic.h>
-+#include <xen/interface/io/pciif.h>
-+
-+struct pci_dev_entry {
-+ struct list_head list;
-+ struct pci_dev *dev;
-+};
-+
-+#define _PDEVF_op_active (0)
-+#define PDEVF_op_active (1<<(_PDEVF_op_active))
-+
-+struct pciback_device {
-+ void *pci_dev_data;
-+ spinlock_t dev_lock;
-+
-+ struct xenbus_device *xdev;
-+
-+ struct xenbus_watch be_watch;
-+ u8 be_watching;
-+
-+ int evtchn_irq;
-+
-+ struct vm_struct *sh_area;
-+ struct xen_pci_sharedinfo *sh_info;
-+
-+ unsigned long flags;
-+
-+ struct work_struct op_work;
-+};
-+
-+struct pciback_dev_data {
-+ struct list_head config_fields;
-+ int permissive;
-+ int warned_on_write;
-+};
-+
-+/* Get/Put PCI Devices that are hidden from the PCI Backend Domain */
-+struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
-+ int domain, int bus,
-+ int slot, int func);
-+struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
-+ struct pci_dev *dev);
-+void pcistub_put_pci_dev(struct pci_dev *dev);
-+
-+/* Ensure a device is turned off or reset */
-+void pciback_reset_device(struct pci_dev *pdev);
-+
-+/* Access a virtual configuration space for a PCI device */
-+int pciback_config_init(void);
-+int pciback_config_init_dev(struct pci_dev *dev);
-+void pciback_config_free_dyn_fields(struct pci_dev *dev);
-+void pciback_config_reset_dev(struct pci_dev *dev);
-+void pciback_config_free_dev(struct pci_dev *dev);
-+int pciback_config_read(struct pci_dev *dev, int offset, int size,
-+ u32 * ret_val);
-+int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value);
-+
-+/* Handle requests for specific devices from the frontend */
-+typedef int (*publish_pci_root_cb) (struct pciback_device * pdev,
-+ unsigned int domain, unsigned int bus);
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev);
-+void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev);
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus,
-+ unsigned int devfn);
-+int pciback_init_devices(struct pciback_device *pdev);
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+ publish_pci_root_cb cb);
-+void pciback_release_devices(struct pciback_device *pdev);
-+
-+/* Handles events from front-end */
-+irqreturn_t pciback_handle_event(int irq, void *dev_id);
-+void pciback_do_op(struct work_struct *work);
-+
-+int pciback_xenbus_register(void);
-+void pciback_xenbus_unregister(void);
-+
-+extern int verbose_request;
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/pciback_ops.c ubuntu-gutsy-xen/drivers/xen/pciback/pciback_ops.c
---- ubuntu-gutsy/drivers/xen/pciback/pciback_ops.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/pciback_ops.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,95 @@
-+/*
-+ * PCI Backend Operations - respond to PCI requests from Frontend
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <asm/bitops.h>
-+#include <xen/evtchn.h>
-+#include "pciback.h"
-+
-+int verbose_request = 0;
-+module_param(verbose_request, int, 0644);
-+
-+/* Ensure a device is "turned off" and ready to be exported.
-+ * (Also see pciback_config_reset_dev to ensure virtual configuration space is
-+ * ready to be re-exported)
-+ */
-+void pciback_reset_device(struct pci_dev *dev)
-+{
-+ u16 cmd;
-+
-+ /* Disable devices (but not bridges) */
-+ if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
-+ pci_disable_device(dev);
-+
-+ pci_write_config_word(dev, PCI_COMMAND, 0);
-+
-+ atomic_set(&dev->enable_cnt, 0);
-+ dev->is_busmaster = 0;
-+ } else {
-+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
-+ if (cmd & (PCI_COMMAND_INVALIDATE)) {
-+ cmd &= ~(PCI_COMMAND_INVALIDATE);
-+ pci_write_config_word(dev, PCI_COMMAND, cmd);
-+
-+ dev->is_busmaster = 0;
-+ }
-+ }
-+}
-+
-+static inline void test_and_schedule_op(struct pciback_device *pdev)
-+{
-+ /* Check that frontend is requesting an operation and that we are not
-+ * already processing a request */
-+ if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
-+ && !test_and_set_bit(_PDEVF_op_active, &pdev->flags))
-+ schedule_work(&pdev->op_work);
-+}
-+
-+/* Performing the configuration space reads/writes must not be done in atomic
-+ * context because some of the pci_* functions can sleep (mostly due to ACPI
-+ * use of semaphores). This function is intended to be called from a work
-+ * queue in process context taking a struct pciback_device as a parameter */
-+void pciback_do_op(struct work_struct *work)
-+{
-+ struct pciback_device *pdev = container_of(work, struct pciback_device, op_work);
-+ struct pci_dev *dev;
-+ struct xen_pci_op *op = &pdev->sh_info->op;
-+
-+ dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
-+
-+ if (dev == NULL)
-+ op->err = XEN_PCI_ERR_dev_not_found;
-+ else if (op->cmd == XEN_PCI_OP_conf_read)
-+ op->err = pciback_config_read(dev, op->offset, op->size,
-+ &op->value);
-+ else if (op->cmd == XEN_PCI_OP_conf_write)
-+ op->err = pciback_config_write(dev, op->offset, op->size,
-+ op->value);
-+ else
-+ op->err = XEN_PCI_ERR_not_implemented;
-+
-+ /* Tell the driver domain that we're done. */
-+ wmb();
-+ clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
-+ notify_remote_via_irq(pdev->evtchn_irq);
-+
-+ /* Mark that we're done. */
-+ smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
-+ clear_bit(_PDEVF_op_active, &pdev->flags);
-+ smp_mb__after_clear_bit(); /* /before/ final check for work */
-+
-+ /* Check to see if the driver domain tried to start another request in
-+ * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. */
-+ test_and_schedule_op(pdev);
-+}
-+
-+irqreturn_t pciback_handle_event(int irq, void *dev_id)
-+{
-+ struct pciback_device *pdev = dev_id;
-+
-+ test_and_schedule_op(pdev);
-+
-+ return IRQ_HANDLED;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/pci_stub.c ubuntu-gutsy-xen/drivers/xen/pciback/pci_stub.c
---- ubuntu-gutsy/drivers/xen/pciback/pci_stub.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/pci_stub.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,929 @@
-+/*
-+ * PCI Stub Driver - Grabs devices in backend to be exported later
-+ *
-+ * Ryan Wilson <hap9@epoch.ncsc.mil>
-+ * Chris Bookholt <hap10@epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/kref.h>
-+#include <asm/atomic.h>
-+#include "pciback.h"
-+#include "conf_space.h"
-+#include "conf_space_quirks.h"
-+
-+static char *pci_devs_to_hide = NULL;
-+module_param_named(hide, pci_devs_to_hide, charp, 0444);
-+
-+struct pcistub_device_id {
-+ struct list_head slot_list;
-+ int domain;
-+ unsigned char bus;
-+ unsigned int devfn;
-+};
-+static LIST_HEAD(pcistub_device_ids);
-+static DEFINE_SPINLOCK(device_ids_lock);
-+
-+struct pcistub_device {
-+ struct kref kref;
-+ struct list_head dev_list;
-+ spinlock_t lock;
-+
-+ struct pci_dev *dev;
-+ struct pciback_device *pdev; /* non-NULL if struct pci_dev is in use */
-+};
-+
-+/* Access to pcistub_devices & seized_devices lists and the initialize_devices
-+ * flag must be locked with pcistub_devices_lock
-+ */
-+static DEFINE_SPINLOCK(pcistub_devices_lock);
-+static LIST_HEAD(pcistub_devices);
-+
-+/* wait for device_initcall before initializing our devices
-+ * (see pcistub_init_devices_late)
-+ */
-+static int initialize_devices = 0;
-+static LIST_HEAD(seized_devices);
-+
-+static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
-+{
-+ struct pcistub_device *psdev;
-+
-+ dev_dbg(&dev->dev, "pcistub_device_alloc\n");
-+
-+ psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
-+ if (!psdev)
-+ return NULL;
-+
-+ psdev->dev = pci_dev_get(dev);
-+ if (!psdev->dev) {
-+ kfree(psdev);
-+ return NULL;
-+ }
-+
-+ kref_init(&psdev->kref);
-+ spin_lock_init(&psdev->lock);
-+
-+ return psdev;
-+}
-+
-+/* Don't call this directly as it's called by pcistub_device_put */
-+static void pcistub_device_release(struct kref *kref)
-+{
-+ struct pcistub_device *psdev;
-+
-+ psdev = container_of(kref, struct pcistub_device, kref);
-+
-+ dev_dbg(&psdev->dev->dev, "pcistub_device_release\n");
-+
-+ /* Clean-up the device */
-+ pciback_reset_device(psdev->dev);
-+ pciback_config_free_dyn_fields(psdev->dev);
-+ pciback_config_free_dev(psdev->dev);
-+ kfree(pci_get_drvdata(psdev->dev));
-+ pci_set_drvdata(psdev->dev, NULL);
-+
-+ pci_dev_put(psdev->dev);
-+
-+ kfree(psdev);
-+}
-+
-+static inline void pcistub_device_get(struct pcistub_device *psdev)
-+{
-+ kref_get(&psdev->kref);
-+}
-+
-+static inline void pcistub_device_put(struct pcistub_device *psdev)
-+{
-+ kref_put(&psdev->kref, pcistub_device_release);
-+}
-+
-+static struct pcistub_device *pcistub_device_find(int domain, int bus,
-+ int slot, int func)
-+{
-+ struct pcistub_device *psdev = NULL;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+
-+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+ if (psdev->dev != NULL
-+ && domain == pci_domain_nr(psdev->dev->bus)
-+ && bus == psdev->dev->bus->number
-+ && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
-+ pcistub_device_get(psdev);
-+ goto out;
-+ }
-+ }
-+
-+ /* didn't find it */
-+ psdev = NULL;
-+
-+ out:
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+ return psdev;
-+}
-+
-+static struct pci_dev *pcistub_device_get_pci_dev(struct pciback_device *pdev,
-+ struct pcistub_device *psdev)
-+{
-+ struct pci_dev *pci_dev = NULL;
-+ unsigned long flags;
-+
-+ pcistub_device_get(psdev);
-+
-+ spin_lock_irqsave(&psdev->lock, flags);
-+ if (!psdev->pdev) {
-+ psdev->pdev = pdev;
-+ pci_dev = psdev->dev;
-+ }
-+ spin_unlock_irqrestore(&psdev->lock, flags);
-+
-+ if (!pci_dev)
-+ pcistub_device_put(psdev);
-+
-+ return pci_dev;
-+}
-+
-+struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
-+ int domain, int bus,
-+ int slot, int func)
-+{
-+ struct pcistub_device *psdev;
-+ struct pci_dev *found_dev = NULL;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+
-+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+ if (psdev->dev != NULL
-+ && domain == pci_domain_nr(psdev->dev->bus)
-+ && bus == psdev->dev->bus->number
-+ && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
-+ found_dev = pcistub_device_get_pci_dev(pdev, psdev);
-+ break;
-+ }
-+ }
-+
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+ return found_dev;
-+}
-+
-+struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
-+ struct pci_dev *dev)
-+{
-+ struct pcistub_device *psdev;
-+ struct pci_dev *found_dev = NULL;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+
-+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+ if (psdev->dev == dev) {
-+ found_dev = pcistub_device_get_pci_dev(pdev, psdev);
-+ break;
-+ }
-+ }
-+
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+ return found_dev;
-+}
-+
-+void pcistub_put_pci_dev(struct pci_dev *dev)
-+{
-+ struct pcistub_device *psdev, *found_psdev = NULL;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+
-+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+ if (psdev->dev == dev) {
-+ found_psdev = psdev;
-+ break;
-+ }
-+ }
-+
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+
-+ /* Cleanup our device
-+ * (so it's ready for the next domain)
-+ */
-+ pciback_reset_device(found_psdev->dev);
-+ pciback_config_free_dyn_fields(found_psdev->dev);
-+ pciback_config_reset_dev(found_psdev->dev);
-+
-+ spin_lock_irqsave(&found_psdev->lock, flags);
-+ found_psdev->pdev = NULL;
-+ spin_unlock_irqrestore(&found_psdev->lock, flags);
-+
-+ pcistub_device_put(found_psdev);
-+}
-+
-+static int __devinit pcistub_match_one(struct pci_dev *dev,
-+ struct pcistub_device_id *pdev_id)
-+{
-+	/* Match the specified device by domain, bus, slot and func, or match
-+	 * any of the device's parent bridges.
-+ */
-+ for (; dev != NULL; dev = dev->bus->self) {
-+ if (pci_domain_nr(dev->bus) == pdev_id->domain
-+ && dev->bus->number == pdev_id->bus
-+ && dev->devfn == pdev_id->devfn)
-+ return 1;
-+
-+ /* Sometimes topmost bridge links to itself. */
-+ if (dev == dev->bus->self)
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+static int __devinit pcistub_match(struct pci_dev *dev)
-+{
-+ struct pcistub_device_id *pdev_id;
-+ unsigned long flags;
-+ int found = 0;
-+
-+ spin_lock_irqsave(&device_ids_lock, flags);
-+ list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
-+ if (pcistub_match_one(dev, pdev_id)) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+ spin_unlock_irqrestore(&device_ids_lock, flags);
-+
-+ return found;
-+}
-+
-+static int __devinit pcistub_init_device(struct pci_dev *dev)
-+{
-+ struct pciback_dev_data *dev_data;
-+ int err = 0;
-+
-+ dev_dbg(&dev->dev, "initializing...\n");
-+
-+	/* The PCI backend is not intended to be a module (or to work with
-+	 * removable PCI devices) yet. If it were, pciback_config_free()
-+	 * would need to be called somewhere to free the memory allocated
-+	 * here, followed by kfree(pci_get_drvdata(psdev->dev)).
-+ */
-+ dev_data = kzalloc(sizeof(*dev_data), GFP_ATOMIC);
-+ if (!dev_data) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+ pci_set_drvdata(dev, dev_data);
-+
-+ dev_dbg(&dev->dev, "initializing config\n");
-+ err = pciback_config_init_dev(dev);
-+ if (err)
-+ goto out;
-+
-+ /* HACK: Force device (& ACPI) to determine what IRQ it's on - we
-+ * must do this here because pcibios_enable_device may specify
-+ * the pci device's true irq (and possibly its other resources)
-+ * if they differ from what's in the configuration space.
-+ * This makes the assumption that the device's resources won't
-+ * change after this point (otherwise this code may break!)
-+ */
-+ dev_dbg(&dev->dev, "enabling device\n");
-+ err = pci_enable_device(dev);
-+ if (err)
-+ goto config_release;
-+
-+ /* Now disable the device (this also ensures some private device
-+ * data is setup before we export)
-+ */
-+ dev_dbg(&dev->dev, "reset device\n");
-+ pciback_reset_device(dev);
-+
-+ return 0;
-+
-+ config_release:
-+ pciback_config_free_dev(dev);
-+
-+ out:
-+ pci_set_drvdata(dev, NULL);
-+ kfree(dev_data);
-+ return err;
-+}
-+
-+/*
-+ * Because some initialization still happens on
-+ * devices during fs_initcall, we need to defer
-+ * full initialization of our devices until
-+ * device_initcall.
-+ */
-+static int __init pcistub_init_devices_late(void)
-+{
-+ struct pcistub_device *psdev;
-+ unsigned long flags;
-+ int err = 0;
-+
-+ pr_debug("pciback: pcistub_init_devices_late\n");
-+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+
-+ while (!list_empty(&seized_devices)) {
-+ psdev = container_of(seized_devices.next,
-+ struct pcistub_device, dev_list);
-+ list_del(&psdev->dev_list);
-+
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+
-+ err = pcistub_init_device(psdev->dev);
-+ if (err) {
-+ dev_err(&psdev->dev->dev,
-+ "error %d initializing device\n", err);
-+ kfree(psdev);
-+ psdev = NULL;
-+ }
-+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+
-+ if (psdev)
-+ list_add_tail(&psdev->dev_list, &pcistub_devices);
-+ }
-+
-+ initialize_devices = 1;
-+
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+
-+ return 0;
-+}
-+
-+static int __devinit pcistub_seize(struct pci_dev *dev)
-+{
-+ struct pcistub_device *psdev;
-+ unsigned long flags;
-+ int err = 0;
-+
-+ psdev = pcistub_device_alloc(dev);
-+ if (!psdev)
-+ return -ENOMEM;
-+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+
-+ if (initialize_devices) {
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+
-+ /* don't want irqs disabled when calling pcistub_init_device */
-+ err = pcistub_init_device(psdev->dev);
-+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+
-+ if (!err)
-+ list_add(&psdev->dev_list, &pcistub_devices);
-+ } else {
-+ dev_dbg(&dev->dev, "deferring initialization\n");
-+ list_add(&psdev->dev_list, &seized_devices);
-+ }
-+
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+
-+ if (err)
-+ pcistub_device_put(psdev);
-+
-+ return err;
-+}
-+
-+static int __devinit pcistub_probe(struct pci_dev *dev,
-+ const struct pci_device_id *id)
-+{
-+ int err = 0;
-+
-+ dev_dbg(&dev->dev, "probing...\n");
-+
-+ if (pcistub_match(dev)) {
-+
-+ if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
-+ && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
-+ dev_err(&dev->dev, "can't export pci devices that "
-+ "don't have a normal (0) or bridge (1) "
-+ "header type!\n");
-+ err = -ENODEV;
-+ goto out;
-+ }
-+
-+ dev_info(&dev->dev, "seizing device\n");
-+ err = pcistub_seize(dev);
-+ } else
-+ /* Didn't find the device */
-+ err = -ENODEV;
-+
-+ out:
-+ return err;
-+}
-+
-+static void pcistub_remove(struct pci_dev *dev)
-+{
-+ struct pcistub_device *psdev, *found_psdev = NULL;
-+ unsigned long flags;
-+
-+ dev_dbg(&dev->dev, "removing\n");
-+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+
-+ pciback_config_quirk_release(dev);
-+
-+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+ if (psdev->dev == dev) {
-+ found_psdev = psdev;
-+ break;
-+ }
-+ }
-+
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+
-+ if (found_psdev) {
-+ dev_dbg(&dev->dev, "found device to remove - in use? %p\n",
-+ found_psdev->pdev);
-+
-+ if (found_psdev->pdev) {
-+ printk(KERN_WARNING "pciback: ****** removing device "
-+ "%s while still in-use! ******\n",
-+ pci_name(found_psdev->dev));
-+ printk(KERN_WARNING "pciback: ****** driver domain may "
-+ "still access this device's i/o resources!\n");
-+ printk(KERN_WARNING "pciback: ****** shutdown driver "
-+ "domain before binding device\n");
-+ printk(KERN_WARNING "pciback: ****** to other drivers "
-+ "or domains\n");
-+
-+ pciback_release_pci_dev(found_psdev->pdev,
-+ found_psdev->dev);
-+ }
-+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+ list_del(&found_psdev->dev_list);
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+
-+ /* the final put for releasing from the list */
-+ pcistub_device_put(found_psdev);
-+ }
-+}
-+
-+static struct pci_device_id pcistub_ids[] = {
-+ {
-+ .vendor = PCI_ANY_ID,
-+ .device = PCI_ANY_ID,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ },
-+ {0,},
-+};
-+
-+/*
-+ * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
-+ * for a normal device. I don't want it to be loaded automatically.
-+ */
-+
-+static struct pci_driver pciback_pci_driver = {
-+ .name = "pciback",
-+ .id_table = pcistub_ids,
-+ .probe = pcistub_probe,
-+ .remove = pcistub_remove,
-+};
-+
-+static inline int str_to_slot(const char *buf, int *domain, int *bus,
-+ int *slot, int *func)
-+{
-+ int err;
-+
-+ err = sscanf(buf, " %x:%x:%x.%x", domain, bus, slot, func);
-+ if (err == 4)
-+ return 0;
-+ else if (err < 0)
-+ return -EINVAL;
-+
-+ /* try again without domain */
-+ *domain = 0;
-+ err = sscanf(buf, " %x:%x.%x", bus, slot, func);
-+ if (err == 3)
-+ return 0;
-+
-+ return -EINVAL;
-+}
-+
-+static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
-+ *slot, int *func, int *reg, int *size, int *mask)
-+{
-+ int err;
-+
-+ err =
-+ sscanf(buf, " %04x:%02x:%02x.%1x-%08x:%1x:%08x", domain, bus, slot,
-+ func, reg, size, mask);
-+ if (err == 7)
-+ return 0;
-+ return -EINVAL;
-+}
-+
-+static int pcistub_device_id_add(int domain, int bus, int slot, int func)
-+{
-+ struct pcistub_device_id *pci_dev_id;
-+ unsigned long flags;
-+
-+ pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
-+ if (!pci_dev_id)
-+ return -ENOMEM;
-+
-+ pci_dev_id->domain = domain;
-+ pci_dev_id->bus = bus;
-+ pci_dev_id->devfn = PCI_DEVFN(slot, func);
-+
-+ pr_debug("pciback: wants to seize %04x:%02x:%02x.%01x\n",
-+ domain, bus, slot, func);
-+
-+ spin_lock_irqsave(&device_ids_lock, flags);
-+ list_add_tail(&pci_dev_id->slot_list, &pcistub_device_ids);
-+ spin_unlock_irqrestore(&device_ids_lock, flags);
-+
-+ return 0;
-+}
-+
-+static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
-+{
-+ struct pcistub_device_id *pci_dev_id, *t;
-+ int devfn = PCI_DEVFN(slot, func);
-+ int err = -ENOENT;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&device_ids_lock, flags);
-+ list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids, slot_list) {
-+
-+ if (pci_dev_id->domain == domain
-+ && pci_dev_id->bus == bus && pci_dev_id->devfn == devfn) {
-+ /* Don't break; here because it's possible the same
-+ * slot could be in the list more than once
-+ */
-+ list_del(&pci_dev_id->slot_list);
-+ kfree(pci_dev_id);
-+
-+ err = 0;
-+
-+ pr_debug("pciback: removed %04x:%02x:%02x.%01x from "
-+ "seize list\n", domain, bus, slot, func);
-+ }
-+ }
-+ spin_unlock_irqrestore(&device_ids_lock, flags);
-+
-+ return err;
-+}
-+
-+static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg,
-+ int size, int mask)
-+{
-+ int err = 0;
-+ struct pcistub_device *psdev;
-+ struct pci_dev *dev;
-+ struct config_field *field;
-+
-+ psdev = pcistub_device_find(domain, bus, slot, func);
-+ if (!psdev || !psdev->dev) {
-+ err = -ENODEV;
-+ goto out;
-+ }
-+ dev = psdev->dev;
-+
-+ field = kzalloc(sizeof(*field), GFP_ATOMIC);
-+ if (!field) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ field->offset = reg;
-+ field->size = size;
-+ field->mask = mask;
-+ field->init = NULL;
-+ field->reset = NULL;
-+ field->release = NULL;
-+ field->clean = pciback_config_field_free;
-+
-+ err = pciback_config_quirks_add_field(dev, field);
-+ if (err)
-+ kfree(field);
-+ out:
-+ return err;
-+}
-+
-+static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
-+ size_t count)
-+{
-+ int domain, bus, slot, func;
-+ int err;
-+
-+ err = str_to_slot(buf, &domain, &bus, &slot, &func);
-+ if (err)
-+ goto out;
-+
-+ err = pcistub_device_id_add(domain, bus, slot, func);
-+
-+ out:
-+ if (!err)
-+ err = count;
-+ return err;
-+}
-+
-+DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
-+
-+static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
-+ size_t count)
-+{
-+ int domain, bus, slot, func;
-+ int err;
-+
-+ err = str_to_slot(buf, &domain, &bus, &slot, &func);
-+ if (err)
-+ goto out;
-+
-+ err = pcistub_device_id_remove(domain, bus, slot, func);
-+
-+ out:
-+ if (!err)
-+ err = count;
-+ return err;
-+}
-+
-+DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
-+
-+static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
-+{
-+ struct pcistub_device_id *pci_dev_id;
-+ size_t count = 0;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&device_ids_lock, flags);
-+ list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
-+ if (count >= PAGE_SIZE)
-+ break;
-+
-+ count += scnprintf(buf + count, PAGE_SIZE - count,
-+ "%04x:%02x:%02x.%01x\n",
-+ pci_dev_id->domain, pci_dev_id->bus,
-+ PCI_SLOT(pci_dev_id->devfn),
-+ PCI_FUNC(pci_dev_id->devfn));
-+ }
-+ spin_unlock_irqrestore(&device_ids_lock, flags);
-+
-+ return count;
-+}
-+
-+DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
-+
-+static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
-+ size_t count)
-+{
-+ int domain, bus, slot, func, reg, size, mask;
-+ int err;
-+
-+ err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
-+ &mask);
-+ if (err)
-+ goto out;
-+
-+ err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);
-+
-+ out:
-+ if (!err)
-+ err = count;
-+ return err;
-+}
-+
-+static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
-+{
-+ int count = 0;
-+ unsigned long flags;
-+ extern struct list_head pciback_quirks;
-+ struct pciback_config_quirk *quirk;
-+ struct pciback_dev_data *dev_data;
-+ struct config_field *field;
-+ struct config_field_entry *cfg_entry;
-+
-+ spin_lock_irqsave(&device_ids_lock, flags);
-+ list_for_each_entry(quirk, &pciback_quirks, quirks_list) {
-+ if (count >= PAGE_SIZE)
-+ goto out;
-+
-+ count += scnprintf(buf + count, PAGE_SIZE - count,
-+ "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
-+ quirk->pdev->bus->number,
-+ PCI_SLOT(quirk->pdev->devfn),
-+ PCI_FUNC(quirk->pdev->devfn),
-+ quirk->devid.vendor, quirk->devid.device,
-+ quirk->devid.subvendor,
-+ quirk->devid.subdevice);
-+
-+ dev_data = pci_get_drvdata(quirk->pdev);
-+
-+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+ field = cfg_entry->field;
-+ if (count >= PAGE_SIZE)
-+ goto out;
-+
-+ count += scnprintf(buf + count, PAGE_SIZE - count,
-+ "\t\t%08x:%01x:%08x\n",
-+ cfg_entry->base_offset + field->offset,
-+ field->size, field->mask);
-+ }
-+ }
-+
-+ out:
-+ spin_unlock_irqrestore(&device_ids_lock, flags);
-+
-+ return count;
-+}
-+
-+DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, pcistub_quirk_add);
-+
-+static ssize_t permissive_add(struct device_driver *drv, const char *buf,
-+ size_t count)
-+{
-+ int domain, bus, slot, func;
-+ int err;
-+ struct pcistub_device *psdev;
-+ struct pciback_dev_data *dev_data;
-+ err = str_to_slot(buf, &domain, &bus, &slot, &func);
-+ if (err)
-+ goto out;
-+ psdev = pcistub_device_find(domain, bus, slot, func);
-+ if (!psdev) {
-+ err = -ENODEV;
-+ goto out;
-+ }
-+ if (!psdev->dev) {
-+ err = -ENODEV;
-+ goto release;
-+ }
-+ dev_data = pci_get_drvdata(psdev->dev);
-+ /* the driver data for a device should never be null at this point */
-+ if (!dev_data) {
-+ err = -ENXIO;
-+ goto release;
-+ }
-+ if (!dev_data->permissive) {
-+ dev_data->permissive = 1;
-+ /* Let user know that what they're doing could be unsafe */
-+ dev_warn(&psdev->dev->dev,
-+ "enabling permissive mode configuration space accesses!\n");
-+ dev_warn(&psdev->dev->dev,
-+ "permissive mode is potentially unsafe!\n");
-+ }
-+ release:
-+ pcistub_device_put(psdev);
-+ out:
-+ if (!err)
-+ err = count;
-+ return err;
-+}
-+
-+static ssize_t permissive_show(struct device_driver *drv, char *buf)
-+{
-+ struct pcistub_device *psdev;
-+ struct pciback_dev_data *dev_data;
-+ size_t count = 0;
-+ unsigned long flags;
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+ if (count >= PAGE_SIZE)
-+ break;
-+ if (!psdev->dev)
-+ continue;
-+ dev_data = pci_get_drvdata(psdev->dev);
-+ if (!dev_data || !dev_data->permissive)
-+ continue;
-+ count +=
-+ scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
-+ pci_name(psdev->dev));
-+ }
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+ return count;
-+}
-+
-+DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add);
-+
-+static void pcistub_exit(void)
-+{
-+ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_new_slot);
-+ driver_remove_file(&pciback_pci_driver.driver,
-+ &driver_attr_remove_slot);
-+ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_slots);
-+ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_quirks);
-+ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_permissive);
-+
-+ pci_unregister_driver(&pciback_pci_driver);
-+}
-+
-+static int __init pcistub_init(void)
-+{
-+ int pos = 0;
-+ int err = 0;
-+ int domain, bus, slot, func;
-+ int parsed;
-+
-+ if (pci_devs_to_hide && *pci_devs_to_hide) {
-+ do {
-+ parsed = 0;
-+
-+ err = sscanf(pci_devs_to_hide + pos,
-+ " (%x:%x:%x.%x) %n",
-+ &domain, &bus, &slot, &func, &parsed);
-+ if (err != 4) {
-+ domain = 0;
-+ err = sscanf(pci_devs_to_hide + pos,
-+ " (%x:%x.%x) %n",
-+ &bus, &slot, &func, &parsed);
-+ if (err != 3)
-+ goto parse_error;
-+ }
-+
-+ err = pcistub_device_id_add(domain, bus, slot, func);
-+ if (err)
-+ goto out;
-+
-+ /* if parsed<=0, we've reached the end of the string */
-+ pos += parsed;
-+ } while (parsed > 0 && pci_devs_to_hide[pos]);
-+ }
-+
-+ /* If we're the first PCI Device Driver to register, we're the
-+ * first one to get offered PCI devices as they become
-+ * available (and thus we can be the first to grab them)
-+ */
-+ err = pci_register_driver(&pciback_pci_driver);
-+ if (err < 0)
-+ goto out;
-+
-+ err = driver_create_file(&pciback_pci_driver.driver,
-+ &driver_attr_new_slot);
-+ if (!err)
-+ err = driver_create_file(&pciback_pci_driver.driver,
-+ &driver_attr_remove_slot);
-+ if (!err)
-+ err = driver_create_file(&pciback_pci_driver.driver,
-+ &driver_attr_slots);
-+ if (!err)
-+ err = driver_create_file(&pciback_pci_driver.driver,
-+ &driver_attr_quirks);
-+ if (!err)
-+ err = driver_create_file(&pciback_pci_driver.driver,
-+ &driver_attr_permissive);
-+
-+ if (err)
-+ pcistub_exit();
-+
-+ out:
-+ return err;
-+
-+ parse_error:
-+ printk(KERN_ERR "pciback: Error parsing pci_devs_to_hide at \"%s\"\n",
-+ pci_devs_to_hide + pos);
-+ return -EINVAL;
-+}
-+
-+#ifndef MODULE
-+/*
-+ * fs_initcall happens before device_initcall,
-+ * so pciback *should* get called first (because we
-+ * want to claim any device before other drivers
-+ * get a chance, by being the first PCI device
-+ * driver to register)
-+ */
-+fs_initcall(pcistub_init);
-+#endif
-+
-+static int __init pciback_init(void)
-+{
-+ int err;
-+
-+ err = pciback_config_init();
-+ if (err)
-+ return err;
-+
-+#ifdef MODULE
-+ err = pcistub_init();
-+ if (err < 0)
-+ return err;
-+#endif
-+
-+ pcistub_init_devices_late();
-+ err = pciback_xenbus_register();
-+ if (err)
-+ pcistub_exit();
-+
-+ return err;
-+}
-+
-+static void __exit pciback_cleanup(void)
-+{
-+ pciback_xenbus_unregister();
-+ pcistub_exit();
-+}
-+
-+module_init(pciback_init);
-+module_exit(pciback_cleanup);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
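The sysfs and module-parameter parsing above accepts two textual device-address forms. A stand-alone sketch (not part of the patch) that exercises the same sscanf patterns used by str_to_slot() and the hide= parser, useful for checking what pciback will accept:

#include <stdio.h>

int main(void)
{
	const char *slot = "0000:00:1d.0";		/* dddd:bb:ss.f, as taken by new_slot */
	const char *hide = "(00:1d.0) (01:00.0)";	/* hide= module parameter syntax */
	int domain, bus, dev, func, parsed = 0;

	/* same pattern as str_to_slot(); a bare "bb:ss.f" falls back to domain 0 */
	if (sscanf(slot, " %x:%x:%x.%x", &domain, &bus, &dev, &func) == 4)
		printf("slot -> %04x:%02x:%02x.%x\n", domain, bus, dev, func);

	/* same pattern pcistub_init() uses for each parenthesised hide= entry */
	if (sscanf(hide, " (%x:%x.%x) %n", &bus, &dev, &func, &parsed) == 3)
		printf("hide -> 0000:%02x:%02x.%x (consumed %d chars)\n",
		       bus, dev, func, parsed);
	return 0;
}

str_to_quirk() extends the slot form with "-reg:size:mask", so an illustrative quirks entry would look like 0000:00:1d.0-00000040:1:000000ff.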
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/slot.c ubuntu-gutsy-xen/drivers/xen/pciback/slot.c
---- ubuntu-gutsy/drivers/xen/pciback/slot.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/slot.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,151 @@
-+/*
-+ * PCI Backend - Provides a Virtual PCI bus (with real devices)
-+ * to the frontend
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil> (vpci.c)
-+ * Author: Tristan Gingold <tristan.gingold@bull.net>, from vpci.c
-+ */
-+
-+#include <linux/list.h>
-+#include <linux/slab.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include "pciback.h"
-+
-+/* There are at most 32 slots in a pci bus. */
-+#define PCI_SLOT_MAX 32
-+
-+#define PCI_BUS_NBR 2
-+
-+struct slot_dev_data {
-+ /* Access to dev_list must be protected by lock */
-+ struct pci_dev *slots[PCI_BUS_NBR][PCI_SLOT_MAX];
-+ spinlock_t lock;
-+};
-+
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus,
-+ unsigned int devfn)
-+{
-+ struct pci_dev *dev = NULL;
-+ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
-+ unsigned long flags;
-+
-+ if (domain != 0 || PCI_FUNC(devfn) != 0)
-+ return NULL;
-+
-+ if (PCI_SLOT(devfn) >= PCI_SLOT_MAX || bus >= PCI_BUS_NBR)
-+ return NULL;
-+
-+ spin_lock_irqsave(&slot_dev->lock, flags);
-+ dev = slot_dev->slots[bus][PCI_SLOT(devfn)];
-+ spin_unlock_irqrestore(&slot_dev->lock, flags);
-+
-+ return dev;
-+}
-+
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
-+{
-+ int err = 0, slot, bus;
-+ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
-+ unsigned long flags;
-+
-+ if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
-+ err = -EFAULT;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Can't export bridges on the virtual PCI bus");
-+ goto out;
-+ }
-+
-+ spin_lock_irqsave(&slot_dev->lock, flags);
-+
-+ /* Assign to a new slot on the virtual PCI bus */
-+ for (bus = 0; bus < PCI_BUS_NBR; bus++)
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ if (slot_dev->slots[bus][slot] == NULL) {
-+ printk(KERN_INFO
-+ "pciback: slot: %s: assign to virtual slot %d, bus %d\n",
-+ pci_name(dev), slot, bus);
-+ slot_dev->slots[bus][slot] = dev;
-+ goto unlock;
-+ }
-+ }
-+
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "No more space on root virtual PCI bus");
-+
-+ unlock:
-+ spin_unlock_irqrestore(&slot_dev->lock, flags);
-+ out:
-+ return err;
-+}
-+
-+void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
-+{
-+ int slot, bus;
-+ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
-+ struct pci_dev *found_dev = NULL;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&slot_dev->lock, flags);
-+
-+ for (bus = 0; bus < PCI_BUS_NBR; bus++)
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ if (slot_dev->slots[bus][slot] == dev) {
-+ slot_dev->slots[bus][slot] = NULL;
-+ found_dev = dev;
-+ goto out;
-+ }
-+ }
-+
-+ out:
-+ spin_unlock_irqrestore(&slot_dev->lock, flags);
-+
-+ if (found_dev)
-+ pcistub_put_pci_dev(found_dev);
-+}
-+
-+int pciback_init_devices(struct pciback_device *pdev)
-+{
-+ int slot, bus;
-+ struct slot_dev_data *slot_dev;
-+
-+ slot_dev = kmalloc(sizeof(*slot_dev), GFP_KERNEL);
-+ if (!slot_dev)
-+ return -ENOMEM;
-+
-+ spin_lock_init(&slot_dev->lock);
-+
-+ for (bus = 0; bus < PCI_BUS_NBR; bus++)
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++)
-+ slot_dev->slots[bus][slot] = NULL;
-+
-+ pdev->pci_dev_data = slot_dev;
-+
-+ return 0;
-+}
-+
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+ publish_pci_root_cb publish_cb)
-+{
-+ /* The Virtual PCI bus has only one root */
-+ return publish_cb(pdev, 0, 0);
-+}
-+
-+void pciback_release_devices(struct pciback_device *pdev)
-+{
-+ int slot, bus;
-+ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
-+ struct pci_dev *dev;
-+
-+ for (bus = 0; bus < PCI_BUS_NBR; bus++)
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ dev = slot_dev->slots[bus][slot];
-+ if (dev != NULL)
-+ pcistub_put_pci_dev(dev);
-+ }
-+
-+ kfree(slot_dev);
-+ pdev->pci_dev_data = NULL;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/vpci.c ubuntu-gutsy-xen/drivers/xen/pciback/vpci.c
---- ubuntu-gutsy/drivers/xen/pciback/vpci.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/vpci.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,204 @@
-+/*
-+ * PCI Backend - Provides a Virtual PCI bus (with real devices)
-+ * to the frontend
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+
-+#include <linux/list.h>
-+#include <linux/slab.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include "pciback.h"
-+
-+#define PCI_SLOT_MAX 32
-+
-+struct vpci_dev_data {
-+ /* Access to dev_list must be protected by lock */
-+ struct list_head dev_list[PCI_SLOT_MAX];
-+ spinlock_t lock;
-+};
-+
-+static inline struct list_head *list_first(struct list_head *head)
-+{
-+ return head->next;
-+}
-+
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus,
-+ unsigned int devfn)
-+{
-+ struct pci_dev_entry *entry;
-+ struct pci_dev *dev = NULL;
-+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
-+ unsigned long flags;
-+
-+ if (domain != 0 || bus != 0)
-+ return NULL;
-+
-+ if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
-+ spin_lock_irqsave(&vpci_dev->lock, flags);
-+
-+ list_for_each_entry(entry,
-+ &vpci_dev->dev_list[PCI_SLOT(devfn)],
-+ list) {
-+ if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
-+ dev = entry->dev;
-+ break;
-+ }
-+ }
-+
-+ spin_unlock_irqrestore(&vpci_dev->lock, flags);
-+ }
-+ return dev;
-+}
-+
-+static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
-+{
-+ if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
-+ && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
-+ return 1;
-+
-+ return 0;
-+}
-+
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
-+{
-+ int err = 0, slot;
-+ struct pci_dev_entry *t, *dev_entry;
-+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
-+ unsigned long flags;
-+
-+ if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
-+ err = -EFAULT;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Can't export bridges on the virtual PCI bus");
-+ goto out;
-+ }
-+
-+ dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
-+ if (!dev_entry) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error adding entry to virtual PCI bus");
-+ goto out;
-+ }
-+
-+ dev_entry->dev = dev;
-+
-+ spin_lock_irqsave(&vpci_dev->lock, flags);
-+
-+ /* Keep multi-function devices together on the virtual PCI bus */
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ if (!list_empty(&vpci_dev->dev_list[slot])) {
-+ t = list_entry(list_first(&vpci_dev->dev_list[slot]),
-+ struct pci_dev_entry, list);
-+
-+ if (match_slot(dev, t->dev)) {
-+ pr_info("pciback: vpci: %s: "
-+ "assign to virtual slot %d func %d\n",
-+ pci_name(dev), slot,
-+ PCI_FUNC(dev->devfn));
-+ list_add_tail(&dev_entry->list,
-+ &vpci_dev->dev_list[slot]);
-+ goto unlock;
-+ }
-+ }
-+ }
-+
-+ /* Assign to a new slot on the virtual PCI bus */
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ if (list_empty(&vpci_dev->dev_list[slot])) {
-+ printk(KERN_INFO
-+ "pciback: vpci: %s: assign to virtual slot %d\n",
-+ pci_name(dev), slot);
-+ list_add_tail(&dev_entry->list,
-+ &vpci_dev->dev_list[slot]);
-+ goto unlock;
-+ }
-+ }
-+
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "No more space on root virtual PCI bus");
-+
-+ unlock:
-+ spin_unlock_irqrestore(&vpci_dev->lock, flags);
-+ out:
-+ return err;
-+}
-+
-+void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
-+{
-+ int slot;
-+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
-+ struct pci_dev *found_dev = NULL;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&vpci_dev->lock, flags);
-+
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ struct pci_dev_entry *e, *tmp;
-+ list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
-+ list) {
-+ if (e->dev == dev) {
-+ list_del(&e->list);
-+ found_dev = e->dev;
-+ kfree(e);
-+ goto out;
-+ }
-+ }
-+ }
-+
-+ out:
-+ spin_unlock_irqrestore(&vpci_dev->lock, flags);
-+
-+ if (found_dev)
-+ pcistub_put_pci_dev(found_dev);
-+}
-+
-+int pciback_init_devices(struct pciback_device *pdev)
-+{
-+ int slot;
-+ struct vpci_dev_data *vpci_dev;
-+
-+ vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
-+ if (!vpci_dev)
-+ return -ENOMEM;
-+
-+ spin_lock_init(&vpci_dev->lock);
-+
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
-+ }
-+
-+ pdev->pci_dev_data = vpci_dev;
-+
-+ return 0;
-+}
-+
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+ publish_pci_root_cb publish_cb)
-+{
-+ /* The Virtual PCI bus has only one root */
-+ return publish_cb(pdev, 0, 0);
-+}
-+
-+void pciback_release_devices(struct pciback_device *pdev)
-+{
-+ int slot;
-+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
-+
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ struct pci_dev_entry *e, *tmp;
-+ list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
-+ list) {
-+ list_del(&e->list);
-+ pcistub_put_pci_dev(e->dev);
-+ kfree(e);
-+ }
-+ }
-+
-+ kfree(vpci_dev);
-+ pdev->pci_dev_data = NULL;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pciback/xenbus.c ubuntu-gutsy-xen/drivers/xen/pciback/xenbus.c
---- ubuntu-gutsy/drivers/xen/pciback/xenbus.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pciback/xenbus.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,453 @@
-+/*
-+ * PCI Backend Xenbus Setup - handles setup with frontend and xend
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/list.h>
-+#include <linux/vmalloc.h>
-+#include <xen/xenbus.h>
-+#include <xen/evtchn.h>
-+#include "pciback.h"
-+
-+#define INVALID_EVTCHN_IRQ (-1)
-+
-+static struct pciback_device *alloc_pdev(struct xenbus_device *xdev)
-+{
-+ struct pciback_device *pdev;
-+
-+ pdev = kzalloc(sizeof(struct pciback_device), GFP_KERNEL);
-+ if (pdev == NULL)
-+ goto out;
-+ dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
-+
-+ pdev->xdev = xdev;
-+ xdev->dev.driver_data = pdev;
-+
-+ spin_lock_init(&pdev->dev_lock);
-+
-+ pdev->sh_area = NULL;
-+ pdev->sh_info = NULL;
-+ pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
-+ pdev->be_watching = 0;
-+
-+ INIT_WORK(&pdev->op_work, pciback_do_op);
-+
-+ if (pciback_init_devices(pdev)) {
-+ kfree(pdev);
-+ pdev = NULL;
-+ }
-+ out:
-+ return pdev;
-+}
-+
-+static void free_pdev(struct pciback_device *pdev)
-+{
-+ if (pdev->be_watching)
-+ unregister_xenbus_watch(&pdev->be_watch);
-+
-+ /* Ensure the guest can't trigger our handler before removing devices */
-+ if (pdev->evtchn_irq != INVALID_EVTCHN_IRQ)
-+ unbind_from_irqhandler(pdev->evtchn_irq, pdev);
-+
-+ /* If the driver domain started an op, make sure we complete it or
-+ * delete it before releasing the shared memory */
-+ flush_scheduled_work();
-+
-+ if (pdev->sh_info)
-+ xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_area);
-+
-+ pciback_release_devices(pdev);
-+
-+ pdev->xdev->dev.driver_data = NULL;
-+ pdev->xdev = NULL;
-+
-+ kfree(pdev);
-+}
-+
-+static int pciback_do_attach(struct pciback_device *pdev, int gnt_ref,
-+ int remote_evtchn)
-+{
-+ int err = 0;
-+ struct vm_struct *area;
-+
-+ dev_dbg(&pdev->xdev->dev,
-+ "Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
-+ gnt_ref, remote_evtchn);
-+
-+ area = xenbus_map_ring_valloc(pdev->xdev, gnt_ref);
-+ if (IS_ERR(area)) {
-+ err = PTR_ERR(area);
-+ goto out;
-+ }
-+ pdev->sh_area = area;
-+ pdev->sh_info = area->addr;
-+
-+ err = bind_interdomain_evtchn_to_irqhandler(
-+ pdev->xdev->otherend_id, remote_evtchn, pciback_handle_event,
-+ IRQF_SAMPLE_RANDOM, "pciback", pdev);
-+ if (err < 0) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error binding event channel to IRQ");
-+ goto out;
-+ }
-+ pdev->evtchn_irq = err;
-+ err = 0;
-+
-+ dev_dbg(&pdev->xdev->dev, "Attached!\n");
-+ out:
-+ return err;
-+}
-+
-+static int pciback_attach(struct pciback_device *pdev)
-+{
-+ int err = 0;
-+ int gnt_ref, remote_evtchn;
-+ char *magic = NULL;
-+
-+ spin_lock(&pdev->dev_lock);
-+
-+ /* Make sure we only do this setup once */
-+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
-+ XenbusStateInitialised)
-+ goto out;
-+
-+ /* Wait for frontend to state that it has published the configuration */
-+ if (xenbus_read_driver_state(pdev->xdev->otherend) !=
-+ XenbusStateInitialised)
-+ goto out;
-+
-+ dev_dbg(&pdev->xdev->dev, "Reading frontend config\n");
-+
-+ err = xenbus_gather(XBT_NIL, pdev->xdev->otherend,
-+ "pci-op-ref", "%u", &gnt_ref,
-+ "event-channel", "%u", &remote_evtchn,
-+ "magic", NULL, &magic, NULL);
-+ if (err) {
-+ /* If configuration didn't get read correctly, wait longer */
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error reading configuration from frontend");
-+ goto out;
-+ }
-+
-+ if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {
-+ xenbus_dev_fatal(pdev->xdev, -EFAULT,
-+ "version mismatch (%s/%s) with pcifront - "
-+ "halting pciback",
-+ magic, XEN_PCI_MAGIC);
-+ goto out;
-+ }
-+
-+ err = pciback_do_attach(pdev, gnt_ref, remote_evtchn);
-+ if (err)
-+ goto out;
-+
-+ dev_dbg(&pdev->xdev->dev, "Connecting...\n");
-+
-+ err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
-+ if (err)
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error switching to connected state!");
-+
-+ dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
-+ out:
-+ spin_unlock(&pdev->dev_lock);
-+
-+ if (magic)
-+ kfree(magic);
-+
-+ return err;
-+}
-+
-+static void pciback_frontend_changed(struct xenbus_device *xdev,
-+ enum xenbus_state fe_state)
-+{
-+ struct pciback_device *pdev = xdev->dev.driver_data;
-+
-+ dev_dbg(&xdev->dev, "fe state changed %d\n", fe_state);
-+
-+ switch (fe_state) {
-+ case XenbusStateInitialised:
-+ pciback_attach(pdev);
-+ break;
-+
-+ case XenbusStateClosing:
-+ xenbus_switch_state(xdev, XenbusStateClosing);
-+ break;
-+
-+ case XenbusStateUnknown:
-+ case XenbusStateClosed:
-+ dev_dbg(&xdev->dev, "frontend is gone! unregister device\n");
-+ device_unregister(&xdev->dev);
-+ break;
-+
-+ default:
-+ break;
-+ }
-+}
-+
-+static int pciback_publish_pci_root(struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus)
-+{
-+ unsigned int d, b;
-+ int i, root_num, len, err;
-+ char str[64];
-+
-+ dev_dbg(&pdev->xdev->dev, "Publishing pci roots\n");
-+
-+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
-+ "root_num", "%d", &root_num);
-+ if (err == 0 || err == -ENOENT)
-+ root_num = 0;
-+ else if (err < 0)
-+ goto out;
-+
-+ /* Verify that we haven't already published this pci root */
-+ for (i = 0; i < root_num; i++) {
-+ len = snprintf(str, sizeof(str), "root-%d", i);
-+ if (unlikely(len >= (sizeof(str) - 1))) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
-+ str, "%x:%x", &d, &b);
-+ if (err < 0)
-+ goto out;
-+ if (err != 2) {
-+ err = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (d == domain && b == bus) {
-+ err = 0;
-+ goto out;
-+ }
-+ }
-+
-+ len = snprintf(str, sizeof(str), "root-%d", root_num);
-+ if (unlikely(len >= (sizeof(str) - 1))) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ dev_dbg(&pdev->xdev->dev, "writing root %d at %04x:%02x\n",
-+ root_num, domain, bus);
-+
-+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
-+ "%04x:%02x", domain, bus);
-+ if (err)
-+ goto out;
-+
-+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
-+ "root_num", "%d", (root_num + 1));
-+
-+ out:
-+ return err;
-+}
-+
-+static int pciback_export_device(struct pciback_device *pdev,
-+ int domain, int bus, int slot, int func)
-+{
-+ struct pci_dev *dev;
-+ int err = 0;
-+
-+ dev_dbg(&pdev->xdev->dev, "exporting dom %x bus %x slot %x func %x\n",
-+ domain, bus, slot, func);
-+
-+ dev = pcistub_get_pci_dev_by_slot(pdev, domain, bus, slot, func);
-+ if (!dev) {
-+ err = -EINVAL;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Couldn't locate PCI device "
-+ "(%04x:%02x:%02x.%01x)! "
-+ "perhaps already in-use?",
-+ domain, bus, slot, func);
-+ goto out;
-+ }
-+
-+ err = pciback_add_pci_dev(pdev, dev);
-+ if (err)
-+ goto out;
-+
-+ /* TODO: It'd be nice to export a bridge and have all of its children
-+ * get exported with it. This may be best done in xend (which will
-+ * have to calculate resource usage anyway) but we probably want to
-+ * put something in here to ensure that if a bridge gets given to a
-+ * driver domain, that all devices under that bridge are not given
-+	 * to other driver domains (as whoever controls the bridge can disable
-+ * it and stop the other devices from working).
-+ */
-+ out:
-+ return err;
-+}
-+
-+static int pciback_setup_backend(struct pciback_device *pdev)
-+{
-+ /* Get configuration from xend (if available now) */
-+ int domain, bus, slot, func;
-+ int err = 0;
-+ int i, num_devs;
-+ char dev_str[64];
-+
-+ spin_lock(&pdev->dev_lock);
-+
-+ /* It's possible we could get the call to setup twice, so make sure
-+ * we're not already connected.
-+ */
-+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
-+ XenbusStateInitWait)
-+ goto out;
-+
-+ dev_dbg(&pdev->xdev->dev, "getting be setup\n");
-+
-+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
-+ &num_devs);
-+ if (err != 1) {
-+ if (err >= 0)
-+ err = -EINVAL;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error reading number of devices");
-+ goto out;
-+ }
-+
-+ for (i = 0; i < num_devs; i++) {
-+ int l = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
-+ if (unlikely(l >= (sizeof(dev_str) - 1))) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "String overflow while reading "
-+ "configuration");
-+ goto out;
-+ }
-+
-+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, dev_str,
-+ "%x:%x:%x.%x", &domain, &bus, &slot, &func);
-+ if (err < 0) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error reading device configuration");
-+ goto out;
-+ }
-+ if (err != 4) {
-+ err = -EINVAL;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error parsing pci device "
-+ "configuration");
-+ goto out;
-+ }
-+
-+ err = pciback_export_device(pdev, domain, bus, slot, func);
-+ if (err)
-+ goto out;
-+ }
-+
-+ err = pciback_publish_pci_roots(pdev, pciback_publish_pci_root);
-+ if (err) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+				 "Error while publishing PCI root buses "
-+ "for frontend");
-+ goto out;
-+ }
-+
-+ err = xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
-+ if (err)
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error switching to initialised state!");
-+
-+ out:
-+ spin_unlock(&pdev->dev_lock);
-+
-+ if (!err)
-+ /* see if pcifront is already configured (if not, we'll wait) */
-+ pciback_attach(pdev);
-+
-+ return err;
-+}
-+
-+static void pciback_be_watch(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ struct pciback_device *pdev =
-+ container_of(watch, struct pciback_device, be_watch);
-+
-+ switch (xenbus_read_driver_state(pdev->xdev->nodename)) {
-+ case XenbusStateInitWait:
-+ pciback_setup_backend(pdev);
-+ break;
-+
-+ default:
-+ break;
-+ }
-+}
-+
-+static int pciback_xenbus_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err = 0;
-+ struct pciback_device *pdev = alloc_pdev(dev);
-+
-+ if (pdev == NULL) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(dev, err,
-+ "Error allocating pciback_device struct");
-+ goto out;
-+ }
-+
-+ /* wait for xend to configure us */
-+ err = xenbus_switch_state(dev, XenbusStateInitWait);
-+ if (err)
-+ goto out;
-+
-+ /* watch the backend node for backend configuration information */
-+ err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch,
-+ pciback_be_watch);
-+ if (err)
-+ goto out;
-+ pdev->be_watching = 1;
-+
-+ /* We need to force a call to our callback here in case
-+ * xend already configured us!
-+ */
-+ pciback_be_watch(&pdev->be_watch, NULL, 0);
-+
-+ out:
-+ return err;
-+}
-+
-+static int pciback_xenbus_remove(struct xenbus_device *dev)
-+{
-+ struct pciback_device *pdev = dev->dev.driver_data;
-+
-+ if (pdev != NULL)
-+ free_pdev(pdev);
-+
-+ return 0;
-+}
-+
-+static struct xenbus_device_id xenpci_ids[] = {
-+ {"pci"},
-+ {{0}},
-+};
-+
-+static struct xenbus_driver xenbus_pciback_driver = {
-+ .name = "pciback",
-+ .owner = THIS_MODULE,
-+ .ids = xenpci_ids,
-+ .probe = pciback_xenbus_probe,
-+ .remove = pciback_xenbus_remove,
-+ .otherend_changed = pciback_frontend_changed,
-+};
-+
-+int __init pciback_xenbus_register(void)
-+{
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ return xenbus_register_backend(&xenbus_pciback_driver);
-+}
-+
-+void __exit pciback_xenbus_unregister(void)
-+{
-+ xenbus_unregister_driver(&xenbus_pciback_driver);
-+}
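Taken together, the backend reads its own configuration from its xenstore node and learns about the frontend through the otherend node. A sketch of the layout implied by the keys used above (the example device values are illustrative, not taken from the patch):

backend node (pdev->xdev->nodename), written by xend and parsed in pciback_setup_backend:
	num_devs = "1"
	dev-0    = "0000:00:1d.0"
and written back by pciback_publish_pci_root:
	root_num = "1"
	root-0   = "0000:00"
frontend node (pdev->xdev->otherend), read by pciback_attach:
	pci-op-ref    = grant reference of the shared xen_pci_sharedinfo page
	event-channel = event channel port bound in pciback_do_attach
	magic         = XEN_PCI_MAGIC (version check)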
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pcifront/Makefile ubuntu-gutsy-xen/drivers/xen/pcifront/Makefile
---- ubuntu-gutsy/drivers/xen/pcifront/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pcifront/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,7 @@
-+obj-y += pcifront.o
-+
-+pcifront-y := pci_op.o xenbus.o pci.o
-+
-+ifeq ($(CONFIG_XEN_PCIDEV_FE_DEBUG),y)
-+EXTRA_CFLAGS += -DDEBUG
-+endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pcifront/pci.c ubuntu-gutsy-xen/drivers/xen/pcifront/pci.c
---- ubuntu-gutsy/drivers/xen/pcifront/pci.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pcifront/pci.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,46 @@
-+/*
-+ * PCI Frontend Operations - ensure only one PCI frontend runs at a time
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include "pcifront.h"
-+
-+DEFINE_SPINLOCK(pcifront_dev_lock);
-+static struct pcifront_device *pcifront_dev = NULL;
-+
-+int pcifront_connect(struct pcifront_device *pdev)
-+{
-+ int err = 0;
-+
-+ spin_lock(&pcifront_dev_lock);
-+
-+ if (!pcifront_dev) {
-+ dev_info(&pdev->xdev->dev, "Installing PCI frontend\n");
-+ pcifront_dev = pdev;
-+ }
-+ else {
-+ dev_err(&pdev->xdev->dev, "PCI frontend already installed!\n");
-+ err = -EEXIST;
-+ }
-+
-+ spin_unlock(&pcifront_dev_lock);
-+
-+ return err;
-+}
-+
-+void pcifront_disconnect(struct pcifront_device *pdev)
-+{
-+ spin_lock(&pcifront_dev_lock);
-+
-+ if (pdev == pcifront_dev) {
-+ dev_info(&pdev->xdev->dev,
-+ "Disconnecting PCI Frontend Buses\n");
-+ pcifront_dev = NULL;
-+ }
-+
-+ spin_unlock(&pcifront_dev_lock);
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pcifront/pcifront.h ubuntu-gutsy-xen/drivers/xen/pcifront/pcifront.h
---- ubuntu-gutsy/drivers/xen/pcifront/pcifront.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pcifront/pcifront.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,40 @@
-+/*
-+ * PCI Frontend - Common data structures & function declarations
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#ifndef __XEN_PCIFRONT_H__
-+#define __XEN_PCIFRONT_H__
-+
-+#include <linux/spinlock.h>
-+#include <linux/pci.h>
-+#include <xen/xenbus.h>
-+#include <xen/interface/io/pciif.h>
-+#include <xen/pcifront.h>
-+
-+struct pci_bus_entry {
-+ struct list_head list;
-+ struct pci_bus *bus;
-+};
-+
-+struct pcifront_device {
-+ struct xenbus_device *xdev;
-+ struct list_head root_buses;
-+ spinlock_t dev_lock;
-+
-+ int evtchn;
-+ int gnt_ref;
-+
-+ /* Lock this when doing any operations in sh_info */
-+ spinlock_t sh_info_lock;
-+ struct xen_pci_sharedinfo *sh_info;
-+};
-+
-+int pcifront_connect(struct pcifront_device *pdev);
-+void pcifront_disconnect(struct pcifront_device *pdev);
-+
-+int pcifront_scan_root(struct pcifront_device *pdev,
-+ unsigned int domain, unsigned int bus);
-+void pcifront_free_roots(struct pcifront_device *pdev);
-+
-+#endif /* __XEN_PCIFRONT_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pcifront/pci_op.c ubuntu-gutsy-xen/drivers/xen/pcifront/pci_op.c
---- ubuntu-gutsy/drivers/xen/pcifront/pci_op.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pcifront/pci_op.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,268 @@
-+/*
-+ * PCI Frontend Operations - Communicates with backend
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/version.h>
-+#include <linux/init.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include <linux/time.h>
-+#include <xen/evtchn.h>
-+#include "pcifront.h"
-+
-+static int verbose_request = 0;
-+module_param(verbose_request, int, 0644);
-+
-+static int errno_to_pcibios_err(int errno)
-+{
-+ switch (errno) {
-+ case XEN_PCI_ERR_success:
-+ return PCIBIOS_SUCCESSFUL;
-+
-+ case XEN_PCI_ERR_dev_not_found:
-+ return PCIBIOS_DEVICE_NOT_FOUND;
-+
-+ case XEN_PCI_ERR_invalid_offset:
-+ case XEN_PCI_ERR_op_failed:
-+ return PCIBIOS_BAD_REGISTER_NUMBER;
-+
-+ case XEN_PCI_ERR_not_implemented:
-+ return PCIBIOS_FUNC_NOT_SUPPORTED;
-+
-+ case XEN_PCI_ERR_access_denied:
-+ return PCIBIOS_SET_FAILED;
-+ }
-+ return errno;
-+}
-+
-+static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
-+{
-+ int err = 0;
-+ struct xen_pci_op *active_op = &pdev->sh_info->op;
-+ unsigned long irq_flags;
-+ evtchn_port_t port = pdev->evtchn;
-+ s64 ns, ns_timeout;
-+ struct timeval tv;
-+
-+ spin_lock_irqsave(&pdev->sh_info_lock, irq_flags);
-+
-+ memcpy(active_op, op, sizeof(struct xen_pci_op));
-+
-+ /* Go */
-+ wmb();
-+ set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
-+ notify_remote_via_evtchn(port);
-+
-+ /*
-+ * We set a poll timeout of 3 seconds but give up on return after
-+ * 2 seconds. It is better to time out too late rather than too early
-+ * (in the latter case we end up continually re-executing poll() with a
-+ * timeout in the past). 1s difference gives plenty of slack for error.
-+ */
-+ do_gettimeofday(&tv);
-+ ns_timeout = timeval_to_ns(&tv) + 2 * (s64)NSEC_PER_SEC;
-+
-+ clear_evtchn(port);
-+
-+ while (test_bit(_XEN_PCIF_active,
-+ (unsigned long *)&pdev->sh_info->flags)) {
-+ if (HYPERVISOR_poll(&port, 1, jiffies + 3*HZ))
-+ BUG();
-+ clear_evtchn(port);
-+ do_gettimeofday(&tv);
-+ ns = timeval_to_ns(&tv);
-+ if (ns > ns_timeout) {
-+ dev_err(&pdev->xdev->dev,
-+ "pciback not responding!!!\n");
-+ clear_bit(_XEN_PCIF_active,
-+ (unsigned long *)&pdev->sh_info->flags);
-+ err = XEN_PCI_ERR_dev_not_found;
-+ goto out;
-+ }
-+ }
-+
-+ memcpy(op, active_op, sizeof(struct xen_pci_op));
-+
-+ err = op->err;
-+ out:
-+ spin_unlock_irqrestore(&pdev->sh_info_lock, irq_flags);
-+ return err;
-+}
-+
-+/* Access to this function is spinlocked in drivers/pci/access.c */
-+static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
-+ int where, int size, u32 * val)
-+{
-+ int err = 0;
-+ struct xen_pci_op op = {
-+ .cmd = XEN_PCI_OP_conf_read,
-+ .domain = pci_domain_nr(bus),
-+ .bus = bus->number,
-+ .devfn = devfn,
-+ .offset = where,
-+ .size = size,
-+ };
-+ struct pcifront_sd *sd = bus->sysdata;
-+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
-+
-+ if (verbose_request)
-+ dev_info(&pdev->xdev->dev,
-+ "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
-+ pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
-+ PCI_FUNC(devfn), where, size);
-+
-+ err = do_pci_op(pdev, &op);
-+
-+ if (likely(!err)) {
-+ if (verbose_request)
-+ dev_info(&pdev->xdev->dev, "read got back value %x\n",
-+ op.value);
-+
-+ *val = op.value;
-+ } else if (err == -ENODEV) {
-+ /* No device here, pretend that it just returned 0 */
-+ err = 0;
-+ *val = 0;
-+ }
-+
-+ return errno_to_pcibios_err(err);
-+}
-+
-+/* Access to this function is spinlocked in drivers/pci/access.c */
-+static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
-+ int where, int size, u32 val)
-+{
-+ struct xen_pci_op op = {
-+ .cmd = XEN_PCI_OP_conf_write,
-+ .domain = pci_domain_nr(bus),
-+ .bus = bus->number,
-+ .devfn = devfn,
-+ .offset = where,
-+ .size = size,
-+ .value = val,
-+ };
-+ struct pcifront_sd *sd = bus->sysdata;
-+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
-+
-+ if (verbose_request)
-+ dev_info(&pdev->xdev->dev,
-+ "write dev=%04x:%02x:%02x.%01x - "
-+ "offset %x size %d val %x\n",
-+ pci_domain_nr(bus), bus->number,
-+ PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
-+
-+ return errno_to_pcibios_err(do_pci_op(pdev, &op));
-+}
-+
-+struct pci_ops pcifront_bus_ops = {
-+ .read = pcifront_bus_read,
-+ .write = pcifront_bus_write,
-+};
-+
-+/* Claim resources for the PCI frontend as-is, backend won't allow changes */
-+static void pcifront_claim_resource(struct pci_dev *dev, void *data)
-+{
-+ struct pcifront_device *pdev = data;
-+ int i;
-+ struct resource *r;
-+
-+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-+ r = &dev->resource[i];
-+
-+ if (!r->parent && r->start && r->flags) {
-+ dev_dbg(&pdev->xdev->dev, "claiming resource %s/%d\n",
-+ pci_name(dev), i);
-+ pci_claim_resource(dev, i);
-+ }
-+ }
-+}
-+
-+int pcifront_scan_root(struct pcifront_device *pdev,
-+ unsigned int domain, unsigned int bus)
-+{
-+ struct pci_bus *b;
-+ struct pcifront_sd *sd = NULL;
-+ struct pci_bus_entry *bus_entry = NULL;
-+ int err = 0;
-+
-+#ifndef CONFIG_PCI_DOMAINS
-+ if (domain != 0) {
-+ dev_err(&pdev->xdev->dev,
-+ "PCI Root in non-zero PCI Domain! domain=%d\n", domain);
-+ dev_err(&pdev->xdev->dev,
-+ "Please compile with CONFIG_PCI_DOMAINS\n");
-+ err = -EINVAL;
-+ goto err_out;
-+ }
-+#endif
-+
-+ dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
-+ domain, bus);
-+
-+ bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
-+ sd = kmalloc(sizeof(*sd), GFP_KERNEL);
-+ if (!bus_entry || !sd) {
-+ err = -ENOMEM;
-+ goto err_out;
-+ }
-+ pcifront_init_sd(sd, domain, pdev);
-+
-+ b = pci_scan_bus_parented(&pdev->xdev->dev, bus,
-+ &pcifront_bus_ops, sd);
-+ if (!b) {
-+ dev_err(&pdev->xdev->dev,
-+ "Error creating PCI Frontend Bus!\n");
-+ err = -ENOMEM;
-+ goto err_out;
-+ }
-+ bus_entry->bus = b;
-+
-+ list_add(&bus_entry->list, &pdev->root_buses);
-+
-+ /* Claim resources before going "live" with our devices */
-+ pci_walk_bus(b, pcifront_claim_resource, pdev);
-+
-+ pci_bus_add_devices(b);
-+
-+ return 0;
-+
-+ err_out:
-+ kfree(bus_entry);
-+ kfree(sd);
-+
-+ return err;
-+}
-+
-+static void free_root_bus_devs(struct pci_bus *bus)
-+{
-+ struct pci_dev *dev;
-+
-+ while (!list_empty(&bus->devices)) {
-+ dev = container_of(bus->devices.next, struct pci_dev,
-+ bus_list);
-+ dev_dbg(&dev->dev, "removing device\n");
-+ pci_remove_bus_device(dev);
-+ }
-+}
-+
-+void pcifront_free_roots(struct pcifront_device *pdev)
-+{
-+ struct pci_bus_entry *bus_entry, *t;
-+
-+ dev_dbg(&pdev->xdev->dev, "cleaning up root buses\n");
-+
-+ list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) {
-+ list_del(&bus_entry->list);
-+
-+ free_root_bus_devs(bus_entry->bus);
-+
-+ kfree(bus_entry->bus->sysdata);
-+
-+ device_unregister(bus_entry->bus->bridge);
-+ pci_remove_bus(bus_entry->bus);
-+
-+ kfree(bus_entry);
-+ }
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/pcifront/xenbus.c ubuntu-gutsy-xen/drivers/xen/pcifront/xenbus.c
---- ubuntu-gutsy/drivers/xen/pcifront/xenbus.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/pcifront/xenbus.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,295 @@
-+/*
-+ * PCI Frontend Xenbus Setup - handles setup with backend (imports page/evtchn)
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/mm.h>
-+#include <xen/xenbus.h>
-+#include <xen/gnttab.h>
-+#include "pcifront.h"
-+
-+#define INVALID_GRANT_REF (0)
-+#define INVALID_EVTCHN (-1)
-+
-+static struct pcifront_device *alloc_pdev(struct xenbus_device *xdev)
-+{
-+ struct pcifront_device *pdev;
-+
-+ pdev = kmalloc(sizeof(struct pcifront_device), GFP_KERNEL);
-+ if (pdev == NULL)
-+ goto out;
-+
-+ pdev->sh_info =
-+ (struct xen_pci_sharedinfo *)__get_free_page(GFP_KERNEL);
-+ if (pdev->sh_info == NULL) {
-+ kfree(pdev);
-+ pdev = NULL;
-+ goto out;
-+ }
-+ pdev->sh_info->flags = 0;
-+
-+ xdev->dev.driver_data = pdev;
-+ pdev->xdev = xdev;
-+
-+ INIT_LIST_HEAD(&pdev->root_buses);
-+
-+ spin_lock_init(&pdev->dev_lock);
-+ spin_lock_init(&pdev->sh_info_lock);
-+
-+ pdev->evtchn = INVALID_EVTCHN;
-+ pdev->gnt_ref = INVALID_GRANT_REF;
-+
-+ dev_dbg(&xdev->dev, "Allocated pdev @ 0x%p pdev->sh_info @ 0x%p\n",
-+ pdev, pdev->sh_info);
-+ out:
-+ return pdev;
-+}
-+
-+static void free_pdev(struct pcifront_device *pdev)
-+{
-+ dev_dbg(&pdev->xdev->dev, "freeing pdev @ 0x%p\n", pdev);
-+
-+ pcifront_free_roots(pdev);
-+
-+ if (pdev->evtchn != INVALID_EVTCHN)
-+ xenbus_free_evtchn(pdev->xdev, pdev->evtchn);
-+
-+ if (pdev->gnt_ref != INVALID_GRANT_REF)
-+ gnttab_end_foreign_access(pdev->gnt_ref, 0,
-+ (unsigned long)pdev->sh_info);
-+
-+ pdev->xdev->dev.driver_data = NULL;
-+
-+ kfree(pdev);
-+}
-+
-+static int pcifront_publish_info(struct pcifront_device *pdev)
-+{
-+ int err = 0;
-+ struct xenbus_transaction trans;
-+
-+ err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info));
-+ if (err < 0)
-+ goto out;
-+
-+ pdev->gnt_ref = err;
-+
-+ err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn);
-+ if (err)
-+ goto out;
-+
-+ do_publish:
-+ err = xenbus_transaction_start(&trans);
-+ if (err) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error writing configuration for backend "
-+ "(start transaction)");
-+ goto out;
-+ }
-+
-+ err = xenbus_printf(trans, pdev->xdev->nodename,
-+ "pci-op-ref", "%u", pdev->gnt_ref);
-+ if (!err)
-+ err = xenbus_printf(trans, pdev->xdev->nodename,
-+ "event-channel", "%u", pdev->evtchn);
-+ if (!err)
-+ err = xenbus_printf(trans, pdev->xdev->nodename,
-+ "magic", XEN_PCI_MAGIC);
-+
-+ if (err) {
-+ xenbus_transaction_end(trans, 1);
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error writing configuration for backend");
-+ goto out;
-+ } else {
-+ err = xenbus_transaction_end(trans, 0);
-+ if (err == -EAGAIN)
-+ goto do_publish;
-+ else if (err) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error completing transaction "
-+ "for backend");
-+ goto out;
-+ }
-+ }
-+
-+ xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
-+
-+ dev_dbg(&pdev->xdev->dev, "publishing successful!\n");
-+
-+ out:
-+ return err;
-+}
-+
-+static int pcifront_try_connect(struct pcifront_device *pdev)
-+{
-+ int err = -EFAULT;
-+ int i, num_roots, len;
-+ char str[64];
-+ unsigned int domain, bus;
-+
-+ spin_lock(&pdev->dev_lock);
-+
-+ /* Only connect once */
-+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
-+ XenbusStateInitialised)
-+ goto out;
-+
-+ err = pcifront_connect(pdev);
-+ if (err) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error connecting PCI Frontend");
-+ goto out;
-+ }
-+
-+ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
-+ "root_num", "%d", &num_roots);
-+ if (err == -ENOENT) {
-+ xenbus_dev_error(pdev->xdev, err,
-+ "No PCI Roots found, trying 0000:00");
-+ err = pcifront_scan_root(pdev, 0, 0);
-+ num_roots = 0;
-+ } else if (err != 1) {
-+ if (err == 0)
-+ err = -EINVAL;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error reading number of PCI roots");
-+ goto out;
-+ }
-+
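-+	/* Each "root-N" key names a PCI root as "domain:bus"; scan each one. */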
-+ for (i = 0; i < num_roots; i++) {
-+ len = snprintf(str, sizeof(str), "root-%d", i);
-+ if (unlikely(len >= (sizeof(str) - 1))) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
-+ "%x:%x", &domain, &bus);
-+ if (err != 2) {
-+ if (err >= 0)
-+ err = -EINVAL;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error reading PCI root %d", i);
-+ goto out;
-+ }
-+
-+ err = pcifront_scan_root(pdev, domain, bus);
-+ if (err) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error scanning PCI root %04x:%02x",
-+ domain, bus);
-+ goto out;
-+ }
-+ }
-+
-+ err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
-+ if (err)
-+ goto out;
-+
-+ out:
-+ spin_unlock(&pdev->dev_lock);
-+ return err;
-+}
-+
-+static int pcifront_try_disconnect(struct pcifront_device *pdev)
-+{
-+ int err = 0;
-+ enum xenbus_state prev_state;
-+
-+ spin_lock(&pdev->dev_lock);
-+
-+ prev_state = xenbus_read_driver_state(pdev->xdev->nodename);
-+
-+ if (prev_state < XenbusStateClosing)
-+ err = xenbus_switch_state(pdev->xdev, XenbusStateClosing);
-+
-+ if (!err && prev_state == XenbusStateConnected)
-+ pcifront_disconnect(pdev);
-+
-+ spin_unlock(&pdev->dev_lock);
-+
-+ return err;
-+}
-+
-+static void pcifront_backend_changed(struct xenbus_device *xdev,
-+ enum xenbus_state be_state)
-+{
-+ struct pcifront_device *pdev = xdev->dev.driver_data;
-+
-+ switch (be_state) {
-+ case XenbusStateClosing:
-+ dev_warn(&xdev->dev, "backend going away!\n");
-+ pcifront_try_disconnect(pdev);
-+ break;
-+
-+ case XenbusStateUnknown:
-+ case XenbusStateClosed:
-+ dev_warn(&xdev->dev, "backend went away!\n");
-+ pcifront_try_disconnect(pdev);
-+
-+ device_unregister(&pdev->xdev->dev);
-+ break;
-+
-+ case XenbusStateConnected:
-+ pcifront_try_connect(pdev);
-+ break;
-+
-+ default:
-+ break;
-+ }
-+}
-+
-+static int pcifront_xenbus_probe(struct xenbus_device *xdev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err = 0;
-+ struct pcifront_device *pdev = alloc_pdev(xdev);
-+
-+ if (pdev == NULL) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(xdev, err,
-+ "Error allocating pcifront_device struct");
-+ goto out;
-+ }
-+
-+ err = pcifront_publish_info(pdev);
-+
-+ out:
-+ return err;
-+}
-+
-+static int pcifront_xenbus_remove(struct xenbus_device *xdev)
-+{
-+ if (xdev->dev.driver_data)
-+ free_pdev(xdev->dev.driver_data);
-+
-+ return 0;
-+}
-+
-+static struct xenbus_device_id xenpci_ids[] = {
-+ {"pci"},
-+ {{0}},
-+};
-+
-+static struct xenbus_driver xenbus_pcifront_driver = {
-+ .name = "pcifront",
-+ .owner = THIS_MODULE,
-+ .ids = xenpci_ids,
-+ .probe = pcifront_xenbus_probe,
-+ .remove = pcifront_xenbus_remove,
-+ .otherend_changed = pcifront_backend_changed,
-+};
-+
-+static int __init pcifront_init(void)
-+{
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ return xenbus_register_frontend(&xenbus_pcifront_driver);
-+}
-+
-+/* Initialize after the Xen PCI Frontend Stub is initialized */
-+subsys_initcall(pcifront_init);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/privcmd/Makefile ubuntu-gutsy-xen/drivers/xen/privcmd/Makefile
---- ubuntu-gutsy/drivers/xen/privcmd/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/privcmd/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,2 @@
-+
-+obj-$(CONFIG_XEN_PRIVCMD) := privcmd.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/privcmd/privcmd.c ubuntu-gutsy-xen/drivers/xen/privcmd/privcmd.c
---- ubuntu-gutsy/drivers/xen/privcmd/privcmd.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/privcmd/privcmd.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,284 @@
-+/******************************************************************************
-+ * privcmd.c
-+ *
-+ * Interface to privileged domain-0 commands.
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/mman.h>
-+#include <linux/swap.h>
-+#include <linux/smp_lock.h>
-+#include <linux/highmem.h>
-+#include <linux/pagemap.h>
-+#include <linux/seq_file.h>
-+#include <linux/kthread.h>
-+#include <asm/hypervisor.h>
-+
-+#include <asm/pgalloc.h>
-+#include <asm/pgtable.h>
-+#include <asm/uaccess.h>
-+#include <asm/tlb.h>
-+#include <asm/hypervisor.h>
-+#include <xen/public/privcmd.h>
-+#include <xen/interface/xen.h>
-+#include <xen/xen_proc.h>
-+
-+static struct proc_dir_entry *privcmd_intf;
-+static struct proc_dir_entry *capabilities_intf;
-+
-+#ifndef HAVE_ARCH_PRIVCMD_MMAP
-+static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
-+#endif
-+
-+static int privcmd_ioctl(struct inode *inode, struct file *file,
-+ unsigned int cmd, unsigned long data)
-+{
-+ int ret = -ENOSYS;
-+ void __user *udata = (void __user *) data;
-+
-+ switch (cmd) {
-+ case IOCTL_PRIVCMD_HYPERCALL: {
-+ privcmd_hypercall_t hypercall;
-+
-+ if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
-+ return -EFAULT;
-+
-+#if defined(__i386__)
-+ if (hypercall.op >= (PAGE_SIZE >> 5))
-+ break;
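-+		/* Each hypercall has a 32-byte stub in the hypercall page; load
-+		 * the arguments into registers and call the stub for hypercall.op. */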
-+ __asm__ __volatile__ (
-+ "pushl %%ebx; pushl %%ecx; pushl %%edx; "
-+ "pushl %%esi; pushl %%edi; "
-+ "movl 8(%%eax),%%ebx ;"
-+ "movl 16(%%eax),%%ecx ;"
-+ "movl 24(%%eax),%%edx ;"
-+ "movl 32(%%eax),%%esi ;"
-+ "movl 40(%%eax),%%edi ;"
-+ "movl (%%eax),%%eax ;"
-+ "shll $5,%%eax ;"
-+ "addl $hypercall_page,%%eax ;"
-+ "call *%%eax ;"
-+ "popl %%edi; popl %%esi; popl %%edx; "
-+ "popl %%ecx; popl %%ebx"
-+ : "=a" (ret) : "0" (&hypercall) : "memory" );
-+#elif defined (__x86_64__)
-+ if (hypercall.op < (PAGE_SIZE >> 5)) {
-+ long ign1, ign2, ign3;
-+ __asm__ __volatile__ (
-+ "movq %8,%%r10; movq %9,%%r8;"
-+ "shll $5,%%eax ;"
-+ "addq $hypercall_page,%%rax ;"
-+ "call *%%rax"
-+ : "=a" (ret), "=D" (ign1),
-+ "=S" (ign2), "=d" (ign3)
-+ : "0" ((unsigned int)hypercall.op),
-+ "1" (hypercall.arg[0]),
-+ "2" (hypercall.arg[1]),
-+ "3" (hypercall.arg[2]),
-+ "g" (hypercall.arg[3]),
-+ "g" (hypercall.arg[4])
-+ : "r8", "r10", "memory" );
-+ }
-+#elif defined (__ia64__)
-+ ret = privcmd_hypercall(&hypercall);
-+#endif
-+ }
-+ break;
-+
-+ case IOCTL_PRIVCMD_MMAP: {
-+ privcmd_mmap_t mmapcmd;
-+ privcmd_mmap_entry_t msg;
-+ privcmd_mmap_entry_t __user *p;
-+ struct mm_struct *mm = current->mm;
-+ struct vm_area_struct *vma;
-+ unsigned long va;
-+ int i, rc;
-+
-+ if (!is_initial_xendomain())
-+ return -EPERM;
-+
-+ if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
-+ return -EFAULT;
-+
-+ p = mmapcmd.entry;
-+ if (copy_from_user(&msg, p, sizeof(msg)))
-+ return -EFAULT;
-+
-+ down_write(&mm->mmap_sem);
-+
-+ vma = find_vma(mm, msg.va);
-+ rc = -EINVAL;
-+ if (!vma || (msg.va != vma->vm_start) ||
-+ !privcmd_enforce_singleshot_mapping(vma))
-+ goto mmap_out;
-+
-+ va = vma->vm_start;
-+
-+ for (i = 0; i < mmapcmd.num; i++) {
-+ rc = -EFAULT;
-+ if (copy_from_user(&msg, p, sizeof(msg)))
-+ goto mmap_out;
-+
-+ /* Do not allow range to wrap the address space. */
-+ rc = -EINVAL;
-+ if ((msg.npages > (LONG_MAX >> PAGE_SHIFT)) ||
-+ ((unsigned long)(msg.npages << PAGE_SHIFT) >= -va))
-+ goto mmap_out;
-+
-+ /* Range chunks must be contiguous in va space. */
-+ if ((msg.va != va) ||
-+ ((msg.va+(msg.npages<<PAGE_SHIFT)) > vma->vm_end))
-+ goto mmap_out;
-+
-+ if ((rc = direct_remap_pfn_range(
-+ vma,
-+ msg.va & PAGE_MASK,
-+ msg.mfn,
-+ msg.npages << PAGE_SHIFT,
-+ vma->vm_page_prot,
-+ mmapcmd.dom)) < 0)
-+ goto mmap_out;
-+
-+ p++;
-+ va += msg.npages << PAGE_SHIFT;
-+ }
-+
-+ rc = 0;
-+
-+ mmap_out:
-+ up_write(&mm->mmap_sem);
-+ ret = rc;
-+ }
-+ break;
-+
-+ case IOCTL_PRIVCMD_MMAPBATCH: {
-+ privcmd_mmapbatch_t m;
-+ struct mm_struct *mm = current->mm;
-+ struct vm_area_struct *vma;
-+ xen_pfn_t __user *p;
-+ unsigned long addr, mfn, nr_pages;
-+ int i;
-+
-+ if (!is_initial_xendomain())
-+ return -EPERM;
-+
-+ if (copy_from_user(&m, udata, sizeof(m)))
-+ return -EFAULT;
-+
-+ nr_pages = m.num;
-+ if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
-+ return -EINVAL;
-+
-+ down_write(&mm->mmap_sem);
-+
-+ vma = find_vma(mm, m.addr);
-+ if (!vma ||
-+ (m.addr != vma->vm_start) ||
-+ ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
-+ !privcmd_enforce_singleshot_mapping(vma)) {
-+ up_write(&mm->mmap_sem);
-+ return -EINVAL;
-+ }
-+
-+ p = m.arr;
-+ addr = m.addr;
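-+		/* Map each MFN in turn; entries that fail are handed back to
-+		 * userspace with 0xF0000000 or'd into the MFN as an error marker. */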
-+ for (i = 0; i < nr_pages; i++, addr += PAGE_SIZE, p++) {
-+ if (get_user(mfn, p)) {
-+ up_write(&mm->mmap_sem);
-+ return -EFAULT;
-+ }
-+
-+ ret = direct_remap_pfn_range(vma, addr & PAGE_MASK,
-+ mfn, PAGE_SIZE,
-+ vma->vm_page_prot, m.dom);
-+ if (ret < 0)
-+ put_user(0xF0000000 | mfn, p);
-+ }
-+
-+ up_write(&mm->mmap_sem);
-+ ret = 0;
-+ }
-+ break;
-+
-+ default:
-+ ret = -EINVAL;
-+ break;
-+ }
-+
-+ return ret;
-+}
-+
-+#ifndef HAVE_ARCH_PRIVCMD_MMAP
-+static struct page *privcmd_nopage(struct vm_area_struct *vma,
-+ unsigned long address,
-+ int *type)
-+{
-+ return NOPAGE_SIGBUS;
-+}
-+
-+static struct vm_operations_struct privcmd_vm_ops = {
-+ .nopage = privcmd_nopage
-+};
-+
-+static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
-+{
-+ /* Unsupported for auto-translate guests. */
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return -ENOSYS;
-+
-+ /* DONTCOPY is essential for Xen as copy_page_range is broken. */
-+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTCOPY;
-+ vma->vm_ops = &privcmd_vm_ops;
-+ vma->vm_private_data = NULL;
-+
-+ return 0;
-+}
-+
-+static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
-+{
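-+	/* Atomically claim the VMA: only the first caller sees NULL, so a
-+	 * region can be mapped through privcmd at most once. */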
-+ return (xchg(&vma->vm_private_data, (void *)1) == NULL);
-+}
-+#endif
-+
-+static const struct file_operations privcmd_file_ops = {
-+ .ioctl = privcmd_ioctl,
-+ .mmap = privcmd_mmap,
-+};
-+
-+static int capabilities_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len = 0;
-+ *page = 0;
-+
-+ if (is_initial_xendomain())
-+ len = sprintf( page, "control_d\n" );
-+
-+ *eof = 1;
-+ return len;
-+}
-+
-+static int __init privcmd_init(void)
-+{
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ privcmd_intf = create_xen_proc_entry("privcmd", 0400);
-+ if (privcmd_intf != NULL)
-+ privcmd_intf->proc_fops = &privcmd_file_ops;
-+
-+ capabilities_intf = create_xen_proc_entry("capabilities", 0400 );
-+ if (capabilities_intf != NULL)
-+ capabilities_intf->read_proc = capabilities_read;
-+
-+ return 0;
-+}
-+
-+__initcall(privcmd_init);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/tpmback/common.h ubuntu-gutsy-xen/drivers/xen/tpmback/common.h
---- ubuntu-gutsy/drivers/xen/tpmback/common.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/tpmback/common.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,85 @@
-+/******************************************************************************
-+ * drivers/xen/tpmback/common.h
-+ */
-+
-+#ifndef __TPM__BACKEND__COMMON_H__
-+#define __TPM__BACKEND__COMMON_H__
-+
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <xen/evtchn.h>
-+#include <xen/driver_util.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/interface/io/tpmif.h>
-+#include <asm/io.h>
-+#include <asm/pgalloc.h>
-+
-+#define DPRINTK(_f, _a...) \
-+ pr_debug("(file=%s, line=%d) " _f, \
-+ __FILE__ , __LINE__ , ## _a )
-+
-+struct backend_info;
-+
-+typedef struct tpmif_st {
-+ struct list_head tpmif_list;
-+ /* Unique identifier for this interface. */
-+ domid_t domid;
-+ unsigned int handle;
-+
-+ /* Physical parameters of the comms window. */
-+ unsigned int irq;
-+
-+ /* The shared rings and indexes. */
-+ tpmif_tx_interface_t *tx;
-+ struct vm_struct *tx_area;
-+
-+ /* Miscellaneous private stuff. */
-+ enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
-+ int active;
-+
-+ struct tpmif_st *hash_next;
-+ struct list_head list; /* scheduling list */
-+ atomic_t refcnt;
-+
-+ struct backend_info *bi;
-+
-+ grant_handle_t shmem_handle;
-+ grant_ref_t shmem_ref;
-+ struct page **mmap_pages;
-+
-+ char devname[20];
-+} tpmif_t;
-+
-+void tpmif_disconnect_complete(tpmif_t * tpmif);
-+tpmif_t *tpmif_find(domid_t domid, struct backend_info *bi);
-+void tpmif_interface_init(void);
-+void tpmif_interface_exit(void);
-+void tpmif_schedule_work(tpmif_t * tpmif);
-+void tpmif_deschedule_work(tpmif_t * tpmif);
-+void tpmif_xenbus_init(void);
-+void tpmif_xenbus_exit(void);
-+int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn);
-+irqreturn_t tpmif_be_int(int irq, void *dev_id);
-+
-+long int tpmback_get_instance(struct backend_info *bi);
-+
-+int vtpm_release_packets(tpmif_t * tpmif, int send_msgs);
-+
-+
-+#define tpmif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define tpmif_put(_b) \
-+ do { \
-+ if (atomic_dec_and_test(&(_b)->refcnt)) \
-+ tpmif_disconnect_complete(_b); \
-+ } while (0)
-+
-+extern int num_frontends;
-+
-+static inline unsigned long idx_to_kaddr(tpmif_t *t, unsigned int idx)
-+{
-+ return (unsigned long)pfn_to_kaddr(page_to_pfn(t->mmap_pages[idx]));
-+}
-+
-+#endif /* __TPMIF__BACKEND__COMMON_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/tpmback/interface.c ubuntu-gutsy-xen/drivers/xen/tpmback/interface.c
---- ubuntu-gutsy/drivers/xen/tpmback/interface.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/tpmback/interface.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,167 @@
-+ /*****************************************************************************
-+ * drivers/xen/tpmback/interface.c
-+ *
-+ * Virtual TPM interface management.
-+ *
-+ * Copyright (c) 2005, IBM Corporation
-+ *
-+ * Author: Stefan Berger, stefanb@us.ibm.com
-+ *
-+ * This code has been derived from drivers/xen/netback/interface.c
-+ * Copyright (c) 2004, Keir Fraser
-+ */
-+
-+#include "common.h"
-+#include <xen/balloon.h>
-+#include <xen/gnttab.h>
-+
-+static struct kmem_cache *tpmif_cachep;
-+int num_frontends = 0;
-+
-+LIST_HEAD(tpmif_list);
-+
-+static tpmif_t *alloc_tpmif(domid_t domid, struct backend_info *bi)
-+{
-+ tpmif_t *tpmif;
-+
-+ tpmif = kmem_cache_alloc(tpmif_cachep, GFP_KERNEL);
-+ if (tpmif == NULL)
-+ goto out_of_memory;
-+
-+ memset(tpmif, 0, sizeof (*tpmif));
-+ tpmif->domid = domid;
-+ tpmif->status = DISCONNECTED;
-+ tpmif->bi = bi;
-+ snprintf(tpmif->devname, sizeof(tpmif->devname), "tpmif%d", domid);
-+ atomic_set(&tpmif->refcnt, 1);
-+
-+ tpmif->mmap_pages = alloc_empty_pages_and_pagevec(TPMIF_TX_RING_SIZE);
-+ if (tpmif->mmap_pages == NULL)
-+ goto out_of_memory;
-+
-+ list_add(&tpmif->tpmif_list, &tpmif_list);
-+ num_frontends++;
-+
-+ return tpmif;
-+
-+ out_of_memory:
-+ if (tpmif != NULL)
-+ kmem_cache_free(tpmif_cachep, tpmif);
-+ printk("%s: out of memory\n", __FUNCTION__);
-+ return ERR_PTR(-ENOMEM);
-+}
-+
-+static void free_tpmif(tpmif_t * tpmif)
-+{
-+ num_frontends--;
-+ list_del(&tpmif->tpmif_list);
-+ free_empty_pages_and_pagevec(tpmif->mmap_pages, TPMIF_TX_RING_SIZE);
-+ kmem_cache_free(tpmif_cachep, tpmif);
-+}
-+
-+tpmif_t *tpmif_find(domid_t domid, struct backend_info *bi)
-+{
-+ tpmif_t *tpmif;
-+
-+ list_for_each_entry(tpmif, &tpmif_list, tpmif_list) {
-+ if (tpmif->bi == bi) {
-+ if (tpmif->domid == domid) {
-+ tpmif_get(tpmif);
-+ return tpmif;
-+ } else {
-+ return ERR_PTR(-EEXIST);
-+ }
-+ }
-+ }
-+
-+ return alloc_tpmif(domid, bi);
-+}
-+
-+static int map_frontend_page(tpmif_t *tpmif, unsigned long shared_page)
-+{
-+ struct gnttab_map_grant_ref op;
-+
-+ gnttab_set_map_op(&op, (unsigned long)tpmif->tx_area->addr,
-+ GNTMAP_host_map, shared_page, tpmif->domid);
-+
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-+ BUG();
-+
-+ if (op.status) {
-+ DPRINTK(" Grant table operation failure !\n");
-+ return op.status;
-+ }
-+
-+ tpmif->shmem_ref = shared_page;
-+ tpmif->shmem_handle = op.handle;
-+
-+ return 0;
-+}
-+
-+static void unmap_frontend_page(tpmif_t *tpmif)
-+{
-+ struct gnttab_unmap_grant_ref op;
-+
-+ gnttab_set_unmap_op(&op, (unsigned long)tpmif->tx_area->addr,
-+ GNTMAP_host_map, tpmif->shmem_handle);
-+
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-+ BUG();
-+}
-+
-+int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn)
-+{
-+ int err;
-+
-+ if (tpmif->irq)
-+ return 0;
-+
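-+	/* Map the frontend's shared ring page into a fresh VM area and bind
-+	 * the interdomain event channel to tpmif_be_int. */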
-+ if ((tpmif->tx_area = alloc_vm_area(PAGE_SIZE)) == NULL)
-+ return -ENOMEM;
-+
-+ err = map_frontend_page(tpmif, shared_page);
-+ if (err) {
-+ free_vm_area(tpmif->tx_area);
-+ return err;
-+ }
-+
-+ tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr;
-+
-+ err = bind_interdomain_evtchn_to_irqhandler(
-+ tpmif->domid, evtchn, tpmif_be_int, 0, tpmif->devname, tpmif);
-+ if (err < 0) {
-+ unmap_frontend_page(tpmif);
-+ free_vm_area(tpmif->tx_area);
-+ return err;
-+ }
-+ tpmif->irq = err;
-+
-+ tpmif->shmem_ref = shared_page;
-+ tpmif->active = 1;
-+
-+ return 0;
-+}
-+
-+void tpmif_disconnect_complete(tpmif_t *tpmif)
-+{
-+ if (tpmif->irq)
-+ unbind_from_irqhandler(tpmif->irq, tpmif);
-+
-+ if (tpmif->tx) {
-+ unmap_frontend_page(tpmif);
-+ free_vm_area(tpmif->tx_area);
-+ }
-+
-+ free_tpmif(tpmif);
-+}
-+
-+void __init tpmif_interface_init(void)
-+{
-+ tpmif_cachep = kmem_cache_create("tpmif_cache", sizeof (tpmif_t),
-+ 0, 0, NULL, NULL);
-+}
-+
-+void __exit tpmif_interface_exit(void)
-+{
-+ kmem_cache_destroy(tpmif_cachep);
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/tpmback/Makefile ubuntu-gutsy-xen/drivers/xen/tpmback/Makefile
---- ubuntu-gutsy/drivers/xen/tpmback/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/tpmback/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,4 @@
-+
-+obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmbk.o
-+
-+tpmbk-y += tpmback.o interface.o xenbus.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/tpmback/tpmback.c ubuntu-gutsy-xen/drivers/xen/tpmback/tpmback.c
---- ubuntu-gutsy/drivers/xen/tpmback/tpmback.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/tpmback/tpmback.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,944 @@
-+/******************************************************************************
-+ * drivers/xen/tpmback/tpmback.c
-+ *
-+ * Copyright (c) 2005, IBM Corporation
-+ *
-+ * Author: Stefan Berger, stefanb@us.ibm.com
-+ * Grant table support: Mahadevan Gomathisankaran
-+ *
-+ * This code has been derived from drivers/xen/netback/netback.c
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ */
-+
-+#include "common.h"
-+#include <xen/evtchn.h>
-+
-+#include <linux/types.h>
-+#include <linux/list.h>
-+#include <linux/miscdevice.h>
-+#include <linux/poll.h>
-+#include <asm/uaccess.h>
-+#include <xen/xenbus.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/gnttab.h>
-+
-+/* local data structures */
-+struct data_exchange {
-+ struct list_head pending_pak;
-+ struct list_head current_pak;
-+ unsigned int copied_so_far;
-+ u8 has_opener:1;
-+ u8 aborted:1;
-+ rwlock_t pak_lock; // protects all of the previous fields
-+ wait_queue_head_t wait_queue;
-+};
-+
-+struct vtpm_resp_hdr {
-+ uint32_t instance_no;
-+ uint16_t tag_no;
-+ uint32_t len_no;
-+ uint32_t ordinal_no;
-+} __attribute__ ((packed));
-+
-+struct packet {
-+ struct list_head next;
-+ unsigned int data_len;
-+ u8 *data_buffer;
-+ tpmif_t *tpmif;
-+ u32 tpm_instance;
-+ u8 req_tag;
-+ u32 last_read;
-+ u8 flags;
-+ struct timer_list processing_timer;
-+};
-+
-+enum {
-+ PACKET_FLAG_DISCARD_RESPONSE = 1,
-+};
-+
-+/* local variables */
-+static struct data_exchange dataex;
-+
-+/* local function prototypes */
-+static int _packet_write(struct packet *pak,
-+ const char *data, size_t size, int userbuffer);
-+static void processing_timeout(unsigned long ptr);
-+static int packet_read_shmem(struct packet *pak,
-+ tpmif_t * tpmif,
-+ u32 offset,
-+ char *buffer, int isuserbuffer, u32 left);
-+static int vtpm_queue_packet(struct packet *pak);
-+
-+/***************************************************************
-+ Buffer copying for user and kernel space buffers.
-+***************************************************************/
-+static inline int copy_from_buffer(void *to,
-+ const void *from, unsigned long size,
-+ int isuserbuffer)
-+{
-+ if (isuserbuffer) {
-+ if (copy_from_user(to, (void __user *)from, size))
-+ return -EFAULT;
-+ } else {
-+ memcpy(to, from, size);
-+ }
-+ return 0;
-+}
-+
-+static inline int copy_to_buffer(void *to,
-+ const void *from, unsigned long size,
-+ int isuserbuffer)
-+{
-+ if (isuserbuffer) {
-+ if (copy_to_user((void __user *)to, from, size))
-+ return -EFAULT;
-+ } else {
-+ memcpy(to, from, size);
-+ }
-+ return 0;
-+}
-+
-+
-+static void dataex_init(struct data_exchange *dataex)
-+{
-+ INIT_LIST_HEAD(&dataex->pending_pak);
-+ INIT_LIST_HEAD(&dataex->current_pak);
-+ dataex->has_opener = 0;
-+ rwlock_init(&dataex->pak_lock);
-+ init_waitqueue_head(&dataex->wait_queue);
-+}
-+
-+/***************************************************************
-+ Packet-related functions
-+***************************************************************/
-+
-+static struct packet *packet_find_instance(struct list_head *head,
-+ u32 tpm_instance)
-+{
-+ struct packet *pak;
-+ struct list_head *p;
-+
-+ /*
-+ * traverse the list of packets and return the first
-+ * one with the given instance number
-+ */
-+ list_for_each(p, head) {
-+ pak = list_entry(p, struct packet, next);
-+
-+ if (pak->tpm_instance == tpm_instance) {
-+ return pak;
-+ }
-+ }
-+ return NULL;
-+}
-+
-+static struct packet *packet_find_packet(struct list_head *head, void *packet)
-+{
-+ struct packet *pak;
-+ struct list_head *p;
-+
-+ /*
-+ * traverse the list of packets and return the first
-+	 * one that matches the given packet pointer
-+ */
-+ list_for_each(p, head) {
-+ pak = list_entry(p, struct packet, next);
-+
-+ if (pak == packet) {
-+ return pak;
-+ }
-+ }
-+ return NULL;
-+}
-+
-+static struct packet *packet_alloc(tpmif_t * tpmif,
-+ u32 size, u8 req_tag, u8 flags)
-+{
-+ struct packet *pak = NULL;
-+ pak = kzalloc(sizeof (struct packet), GFP_ATOMIC);
-+ if (NULL != pak) {
-+ if (tpmif) {
-+ pak->tpmif = tpmif;
-+ pak->tpm_instance = tpmback_get_instance(tpmif->bi);
-+ tpmif_get(tpmif);
-+ }
-+ pak->data_len = size;
-+ pak->req_tag = req_tag;
-+ pak->last_read = 0;
-+ pak->flags = flags;
-+
-+ /*
-+ * cannot do tpmif_get(tpmif); bad things happen
-+ * on the last tpmif_put()
-+ */
-+ init_timer(&pak->processing_timer);
-+ pak->processing_timer.function = processing_timeout;
-+ pak->processing_timer.data = (unsigned long)pak;
-+ }
-+ return pak;
-+}
-+
-+static inline void packet_reset(struct packet *pak)
-+{
-+ pak->last_read = 0;
-+}
-+
-+static void packet_free(struct packet *pak)
-+{
-+ if (timer_pending(&pak->processing_timer)) {
-+ BUG();
-+ }
-+
-+ if (pak->tpmif)
-+ tpmif_put(pak->tpmif);
-+ kfree(pak->data_buffer);
-+ /*
-+ * cannot do tpmif_put(pak->tpmif); bad things happen
-+ * on the last tpmif_put()
-+ */
-+ kfree(pak);
-+}
-+
-+
-+/*
-+ * Write data to the shared memory and send it to the FE.
-+ */
-+static int packet_write(struct packet *pak,
-+ const char *data, size_t size, int isuserbuffer)
-+{
-+ int rc = 0;
-+
-+ if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) {
-+		/* Don't send a response to this packet. Just acknowledge it. */
-+ rc = size;
-+ } else {
-+ rc = _packet_write(pak, data, size, isuserbuffer);
-+ }
-+
-+ return rc;
-+}
-+
-+int _packet_write(struct packet *pak,
-+ const char *data, size_t size, int isuserbuffer)
-+{
-+ /*
-+ * Write into the shared memory pages directly
-+ * and send it to the front end.
-+ */
-+ tpmif_t *tpmif = pak->tpmif;
-+ grant_handle_t handle;
-+ int rc = 0;
-+ unsigned int i = 0;
-+ unsigned int offset = 0;
-+
-+ if (tpmif == NULL) {
-+ return -EFAULT;
-+ }
-+
-+ if (tpmif->status == DISCONNECTED) {
-+ return size;
-+ }
-+
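-+	/* Copy a page at a time: map each granted ring page, copy data into
-+	 * it, then unmap it before moving to the next slot. */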
-+ while (offset < size && i < TPMIF_TX_RING_SIZE) {
-+ unsigned int tocopy;
-+ struct gnttab_map_grant_ref map_op;
-+ struct gnttab_unmap_grant_ref unmap_op;
-+ tpmif_tx_request_t *tx;
-+
-+ tx = &tpmif->tx->ring[i].req;
-+
-+ if (0 == tx->addr) {
-+ DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);
-+ return 0;
-+ }
-+
-+ gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
-+ GNTMAP_host_map, tx->ref, tpmif->domid);
-+
-+ if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
-+ &map_op, 1))) {
-+ BUG();
-+ }
-+
-+ handle = map_op.handle;
-+
-+ if (map_op.status) {
-+ DPRINTK(" Grant table operation failure !\n");
-+ return 0;
-+ }
-+
-+ tocopy = min_t(size_t, size - offset, PAGE_SIZE);
-+
-+ if (copy_from_buffer((void *)(idx_to_kaddr(tpmif, i) |
-+ (tx->addr & ~PAGE_MASK)),
-+ &data[offset], tocopy, isuserbuffer)) {
-+ tpmif_put(tpmif);
-+ return -EFAULT;
-+ }
-+ tx->size = tocopy;
-+
-+ gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
-+ GNTMAP_host_map, handle);
-+
-+ if (unlikely
-+ (HYPERVISOR_grant_table_op
-+ (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
-+ BUG();
-+ }
-+
-+ offset += tocopy;
-+ i++;
-+ }
-+
-+ rc = offset;
-+ DPRINTK("Notifying frontend via irq %d\n", tpmif->irq);
-+ notify_remote_via_irq(tpmif->irq);
-+
-+ return rc;
-+}
-+
-+/*
-+ * Read data from the shared memory and copy it directly into the
-+ * provided buffer. Advance the read_last indicator which tells
-+ * how many bytes have already been read.
-+ */
-+static int packet_read(struct packet *pak, size_t numbytes,
-+ char *buffer, size_t buffersize, int isuserbuffer)
-+{
-+ tpmif_t *tpmif = pak->tpmif;
-+
-+ /*
-+ * Read 'numbytes' of data from the buffer. The first 4
-+ * bytes are the instance number in network byte order,
-+ * after that come the data from the shared memory buffer.
-+ */
-+ u32 to_copy;
-+ u32 offset = 0;
-+ u32 room_left = buffersize;
-+
-+ if (pak->last_read < 4) {
-+ /*
-+ * copy the instance number into the buffer
-+ */
-+ u32 instance_no = htonl(pak->tpm_instance);
-+ u32 last_read = pak->last_read;
-+
-+ to_copy = min_t(size_t, 4 - last_read, numbytes);
-+
-+ if (copy_to_buffer(&buffer[0],
-+ &(((u8 *) & instance_no)[last_read]),
-+ to_copy, isuserbuffer)) {
-+ return -EFAULT;
-+ }
-+
-+ pak->last_read += to_copy;
-+ offset += to_copy;
-+ room_left -= to_copy;
-+ }
-+
-+ /*
-+ * If the packet has a data buffer appended, read from it...
-+ */
-+
-+ if (room_left > 0) {
-+ if (pak->data_buffer) {
-+ u32 to_copy = min_t(u32, pak->data_len - offset, room_left);
-+ u32 last_read = pak->last_read - 4;
-+
-+ if (copy_to_buffer(&buffer[offset],
-+ &pak->data_buffer[last_read],
-+ to_copy, isuserbuffer)) {
-+ return -EFAULT;
-+ }
-+ pak->last_read += to_copy;
-+ offset += to_copy;
-+ } else {
-+ offset = packet_read_shmem(pak,
-+ tpmif,
-+ offset,
-+ buffer,
-+ isuserbuffer, room_left);
-+ }
-+ }
-+ return offset;
-+}
-+
-+static int packet_read_shmem(struct packet *pak,
-+ tpmif_t * tpmif,
-+ u32 offset, char *buffer, int isuserbuffer,
-+ u32 room_left)
-+{
-+ u32 last_read = pak->last_read - 4;
-+ u32 i = (last_read / PAGE_SIZE);
-+ u32 pg_offset = last_read & (PAGE_SIZE - 1);
-+ u32 to_copy;
-+ grant_handle_t handle;
-+
-+ tpmif_tx_request_t *tx;
-+
-+ tx = &tpmif->tx->ring[0].req;
-+ /*
-+ * Start copying data at the page with index 'index'
-+ * and within that page at offset 'offset'.
-+ * Copy a maximum of 'room_left' bytes.
-+ */
-+ to_copy = min_t(u32, PAGE_SIZE - pg_offset, room_left);
-+ while (to_copy > 0) {
-+ void *src;
-+ struct gnttab_map_grant_ref map_op;
-+ struct gnttab_unmap_grant_ref unmap_op;
-+
-+ tx = &tpmif->tx->ring[i].req;
-+
-+ gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
-+ GNTMAP_host_map, tx->ref, tpmif->domid);
-+
-+ if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
-+ &map_op, 1))) {
-+ BUG();
-+ }
-+
-+ if (map_op.status) {
-+ DPRINTK(" Grant table operation failure !\n");
-+ return -EFAULT;
-+ }
-+
-+ handle = map_op.handle;
-+
-+ if (to_copy > tx->size) {
-+ /*
-+ * User requests more than what's available
-+ */
-+ to_copy = min_t(u32, tx->size, to_copy);
-+ }
-+
-+ DPRINTK("Copying from mapped memory at %08lx\n",
-+ (unsigned long)(idx_to_kaddr(tpmif, i) |
-+ (tx->addr & ~PAGE_MASK)));
-+
-+ src = (void *)(idx_to_kaddr(tpmif, i) |
-+ ((tx->addr & ~PAGE_MASK) + pg_offset));
-+ if (copy_to_buffer(&buffer[offset],
-+ src, to_copy, isuserbuffer)) {
-+ return -EFAULT;
-+ }
-+
-+ DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
-+ tpmif->domid, buffer[offset], buffer[offset + 1],
-+ buffer[offset + 2], buffer[offset + 3]);
-+
-+ gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
-+ GNTMAP_host_map, handle);
-+
-+ if (unlikely
-+ (HYPERVISOR_grant_table_op
-+ (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
-+ BUG();
-+ }
-+
-+ offset += to_copy;
-+ pg_offset = 0;
-+ last_read += to_copy;
-+ room_left -= to_copy;
-+
-+ to_copy = min_t(u32, PAGE_SIZE, room_left);
-+ i++;
-+ } /* while (to_copy > 0) */
-+ /*
-+ * Adjust the last_read pointer
-+ */
-+ pak->last_read = last_read + 4;
-+ return offset;
-+}
-+
-+/* ============================================================
-+ * The file layer for reading data from this device
-+ * ============================================================
-+ */
-+static int vtpm_op_open(struct inode *inode, struct file *f)
-+{
-+ int rc = 0;
-+ unsigned long flags;
-+
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+ if (dataex.has_opener == 0) {
-+ dataex.has_opener = 1;
-+ } else {
-+ rc = -EPERM;
-+ }
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+ return rc;
-+}
-+
-+static ssize_t vtpm_op_read(struct file *file,
-+ char __user * data, size_t size, loff_t * offset)
-+{
-+ int ret_size = -ENODATA;
-+ struct packet *pak = NULL;
-+ unsigned long flags;
-+
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+ if (dataex.aborted) {
-+ dataex.aborted = 0;
-+ dataex.copied_so_far = 0;
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+ return -EIO;
-+ }
-+
-+ if (list_empty(&dataex.pending_pak)) {
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+ wait_event_interruptible(dataex.wait_queue,
-+ !list_empty(&dataex.pending_pak));
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+ dataex.copied_so_far = 0;
-+ }
-+
-+ if (!list_empty(&dataex.pending_pak)) {
-+ unsigned int left;
-+
-+ pak = list_entry(dataex.pending_pak.next, struct packet, next);
-+ left = pak->data_len - dataex.copied_so_far;
-+ list_del(&pak->next);
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+
-+ DPRINTK("size given by app: %zu, available: %u\n", size, left);
-+
-+ ret_size = min_t(size_t, size, left);
-+
-+ ret_size = packet_read(pak, ret_size, data, size, 1);
-+
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+
-+ if (ret_size < 0) {
-+ del_singleshot_timer_sync(&pak->processing_timer);
-+ packet_free(pak);
-+ dataex.copied_so_far = 0;
-+ } else {
-+ DPRINTK("Copied %d bytes to user buffer\n", ret_size);
-+
-+ dataex.copied_so_far += ret_size;
-+ if (dataex.copied_so_far >= pak->data_len + 4) {
-+ DPRINTK("All data from this packet given to app.\n");
-+ /* All data given to app */
-+
-+ del_singleshot_timer_sync(&pak->
-+ processing_timer);
-+ list_add_tail(&pak->next, &dataex.current_pak);
-+ /*
-+				 * The more frontends that are handled at the same time,
-+ * the more time we give the TPM to process the request.
-+ */
-+ mod_timer(&pak->processing_timer,
-+ jiffies + (num_frontends * 60 * HZ));
-+ dataex.copied_so_far = 0;
-+ } else {
-+ list_add(&pak->next, &dataex.pending_pak);
-+ }
-+ }
-+ }
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+
-+ DPRINTK("Returning result from read to app: %d\n", ret_size);
-+
-+ return ret_size;
-+}
-+
-+/*
-+ * Write operation - only works after a previous read operation!
-+ */
-+static ssize_t vtpm_op_write(struct file *file,
-+ const char __user * data, size_t size,
-+ loff_t * offset)
-+{
-+ struct packet *pak;
-+ int rc = 0;
-+ unsigned int off = 4;
-+ unsigned long flags;
-+ struct vtpm_resp_hdr vrh;
-+
-+ /*
-+ * Minimum required packet size is:
-+ * 4 bytes for instance number
-+ * 2 bytes for tag
-+ * 4 bytes for paramSize
-+ * 4 bytes for the ordinal
-+ * sum: 14 bytes
-+ */
-+ if (size < sizeof (vrh))
-+ return -EFAULT;
-+
-+ if (copy_from_user(&vrh, data, sizeof (vrh)))
-+ return -EFAULT;
-+
-+ /* malformed packet? */
-+ if ((off + ntohl(vrh.len_no)) != size)
-+ return -EFAULT;
-+
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+ pak = packet_find_instance(&dataex.current_pak,
-+ ntohl(vrh.instance_no));
-+
-+ if (pak == NULL) {
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+ DPRINTK(KERN_ALERT "No associated packet! (inst=%d)\n",
-+ ntohl(vrh.instance_no));
-+ return -EFAULT;
-+ }
-+
-+ del_singleshot_timer_sync(&pak->processing_timer);
-+ list_del(&pak->next);
-+
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+
-+ /*
-+ * The first 'offset' bytes must be the instance number - skip them.
-+ */
-+ size -= off;
-+
-+ rc = packet_write(pak, &data[off], size, 1);
-+
-+ if (rc > 0) {
-+ /* I neglected the first 4 bytes */
-+ rc += off;
-+ }
-+ packet_free(pak);
-+ return rc;
-+}
-+
-+static int vtpm_op_release(struct inode *inode, struct file *file)
-+{
-+ unsigned long flags;
-+
-+ vtpm_release_packets(NULL, 1);
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+ dataex.has_opener = 0;
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+ return 0;
-+}
-+
-+static unsigned int vtpm_op_poll(struct file *file,
-+ struct poll_table_struct *pts)
-+{
-+ unsigned int flags = POLLOUT | POLLWRNORM;
-+
-+ poll_wait(file, &dataex.wait_queue, pts);
-+ if (!list_empty(&dataex.pending_pak)) {
-+ flags |= POLLIN | POLLRDNORM;
-+ }
-+ return flags;
-+}
-+
-+static const struct file_operations vtpm_ops = {
-+ .owner = THIS_MODULE,
-+ .llseek = no_llseek,
-+ .open = vtpm_op_open,
-+ .read = vtpm_op_read,
-+ .write = vtpm_op_write,
-+ .release = vtpm_op_release,
-+ .poll = vtpm_op_poll,
-+};
-+
-+static struct miscdevice vtpms_miscdevice = {
-+ .minor = 225,
-+ .name = "vtpm",
-+ .fops = &vtpm_ops,
-+};
-+
-+/***************************************************************
-+ Utility functions
-+***************************************************************/
-+
-+static int tpm_send_fail_message(struct packet *pak, u8 req_tag)
-+{
-+ int rc;
-+ static const unsigned char tpm_error_message_fail[] = {
-+ 0x00, 0x00,
-+ 0x00, 0x00, 0x00, 0x0a,
-+ 0x00, 0x00, 0x00, 0x09 /* TPM_FAIL */
-+ };
-+ unsigned char buffer[sizeof (tpm_error_message_fail)];
-+
-+ memcpy(buffer, tpm_error_message_fail,
-+ sizeof (tpm_error_message_fail));
-+ /*
-+ * Insert the right response tag depending on the given tag
-+ * All response tags are '+3' to the request tag.
-+ */
-+ buffer[1] = req_tag + 3;
-+
-+ /*
-+ * Write the data to shared memory and notify the front-end
-+ */
-+ rc = packet_write(pak, buffer, sizeof (buffer), 0);
-+
-+ return rc;
-+}
-+
-+static int _vtpm_release_packets(struct list_head *head,
-+ tpmif_t * tpmif, int send_msgs)
-+{
-+ int aborted = 0;
-+ int c = 0;
-+ struct packet *pak;
-+ struct list_head *pos, *tmp;
-+
-+ list_for_each_safe(pos, tmp, head) {
-+ pak = list_entry(pos, struct packet, next);
-+ c += 1;
-+
-+ if (tpmif == NULL || pak->tpmif == tpmif) {
-+ int can_send = 0;
-+
-+ del_singleshot_timer_sync(&pak->processing_timer);
-+ list_del(&pak->next);
-+
-+ if (pak->tpmif && pak->tpmif->status == CONNECTED) {
-+ can_send = 1;
-+ }
-+
-+ if (send_msgs && can_send) {
-+ tpm_send_fail_message(pak, pak->req_tag);
-+ }
-+ packet_free(pak);
-+ if (c == 1)
-+ aborted = 1;
-+ }
-+ }
-+ return aborted;
-+}
-+
-+int vtpm_release_packets(tpmif_t * tpmif, int send_msgs)
-+{
-+ unsigned long flags;
-+
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+
-+ dataex.aborted = _vtpm_release_packets(&dataex.pending_pak,
-+ tpmif,
-+ send_msgs);
-+ _vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);
-+
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+ return 0;
-+}
-+
-+static int vtpm_queue_packet(struct packet *pak)
-+{
-+ int rc = 0;
-+
-+ if (dataex.has_opener) {
-+ unsigned long flags;
-+
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+ list_add_tail(&pak->next, &dataex.pending_pak);
-+ /* give the TPM some time to pick up the request */
-+ mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+
-+ wake_up_interruptible(&dataex.wait_queue);
-+ } else {
-+ rc = -EFAULT;
-+ }
-+ return rc;
-+}
-+
-+static int vtpm_receive(tpmif_t * tpmif, u32 size)
-+{
-+ int rc = 0;
-+ unsigned char buffer[10];
-+ __be32 *native_size;
-+ struct packet *pak = packet_alloc(tpmif, size, 0, 0);
-+
-+ if (!pak)
-+ return -ENOMEM;
-+ /*
-+ * Read 10 bytes from the received buffer to test its
-+ * content for validity.
-+ */
-+ if (sizeof (buffer) != packet_read(pak,
-+ sizeof (buffer), buffer,
-+ sizeof (buffer), 0)) {
-+ goto failexit;
-+ }
-+ /*
-+ * Reset the packet read pointer so we can read all its
-+ * contents again.
-+ */
-+ packet_reset(pak);
-+
-+ native_size = (__force __be32 *) (&buffer[4 + 2]);
-+ /*
-+ * Verify that the size of the packet is correct
-+ * as indicated and that there's actually someone reading packets.
-+ * The minimum size of the packet is '10' for tag, size indicator
-+ * and ordinal.
-+ */
-+ if (size < 10 ||
-+ be32_to_cpu(*native_size) != size ||
-+ 0 == dataex.has_opener || tpmif->status != CONNECTED) {
-+ rc = -EINVAL;
-+ goto failexit;
-+ } else {
-+ rc = vtpm_queue_packet(pak);
-+ if (rc < 0)
-+ goto failexit;
-+ }
-+ return 0;
-+
-+ failexit:
-+ if (pak) {
-+ tpm_send_fail_message(pak, buffer[4 + 1]);
-+ packet_free(pak);
-+ }
-+ return rc;
-+}
-+
-+/*
-+ * Timeout function that gets invoked when a packet has not been processed
-+ * during the timeout period.
-+ * The packet must be on a list when this function is invoked. This
-+ * also means that once it's taken off a list, the timer must be
-+ * destroyed as well.
-+ */
-+static void processing_timeout(unsigned long ptr)
-+{
-+ struct packet *pak = (struct packet *)ptr;
-+ unsigned long flags;
-+
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+ /*
-+	 * Check whether the packet is still on one of
-+	 * the lists before acting on it.
-+ */
-+ if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
-+ pak == packet_find_packet(&dataex.current_pak, pak)) {
-+ if ((pak->flags & PACKET_FLAG_DISCARD_RESPONSE) == 0) {
-+ tpm_send_fail_message(pak, pak->req_tag);
-+ }
-+ /* discard future responses */
-+ pak->flags |= PACKET_FLAG_DISCARD_RESPONSE;
-+ }
-+
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+}
-+
-+static void tpm_tx_action(unsigned long unused);
-+static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);
-+
-+static struct list_head tpm_schedule_list;
-+static spinlock_t tpm_schedule_list_lock;
-+
-+static inline void maybe_schedule_tx_action(void)
-+{
-+ smp_mb();
-+ tasklet_schedule(&tpm_tx_tasklet);
-+}
-+
-+static inline int __on_tpm_schedule_list(tpmif_t * tpmif)
-+{
-+ return tpmif->list.next != NULL;
-+}
-+
-+static void remove_from_tpm_schedule_list(tpmif_t * tpmif)
-+{
-+ spin_lock_irq(&tpm_schedule_list_lock);
-+ if (likely(__on_tpm_schedule_list(tpmif))) {
-+ list_del(&tpmif->list);
-+ tpmif->list.next = NULL;
-+ tpmif_put(tpmif);
-+ }
-+ spin_unlock_irq(&tpm_schedule_list_lock);
-+}
-+
-+static void add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
-+{
-+ if (__on_tpm_schedule_list(tpmif))
-+ return;
-+
-+ spin_lock_irq(&tpm_schedule_list_lock);
-+ if (!__on_tpm_schedule_list(tpmif) && tpmif->active) {
-+ list_add_tail(&tpmif->list, &tpm_schedule_list);
-+ tpmif_get(tpmif);
-+ }
-+ spin_unlock_irq(&tpm_schedule_list_lock);
-+}
-+
-+void tpmif_schedule_work(tpmif_t * tpmif)
-+{
-+ add_to_tpm_schedule_list_tail(tpmif);
-+ maybe_schedule_tx_action();
-+}
-+
-+void tpmif_deschedule_work(tpmif_t * tpmif)
-+{
-+ remove_from_tpm_schedule_list(tpmif);
-+}
-+
-+static void tpm_tx_action(unsigned long unused)
-+{
-+ struct list_head *ent;
-+ tpmif_t *tpmif;
-+ tpmif_tx_request_t *tx;
-+
-+ DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);
-+
-+ while (!list_empty(&tpm_schedule_list)) {
-+ /* Get a tpmif from the list with work to do. */
-+ ent = tpm_schedule_list.next;
-+ tpmif = list_entry(ent, tpmif_t, list);
-+ tpmif_get(tpmif);
-+ remove_from_tpm_schedule_list(tpmif);
-+
-+ tx = &tpmif->tx->ring[0].req;
-+
-+ /* pass it up */
-+ vtpm_receive(tpmif, tx->size);
-+
-+ tpmif_put(tpmif);
-+ }
-+}
-+
-+irqreturn_t tpmif_be_int(int irq, void *dev_id)
-+{
-+ tpmif_t *tpmif = (tpmif_t *) dev_id;
-+
-+ add_to_tpm_schedule_list_tail(tpmif);
-+ maybe_schedule_tx_action();
-+ return IRQ_HANDLED;
-+}
-+
-+static int __init tpmback_init(void)
-+{
-+ int rc;
-+
-+ if ((rc = misc_register(&vtpms_miscdevice)) != 0) {
-+ printk(KERN_ALERT
-+ "Could not register misc device for TPM BE.\n");
-+ return rc;
-+ }
-+
-+ dataex_init(&dataex);
-+
-+ spin_lock_init(&tpm_schedule_list_lock);
-+ INIT_LIST_HEAD(&tpm_schedule_list);
-+
-+ tpmif_interface_init();
-+ tpmif_xenbus_init();
-+
-+ printk(KERN_ALERT "Successfully initialized TPM backend driver.\n");
-+
-+ return 0;
-+}
-+
-+module_init(tpmback_init);
-+
-+void __exit tpmback_exit(void)
-+{
-+ vtpm_release_packets(NULL, 0);
-+ tpmif_xenbus_exit();
-+ tpmif_interface_exit();
-+ misc_deregister(&vtpms_miscdevice);
-+}
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/tpmback/xenbus.c ubuntu-gutsy-xen/drivers/xen/tpmback/xenbus.c
---- ubuntu-gutsy/drivers/xen/tpmback/xenbus.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/tpmback/xenbus.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,289 @@
-+/* Xenbus code for tpmif backend
-+ Copyright (C) 2005 IBM Corporation
-+ Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
-+
-+ This program is free software; you can redistribute it and/or modify
-+ it under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 2 of the License, or
-+ (at your option) any later version.
-+
-+ This program is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ GNU General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with this program; if not, write to the Free Software
-+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+*/
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <xen/xenbus.h>
-+#include "common.h"
-+
-+struct backend_info
-+{
-+ struct xenbus_device *dev;
-+
-+ /* our communications channel */
-+ tpmif_t *tpmif;
-+
-+ long int frontend_id;
-+ long int instance; // instance of TPM
-+	u8 is_instance_set; // whether instance number has been set
-+
-+ /* watch front end for changes */
-+ struct xenbus_watch backend_watch;
-+};
-+
-+static void maybe_connect(struct backend_info *be);
-+static void connect(struct backend_info *be);
-+static int connect_ring(struct backend_info *be);
-+static void backend_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len);
-+static void frontend_changed(struct xenbus_device *dev,
-+ enum xenbus_state frontend_state);
-+
-+long int tpmback_get_instance(struct backend_info *bi)
-+{
-+ long int res = -1;
-+ if (bi && bi->is_instance_set)
-+ res = bi->instance;
-+ return res;
-+}
-+
-+static int tpmback_remove(struct xenbus_device *dev)
-+{
-+ struct backend_info *be = dev->dev.driver_data;
-+
-+ if (!be) return 0;
-+
-+ if (be->backend_watch.node) {
-+ unregister_xenbus_watch(&be->backend_watch);
-+ kfree(be->backend_watch.node);
-+ be->backend_watch.node = NULL;
-+ }
-+ if (be->tpmif) {
-+ be->tpmif->bi = NULL;
-+ vtpm_release_packets(be->tpmif, 0);
-+ tpmif_put(be->tpmif);
-+ be->tpmif = NULL;
-+ }
-+ kfree(be);
-+ dev->dev.driver_data = NULL;
-+ return 0;
-+}
-+
-+static int tpmback_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err;
-+ struct backend_info *be = kzalloc(sizeof(struct backend_info),
-+ GFP_KERNEL);
-+
-+ if (!be) {
-+ xenbus_dev_fatal(dev, -ENOMEM,
-+ "allocating backend structure");
-+ return -ENOMEM;
-+ }
-+
-+ be->is_instance_set = 0;
-+ be->dev = dev;
-+ dev->dev.driver_data = be;
-+
-+ err = xenbus_watch_path2(dev, dev->nodename,
-+ "instance", &be->backend_watch,
-+ backend_changed);
-+ if (err) {
-+ goto fail;
-+ }
-+
-+ err = xenbus_switch_state(dev, XenbusStateInitWait);
-+ if (err) {
-+ goto fail;
-+ }
-+ return 0;
-+fail:
-+ tpmback_remove(dev);
-+ return err;
-+}
-+
-+
-+static void backend_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ int err;
-+ long instance;
-+ struct backend_info *be
-+ = container_of(watch, struct backend_info, backend_watch);
-+ struct xenbus_device *dev = be->dev;
-+
-+ err = xenbus_scanf(XBT_NIL, dev->nodename,
-+ "instance","%li", &instance);
-+ if (XENBUS_EXIST_ERR(err)) {
-+ return;
-+ }
-+
-+ if (err != 1) {
-+ xenbus_dev_fatal(dev, err, "reading instance");
-+ return;
-+ }
-+
-+ if (be->is_instance_set == 0) {
-+ be->instance = instance;
-+ be->is_instance_set = 1;
-+ }
-+}
-+
-+
-+static void frontend_changed(struct xenbus_device *dev,
-+ enum xenbus_state frontend_state)
-+{
-+ struct backend_info *be = dev->dev.driver_data;
-+ int err;
-+
-+ switch (frontend_state) {
-+ case XenbusStateInitialising:
-+ case XenbusStateInitialised:
-+ break;
-+
-+ case XenbusStateConnected:
-+ err = connect_ring(be);
-+ if (err) {
-+ return;
-+ }
-+ maybe_connect(be);
-+ break;
-+
-+ case XenbusStateClosing:
-+ be->instance = -1;
-+ xenbus_switch_state(dev, XenbusStateClosing);
-+ break;
-+
-+ case XenbusStateUnknown: /* keep it here */
-+ case XenbusStateClosed:
-+ xenbus_switch_state(dev, XenbusStateClosed);
-+ device_unregister(&be->dev->dev);
-+ tpmback_remove(dev);
-+ break;
-+
-+ default:
-+ xenbus_dev_fatal(dev, -EINVAL,
-+ "saw state %d at frontend",
-+ frontend_state);
-+ break;
-+ }
-+}
-+
-+
-+
-+static void maybe_connect(struct backend_info *be)
-+{
-+ if (be->tpmif == NULL || be->tpmif->status == CONNECTED)
-+ return;
-+
-+ connect(be);
-+}
-+
-+
-+static void connect(struct backend_info *be)
-+{
-+ struct xenbus_transaction xbt;
-+ int err;
-+ struct xenbus_device *dev = be->dev;
-+ unsigned long ready = 1;
-+
-+again:
-+ err = xenbus_transaction_start(&xbt);
-+ if (err) {
-+ xenbus_dev_fatal(be->dev, err, "starting transaction");
-+ return;
-+ }
-+
-+ err = xenbus_printf(xbt, be->dev->nodename,
-+ "ready", "%lu", ready);
-+ if (err) {
-+ xenbus_dev_fatal(be->dev, err, "writing 'ready'");
-+ goto abort;
-+ }
-+
-+ err = xenbus_transaction_end(xbt, 0);
-+ if (err == -EAGAIN)
-+ goto again;
-+ if (err)
-+ xenbus_dev_fatal(be->dev, err, "end of transaction");
-+
-+ err = xenbus_switch_state(dev, XenbusStateConnected);
-+ if (!err)
-+ be->tpmif->status = CONNECTED;
-+ return;
-+abort:
-+ xenbus_transaction_end(xbt, 1);
-+}
-+
-+
-+static int connect_ring(struct backend_info *be)
-+{
-+ struct xenbus_device *dev = be->dev;
-+ unsigned long ring_ref;
-+ unsigned int evtchn;
-+ int err;
-+
-+ err = xenbus_gather(XBT_NIL, dev->otherend,
-+ "ring-ref", "%lu", &ring_ref,
-+ "event-channel", "%u", &evtchn, NULL);
-+ if (err) {
-+ xenbus_dev_error(dev, err,
-+ "reading %s/ring-ref and event-channel",
-+ dev->otherend);
-+ return err;
-+ }
-+
-+ if (!be->tpmif) {
-+ be->tpmif = tpmif_find(dev->otherend_id, be);
-+ if (IS_ERR(be->tpmif)) {
-+ err = PTR_ERR(be->tpmif);
-+ be->tpmif = NULL;
-+			xenbus_dev_fatal(dev, err, "creating vtpm interface");
-+ return err;
-+ }
-+ }
-+
-+ if (be->tpmif != NULL) {
-+ err = tpmif_map(be->tpmif, ring_ref, evtchn);
-+ if (err) {
-+ xenbus_dev_error(dev, err,
-+ "mapping shared-frame %lu port %u",
-+ ring_ref, evtchn);
-+ return err;
-+ }
-+ }
-+ return 0;
-+}
-+
-+
-+static struct xenbus_device_id tpmback_ids[] = {
-+ { "vtpm" },
-+ { "" }
-+};
-+
-+
-+static struct xenbus_driver tpmback = {
-+ .name = "vtpm",
-+ .owner = THIS_MODULE,
-+ .ids = tpmback_ids,
-+ .probe = tpmback_probe,
-+ .remove = tpmback_remove,
-+ .otherend_changed = frontend_changed,
-+};
-+
-+
-+void tpmif_xenbus_init(void)
-+{
-+ xenbus_register_backend(&tpmback);
-+}
-+
-+void tpmif_xenbus_exit(void)
-+{
-+ xenbus_unregister_driver(&tpmback);
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/util.c ubuntu-gutsy-xen/drivers/xen/util.c
---- ubuntu-gutsy/drivers/xen/util.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/util.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,70 @@
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <asm/uaccess.h>
-+#include <xen/driver_util.h>
-+
-+struct class *get_xen_class(void)
-+{
-+ static struct class *xen_class;
-+
-+ if (xen_class)
-+ return xen_class;
-+
-+ xen_class = class_create(THIS_MODULE, "xen");
-+ if (IS_ERR(xen_class)) {
-+ printk("Failed to create xen sysfs class.\n");
-+ xen_class = NULL;
-+ }
-+
-+ return xen_class;
-+}
-+EXPORT_SYMBOL_GPL(get_xen_class);
-+
-+/* Todo: merge ia64 ('auto-translate physmap') versions of these functions. */
-+#ifndef __ia64__
-+
-+static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
-+{
-+ /* apply_to_page_range() does all the hard work. */
-+ return 0;
-+}
-+
-+struct vm_struct *alloc_vm_area(unsigned long size)
-+{
-+ struct vm_struct *area;
-+
-+ area = get_vm_area(size, VM_IOREMAP);
-+ if (area == NULL)
-+ return NULL;
-+
-+ /*
-+ * This ensures that page tables are constructed for this region
-+ * of kernel virtual address space and mapped into init_mm.
-+ */
-+ if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
-+ area->size, f, NULL)) {
-+ free_vm_area(area);
-+ return NULL;
-+ }
-+
-+ /* Map page directories into every address space. */
-+#ifdef CONFIG_X86
-+ vmalloc_sync_all();
-+#endif
-+
-+ return area;
-+}
-+EXPORT_SYMBOL_GPL(alloc_vm_area);
-+
-+void free_vm_area(struct vm_struct *area)
-+{
-+ struct vm_struct *ret;
-+ ret = remove_vm_area(area->addr);
-+ BUG_ON(ret != area);
-+ kfree(area);
-+}
-+EXPORT_SYMBOL_GPL(free_vm_area);
-+
-+#endif /* !__ia64__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/xenbus/Makefile ubuntu-gutsy-xen/drivers/xen/xenbus/Makefile
---- ubuntu-gutsy/drivers/xen/xenbus/Makefile 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/xenbus/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,9 @@
-+obj-y += xenbus_client.o xenbus_comms.o xenbus_xs.o xenbus_probe.o
-+obj-$(CONFIG_XEN_BACKEND) += xenbus_be.o
-+
-+xenbus_be-objs =
-+xenbus_be-objs += xenbus_backend_client.o
-+
-+xenbus-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
-+obj-y += $(xenbus-y) $(xenbus-m)
-+obj-$(CONFIG_XEN_XENBUS_DEV) += xenbus_dev.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/xenbus/xenbus_backend_client.c ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_backend_client.c
---- ubuntu-gutsy/drivers/xen/xenbus/xenbus_backend_client.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_backend_client.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,147 @@
-+/******************************************************************************
-+ * Backend-client-facing interface for the Xenbus driver. In other words, the
-+ * interface between the Xenbus and the device-specific code in the backend
-+ * driver.
-+ *
-+ * Copyright (C) 2005-2006 XenSource Ltd
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/err.h>
-+#include <xen/gnttab.h>
-+#include <xen/xenbus.h>
-+#include <xen/driver_util.h>
-+
-+/* Based on Rusty Russell's skeleton driver's map_page */
-+struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref)
-+{
-+ struct gnttab_map_grant_ref op;
-+ struct vm_struct *area;
-+
-+ area = alloc_vm_area(PAGE_SIZE);
-+ if (!area)
-+ return ERR_PTR(-ENOMEM);
-+
-+ gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map,
-+ gnt_ref, dev->otherend_id);
-+
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-+ BUG();
-+
-+ if (op.status != GNTST_okay) {
-+ free_vm_area(area);
-+ xenbus_dev_fatal(dev, op.status,
-+ "mapping in shared page %d from domain %d",
-+ gnt_ref, dev->otherend_id);
-+ BUG_ON(!IS_ERR(ERR_PTR(op.status)));
-+ return ERR_PTR(op.status);
-+ }
-+
-+ /* Stuff the handle in an unused field */
-+ area->phys_addr = (unsigned long)op.handle;
-+
-+ return area;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
-+
-+
-+int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
-+ grant_handle_t *handle, void *vaddr)
-+{
-+ struct gnttab_map_grant_ref op;
-+
-+ gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
-+ gnt_ref, dev->otherend_id);
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-+ BUG();
-+
-+ if (op.status != GNTST_okay) {
-+ xenbus_dev_fatal(dev, op.status,
-+ "mapping in shared page %d from domain %d",
-+ gnt_ref, dev->otherend_id);
-+ } else
-+ *handle = op.handle;
-+
-+ return op.status;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_map_ring);
-+
-+
-+/* Based on Rusty Russell's skeleton driver's unmap_page */
-+int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area)
-+{
-+ struct gnttab_unmap_grant_ref op;
-+
-+ gnttab_set_unmap_op(&op, (unsigned long)area->addr, GNTMAP_host_map,
-+ (grant_handle_t)area->phys_addr);
-+
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-+ BUG();
-+
-+ if (op.status == GNTST_okay)
-+ free_vm_area(area);
-+ else
-+ xenbus_dev_error(dev, op.status,
-+ "unmapping page at handle %d error %d",
-+ (int16_t)area->phys_addr, op.status);
-+
-+ return op.status;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
-+
-+
-+int xenbus_unmap_ring(struct xenbus_device *dev,
-+ grant_handle_t handle, void *vaddr)
-+{
-+ struct gnttab_unmap_grant_ref op;
-+
-+ gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
-+ handle);
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-+ BUG();
-+
-+ if (op.status != GNTST_okay)
-+ xenbus_dev_error(dev, op.status,
-+ "unmapping page at handle %d error %d",
-+ handle, op.status);
-+
-+ return op.status;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
-+
-+int xenbus_dev_is_online(struct xenbus_device *dev)
-+{
-+ int rc, val;
-+
-+ rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
-+ if (rc != 1)
-+ val = 0; /* no online node present */
-+
-+ return val;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/xenbus/xenbus_client.c ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_client.c
---- ubuntu-gutsy/drivers/xen/xenbus/xenbus_client.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_client.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,283 @@
-+/******************************************************************************
-+ * Client-facing interface for the Xenbus driver. In other words, the
-+ * interface between the Xenbus and the device-specific code, be it the
-+ * frontend or the backend of that driver.
-+ *
-+ * Copyright (C) 2005 XenSource Ltd
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <xen/evtchn.h>
-+#include <xen/gnttab.h>
-+#include <xen/xenbus.h>
-+#include <xen/driver_util.h>
-+
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
-+#define DPRINTK(fmt, args...) \
-+ pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
-+
-+const char *xenbus_strstate(enum xenbus_state state)
-+{
-+ static const char *const name[] = {
-+ [ XenbusStateUnknown ] = "Unknown",
-+ [ XenbusStateInitialising ] = "Initialising",
-+ [ XenbusStateInitWait ] = "InitWait",
-+ [ XenbusStateInitialised ] = "Initialised",
-+ [ XenbusStateConnected ] = "Connected",
-+ [ XenbusStateClosing ] = "Closing",
-+ [ XenbusStateClosed ] = "Closed",
-+ };
-+ return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
-+}
-+EXPORT_SYMBOL_GPL(xenbus_strstate);
-+
-+int xenbus_watch_path(struct xenbus_device *dev, const char *path,
-+ struct xenbus_watch *watch,
-+ void (*callback)(struct xenbus_watch *,
-+ const char **, unsigned int))
-+{
-+ int err;
-+
-+ watch->node = path;
-+ watch->callback = callback;
-+
-+ err = register_xenbus_watch(watch);
-+
-+ if (err) {
-+ watch->node = NULL;
-+ watch->callback = NULL;
-+ xenbus_dev_fatal(dev, err, "adding watch on %s", path);
-+ }
-+
-+ return err;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_watch_path);
-+
-+
-+int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
-+ const char *path2, struct xenbus_watch *watch,
-+ void (*callback)(struct xenbus_watch *,
-+ const char **, unsigned int))
-+{
-+ int err;
-+ char *state = kasprintf(GFP_KERNEL, "%s/%s", path, path2);
-+ if (!state) {
-+ xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
-+ return -ENOMEM;
-+ }
-+ err = xenbus_watch_path(dev, state, watch, callback);
-+
-+ if (err)
-+ kfree(state);
-+ return err;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_watch_path2);
-+
-+
-+int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
-+{
-+ /* We check whether the state is currently set to the given value, and
-+ if not, then the state is set. We don't want to unconditionally
-+ write the given state, because we don't want to fire watches
-+ unnecessarily. Furthermore, if the node has gone, we don't write
-+ to it, as the device will be tearing down, and we don't want to
-+ resurrect that directory.
-+
-+ Note that, because of this cached value of our state, this function
-+ will not work inside a Xenstore transaction (something it was
-+	   trying to do in the past) because dev->state would not get reset if
-+ the transaction was aborted.
-+
-+ */
-+
-+ int current_state;
-+ int err;
-+
-+ if (state == dev->state)
-+ return 0;
-+
-+ err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d",
-+ &current_state);
-+ if (err != 1)
-+ return 0;
-+
-+ err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state);
-+ if (err) {
-+ if (state != XenbusStateClosing) /* Avoid looping */
-+ xenbus_dev_fatal(dev, err, "writing new state");
-+ return err;
-+ }
-+
-+ dev->state = state;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_switch_state);
-+
-+int xenbus_frontend_closed(struct xenbus_device *dev)
-+{
-+ xenbus_switch_state(dev, XenbusStateClosed);
-+ complete(&dev->down);
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
-+
-+/**
-+ * Return the path to the error node for the given device, or NULL on failure.
-+ * If the value returned is non-NULL, then it is the caller's responsibility to kfree it.
-+ */
-+static char *error_path(struct xenbus_device *dev)
-+{
-+ return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
-+}
-+
-+
-+void _dev_error(struct xenbus_device *dev, int err, const char *fmt,
-+ va_list ap)
-+{
-+ int ret;
-+ unsigned int len;
-+ char *printf_buffer = NULL, *path_buffer = NULL;
-+
-+#define PRINTF_BUFFER_SIZE 4096
-+ printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
-+ if (printf_buffer == NULL)
-+ goto fail;
-+
-+ len = sprintf(printf_buffer, "%i ", -err);
-+ ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
-+
-+ BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
-+
-+ dev_err(&dev->dev, "%s\n", printf_buffer);
-+
-+ path_buffer = error_path(dev);
-+
-+ if (path_buffer == NULL) {
-+ printk("xenbus: failed to write error node for %s (%s)\n",
-+ dev->nodename, printf_buffer);
-+ goto fail;
-+ }
-+
-+ if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
-+ printk("xenbus: failed to write error node for %s (%s)\n",
-+ dev->nodename, printf_buffer);
-+ goto fail;
-+ }
-+
-+fail:
-+ if (printf_buffer)
-+ kfree(printf_buffer);
-+ if (path_buffer)
-+ kfree(path_buffer);
-+}
-+
-+
-+void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
-+ ...)
-+{
-+ va_list ap;
-+
-+ va_start(ap, fmt);
-+ _dev_error(dev, err, fmt, ap);
-+ va_end(ap);
-+}
-+EXPORT_SYMBOL_GPL(xenbus_dev_error);
-+
-+
-+void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
-+ ...)
-+{
-+ va_list ap;
-+
-+ va_start(ap, fmt);
-+ _dev_error(dev, err, fmt, ap);
-+ va_end(ap);
-+
-+ xenbus_switch_state(dev, XenbusStateClosing);
-+}
-+EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
-+
-+
-+int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
-+{
-+ int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
-+ if (err < 0)
-+ xenbus_dev_fatal(dev, err, "granting access to ring page");
-+ return err;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_grant_ring);
-+
-+
-+int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
-+{
-+ struct evtchn_alloc_unbound alloc_unbound;
-+ int err;
-+
-+ alloc_unbound.dom = DOMID_SELF;
-+ alloc_unbound.remote_dom = dev->otherend_id;
-+
-+ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
-+ &alloc_unbound);
-+ if (err)
-+ xenbus_dev_fatal(dev, err, "allocating event channel");
-+ else
-+ *port = alloc_unbound.port;
-+
-+ return err;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
-+
-+
-+int xenbus_free_evtchn(struct xenbus_device *dev, int port)
-+{
-+ struct evtchn_close close;
-+ int err;
-+
-+ close.port = port;
-+
-+ err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
-+ if (err)
-+ xenbus_dev_error(dev, err, "freeing event channel %d", port);
-+
-+ return err;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
-+
-+
-+enum xenbus_state xenbus_read_driver_state(const char *path)
-+{
-+ enum xenbus_state result;
-+ int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
-+ if (err)
-+ result = XenbusStateUnknown;
-+
-+ return result;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/xenbus/xenbus_comms.c ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_comms.c
---- ubuntu-gutsy/drivers/xen/xenbus/xenbus_comms.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_comms.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,232 @@
-+/******************************************************************************
-+ * xenbus_comms.c
-+ *
-+ * Low level code to talk to Xen Store: ringbuffer and event channel.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/wait.h>
-+#include <linux/interrupt.h>
-+#include <linux/sched.h>
-+#include <linux/err.h>
-+#include <linux/ptrace.h>
-+#include <xen/evtchn.h>
-+#include <xen/xenbus.h>
-+
-+#include <asm/hypervisor.h>
-+
-+#include "xenbus_comms.h"
-+
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
-+static int xenbus_irq;
-+
-+extern void xenbus_probe(struct work_struct *);
-+extern int xenstored_ready;
-+static DECLARE_WORK(probe_work, xenbus_probe);
-+
-+static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
-+
-+static irqreturn_t wake_waiting(int irq, void *unused)
-+{
-+ if (unlikely(xenstored_ready == 0)) {
-+ xenstored_ready = 1;
-+ schedule_work(&probe_work);
-+ }
-+
-+ wake_up(&xb_waitq);
-+ return IRQ_HANDLED;
-+}
-+
-+static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
-+{
-+ return ((prod - cons) <= XENSTORE_RING_SIZE);
-+}
-+
-+static void *get_output_chunk(XENSTORE_RING_IDX cons,
-+ XENSTORE_RING_IDX prod,
-+ char *buf, uint32_t *len)
-+{
-+ *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
-+ if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
-+ *len = XENSTORE_RING_SIZE - (prod - cons);
-+ return buf + MASK_XENSTORE_IDX(prod);
-+}
-+
-+static const void *get_input_chunk(XENSTORE_RING_IDX cons,
-+ XENSTORE_RING_IDX prod,
-+ const char *buf, uint32_t *len)
-+{
-+ *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
-+ if ((prod - cons) < *len)
-+ *len = prod - cons;
-+ return buf + MASK_XENSTORE_IDX(cons);
-+}
-+
-+int xb_write(const void *data, unsigned len)
-+{
-+ struct xenstore_domain_interface *intf = xen_store_interface;
-+ XENSTORE_RING_IDX cons, prod;
-+ int rc;
-+
-+ while (len != 0) {
-+ void *dst;
-+ unsigned int avail;
-+
-+ rc = wait_event_interruptible(
-+ xb_waitq,
-+ (intf->req_prod - intf->req_cons) !=
-+ XENSTORE_RING_SIZE);
-+ if (rc < 0)
-+ return rc;
-+
-+ /* Read indexes, then verify. */
-+ cons = intf->req_cons;
-+ prod = intf->req_prod;
-+ if (!check_indexes(cons, prod)) {
-+ intf->req_cons = intf->req_prod = 0;
-+ return -EIO;
-+ }
-+
-+ dst = get_output_chunk(cons, prod, intf->req, &avail);
-+ if (avail == 0)
-+ continue;
-+ if (avail > len)
-+ avail = len;
-+
-+ /* Must write data /after/ reading the consumer index. */
-+ mb();
-+
-+ memcpy(dst, data, avail);
-+ data += avail;
-+ len -= avail;
-+
-+ /* Other side must not see new producer until data is there. */
-+ wmb();
-+ intf->req_prod += avail;
-+
-+ /* Implies mb(): other side will see the updated producer. */
-+ notify_remote_via_evtchn(xen_store_evtchn);
-+ }
-+
-+ return 0;
-+}
-+
-+int xb_data_to_read(void)
-+{
-+ struct xenstore_domain_interface *intf = xen_store_interface;
-+ return (intf->rsp_cons != intf->rsp_prod);
-+}
-+
-+int xb_wait_for_data_to_read(void)
-+{
-+ return wait_event_interruptible(xb_waitq, xb_data_to_read());
-+}
-+
-+int xb_read(void *data, unsigned len)
-+{
-+ struct xenstore_domain_interface *intf = xen_store_interface;
-+ XENSTORE_RING_IDX cons, prod;
-+ int rc;
-+
-+ while (len != 0) {
-+ unsigned int avail;
-+ const char *src;
-+
-+ rc = xb_wait_for_data_to_read();
-+ if (rc < 0)
-+ return rc;
-+
-+ /* Read indexes, then verify. */
-+ cons = intf->rsp_cons;
-+ prod = intf->rsp_prod;
-+ if (!check_indexes(cons, prod)) {
-+ intf->rsp_cons = intf->rsp_prod = 0;
-+ return -EIO;
-+ }
-+
-+ src = get_input_chunk(cons, prod, intf->rsp, &avail);
-+ if (avail == 0)
-+ continue;
-+ if (avail > len)
-+ avail = len;
-+
-+ /* Must read data /after/ reading the producer index. */
-+ rmb();
-+
-+ memcpy(data, src, avail);
-+ data += avail;
-+ len -= avail;
-+
-+ /* Other side must not see free space until we've copied out */
-+ mb();
-+ intf->rsp_cons += avail;
-+
-+ pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
-+
-+ /* Implies mb(): other side will see the updated consumer. */
-+ notify_remote_via_evtchn(xen_store_evtchn);
-+ }
-+
-+ return 0;
-+}
-+
-+/* Set up interrupt handler off store event channel. */
-+int xb_init_comms(void)
-+{
-+ struct xenstore_domain_interface *intf = xen_store_interface;
-+ int err;
-+
-+ if (intf->req_prod != intf->req_cons)
-+ printk(KERN_ERR "XENBUS request ring is not quiescent "
-+ "(%08x:%08x)!\n", intf->req_cons, intf->req_prod);
-+
-+ if (intf->rsp_prod != intf->rsp_cons) {
-+ printk(KERN_WARNING "XENBUS response ring is not quiescent "
-+ "(%08x:%08x): fixing up\n",
-+ intf->rsp_cons, intf->rsp_prod);
-+ intf->rsp_cons = intf->rsp_prod;
-+ }
-+
-+ if (xenbus_irq)
-+ unbind_from_irqhandler(xenbus_irq, &xb_waitq);
-+
-+ err = bind_caller_port_to_irqhandler(
-+ xen_store_evtchn, wake_waiting,
-+ 0, "xenbus", &xb_waitq);
-+ if (err <= 0) {
-+ printk(KERN_ERR "XENBUS request irq failed %i\n", err);
-+ return err;
-+ }
-+
-+ xenbus_irq = err;
-+
-+ return 0;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/xenbus/xenbus_comms.h ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_comms.h
---- ubuntu-gutsy/drivers/xen/xenbus/xenbus_comms.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_comms.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,46 @@
-+/*
-+ * Private include for xenbus communications.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef _XENBUS_COMMS_H
-+#define _XENBUS_COMMS_H
-+
-+int xs_init(void);
-+int xb_init_comms(void);
-+
-+/* Low level routines. */
-+int xb_write(const void *data, unsigned len);
-+int xb_read(void *data, unsigned len);
-+int xb_data_to_read(void);
-+int xb_wait_for_data_to_read(void);
-+int xs_input_avail(void);
-+extern struct xenstore_domain_interface *xen_store_interface;
-+extern int xen_store_evtchn;
-+
-+#endif /* _XENBUS_COMMS_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/xenbus/xenbus_dev.c ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_dev.c
---- ubuntu-gutsy/drivers/xen/xenbus/xenbus_dev.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_dev.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,404 @@
-+/*
-+ * xenbus_dev.c
-+ *
-+ * Driver giving user-space access to the kernel's xenbus connection
-+ * to xenstore.
-+ *
-+ * Copyright (c) 2005, Christian Limpach
-+ * Copyright (c) 2005, Rusty Russell, IBM Corporation
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/uio.h>
-+#include <linux/notifier.h>
-+#include <linux/wait.h>
-+#include <linux/fs.h>
-+#include <linux/poll.h>
-+#include <linux/mutex.h>
-+
-+#include "xenbus_comms.h"
-+
-+#include <asm/uaccess.h>
-+#include <asm/hypervisor.h>
-+#include <xen/xenbus.h>
-+#include <xen/xen_proc.h>
-+#include <asm/hypervisor.h>
-+
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
-+struct xenbus_dev_transaction {
-+ struct list_head list;
-+ struct xenbus_transaction handle;
-+};
-+
-+struct read_buffer {
-+ struct list_head list;
-+ unsigned int cons;
-+ unsigned int len;
-+ char msg[];
-+};
-+
-+struct xenbus_dev_data {
-+ /* In-progress transaction. */
-+ struct list_head transactions;
-+
-+ /* Active watches. */
-+ struct list_head watches;
-+
-+ /* Partial request. */
-+ unsigned int len;
-+ union {
-+ struct xsd_sockmsg msg;
-+ char buffer[PAGE_SIZE];
-+ } u;
-+
-+ /* Response queue. */
-+ struct list_head read_buffers;
-+ wait_queue_head_t read_waitq;
-+
-+ struct mutex reply_mutex;
-+};
-+
-+static struct proc_dir_entry *xenbus_dev_intf;
-+
-+static ssize_t xenbus_dev_read(struct file *filp,
-+ char __user *ubuf,
-+ size_t len, loff_t *ppos)
-+{
-+ struct xenbus_dev_data *u = filp->private_data;
-+ struct read_buffer *rb;
-+ int i, ret;
-+
-+ mutex_lock(&u->reply_mutex);
-+ while (list_empty(&u->read_buffers)) {
-+ mutex_unlock(&u->reply_mutex);
-+ ret = wait_event_interruptible(u->read_waitq,
-+ !list_empty(&u->read_buffers));
-+ if (ret)
-+ return ret;
-+ mutex_lock(&u->reply_mutex);
-+ }
-+
-+ rb = list_entry(u->read_buffers.next, struct read_buffer, list);
-+ for (i = 0; i < len;) {
-+ put_user(rb->msg[rb->cons], ubuf + i);
-+ i++;
-+ rb->cons++;
-+ if (rb->cons == rb->len) {
-+ list_del(&rb->list);
-+ kfree(rb);
-+ if (list_empty(&u->read_buffers))
-+ break;
-+ rb = list_entry(u->read_buffers.next,
-+ struct read_buffer, list);
-+ }
-+ }
-+ mutex_unlock(&u->reply_mutex);
-+
-+ return i;
-+}
-+
-+static void queue_reply(struct xenbus_dev_data *u,
-+ char *data, unsigned int len)
-+{
-+ struct read_buffer *rb;
-+
-+ if (len == 0)
-+ return;
-+
-+ rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
-+ BUG_ON(rb == NULL);
-+
-+ rb->cons = 0;
-+ rb->len = len;
-+
-+ memcpy(rb->msg, data, len);
-+
-+ list_add_tail(&rb->list, &u->read_buffers);
-+
-+ wake_up(&u->read_waitq);
-+}
-+
-+struct watch_adapter
-+{
-+ struct list_head list;
-+ struct xenbus_watch watch;
-+ struct xenbus_dev_data *dev_data;
-+ char *token;
-+};
-+
-+static void free_watch_adapter (struct watch_adapter *watch)
-+{
-+ kfree(watch->watch.node);
-+ kfree(watch->token);
-+ kfree(watch);
-+}
-+
-+static void watch_fired(struct xenbus_watch *watch,
-+ const char **vec,
-+ unsigned int len)
-+{
-+ struct watch_adapter *adap =
-+ container_of(watch, struct watch_adapter, watch);
-+ struct xsd_sockmsg hdr;
-+ const char *path, *token;
-+ int path_len, tok_len, body_len;
-+
-+ path = vec[XS_WATCH_PATH];
-+ token = adap->token;
-+
-+ path_len = strlen(path) + 1;
-+ tok_len = strlen(token) + 1;
-+ body_len = path_len + tok_len;
-+
-+ hdr.type = XS_WATCH_EVENT;
-+ hdr.len = body_len;
-+
-+ mutex_lock(&adap->dev_data->reply_mutex);
-+ queue_reply(adap->dev_data, (char *)&hdr, sizeof(hdr));
-+ queue_reply(adap->dev_data, (char *)path, path_len);
-+ queue_reply(adap->dev_data, (char *)token, tok_len);
-+ mutex_unlock(&adap->dev_data->reply_mutex);
-+}
-+
-+static LIST_HEAD(watch_list);
-+
-+static ssize_t xenbus_dev_write(struct file *filp,
-+ const char __user *ubuf,
-+ size_t len, loff_t *ppos)
-+{
-+ struct xenbus_dev_data *u = filp->private_data;
-+ struct xenbus_dev_transaction *trans = NULL;
-+ uint32_t msg_type;
-+ void *reply;
-+ char *path, *token;
-+ struct watch_adapter *watch, *tmp_watch;
-+ int err, rc = len;
-+
-+ if ((len + u->len) > sizeof(u->u.buffer)) {
-+ rc = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) {
-+ rc = -EFAULT;
-+ goto out;
-+ }
-+
-+ u->len += len;
-+ if ((u->len < sizeof(u->u.msg)) ||
-+ (u->len < (sizeof(u->u.msg) + u->u.msg.len)))
-+ return rc;
-+
-+ msg_type = u->u.msg.type;
-+
-+ switch (msg_type) {
-+ case XS_TRANSACTION_START:
-+ case XS_TRANSACTION_END:
-+ case XS_DIRECTORY:
-+ case XS_READ:
-+ case XS_GET_PERMS:
-+ case XS_RELEASE:
-+ case XS_GET_DOMAIN_PATH:
-+ case XS_WRITE:
-+ case XS_MKDIR:
-+ case XS_RM:
-+ case XS_SET_PERMS:
-+ if (msg_type == XS_TRANSACTION_START) {
-+ trans = kmalloc(sizeof(*trans), GFP_KERNEL);
-+ if (!trans) {
-+ rc = -ENOMEM;
-+ goto out;
-+ }
-+ }
-+
-+ reply = xenbus_dev_request_and_reply(&u->u.msg);
-+ if (IS_ERR(reply)) {
-+ kfree(trans);
-+ rc = PTR_ERR(reply);
-+ goto out;
-+ }
-+
-+ if (msg_type == XS_TRANSACTION_START) {
-+ trans->handle.id = simple_strtoul(reply, NULL, 0);
-+ list_add(&trans->list, &u->transactions);
-+ } else if (msg_type == XS_TRANSACTION_END) {
-+ list_for_each_entry(trans, &u->transactions, list)
-+ if (trans->handle.id == u->u.msg.tx_id)
-+ break;
-+ BUG_ON(&trans->list == &u->transactions);
-+ list_del(&trans->list);
-+ kfree(trans);
-+ }
-+ mutex_lock(&u->reply_mutex);
-+ queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg));
-+ queue_reply(u, (char *)reply, u->u.msg.len);
-+ mutex_unlock(&u->reply_mutex);
-+ kfree(reply);
-+ break;
-+
-+ case XS_WATCH:
-+ case XS_UNWATCH: {
-+ static const char *XS_RESP = "OK";
-+ struct xsd_sockmsg hdr;
-+
-+ path = u->u.buffer + sizeof(u->u.msg);
-+ token = memchr(path, 0, u->u.msg.len);
-+ if (token == NULL) {
-+ rc = -EILSEQ;
-+ goto out;
-+ }
-+ token++;
-+
-+ if (msg_type == XS_WATCH) {
-+ watch = kmalloc(sizeof(*watch), GFP_KERNEL);
-+ watch->watch.node = kmalloc(strlen(path)+1,
-+ GFP_KERNEL);
-+ strcpy((char *)watch->watch.node, path);
-+ watch->watch.callback = watch_fired;
-+ watch->token = kmalloc(strlen(token)+1, GFP_KERNEL);
-+ strcpy(watch->token, token);
-+ watch->dev_data = u;
-+
-+ err = register_xenbus_watch(&watch->watch);
-+ if (err) {
-+ free_watch_adapter(watch);
-+ rc = err;
-+ goto out;
-+ }
-+
-+ list_add(&watch->list, &u->watches);
-+ } else {
-+ list_for_each_entry_safe(watch, tmp_watch,
-+ &u->watches, list) {
-+ if (!strcmp(watch->token, token) &&
-+ !strcmp(watch->watch.node, path))
-+ {
-+ unregister_xenbus_watch(&watch->watch);
-+ list_del(&watch->list);
-+ free_watch_adapter(watch);
-+ break;
-+ }
-+ }
-+ }
-+
-+ hdr.type = msg_type;
-+ hdr.len = strlen(XS_RESP) + 1;
-+ mutex_lock(&u->reply_mutex);
-+ queue_reply(u, (char *)&hdr, sizeof(hdr));
-+ queue_reply(u, (char *)XS_RESP, hdr.len);
-+ mutex_unlock(&u->reply_mutex);
-+ break;
-+ }
-+
-+ default:
-+ rc = -EINVAL;
-+ break;
-+ }
-+
-+ out:
-+ u->len = 0;
-+ return rc;
-+}
-+
-+static int xenbus_dev_open(struct inode *inode, struct file *filp)
-+{
-+ struct xenbus_dev_data *u;
-+
-+ if (xen_store_evtchn == 0)
-+ return -ENOENT;
-+
-+ nonseekable_open(inode, filp);
-+
-+ u = kzalloc(sizeof(*u), GFP_KERNEL);
-+ if (u == NULL)
-+ return -ENOMEM;
-+
-+ INIT_LIST_HEAD(&u->transactions);
-+ INIT_LIST_HEAD(&u->watches);
-+ INIT_LIST_HEAD(&u->read_buffers);
-+ init_waitqueue_head(&u->read_waitq);
-+
-+ mutex_init(&u->reply_mutex);
-+
-+ filp->private_data = u;
-+
-+ return 0;
-+}
-+
-+static int xenbus_dev_release(struct inode *inode, struct file *filp)
-+{
-+ struct xenbus_dev_data *u = filp->private_data;
-+ struct xenbus_dev_transaction *trans, *tmp;
-+ struct watch_adapter *watch, *tmp_watch;
-+
-+ list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
-+ xenbus_transaction_end(trans->handle, 1);
-+ list_del(&trans->list);
-+ kfree(trans);
-+ }
-+
-+ list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
-+ unregister_xenbus_watch(&watch->watch);
-+ list_del(&watch->list);
-+ free_watch_adapter(watch);
-+ }
-+
-+ kfree(u);
-+
-+ return 0;
-+}
-+
-+static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait)
-+{
-+ struct xenbus_dev_data *u = file->private_data;
-+
-+ poll_wait(file, &u->read_waitq, wait);
-+ if (!list_empty(&u->read_buffers))
-+ return POLLIN | POLLRDNORM;
-+ return 0;
-+}
-+
-+static const struct file_operations xenbus_dev_file_ops = {
-+ .read = xenbus_dev_read,
-+ .write = xenbus_dev_write,
-+ .open = xenbus_dev_open,
-+ .release = xenbus_dev_release,
-+ .poll = xenbus_dev_poll,
-+};
-+
-+int xenbus_dev_init(void)
-+{
-+ xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400);
-+ if (xenbus_dev_intf)
-+ xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops;
-+
-+ return 0;
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/xenbus/xenbus_probe_backend.c ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_probe_backend.c
---- ubuntu-gutsy/drivers/xen/xenbus/xenbus_probe_backend.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_probe_backend.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,286 @@
-+/******************************************************************************
-+ * Talks to Xen Store to figure out what devices we have (backend half).
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
-+ * Copyright (C) 2005, 2006 XenSource Ltd
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#define DPRINTK(fmt, args...) \
-+ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
-+ __FUNCTION__, __LINE__, ##args)
-+
-+#include <linux/kernel.h>
-+#include <linux/err.h>
-+#include <linux/string.h>
-+#include <linux/ctype.h>
-+#include <linux/fcntl.h>
-+#include <linux/mm.h>
-+#include <linux/notifier.h>
-+#include <linux/kthread.h>
-+
-+#include <asm/io.h>
-+#include <asm/page.h>
-+#include <asm/maddr.h>
-+#include <asm/pgtable.h>
-+#include <asm/hypervisor.h>
-+#include <xen/xenbus.h>
-+#include <xen/xen_proc.h>
-+#include <xen/evtchn.h>
-+#include <xen/features.h>
-+#include <xen/hvm.h>
-+
-+#include "xenbus_comms.h"
-+#include "xenbus_probe.h"
-+
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
-+static int xenbus_uevent_backend(struct device *dev, char **envp,
-+ int num_envp, char *buffer, int buffer_size);
-+static int xenbus_probe_backend(const char *type, const char *domid);
-+
-+extern int read_otherend_details(struct xenbus_device *xendev,
-+ char *id_node, char *path_node);
-+
-+static int read_frontend_details(struct xenbus_device *xendev)
-+{
-+ return read_otherend_details(xendev, "frontend-id", "frontend");
-+}
-+
-+/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
-+static int backend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
-+{
-+ int domid, err;
-+ const char *devid, *type, *frontend;
-+ unsigned int typelen;
-+
-+ type = strchr(nodename, '/');
-+ if (!type)
-+ return -EINVAL;
-+ type++;
-+ typelen = strcspn(type, "/");
-+ if (!typelen || type[typelen] != '/')
-+ return -EINVAL;
-+
-+ devid = strrchr(nodename, '/') + 1;
-+
-+ err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid,
-+ "frontend", NULL, &frontend,
-+ NULL);
-+ if (err)
-+ return err;
-+ if (strlen(frontend) == 0)
-+ err = -ERANGE;
-+ if (!err && !xenbus_exists(XBT_NIL, frontend, ""))
-+ err = -ENOENT;
-+ kfree(frontend);
-+
-+ if (err)
-+ return err;
-+
-+ if (snprintf(bus_id, BUS_ID_SIZE,
-+ "%.*s-%i-%s", typelen, type, domid, devid) >= BUS_ID_SIZE)
-+ return -ENOSPC;
-+ return 0;
-+}
-+
-+static struct xen_bus_type xenbus_backend = {
-+ .root = "backend",
-+ .levels = 3, /* backend/type/<frontend>/<id> */
-+ .get_bus_id = backend_bus_id,
-+ .probe = xenbus_probe_backend,
-+ .bus = {
-+ .name = "xen-backend",
-+ .match = xenbus_match,
-+ .probe = xenbus_dev_probe,
-+ .remove = xenbus_dev_remove,
-+// .shutdown = xenbus_dev_shutdown,
-+ .uevent = xenbus_uevent_backend,
-+ },
-+ .dev = {
-+ .bus_id = "xen-backend",
-+ },
-+};
-+
-+static int xenbus_uevent_backend(struct device *dev, char **envp,
-+ int num_envp, char *buffer, int buffer_size)
-+{
-+ struct xenbus_device *xdev;
-+ struct xenbus_driver *drv;
-+ int i = 0;
-+ int length = 0;
-+
-+ DPRINTK("");
-+
-+ if (dev == NULL)
-+ return -ENODEV;
-+
-+ xdev = to_xenbus_device(dev);
-+ if (xdev == NULL)
-+ return -ENODEV;
-+
-+ /* stuff we want to pass to /sbin/hotplug */
-+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-+ "XENBUS_TYPE=%s", xdev->devicetype);
-+
-+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-+ "XENBUS_PATH=%s", xdev->nodename);
-+
-+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-+ "XENBUS_BASE_PATH=%s", xenbus_backend.root);
-+
-+ /* terminate, set to next free slot, shrink available space */
-+ envp[i] = NULL;
-+ envp = &envp[i];
-+ num_envp -= i;
-+ buffer = &buffer[length];
-+ buffer_size -= length;
-+
-+ if (dev->driver) {
-+ drv = to_xenbus_driver(dev->driver);
-+ if (drv && drv->uevent)
-+ return drv->uevent(xdev, envp, num_envp, buffer,
-+ buffer_size);
-+ }
-+
-+ return 0;
-+}
-+
-+int xenbus_register_backend(struct xenbus_driver *drv)
-+{
-+ drv->read_otherend_details = read_frontend_details;
-+
-+ return xenbus_register_driver_common(drv, &xenbus_backend);
-+}
-+EXPORT_SYMBOL_GPL(xenbus_register_backend);
-+
-+/* backend/<typename>/<frontend-uuid>/<name> */
-+static int xenbus_probe_backend_unit(const char *dir,
-+ const char *type,
-+ const char *name)
-+{
-+ char *nodename;
-+ int err;
-+
-+ nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
-+ if (!nodename)
-+ return -ENOMEM;
-+
-+ DPRINTK("%s\n", nodename);
-+
-+ err = xenbus_probe_node(&xenbus_backend, type, nodename);
-+ kfree(nodename);
-+ return err;
-+}
-+
-+/* backend/<typename>/<frontend-domid> */
-+static int xenbus_probe_backend(const char *type, const char *domid)
-+{
-+ char *nodename;
-+ int err = 0;
-+ char **dir;
-+ unsigned int i, dir_n = 0;
-+
-+ DPRINTK("");
-+
-+ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_backend.root, type, domid);
-+ if (!nodename)
-+ return -ENOMEM;
-+
-+ dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n);
-+ if (IS_ERR(dir)) {
-+ kfree(nodename);
-+ return PTR_ERR(dir);
-+ }
-+
-+ for (i = 0; i < dir_n; i++) {
-+ err = xenbus_probe_backend_unit(nodename, type, dir[i]);
-+ if (err)
-+ break;
-+ }
-+ kfree(dir);
-+ kfree(nodename);
-+ return err;
-+}
-+
-+static void backend_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ DPRINTK("");
-+
-+ dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
-+}
-+
-+static struct xenbus_watch be_watch = {
-+ .node = "backend",
-+ .callback = backend_changed,
-+};
-+
-+void xenbus_backend_suspend(int (*fn)(struct device *, void *))
-+{
-+ DPRINTK("");
-+ if (!xenbus_backend.error)
-+ bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn);
-+}
-+
-+void xenbus_backend_resume(int (*fn)(struct device *, void *))
-+{
-+ DPRINTK("");
-+ if (!xenbus_backend.error)
-+ bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn);
-+}
-+
-+void xenbus_backend_probe_and_watch(void)
-+{
-+ xenbus_probe_devices(&xenbus_backend);
-+ register_xenbus_watch(&be_watch);
-+}
-+
-+void xenbus_backend_bus_register(void)
-+{
-+ xenbus_backend.error = bus_register(&xenbus_backend.bus);
-+ if (xenbus_backend.error)
-+ printk(KERN_WARNING
-+ "XENBUS: Error registering backend bus: %i\n",
-+ xenbus_backend.error);
-+}
-+
-+void xenbus_backend_device_register(void)
-+{
-+ if (xenbus_backend.error)
-+ return;
-+
-+ xenbus_backend.error = device_register(&xenbus_backend.dev);
-+ if (xenbus_backend.error) {
-+ bus_unregister(&xenbus_backend.bus);
-+ printk(KERN_WARNING
-+ "XENBUS: Error registering backend device: %i\n",
-+ xenbus_backend.error);
-+ }
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/xenbus/xenbus_probe.c ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_probe.c
---- ubuntu-gutsy/drivers/xen/xenbus/xenbus_probe.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_probe.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,1089 @@
-+/******************************************************************************
-+ * Talks to Xen Store to figure out what devices we have.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
-+ * Copyright (C) 2005, 2006 XenSource Ltd
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#define DPRINTK(fmt, args...) \
-+ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
-+ __FUNCTION__, __LINE__, ##args)
-+
-+#include <linux/kernel.h>
-+#include <linux/err.h>
-+#include <linux/string.h>
-+#include <linux/ctype.h>
-+#include <linux/fcntl.h>
-+#include <linux/mm.h>
-+#include <linux/notifier.h>
-+#include <linux/kthread.h>
-+#include <linux/mutex.h>
-+
-+#include <asm/io.h>
-+#include <asm/page.h>
-+#include <asm/maddr.h>
-+#include <asm/pgtable.h>
-+#include <asm/hypervisor.h>
-+#include <xen/xenbus.h>
-+#include <xen/xen_proc.h>
-+#include <xen/evtchn.h>
-+#include <xen/features.h>
-+#include <xen/hvm.h>
-+
-+#include "xenbus_comms.h"
-+#include "xenbus_probe.h"
-+
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
-+int xen_store_evtchn;
-+struct xenstore_domain_interface *xen_store_interface;
-+static unsigned long xen_store_mfn;
-+
-+extern struct mutex xenwatch_mutex;
-+
-+static ATOMIC_NOTIFIER_HEAD(xenstore_chain);
-+
-+static void wait_for_devices(struct xenbus_driver *xendrv);
-+
-+static int xenbus_probe_frontend(const char *type, const char *name);
-+
-+static void xenbus_dev_shutdown(struct device *_dev);
-+
-+/* If something in array of ids matches this device, return it. */
-+static const struct xenbus_device_id *
-+match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
-+{
-+ for (; *arr->devicetype != '\0'; arr++) {
-+ if (!strcmp(arr->devicetype, dev->devicetype))
-+ return arr;
-+ }
-+ return NULL;
-+}
-+
-+int xenbus_match(struct device *_dev, struct device_driver *_drv)
-+{
-+ struct xenbus_driver *drv = to_xenbus_driver(_drv);
-+
-+ if (!drv->ids)
-+ return 0;
-+
-+ return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
-+}
-+
-+/* device/<type>/<id> => <type>-<id> */
-+static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
-+{
-+ nodename = strchr(nodename, '/');
-+ if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) {
-+ printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
-+ return -EINVAL;
-+ }
-+
-+ strlcpy(bus_id, nodename + 1, BUS_ID_SIZE);
-+ if (!strchr(bus_id, '/')) {
-+ printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
-+ return -EINVAL;
-+ }
-+ *strchr(bus_id, '/') = '-';
-+ return 0;
-+}
-+
-+
-+static void free_otherend_details(struct xenbus_device *dev)
-+{
-+ kfree(dev->otherend);
-+ dev->otherend = NULL;
-+}
-+
-+
-+static void free_otherend_watch(struct xenbus_device *dev)
-+{
-+ if (dev->otherend_watch.node) {
-+ unregister_xenbus_watch(&dev->otherend_watch);
-+ kfree(dev->otherend_watch.node);
-+ dev->otherend_watch.node = NULL;
-+ }
-+}
-+
-+
-+int read_otherend_details(struct xenbus_device *xendev,
-+ char *id_node, char *path_node)
-+{
-+ int err = xenbus_gather(XBT_NIL, xendev->nodename,
-+ id_node, "%i", &xendev->otherend_id,
-+ path_node, NULL, &xendev->otherend,
-+ NULL);
-+ if (err) {
-+ xenbus_dev_fatal(xendev, err,
-+ "reading other end details from %s",
-+ xendev->nodename);
-+ return err;
-+ }
-+ if (strlen(xendev->otherend) == 0 ||
-+ !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
-+ xenbus_dev_fatal(xendev, -ENOENT,
-+ "unable to read other end from %s. "
-+ "missing or inaccessible.",
-+ xendev->nodename);
-+ free_otherend_details(xendev);
-+ return -ENOENT;
-+ }
-+
-+ return 0;
-+}
-+
-+
-+static int read_backend_details(struct xenbus_device *xendev)
-+{
-+ return read_otherend_details(xendev, "backend-id", "backend");
-+}
-+
-+
-+/* Bus type for frontend drivers. */
-+static struct xen_bus_type xenbus_frontend = {
-+ .root = "device",
-+ .levels = 2, /* device/type/<id> */
-+ .get_bus_id = frontend_bus_id,
-+ .probe = xenbus_probe_frontend,
-+ .bus = {
-+ .name = "xen",
-+ .match = xenbus_match,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ .probe = xenbus_dev_probe,
-+ .remove = xenbus_dev_remove,
-+ .shutdown = xenbus_dev_shutdown,
-+#endif
-+ },
-+ .dev = {
-+ .bus_id = "xen",
-+ },
-+};
-+
-+static void otherend_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ struct xenbus_device *dev =
-+ container_of(watch, struct xenbus_device, otherend_watch);
-+ struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
-+ enum xenbus_state state;
-+
-+ /* Protect us against watches firing on old details when the otherend
-+ details change, say immediately after a resume. */
-+ if (!dev->otherend ||
-+ strncmp(dev->otherend, vec[XS_WATCH_PATH],
-+ strlen(dev->otherend))) {
-+ DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]);
-+ return;
-+ }
-+
-+ state = xenbus_read_driver_state(dev->otherend);
-+
-+ DPRINTK("state is %d (%s), %s, %s", state, xenbus_strstate(state),
-+ dev->otherend_watch.node, vec[XS_WATCH_PATH]);
-+
-+ /*
-+	 * Ignore xenbus transitions during shutdown. This prevents us from
-+	 * doing work that can fail, e.g. when the rootfs is gone.
-+ */
-+ if (system_state > SYSTEM_RUNNING) {
-+ struct xen_bus_type *bus = bus;
-+ bus = container_of(dev->dev.bus, struct xen_bus_type, bus);
-+ /* If we're frontend, drive the state machine to Closed. */
-+ /* This should cause the backend to release our resources. */
-+ if ((bus == &xenbus_frontend) && (state == XenbusStateClosing))
-+ xenbus_frontend_closed(dev);
-+ return;
-+ }
-+
-+ if (drv->otherend_changed)
-+ drv->otherend_changed(dev, state);
-+}
-+
-+
-+static int talk_to_otherend(struct xenbus_device *dev)
-+{
-+ struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
-+
-+ free_otherend_watch(dev);
-+ free_otherend_details(dev);
-+
-+ return drv->read_otherend_details(dev);
-+}
-+
-+
-+static int watch_otherend(struct xenbus_device *dev)
-+{
-+ return xenbus_watch_path2(dev, dev->otherend, "state",
-+ &dev->otherend_watch, otherend_changed);
-+}
-+
-+
-+int xenbus_dev_probe(struct device *_dev)
-+{
-+ struct xenbus_device *dev = to_xenbus_device(_dev);
-+ struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
-+ const struct xenbus_device_id *id;
-+ int err;
-+
-+ DPRINTK("%s", dev->nodename);
-+
-+ if (!drv->probe) {
-+ err = -ENODEV;
-+ goto fail;
-+ }
-+
-+ id = match_device(drv->ids, dev);
-+ if (!id) {
-+ err = -ENODEV;
-+ goto fail;
-+ }
-+
-+ err = talk_to_otherend(dev);
-+ if (err) {
-+ printk(KERN_WARNING
-+ "xenbus_probe: talk_to_otherend on %s failed.\n",
-+ dev->nodename);
-+ return err;
-+ }
-+
-+ err = drv->probe(dev, id);
-+ if (err)
-+ goto fail;
-+
-+ err = watch_otherend(dev);
-+ if (err) {
-+ printk(KERN_WARNING
-+ "xenbus_probe: watch_otherend on %s failed.\n",
-+ dev->nodename);
-+ return err;
-+ }
-+
-+ return 0;
-+fail:
-+ xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
-+ xenbus_switch_state(dev, XenbusStateClosed);
-+ return -ENODEV;
-+}
-+
-+int xenbus_dev_remove(struct device *_dev)
-+{
-+ struct xenbus_device *dev = to_xenbus_device(_dev);
-+ struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
-+
-+ DPRINTK("%s", dev->nodename);
-+
-+ free_otherend_watch(dev);
-+ free_otherend_details(dev);
-+
-+ if (drv->remove)
-+ drv->remove(dev);
-+
-+ xenbus_switch_state(dev, XenbusStateClosed);
-+ return 0;
-+}
-+
-+static void xenbus_dev_shutdown(struct device *_dev)
-+{
-+ struct xenbus_device *dev = to_xenbus_device(_dev);
-+ unsigned long timeout = 5*HZ;
-+
-+ DPRINTK("%s", dev->nodename);
-+
-+ get_device(&dev->dev);
-+ if (dev->state != XenbusStateConnected) {
-+ printk("%s: %s: %s != Connected, skipping\n", __FUNCTION__,
-+ dev->nodename, xenbus_strstate(dev->state));
-+ goto out;
-+ }
-+ xenbus_switch_state(dev, XenbusStateClosing);
-+ timeout = wait_for_completion_timeout(&dev->down, timeout);
-+ if (!timeout)
-+ printk("%s: %s timeout closing device\n", __FUNCTION__, dev->nodename);
-+ out:
-+ put_device(&dev->dev);
-+}
-+
-+int xenbus_register_driver_common(struct xenbus_driver *drv,
-+ struct xen_bus_type *bus)
-+{
-+ int ret;
-+
-+ if (bus->error)
-+ return bus->error;
-+
-+ drv->driver.name = drv->name;
-+ drv->driver.bus = &bus->bus;
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
-+ drv->driver.owner = drv->owner;
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
-+ drv->driver.probe = xenbus_dev_probe;
-+ drv->driver.remove = xenbus_dev_remove;
-+ drv->driver.shutdown = xenbus_dev_shutdown;
-+#endif
-+
-+ mutex_lock(&xenwatch_mutex);
-+ ret = driver_register(&drv->driver);
-+ mutex_unlock(&xenwatch_mutex);
-+ return ret;
-+}
-+
-+int xenbus_register_frontend(struct xenbus_driver *drv)
-+{
-+ int ret;
-+
-+ drv->read_otherend_details = read_backend_details;
-+
-+ ret = xenbus_register_driver_common(drv, &xenbus_frontend);
-+ if (ret)
-+ return ret;
-+
-+	/* If this driver is loaded as a module, wait for devices to attach. */
-+ wait_for_devices(drv);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_register_frontend);
-+
-+void xenbus_unregister_driver(struct xenbus_driver *drv)
-+{
-+ driver_unregister(&drv->driver);
-+}
-+EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
-+
-+struct xb_find_info
-+{
-+ struct xenbus_device *dev;
-+ const char *nodename;
-+};
-+
-+static int cmp_dev(struct device *dev, void *data)
-+{
-+ struct xenbus_device *xendev = to_xenbus_device(dev);
-+ struct xb_find_info *info = data;
-+
-+ if (!strcmp(xendev->nodename, info->nodename)) {
-+ info->dev = xendev;
-+ get_device(dev);
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+struct xenbus_device *xenbus_device_find(const char *nodename,
-+ struct bus_type *bus)
-+{
-+ struct xb_find_info info = { .dev = NULL, .nodename = nodename };
-+
-+ bus_for_each_dev(bus, NULL, &info, cmp_dev);
-+ return info.dev;
-+}
-+
-+static int cleanup_dev(struct device *dev, void *data)
-+{
-+ struct xenbus_device *xendev = to_xenbus_device(dev);
-+ struct xb_find_info *info = data;
-+ int len = strlen(info->nodename);
-+
-+ DPRINTK("%s", info->nodename);
-+
-+ /* Match the info->nodename path, or any subdirectory of that path. */
-+ if (strncmp(xendev->nodename, info->nodename, len))
-+ return 0;
-+
-+ /* If the node name is longer, ensure it really is a subdirectory. */
-+ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
-+ return 0;
-+
-+ info->dev = xendev;
-+ get_device(dev);
-+ return 1;
-+}
-+
-+static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
-+{
-+ struct xb_find_info info = { .nodename = path };
-+
-+ do {
-+ info.dev = NULL;
-+ bus_for_each_dev(bus, NULL, &info, cleanup_dev);
-+ if (info.dev) {
-+ device_unregister(&info.dev->dev);
-+ put_device(&info.dev->dev);
-+ }
-+ } while (info.dev);
-+}
-+
-+static void xenbus_dev_release(struct device *dev)
-+{
-+ if (dev)
-+ kfree(to_xenbus_device(dev));
-+}
-+
-+static ssize_t xendev_show_nodename(struct device *dev,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
-+ struct device_attribute *attr,
-+#endif
-+ char *buf)
-+{
-+ return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
-+}
-+DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
-+
-+static ssize_t xendev_show_devtype(struct device *dev,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
-+ struct device_attribute *attr,
-+#endif
-+ char *buf)
-+{
-+ return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
-+}
-+DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
-+
-+
-+int xenbus_probe_node(struct xen_bus_type *bus,
-+ const char *type,
-+ const char *nodename)
-+{
-+ int err;
-+ struct xenbus_device *xendev;
-+ size_t stringlen;
-+ char *tmpstring;
-+
-+ enum xenbus_state state = xenbus_read_driver_state(nodename);
-+
-+ if (bus->error)
-+ return bus->error;
-+
-+ if (state != XenbusStateInitialising) {
-+ /* Device is not new, so ignore it. This can happen if a
-+ device is going away after switching to Closed. */
-+ return 0;
-+ }
-+
-+ stringlen = strlen(nodename) + 1 + strlen(type) + 1;
-+ xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
-+ if (!xendev)
-+ return -ENOMEM;
-+
-+ xendev->state = XenbusStateInitialising;
-+
-+ /* Copy the strings into the extra space. */
-+
-+ tmpstring = (char *)(xendev + 1);
-+ strcpy(tmpstring, nodename);
-+ xendev->nodename = tmpstring;
-+
-+ tmpstring += strlen(tmpstring) + 1;
-+ strcpy(tmpstring, type);
-+ xendev->devicetype = tmpstring;
-+ init_completion(&xendev->down);
-+
-+ xendev->dev.parent = &bus->dev;
-+ xendev->dev.bus = &bus->bus;
-+ xendev->dev.release = xenbus_dev_release;
-+
-+ err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename);
-+ if (err)
-+ goto fail;
-+
-+ /* Register with generic device framework. */
-+ err = device_register(&xendev->dev);
-+ if (err)
-+ goto fail;
-+
-+ err = device_create_file(&xendev->dev, &dev_attr_nodename);
-+ if (err)
-+ goto unregister;
-+ err = device_create_file(&xendev->dev, &dev_attr_devtype);
-+ if (err)
-+ goto unregister;
-+
-+ return 0;
-+unregister:
-+ device_remove_file(&xendev->dev, &dev_attr_nodename);
-+ device_remove_file(&xendev->dev, &dev_attr_devtype);
-+ device_unregister(&xendev->dev);
-+fail:
-+ kfree(xendev);
-+ return err;
-+}
-+
-+/* device/<typename>/<name> */
-+static int xenbus_probe_frontend(const char *type, const char *name)
-+{
-+ char *nodename;
-+ int err;
-+
-+ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name);
-+ if (!nodename)
-+ return -ENOMEM;
-+
-+ DPRINTK("%s", nodename);
-+
-+ err = xenbus_probe_node(&xenbus_frontend, type, nodename);
-+ kfree(nodename);
-+ return err;
-+}
-+
-+static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
-+{
-+ int err = 0;
-+ char **dir;
-+ unsigned int dir_n = 0;
-+ int i;
-+
-+ dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n);
-+ if (IS_ERR(dir))
-+ return PTR_ERR(dir);
-+
-+ for (i = 0; i < dir_n; i++) {
-+ err = bus->probe(type, dir[i]);
-+ if (err)
-+ break;
-+ }
-+ kfree(dir);
-+ return err;
-+}
-+
-+int xenbus_probe_devices(struct xen_bus_type *bus)
-+{
-+ int err = 0;
-+ char **dir;
-+ unsigned int i, dir_n;
-+
-+ if (bus->error)
-+ return bus->error;
-+
-+ dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
-+ if (IS_ERR(dir))
-+ return PTR_ERR(dir);
-+
-+ for (i = 0; i < dir_n; i++) {
-+ err = xenbus_probe_device_type(bus, dir[i]);
-+ if (err)
-+ break;
-+ }
-+ kfree(dir);
-+ return err;
-+}
-+
-+static unsigned int char_count(const char *str, char c)
-+{
-+ unsigned int i, ret = 0;
-+
-+ for (i = 0; str[i]; i++)
-+ if (str[i] == c)
-+ ret++;
-+ return ret;
-+}
-+
-+static int strsep_len(const char *str, char c, unsigned int len)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; str[i]; i++)
-+ if (str[i] == c) {
-+ if (len == 0)
-+ return i;
-+ len--;
-+ }
-+ return (len == 0) ? i : -ERANGE;
-+}
-+
-+void dev_changed(const char *node, struct xen_bus_type *bus)
-+{
-+ int exists, rootlen;
-+ struct xenbus_device *dev;
-+ char type[BUS_ID_SIZE];
-+ const char *p, *root;
-+
-+ if (bus->error || char_count(node, '/') < 2)
-+ return;
-+
-+ exists = xenbus_exists(XBT_NIL, node, "");
-+ if (!exists) {
-+ xenbus_cleanup_devices(node, &bus->bus);
-+ return;
-+ }
-+
-+ /* backend/<type>/... or device/<type>/... */
-+ p = strchr(node, '/') + 1;
-+ snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
-+ type[BUS_ID_SIZE-1] = '\0';
-+
-+ rootlen = strsep_len(node, '/', bus->levels);
-+ if (rootlen < 0)
-+ return;
-+ root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
-+ if (!root)
-+ return;
-+
-+ dev = xenbus_device_find(root, &bus->bus);
-+ if (!dev)
-+ xenbus_probe_node(bus, type, root);
-+ else
-+ put_device(&dev->dev);
-+
-+ kfree(root);
-+}
-+
-+static void frontend_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ DPRINTK("");
-+
-+ dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
-+}
-+
-+/* We watch for devices appearing and vanishing. */
-+static struct xenbus_watch fe_watch = {
-+ .node = "device",
-+ .callback = frontend_changed,
-+};
-+
-+static int suspend_dev(struct device *dev, void *data)
-+{
-+ int err = 0;
-+ struct xenbus_driver *drv;
-+ struct xenbus_device *xdev;
-+
-+ DPRINTK("");
-+
-+ if (dev->driver == NULL)
-+ return 0;
-+ drv = to_xenbus_driver(dev->driver);
-+ xdev = container_of(dev, struct xenbus_device, dev);
-+ if (drv->suspend)
-+ err = drv->suspend(xdev);
-+ if (err)
-+ printk(KERN_WARNING
-+ "xenbus: suspend %s failed: %i\n", dev->bus_id, err);
-+ return 0;
-+}
-+
-+static int suspend_cancel_dev(struct device *dev, void *data)
-+{
-+ int err = 0;
-+ struct xenbus_driver *drv;
-+ struct xenbus_device *xdev;
-+
-+ DPRINTK("");
-+
-+ if (dev->driver == NULL)
-+ return 0;
-+ drv = to_xenbus_driver(dev->driver);
-+ xdev = container_of(dev, struct xenbus_device, dev);
-+ if (drv->suspend_cancel)
-+ err = drv->suspend_cancel(xdev);
-+ if (err)
-+ printk(KERN_WARNING
-+ "xenbus: suspend_cancel %s failed: %i\n",
-+ dev->bus_id, err);
-+ return 0;
-+}
-+
-+static int resume_dev(struct device *dev, void *data)
-+{
-+ int err;
-+ struct xenbus_driver *drv;
-+ struct xenbus_device *xdev;
-+
-+ DPRINTK("");
-+
-+ if (dev->driver == NULL)
-+ return 0;
-+
-+ drv = to_xenbus_driver(dev->driver);
-+ xdev = container_of(dev, struct xenbus_device, dev);
-+
-+ err = talk_to_otherend(xdev);
-+ if (err) {
-+ printk(KERN_WARNING
-+ "xenbus: resume (talk_to_otherend) %s failed: %i\n",
-+ dev->bus_id, err);
-+ return err;
-+ }
-+
-+ xdev->state = XenbusStateInitialising;
-+
-+ if (drv->resume) {
-+ err = drv->resume(xdev);
-+ if (err) {
-+ printk(KERN_WARNING
-+ "xenbus: resume %s failed: %i\n",
-+ dev->bus_id, err);
-+ return err;
-+ }
-+ }
-+
-+ err = watch_otherend(xdev);
-+ if (err) {
-+ printk(KERN_WARNING
-+ "xenbus_probe: resume (watch_otherend) %s failed: "
-+ "%d.\n", dev->bus_id, err);
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+void xenbus_suspend(void)
-+{
-+ DPRINTK("");
-+
-+ if (!xenbus_frontend.error)
-+ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
-+ xenbus_backend_suspend(suspend_dev);
-+ xs_suspend();
-+}
-+EXPORT_SYMBOL_GPL(xenbus_suspend);
-+
-+void xenbus_resume(void)
-+{
-+ xb_init_comms();
-+ xs_resume();
-+ if (!xenbus_frontend.error)
-+ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
-+ xenbus_backend_resume(resume_dev);
-+}
-+EXPORT_SYMBOL_GPL(xenbus_resume);
-+
-+void xenbus_suspend_cancel(void)
-+{
-+ xs_suspend_cancel();
-+ if (!xenbus_frontend.error)
-+ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
-+ xenbus_backend_resume(suspend_cancel_dev);
-+}
-+EXPORT_SYMBOL_GPL(xenbus_suspend_cancel);
-+
-+/* A flag to determine if xenstored is 'ready' (i.e. has started) */
-+int xenstored_ready = 0;
-+
-+
-+int register_xenstore_notifier(struct notifier_block *nb)
-+{
-+ int ret = 0;
-+
-+ if (xenstored_ready > 0)
-+ ret = nb->notifier_call(nb, 0, NULL);
-+ else
-+ atomic_notifier_chain_register(&xenstore_chain, nb);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(register_xenstore_notifier);
-+
-+void unregister_xenstore_notifier(struct notifier_block *nb)
-+{
-+ atomic_notifier_chain_unregister(&xenstore_chain, nb);
-+}
-+EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
-+
-+
-+void xenbus_probe(struct work_struct *unused)
-+{
-+ BUG_ON(xenstored_ready <= 0);
-+
-+ /* Enumerate devices in xenstore and watch for changes. */
-+ xenbus_probe_devices(&xenbus_frontend);
-+ register_xenbus_watch(&fe_watch);
-+ xenbus_backend_probe_and_watch();
-+
-+ /* Notify others that xenstore is up */
-+ atomic_notifier_call_chain(&xenstore_chain, 0, NULL);
-+}
-+
-+
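A rough illustration of the xenstore notifier interface above (the callback, notifier_block and init-function names below are invented): a driver that must not touch xenstore before xenstored is running could hook in like this:

#include <linux/init.h>
#include <linux/notifier.h>
#include <xen/xenbus.h>

static int my_xenstore_ready(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	/* xenstore is up: xenbus_read()/xenbus_write() may be used now. */
	return NOTIFY_DONE;
}

static struct notifier_block my_xenstore_nb = {
	.notifier_call = my_xenstore_ready,
};

static int __init mydrv_init(void)
{
	/* Fires immediately if xenstored is already up, see above. */
	return register_xenstore_notifier(&my_xenstore_nb);
}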
-+#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
-+static struct file_operations xsd_kva_fops;
-+static struct proc_dir_entry *xsd_kva_intf;
-+static struct proc_dir_entry *xsd_port_intf;
-+
-+static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
-+{
-+ size_t size = vma->vm_end - vma->vm_start;
-+
-+ if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
-+ return -EINVAL;
-+
-+ if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn),
-+ size, vma->vm_page_prot))
-+ return -EAGAIN;
-+
-+ return 0;
-+}
-+
-+static int xsd_kva_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ len = sprintf(page, "0x%p", xen_store_interface);
-+ *eof = 1;
-+ return len;
-+}
-+
-+static int xsd_port_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ len = sprintf(page, "%d", xen_store_evtchn);
-+ *eof = 1;
-+ return len;
-+}
-+#endif
-+
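For context, a minimal userspace sketch of how a daemon such as xenstored could pick up the event-channel port exposed through the xsd_port read handler above; the helper name is invented.

#include <stdio.h>

/* Returns the xenstored event-channel port, or -1 on error. */
static int read_xsd_port(void)
{
	FILE *f = fopen("/proc/xen/xsd_port", "r");
	int port = -1;

	if (f) {
		if (fscanf(f, "%d", &port) != 1)
			port = -1;
		fclose(f);
	}
	return port;
}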
-+static int xenbus_probe_init(void)
-+{
-+ int err = 0;
-+ unsigned long page = 0;
-+
-+ DPRINTK("");
-+
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+
-+ /* Register ourselves with the kernel bus subsystem */
-+ xenbus_frontend.error = bus_register(&xenbus_frontend.bus);
-+ if (xenbus_frontend.error)
-+ printk(KERN_WARNING
-+ "XENBUS: Error registering frontend bus: %i\n",
-+ xenbus_frontend.error);
-+ xenbus_backend_bus_register();
-+
-+ /*
-+ * Domain0 doesn't have a store_evtchn or store_mfn yet.
-+ */
-+ if (is_initial_xendomain()) {
-+ struct evtchn_alloc_unbound alloc_unbound;
-+
-+ /* Allocate page. */
-+ page = get_zeroed_page(GFP_KERNEL);
-+ if (!page)
-+ return -ENOMEM;
-+
-+ xen_store_mfn = xen_start_info->store_mfn =
-+ pfn_to_mfn(virt_to_phys((void *)page) >>
-+ PAGE_SHIFT);
-+
-+ /* Next allocate a local port which xenstored can bind to */
-+ alloc_unbound.dom = DOMID_SELF;
-+ alloc_unbound.remote_dom = 0;
-+
-+ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
-+ &alloc_unbound);
-+ if (err == -ENOSYS)
-+ goto err;
-+ BUG_ON(err);
-+ xen_store_evtchn = xen_start_info->store_evtchn =
-+ alloc_unbound.port;
-+
-+#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
-+ /* And finally publish the above info in /proc/xen */
-+ xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600);
-+ if (xsd_kva_intf) {
-+ memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops,
-+ sizeof(xsd_kva_fops));
-+ xsd_kva_fops.mmap = xsd_kva_mmap;
-+ xsd_kva_intf->proc_fops = &xsd_kva_fops;
-+ xsd_kva_intf->read_proc = xsd_kva_read;
-+ }
-+ xsd_port_intf = create_xen_proc_entry("xsd_port", 0400);
-+ if (xsd_port_intf)
-+ xsd_port_intf->read_proc = xsd_port_read;
-+#endif
-+ xen_store_interface = mfn_to_virt(xen_store_mfn);
-+ } else {
-+ xenstored_ready = 1;
-+#ifdef CONFIG_XEN
-+ xen_store_evtchn = xen_start_info->store_evtchn;
-+ xen_store_mfn = xen_start_info->store_mfn;
-+ xen_store_interface = mfn_to_virt(xen_store_mfn);
-+#else
-+ xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN);
-+ xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN);
-+ xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT,
-+ PAGE_SIZE);
-+#endif
-+ }
-+
-+
-+ xenbus_dev_init();
-+
-+ /* Initialize the interface to xenstore. */
-+ err = xs_init();
-+ if (err) {
-+ printk(KERN_WARNING
-+ "XENBUS: Error initializing xenstore comms: %i\n", err);
-+ goto err;
-+ }
-+
-+ /* Register ourselves with the kernel device subsystem */
-+ if (!xenbus_frontend.error) {
-+ xenbus_frontend.error = device_register(&xenbus_frontend.dev);
-+ if (xenbus_frontend.error) {
-+ bus_unregister(&xenbus_frontend.bus);
-+ printk(KERN_WARNING
-+ "XENBUS: Error registering frontend device: %i\n",
-+ xenbus_frontend.error);
-+ }
-+ }
-+ xenbus_backend_device_register();
-+
-+ if (!is_initial_xendomain())
-+ xenbus_probe(NULL);
-+
-+ return 0;
-+
-+ err:
-+ if (page)
-+ free_page(page);
-+
-+ /*
-+ * Do not unregister the xenbus front/backend buses here. The buses
-+ * must exist because front/backend drivers will use them when they are
-+ * registered.
-+ */
-+
-+ return err;
-+}
-+
-+#ifdef CONFIG_XEN
-+postcore_initcall(xenbus_probe_init);
-+MODULE_LICENSE("Dual BSD/GPL");
-+#else
-+int xenbus_init(void)
-+{
-+ return xenbus_probe_init();
-+}
-+#endif
-+
-+static int is_disconnected_device(struct device *dev, void *data)
-+{
-+ struct xenbus_device *xendev = to_xenbus_device(dev);
-+ struct device_driver *drv = data;
-+ struct xenbus_driver *xendrv;
-+
-+ /*
-+ * A device with no driver will never connect. We care only about
-+ * devices which should currently be in the process of connecting.
-+ */
-+ if (!dev->driver)
-+ return 0;
-+
-+ /* Is this search limited to a particular driver? */
-+ if (drv && (dev->driver != drv))
-+ return 0;
-+
-+ xendrv = to_xenbus_driver(dev->driver);
-+ return (xendev->state != XenbusStateConnected ||
-+ (xendrv->is_ready && !xendrv->is_ready(xendev)));
-+}
-+
-+static int exists_disconnected_device(struct device_driver *drv)
-+{
-+ if (xenbus_frontend.error)
-+ return xenbus_frontend.error;
-+ return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
-+ is_disconnected_device);
-+}
-+
-+static int print_device_status(struct device *dev, void *data)
-+{
-+ struct xenbus_device *xendev = to_xenbus_device(dev);
-+ struct device_driver *drv = data;
-+
-+ /* Is this operation limited to a particular driver? */
-+ if (drv && (dev->driver != drv))
-+ return 0;
-+
-+ if (!dev->driver) {
-+ /* Information only: is this too noisy? */
-+ printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
-+ xendev->nodename);
-+ } else if (xendev->state != XenbusStateConnected) {
-+ printk(KERN_WARNING "XENBUS: Timeout connecting "
-+ "to device: %s (state %d)\n",
-+ xendev->nodename, xendev->state);
-+ }
-+
-+ return 0;
-+}
-+
-+/* We only wait for device setup after most initcalls have run. */
-+static int ready_to_wait_for_devices;
-+
-+/*
-+ * Wait up to 10 seconds for all currently configured devices. We need to do
-+ * this to guarantee that the filesystems and/or network devices needed for
-+ * boot are available before we allow the boot to proceed.
-+ *
-+ * This needs to be on a late_initcall, to happen after the frontend device
-+ * drivers have been initialised, but before the root fs is mounted.
-+ *
-+ * A possible improvement here would be to have the tools add a per-device
-+ * flag to the store entry, indicating whether it is needed at boot time.
-+ * This would allow people who knew what they were doing to accelerate their
-+ * boot slightly, but of course needs tools or manual intervention to set up
-+ * those flags correctly.
-+ */
-+static void wait_for_devices(struct xenbus_driver *xendrv)
-+{
-+ unsigned long timeout = jiffies + 10*HZ;
-+ struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
-+
-+ if (!ready_to_wait_for_devices || !is_running_on_xen())
-+ return;
-+
-+ while (exists_disconnected_device(drv)) {
-+ if (time_after(jiffies, timeout))
-+ break;
-+ schedule_timeout_interruptible(HZ/10);
-+ }
-+
-+ bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
-+ print_device_status);
-+}
-+
-+#ifndef MODULE
-+static int __init boot_wait_for_devices(void)
-+{
-+ if (!xenbus_frontend.error) {
-+ ready_to_wait_for_devices = 1;
-+ wait_for_devices(NULL);
-+ }
-+ return 0;
-+}
-+
-+late_initcall(boot_wait_for_devices);
-+#endif
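For reference, a hedged skeleton of how a frontend driver plugs into the probe/resume machinery above. The "mydev" names are invented, and xenbus_register_frontend() is assumed here to be the frontend registration helper provided by xenbus_probe.c.

#include <linux/init.h>
#include <linux/module.h>
#include <xen/xenbus.h>

static int mydev_probe(struct xenbus_device *dev,
		       const struct xenbus_device_id *id)
{
	/* Allocate per-device state, publish ring refs, switch state, ... */
	return 0;
}

static int mydev_resume(struct xenbus_device *dev)
{
	/* Reconnect after save/restore; invoked via resume_dev() above. */
	return 0;
}

static struct xenbus_device_id mydev_ids[] = {
	{ "mydev" },
	{ "" }
};

static struct xenbus_driver mydev_driver = {
	.name   = "mydev",
	.owner  = THIS_MODULE,
	.ids    = mydev_ids,
	.probe  = mydev_probe,
	.resume = mydev_resume,
};

static int __init mydev_init(void)
{
	if (!is_running_on_xen())
		return -ENODEV;
	return xenbus_register_frontend(&mydev_driver);
}
module_init(mydev_init);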
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/xenbus/xenbus_probe.h ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_probe.h
---- ubuntu-gutsy/drivers/xen/xenbus/xenbus_probe.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_probe.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,75 @@
-+/******************************************************************************
-+ * xenbus_probe.h
-+ *
-+ * Talks to Xen Store to figure out what devices we have.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ * Copyright (C) 2005 XenSource Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef _XENBUS_PROBE_H
-+#define _XENBUS_PROBE_H
-+
-+#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE)
-+extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
-+extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
-+extern void xenbus_backend_probe_and_watch(void);
-+extern void xenbus_backend_bus_register(void);
-+extern void xenbus_backend_device_register(void);
-+#else
-+static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
-+static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
-+static inline void xenbus_backend_probe_and_watch(void) {}
-+static inline void xenbus_backend_bus_register(void) {}
-+static inline void xenbus_backend_device_register(void) {}
-+#endif
-+
-+struct xen_bus_type
-+{
-+ char *root;
-+ int error;
-+ unsigned int levels;
-+ int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
-+ int (*probe)(const char *type, const char *dir);
-+ struct bus_type bus;
-+ struct device dev;
-+};
-+
-+extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
-+extern int xenbus_dev_probe(struct device *_dev);
-+extern int xenbus_dev_remove(struct device *_dev);
-+extern int xenbus_register_driver_common(struct xenbus_driver *drv,
-+ struct xen_bus_type *bus);
-+extern int xenbus_probe_node(struct xen_bus_type *bus,
-+ const char *type,
-+ const char *nodename);
-+extern int xenbus_probe_devices(struct xen_bus_type *bus);
-+
-+extern void dev_changed(const char *node, struct xen_bus_type *bus);
-+
-+#endif
-+
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/xenbus/xenbus_xs.c ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_xs.c
---- ubuntu-gutsy/drivers/xen/xenbus/xenbus_xs.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/xenbus/xenbus_xs.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,882 @@
-+/******************************************************************************
-+ * xenbus_xs.c
-+ *
-+ * This is the kernel equivalent of the "xs" library. We don't need everything
-+ * and we use xenbus_comms for communication.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/unistd.h>
-+#include <linux/errno.h>
-+#include <linux/types.h>
-+#include <linux/uio.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/err.h>
-+#include <linux/slab.h>
-+#include <linux/fcntl.h>
-+#include <linux/kthread.h>
-+#include <linux/rwsem.h>
-+#include <linux/module.h>
-+#include <linux/mutex.h>
-+#include <xen/xenbus.h>
-+#include "xenbus_comms.h"
-+
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
-+struct xs_stored_msg {
-+ struct list_head list;
-+
-+ struct xsd_sockmsg hdr;
-+
-+ union {
-+ /* Queued replies. */
-+ struct {
-+ char *body;
-+ } reply;
-+
-+ /* Queued watch events. */
-+ struct {
-+ struct xenbus_watch *handle;
-+ char **vec;
-+ unsigned int vec_size;
-+ } watch;
-+ } u;
-+};
-+
-+struct xs_handle {
-+ /* A list of replies. Currently only one will ever be outstanding. */
-+ struct list_head reply_list;
-+ spinlock_t reply_lock;
-+ wait_queue_head_t reply_waitq;
-+
-+ /*
-+ * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex.
-+ * response_mutex is never taken simultaneously with the other three.
-+ */
-+
-+ /* One request at a time. */
-+ struct mutex request_mutex;
-+
-+ /* Protect xenbus reader thread against save/restore. */
-+ struct mutex response_mutex;
-+
-+ /* Protect transactions against save/restore. */
-+ struct rw_semaphore transaction_mutex;
-+
-+ /* Protect watch (de)register against save/restore. */
-+ struct rw_semaphore watch_mutex;
-+};
-+
-+static struct xs_handle xs_state;
-+
-+/* List of registered watches, and a lock to protect it. */
-+static LIST_HEAD(watches);
-+static DEFINE_SPINLOCK(watches_lock);
-+
-+/* List of pending watch callback events, and a lock to protect it. */
-+static LIST_HEAD(watch_events);
-+static DEFINE_SPINLOCK(watch_events_lock);
-+
-+/*
-+ * Details of the xenwatch callback kernel thread. The thread waits on the
-+ * watch_events_waitq for work to do (queued on watch_events list). When it
-+ * wakes up it acquires the xenwatch_mutex before reading the list and
-+ * carrying out work.
-+ */
-+static pid_t xenwatch_pid;
-+/* static */ DEFINE_MUTEX(xenwatch_mutex);
-+static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
-+
-+static int get_error(const char *errorstring)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
-+ if (i == ARRAY_SIZE(xsd_errors) - 1) {
-+ printk(KERN_WARNING
-+ "XENBUS xen store gave: unknown error %s",
-+ errorstring);
-+ return EINVAL;
-+ }
-+ }
-+ return xsd_errors[i].errnum;
-+}
-+
-+static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
-+{
-+ struct xs_stored_msg *msg;
-+ char *body;
-+
-+ spin_lock(&xs_state.reply_lock);
-+
-+ while (list_empty(&xs_state.reply_list)) {
-+ spin_unlock(&xs_state.reply_lock);
-+ /* XXX FIXME: Avoid synchronous wait for response here. */
-+ wait_event(xs_state.reply_waitq,
-+ !list_empty(&xs_state.reply_list));
-+ spin_lock(&xs_state.reply_lock);
-+ }
-+
-+ msg = list_entry(xs_state.reply_list.next,
-+ struct xs_stored_msg, list);
-+ list_del(&msg->list);
-+
-+ spin_unlock(&xs_state.reply_lock);
-+
-+ *type = msg->hdr.type;
-+ if (len)
-+ *len = msg->hdr.len;
-+ body = msg->u.reply.body;
-+
-+ kfree(msg);
-+
-+ return body;
-+}
-+
-+void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
-+{
-+ void *ret;
-+ struct xsd_sockmsg req_msg = *msg;
-+ int err;
-+
-+ if (req_msg.type == XS_TRANSACTION_START)
-+ down_read(&xs_state.transaction_mutex);
-+
-+ mutex_lock(&xs_state.request_mutex);
-+
-+ err = xb_write(msg, sizeof(*msg) + msg->len);
-+ if (err) {
-+ msg->type = XS_ERROR;
-+ ret = ERR_PTR(err);
-+ } else
-+ ret = read_reply(&msg->type, &msg->len);
-+
-+ mutex_unlock(&xs_state.request_mutex);
-+
-+ if ((req_msg.type == XS_TRANSACTION_END) ||
-+ ((req_msg.type == XS_TRANSACTION_START) &&
-+ (msg->type == XS_ERROR)))
-+ up_read(&xs_state.transaction_mutex);
-+
-+ return ret;
-+}
-+
-+/* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */
-+static void *xs_talkv(struct xenbus_transaction t,
-+ enum xsd_sockmsg_type type,
-+ const struct kvec *iovec,
-+ unsigned int num_vecs,
-+ unsigned int *len)
-+{
-+ struct xsd_sockmsg msg;
-+ void *ret = NULL;
-+ unsigned int i;
-+ int err;
-+
-+ msg.tx_id = t.id;
-+ msg.req_id = 0;
-+ msg.type = type;
-+ msg.len = 0;
-+ for (i = 0; i < num_vecs; i++)
-+ msg.len += iovec[i].iov_len;
-+
-+ mutex_lock(&xs_state.request_mutex);
-+
-+ err = xb_write(&msg, sizeof(msg));
-+ if (err) {
-+ mutex_unlock(&xs_state.request_mutex);
-+ return ERR_PTR(err);
-+ }
-+
-+ for (i = 0; i < num_vecs; i++) {
-+ err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
-+ if (err) {
-+ mutex_unlock(&xs_state.request_mutex);
-+ return ERR_PTR(err);
-+ }
-+ }
-+
-+ ret = read_reply(&msg.type, len);
-+
-+ mutex_unlock(&xs_state.request_mutex);
-+
-+ if (IS_ERR(ret))
-+ return ret;
-+
-+ if (msg.type == XS_ERROR) {
-+ err = get_error(ret);
-+ kfree(ret);
-+ return ERR_PTR(-err);
-+ }
-+
-+ if (msg.type != type) {
-+ if (printk_ratelimit())
-+ printk(KERN_WARNING
-+ "XENBUS unexpected type [%d], expected [%d]\n",
-+ msg.type, type);
-+ kfree(ret);
-+ return ERR_PTR(-EINVAL);
-+ }
-+ return ret;
-+}
-+
-+/* Simplified version of xs_talkv: single message. */
-+static void *xs_single(struct xenbus_transaction t,
-+ enum xsd_sockmsg_type type,
-+ const char *string,
-+ unsigned int *len)
-+{
-+ struct kvec iovec;
-+
-+ iovec.iov_base = (void *)string;
-+ iovec.iov_len = strlen(string) + 1;
-+ return xs_talkv(t, type, &iovec, 1, len);
-+}
-+
-+/* Many commands only need an ack, don't care what it says. */
-+static int xs_error(char *reply)
-+{
-+ if (IS_ERR(reply))
-+ return PTR_ERR(reply);
-+ kfree(reply);
-+ return 0;
-+}
-+
-+static unsigned int count_strings(const char *strings, unsigned int len)
-+{
-+ unsigned int num;
-+ const char *p;
-+
-+ for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
-+ num++;
-+
-+ return num;
-+}
-+
-+/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */
-+static char *join(const char *dir, const char *name)
-+{
-+ char *buffer;
-+
-+ if (strlen(name) == 0)
-+ buffer = kasprintf(GFP_KERNEL, "%s", dir);
-+ else
-+ buffer = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
-+ return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
-+}
-+
-+static char **split(char *strings, unsigned int len, unsigned int *num)
-+{
-+ char *p, **ret;
-+
-+ /* Count the strings. */
-+ *num = count_strings(strings, len);
-+
-+ /* Transfer to one big alloc for easy freeing. */
-+ ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL);
-+ if (!ret) {
-+ kfree(strings);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ memcpy(&ret[*num], strings, len);
-+ kfree(strings);
-+
-+ strings = (char *)&ret[*num];
-+ for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
-+ ret[(*num)++] = p;
-+
-+ return ret;
-+}
-+
-+char **xenbus_directory(struct xenbus_transaction t,
-+ const char *dir, const char *node, unsigned int *num)
-+{
-+ char *strings, *path;
-+ unsigned int len;
-+
-+ path = join(dir, node);
-+ if (IS_ERR(path))
-+ return (char **)path;
-+
-+ strings = xs_single(t, XS_DIRECTORY, path, &len);
-+ kfree(path);
-+ if (IS_ERR(strings))
-+ return (char **)strings;
-+
-+ return split(strings, len, num);
-+}
-+EXPORT_SYMBOL_GPL(xenbus_directory);
-+
-+/* Check if a path exists. Return 1 if it does. */
-+int xenbus_exists(struct xenbus_transaction t,
-+ const char *dir, const char *node)
-+{
-+ char **d;
-+ int dir_n;
-+
-+ d = xenbus_directory(t, dir, node, &dir_n);
-+ if (IS_ERR(d))
-+ return 0;
-+ kfree(d);
-+ return 1;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_exists);
-+
-+/* Get the value of a single file.
-+ * Returns a kmalloced value: call kfree() on it after use.
-+ * len indicates length in bytes.
-+ */
-+void *xenbus_read(struct xenbus_transaction t,
-+ const char *dir, const char *node, unsigned int *len)
-+{
-+ char *path;
-+ void *ret;
-+
-+ path = join(dir, node);
-+ if (IS_ERR(path))
-+ return (void *)path;
-+
-+ ret = xs_single(t, XS_READ, path, len);
-+ kfree(path);
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_read);
-+
-+/* Write the value of a single file.
-+ * Returns -err on failure.
-+ */
-+int xenbus_write(struct xenbus_transaction t,
-+ const char *dir, const char *node, const char *string)
-+{
-+ const char *path;
-+ struct kvec iovec[2];
-+ int ret;
-+
-+ path = join(dir, node);
-+ if (IS_ERR(path))
-+ return PTR_ERR(path);
-+
-+ iovec[0].iov_base = (void *)path;
-+ iovec[0].iov_len = strlen(path) + 1;
-+ iovec[1].iov_base = (void *)string;
-+ iovec[1].iov_len = strlen(string);
-+
-+ ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
-+ kfree(path);
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_write);
-+
-+/* Create a new directory. */
-+int xenbus_mkdir(struct xenbus_transaction t,
-+ const char *dir, const char *node)
-+{
-+ char *path;
-+ int ret;
-+
-+ path = join(dir, node);
-+ if (IS_ERR(path))
-+ return PTR_ERR(path);
-+
-+ ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
-+ kfree(path);
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_mkdir);
-+
-+/* Destroy a file or directory (directories must be empty). */
-+int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node)
-+{
-+ char *path;
-+ int ret;
-+
-+ path = join(dir, node);
-+ if (IS_ERR(path))
-+ return PTR_ERR(path);
-+
-+ ret = xs_error(xs_single(t, XS_RM, path, NULL));
-+ kfree(path);
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_rm);
-+
-+/* Start a transaction: changes by others will not be seen during this
-+ * transaction, and changes will not be visible to others until end.
-+ */
-+int xenbus_transaction_start(struct xenbus_transaction *t)
-+{
-+ char *id_str;
-+
-+ down_read(&xs_state.transaction_mutex);
-+
-+ id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
-+ if (IS_ERR(id_str)) {
-+ up_read(&xs_state.transaction_mutex);
-+ return PTR_ERR(id_str);
-+ }
-+
-+ t->id = simple_strtoul(id_str, NULL, 0);
-+ kfree(id_str);
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_transaction_start);
-+
-+/* End a transaction.
-+ * If abort is true, the transaction is discarded instead of committed.
-+ */
-+int xenbus_transaction_end(struct xenbus_transaction t, int abort)
-+{
-+ char abortstr[2];
-+ int err;
-+
-+ if (abort)
-+ strcpy(abortstr, "F");
-+ else
-+ strcpy(abortstr, "T");
-+
-+ err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
-+
-+ up_read(&xs_state.transaction_mutex);
-+
-+ return err;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_transaction_end);
-+
-+/* Single read and scanf: returns -errno or num scanned. */
-+int xenbus_scanf(struct xenbus_transaction t,
-+ const char *dir, const char *node, const char *fmt, ...)
-+{
-+ va_list ap;
-+ int ret;
-+ char *val;
-+
-+ val = xenbus_read(t, dir, node, NULL);
-+ if (IS_ERR(val))
-+ return PTR_ERR(val);
-+
-+ va_start(ap, fmt);
-+ ret = vsscanf(val, fmt, ap);
-+ va_end(ap);
-+ kfree(val);
-+ /* Distinctive errno. */
-+ if (ret == 0)
-+ return -ERANGE;
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_scanf);
-+
-+/* Single printf and write: returns -errno or 0. */
-+int xenbus_printf(struct xenbus_transaction t,
-+ const char *dir, const char *node, const char *fmt, ...)
-+{
-+ va_list ap;
-+ int ret;
-+#define PRINTF_BUFFER_SIZE 4096
-+ char *printf_buffer;
-+
-+ printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
-+ if (printf_buffer == NULL)
-+ return -ENOMEM;
-+
-+ va_start(ap, fmt);
-+ ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
-+ va_end(ap);
-+
-+ BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
-+ ret = xenbus_write(t, dir, node, printf_buffer);
-+
-+ kfree(printf_buffer);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_printf);
-+
-+/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
-+int xenbus_gather(struct xenbus_transaction t, const char *dir, ...)
-+{
-+ va_list ap;
-+ const char *name;
-+ int ret = 0;
-+
-+ va_start(ap, dir);
-+ while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
-+ const char *fmt = va_arg(ap, char *);
-+ void *result = va_arg(ap, void *);
-+ char *p;
-+
-+ p = xenbus_read(t, dir, name, NULL);
-+ if (IS_ERR(p)) {
-+ ret = PTR_ERR(p);
-+ break;
-+ }
-+ if (fmt) {
-+ if (sscanf(p, fmt, result) == 0)
-+ ret = -EINVAL;
-+ kfree(p);
-+ } else
-+ *(char **)result = p;
-+ }
-+ va_end(ap);
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(xenbus_gather);
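A typical frontend use of xenbus_gather(), shown only as a sketch: the helper name is invented, and the ring-ref/event-channel keys follow the usual split-driver convention rather than anything mandated by this file.

static int mydev_read_backend(struct xenbus_device *dev,
			      unsigned long *ring_ref, unsigned int *evtchn)
{
	int err;

	/* Read both backend keys in one call; any failure is fatal here. */
	err = xenbus_gather(XBT_NIL, dev->otherend,
			    "ring-ref", "%lu", ring_ref,
			    "event-channel", "%u", evtchn,
			    NULL);
	if (err)
		xenbus_dev_fatal(dev, err, "reading %s", dev->otherend);
	return err;
}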
-+
-+static int xs_watch(const char *path, const char *token)
-+{
-+ struct kvec iov[2];
-+
-+ iov[0].iov_base = (void *)path;
-+ iov[0].iov_len = strlen(path) + 1;
-+ iov[1].iov_base = (void *)token;
-+ iov[1].iov_len = strlen(token) + 1;
-+
-+ return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov,
-+ ARRAY_SIZE(iov), NULL));
-+}
-+
-+static int xs_unwatch(const char *path, const char *token)
-+{
-+ struct kvec iov[2];
-+
-+ iov[0].iov_base = (char *)path;
-+ iov[0].iov_len = strlen(path) + 1;
-+ iov[1].iov_base = (char *)token;
-+ iov[1].iov_len = strlen(token) + 1;
-+
-+ return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov,
-+ ARRAY_SIZE(iov), NULL));
-+}
-+
-+static struct xenbus_watch *find_watch(const char *token)
-+{
-+ struct xenbus_watch *i, *cmp;
-+
-+ cmp = (void *)simple_strtoul(token, NULL, 16);
-+
-+ list_for_each_entry(i, &watches, list)
-+ if (i == cmp)
-+ return i;
-+
-+ return NULL;
-+}
-+
-+/* Register callback to watch this node. */
-+int register_xenbus_watch(struct xenbus_watch *watch)
-+{
-+ /* Pointer in ascii is the token. */
-+ char token[sizeof(watch) * 2 + 1];
-+ int err;
-+
-+ sprintf(token, "%lX", (long)watch);
-+
-+ down_read(&xs_state.watch_mutex);
-+
-+ spin_lock(&watches_lock);
-+ BUG_ON(find_watch(token));
-+ list_add(&watch->list, &watches);
-+ spin_unlock(&watches_lock);
-+
-+ err = xs_watch(watch->node, token);
-+
-+ /* Ignore errors due to multiple registration. */
-+ if ((err != 0) && (err != -EEXIST)) {
-+ spin_lock(&watches_lock);
-+ list_del(&watch->list);
-+ spin_unlock(&watches_lock);
-+ }
-+
-+ up_read(&xs_state.watch_mutex);
-+
-+ return err;
-+}
-+EXPORT_SYMBOL_GPL(register_xenbus_watch);
-+
-+void unregister_xenbus_watch(struct xenbus_watch *watch)
-+{
-+ struct xs_stored_msg *msg, *tmp;
-+ char token[sizeof(watch) * 2 + 1];
-+ int err;
-+
-+ sprintf(token, "%lX", (long)watch);
-+
-+ down_read(&xs_state.watch_mutex);
-+
-+ spin_lock(&watches_lock);
-+ BUG_ON(!find_watch(token));
-+ list_del(&watch->list);
-+ spin_unlock(&watches_lock);
-+
-+ err = xs_unwatch(watch->node, token);
-+ if (err)
-+ printk(KERN_WARNING
-+ "XENBUS Failed to release watch %s: %i\n",
-+ watch->node, err);
-+
-+ up_read(&xs_state.watch_mutex);
-+
-+ /* Cancel pending watch events. */
-+ spin_lock(&watch_events_lock);
-+ list_for_each_entry_safe(msg, tmp, &watch_events, list) {
-+ if (msg->u.watch.handle != watch)
-+ continue;
-+ list_del(&msg->list);
-+ kfree(msg->u.watch.vec);
-+ kfree(msg);
-+ }
-+ spin_unlock(&watch_events_lock);
-+
-+ /* Flush any currently-executing callback, unless we are it. :-) */
-+ if (current->pid != xenwatch_pid) {
-+ mutex_lock(&xenwatch_mutex);
-+ mutex_unlock(&xenwatch_mutex);
-+ }
-+}
-+EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
-+
-+void xs_suspend(void)
-+{
-+ down_write(&xs_state.transaction_mutex);
-+ down_write(&xs_state.watch_mutex);
-+ mutex_lock(&xs_state.request_mutex);
-+ mutex_lock(&xs_state.response_mutex);
-+}
-+
-+void xs_resume(void)
-+{
-+ struct xenbus_watch *watch;
-+ char token[sizeof(watch) * 2 + 1];
-+
-+ mutex_unlock(&xs_state.response_mutex);
-+ mutex_unlock(&xs_state.request_mutex);
-+ up_write(&xs_state.transaction_mutex);
-+
-+ /* No need for watches_lock: the watch_mutex is sufficient. */
-+ list_for_each_entry(watch, &watches, list) {
-+ sprintf(token, "%lX", (long)watch);
-+ xs_watch(watch->node, token);
-+ }
-+
-+ up_write(&xs_state.watch_mutex);
-+}
-+
-+void xs_suspend_cancel(void)
-+{
-+ mutex_unlock(&xs_state.response_mutex);
-+ mutex_unlock(&xs_state.request_mutex);
-+ up_write(&xs_state.watch_mutex);
-+ up_write(&xs_state.transaction_mutex);
-+}
-+
-+static int xenwatch_handle_callback(void *data)
-+{
-+ struct xs_stored_msg *msg = data;
-+
-+ msg->u.watch.handle->callback(msg->u.watch.handle,
-+ (const char **)msg->u.watch.vec,
-+ msg->u.watch.vec_size);
-+
-+ kfree(msg->u.watch.vec);
-+ kfree(msg);
-+
-+ /* Kill this kthread if we were spawned just for this callback. */
-+ if (current->pid != xenwatch_pid)
-+ do_exit(0);
-+
-+ return 0;
-+}
-+
-+static int xenwatch_thread(void *unused)
-+{
-+ struct list_head *ent;
-+ struct xs_stored_msg *msg;
-+
-+ current->flags |= PF_NOFREEZE;
-+ for (;;) {
-+ wait_event_interruptible(watch_events_waitq,
-+ !list_empty(&watch_events));
-+
-+ if (kthread_should_stop())
-+ break;
-+
-+ mutex_lock(&xenwatch_mutex);
-+
-+ spin_lock(&watch_events_lock);
-+ ent = watch_events.next;
-+ if (ent != &watch_events)
-+ list_del(ent);
-+ spin_unlock(&watch_events_lock);
-+
-+ if (ent != &watch_events) {
-+ msg = list_entry(ent, struct xs_stored_msg, list);
-+ if (msg->u.watch.handle->flags & XBWF_new_thread)
-+ kthread_run(xenwatch_handle_callback,
-+ msg, "xenwatch_cb");
-+ else
-+ xenwatch_handle_callback(msg);
-+ }
-+
-+ mutex_unlock(&xenwatch_mutex);
-+ }
-+
-+ return 0;
-+}
-+
-+static int process_msg(void)
-+{
-+ struct xs_stored_msg *msg;
-+ char *body;
-+ int err;
-+
-+ /*
-+ * We must disallow save/restore while reading a xenstore message.
-+ * A partial read across s/r leaves us out of sync with xenstored.
-+ */
-+ for (;;) {
-+ err = xb_wait_for_data_to_read();
-+ if (err)
-+ return err;
-+ mutex_lock(&xs_state.response_mutex);
-+ if (xb_data_to_read())
-+ break;
-+ /* We raced with save/restore: pending data 'disappeared'. */
-+ mutex_unlock(&xs_state.response_mutex);
-+ }
-+
-+
-+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
-+ if (msg == NULL) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ err = xb_read(&msg->hdr, sizeof(msg->hdr));
-+ if (err) {
-+ kfree(msg);
-+ goto out;
-+ }
-+
-+ body = kmalloc(msg->hdr.len + 1, GFP_KERNEL);
-+ if (body == NULL) {
-+ kfree(msg);
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ err = xb_read(body, msg->hdr.len);
-+ if (err) {
-+ kfree(body);
-+ kfree(msg);
-+ goto out;
-+ }
-+ body[msg->hdr.len] = '\0';
-+
-+ if (msg->hdr.type == XS_WATCH_EVENT) {
-+ msg->u.watch.vec = split(body, msg->hdr.len,
-+ &msg->u.watch.vec_size);
-+ if (IS_ERR(msg->u.watch.vec)) {
-+ kfree(msg);
-+ err = PTR_ERR(msg->u.watch.vec);
-+ goto out;
-+ }
-+
-+ spin_lock(&watches_lock);
-+ msg->u.watch.handle = find_watch(
-+ msg->u.watch.vec[XS_WATCH_TOKEN]);
-+ if (msg->u.watch.handle != NULL) {
-+ spin_lock(&watch_events_lock);
-+ list_add_tail(&msg->list, &watch_events);
-+ wake_up(&watch_events_waitq);
-+ spin_unlock(&watch_events_lock);
-+ } else {
-+ kfree(msg->u.watch.vec);
-+ kfree(msg);
-+ }
-+ spin_unlock(&watches_lock);
-+ } else {
-+ msg->u.reply.body = body;
-+ spin_lock(&xs_state.reply_lock);
-+ list_add_tail(&msg->list, &xs_state.reply_list);
-+ spin_unlock(&xs_state.reply_lock);
-+ wake_up(&xs_state.reply_waitq);
-+ }
-+
-+ out:
-+ mutex_unlock(&xs_state.response_mutex);
-+ return err;
-+}
-+
-+static int xenbus_thread(void *unused)
-+{
-+ int err;
-+
-+ current->flags |= PF_NOFREEZE;
-+ for (;;) {
-+ err = process_msg();
-+ if (err)
-+ printk(KERN_WARNING "XENBUS error %d while reading "
-+ "message\n", err);
-+ if (kthread_should_stop())
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+int xs_init(void)
-+{
-+ int err;
-+ struct task_struct *task;
-+
-+ INIT_LIST_HEAD(&xs_state.reply_list);
-+ spin_lock_init(&xs_state.reply_lock);
-+ init_waitqueue_head(&xs_state.reply_waitq);
-+
-+ mutex_init(&xs_state.request_mutex);
-+ mutex_init(&xs_state.response_mutex);
-+ init_rwsem(&xs_state.transaction_mutex);
-+ init_rwsem(&xs_state.watch_mutex);
-+
-+ /* Initialize the shared memory rings to talk to xenstored */
-+ err = xb_init_comms();
-+ if (err)
-+ return err;
-+
-+ task = kthread_run(xenwatch_thread, NULL, "xenwatch");
-+ if (IS_ERR(task))
-+ return PTR_ERR(task);
-+ xenwatch_pid = task->pid;
-+
-+ task = kthread_run(xenbus_thread, NULL, "xenbus");
-+ if (IS_ERR(task))
-+ return PTR_ERR(task);
-+
-+ return 0;
-+}
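The transaction primitives above are normally wrapped in a retry loop, because xenbus_transaction_end() returns -EAGAIN when xenstored aborts a transaction that conflicted with another update. A minimal sketch (the store path and function name are hypothetical):

static int mydev_publish_state(void)
{
	struct xenbus_transaction xbt;
	int err;

again:
	err = xenbus_transaction_start(&xbt);
	if (err)
		return err;

	err = xenbus_printf(xbt, "device/mydev/0", "state", "%d", 1);
	if (err) {
		xenbus_transaction_end(xbt, 1);	/* abort */
		return err;
	}

	err = xenbus_transaction_end(xbt, 0);	/* commit */
	if (err == -EAGAIN)
		goto again;
	return err;
}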
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/drivers/xen/xenoprof/xenoprofile.c ubuntu-gutsy-xen/drivers/xen/xenoprof/xenoprofile.c
---- ubuntu-gutsy/drivers/xen/xenoprof/xenoprofile.c 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/drivers/xen/xenoprof/xenoprofile.c 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,500 @@
-+/**
-+ * @file xenoprofile.c
-+ *
-+ * @remark Copyright 2002 OProfile authors
-+ * @remark Read the file COPYING
-+ *
-+ * @author John Levon <levon@movementarian.org>
-+ *
-+ * Modified by Aravind Menon and Jose Renato Santos for Xen
-+ * These modifications are:
-+ * Copyright (C) 2005 Hewlett-Packard Co.
-+ *
-+ * Separated out arch-generic part
-+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
-+ * VA Linux Systems Japan K.K.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/notifier.h>
-+#include <linux/smp.h>
-+#include <linux/oprofile.h>
-+#include <linux/sysdev.h>
-+#include <linux/slab.h>
-+#include <linux/interrupt.h>
-+#include <linux/vmalloc.h>
-+#include <asm/pgtable.h>
-+#include <xen/evtchn.h>
-+#include <xen/xenoprof.h>
-+#include <xen/driver_util.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/xenoprof.h>
-+#include "../../../drivers/oprofile/cpu_buffer.h"
-+#include "../../../drivers/oprofile/event_buffer.h"
-+
-+#define MAX_XENOPROF_SAMPLES 16
-+
-+/* sample buffers shared with Xen */
-+xenoprof_buf_t * xenoprof_buf[MAX_VIRT_CPUS];
-+/* Shared buffer area */
-+struct xenoprof_shared_buffer shared_buffer;
-+
-+/* Passive sample buffers shared with Xen */
-+xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
-+/* Passive shared buffer area */
-+struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];
-+
-+static int xenoprof_start(void);
-+static void xenoprof_stop(void);
-+
-+static int xenoprof_enabled = 0;
-+static int xenoprof_is_primary = 0;
-+static int active_defined;
-+
-+/* Number of buffers in shared area (one per VCPU) */
-+int nbuf;
-+/* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
-+int ovf_irq[NR_CPUS];
-+/* cpu model type string - copied from Xen memory space on XENOPROF_init command */
-+char cpu_type[XENOPROF_CPU_TYPE_SIZE];
-+
-+#ifdef CONFIG_PM
-+
-+static int xenoprof_suspend(struct sys_device * dev, pm_message_t state)
-+{
-+ if (xenoprof_enabled == 1)
-+ xenoprof_stop();
-+ return 0;
-+}
-+
-+
-+static int xenoprof_resume(struct sys_device * dev)
-+{
-+ if (xenoprof_enabled == 1)
-+ xenoprof_start();
-+ return 0;
-+}
-+
-+
-+static struct sysdev_class oprofile_sysclass = {
-+ set_kset_name("oprofile"),
-+ .resume = xenoprof_resume,
-+ .suspend = xenoprof_suspend
-+};
-+
-+
-+static struct sys_device device_oprofile = {
-+ .id = 0,
-+ .cls = &oprofile_sysclass,
-+};
-+
-+
-+static int __init init_driverfs(void)
-+{
-+ int error;
-+ if (!(error = sysdev_class_register(&oprofile_sysclass)))
-+ error = sysdev_register(&device_oprofile);
-+ return error;
-+}
-+
-+
-+static void exit_driverfs(void)
-+{
-+ sysdev_unregister(&device_oprofile);
-+ sysdev_class_unregister(&oprofile_sysclass);
-+}
-+
-+#else
-+#define init_driverfs() do { } while (0)
-+#define exit_driverfs() do { } while (0)
-+#endif /* CONFIG_PM */
-+
-+unsigned long long oprofile_samples = 0;
-+unsigned long long p_oprofile_samples = 0;
-+
-+unsigned int pdomains;
-+struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
-+
-+static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
-+{
-+ int head, tail, size;
-+
-+ head = buf->event_head;
-+ tail = buf->event_tail;
-+ size = buf->event_size;
-+
-+ if (tail > head) {
-+ while (tail < size) {
-+ oprofile_add_pc(buf->event_log[tail].eip,
-+ buf->event_log[tail].mode,
-+ buf->event_log[tail].event);
-+ if (!is_passive)
-+ oprofile_samples++;
-+ else
-+ p_oprofile_samples++;
-+ tail++;
-+ }
-+ tail = 0;
-+ }
-+ while (tail < head) {
-+ oprofile_add_pc(buf->event_log[tail].eip,
-+ buf->event_log[tail].mode,
-+ buf->event_log[tail].event);
-+ if (!is_passive)
-+ oprofile_samples++;
-+ else
-+ p_oprofile_samples++;
-+ tail++;
-+ }
-+
-+ buf->event_tail = tail;
-+}
-+
-+static void xenoprof_handle_passive(void)
-+{
-+ int i, j;
-+ int flag_domain, flag_switch = 0;
-+
-+ for (i = 0; i < pdomains; i++) {
-+ flag_domain = 0;
-+ for (j = 0; j < passive_domains[i].nbuf; j++) {
-+ xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
-+ if (buf->event_head == buf->event_tail)
-+ continue;
-+ if (!flag_domain) {
-+ if (!oprofile_add_domain_switch(passive_domains[i].
-+ domain_id))
-+ goto done;
-+ flag_domain = 1;
-+ }
-+ xenoprof_add_pc(buf, 1);
-+ flag_switch = 1;
-+ }
-+ }
-+done:
-+ if (flag_switch)
-+ oprofile_add_domain_switch(COORDINATOR_DOMAIN);
-+}
-+
-+static irqreturn_t
-+xenoprof_ovf_interrupt(int irq, void * dev_id)
-+{
-+ struct xenoprof_buf * buf;
-+ int cpu;
-+ static unsigned long flag;
-+
-+ cpu = smp_processor_id();
-+ buf = xenoprof_buf[cpu];
-+
-+ xenoprof_add_pc(buf, 0);
-+
-+ if (xenoprof_is_primary && !test_and_set_bit(0, &flag)) {
-+ xenoprof_handle_passive();
-+ smp_mb__before_clear_bit();
-+ clear_bit(0, &flag);
-+ }
-+
-+ return IRQ_HANDLED;
-+}
-+
-+
-+static void unbind_virq(void)
-+{
-+ int i;
-+
-+ for_each_online_cpu(i) {
-+ if (ovf_irq[i] >= 0) {
-+ unbind_from_irqhandler(ovf_irq[i], NULL);
-+ ovf_irq[i] = -1;
-+ }
-+ }
-+}
-+
-+
-+static int bind_virq(void)
-+{
-+ int i, result;
-+
-+ for_each_online_cpu(i) {
-+ result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
-+ i,
-+ xenoprof_ovf_interrupt,
-+ IRQF_DISABLED,
-+ "xenoprof",
-+ NULL);
-+
-+ if (result < 0) {
-+ unbind_virq();
-+ return result;
-+ }
-+
-+ ovf_irq[i] = result;
-+ }
-+
-+ return 0;
-+}
-+
-+
-+static void unmap_passive_list(void)
-+{
-+ int i;
-+ for (i = 0; i < pdomains; i++)
-+ xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
-+ pdomains = 0;
-+}
-+
-+
-+static int map_xenoprof_buffer(int max_samples)
-+{
-+ struct xenoprof_get_buffer get_buffer;
-+ struct xenoprof_buf *buf;
-+ int ret, i;
-+
-+ if (shared_buffer.buffer)
-+ return 0;
-+
-+ get_buffer.max_samples = max_samples;
-+ ret = xenoprof_arch_map_shared_buffer(&get_buffer, &shared_buffer);
-+ if (ret)
-+ return ret;
-+ nbuf = get_buffer.nbuf;
-+
-+ for (i = 0; i < nbuf; i++) {
-+ buf = (struct xenoprof_buf*)
-+ &shared_buffer.buffer[i * get_buffer.bufsize];
-+ BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
-+ xenoprof_buf[buf->vcpu_id] = buf;
-+ }
-+
-+ return 0;
-+}
-+
-+
-+static int xenoprof_setup(void)
-+{
-+ int ret;
-+
-+ if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )
-+ return ret;
-+
-+ if ( (ret = bind_virq()) )
-+ return ret;
-+
-+ if (xenoprof_is_primary) {
-+ /* Define dom0 as an active domain if not done yet */
-+ if (!active_defined) {
-+ domid_t domid;
-+ ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
-+ if (ret)
-+ goto err;
-+ domid = 0;
-+ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
-+ if (ret)
-+ goto err;
-+ active_defined = 1;
-+ }
-+
-+ ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
-+ if (ret)
-+ goto err;
-+ xenoprof_arch_counter();
-+ ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
-+
-+ if (ret)
-+ goto err;
-+ }
-+
-+ ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
-+ if (ret)
-+ goto err;
-+
-+ xenoprof_enabled = 1;
-+ return 0;
-+ err:
-+ unbind_virq();
-+ return ret;
-+}
-+
-+
-+static void xenoprof_shutdown(void)
-+{
-+ xenoprof_enabled = 0;
-+
-+ HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL);
-+
-+ if (xenoprof_is_primary) {
-+ HYPERVISOR_xenoprof_op(XENOPROF_release_counters, NULL);
-+ active_defined = 0;
-+ }
-+
-+ unbind_virq();
-+
-+ xenoprof_arch_unmap_shared_buffer(&shared_buffer);
-+ if (xenoprof_is_primary)
-+ unmap_passive_list();
-+}
-+
-+
-+static int xenoprof_start(void)
-+{
-+ int ret = 0;
-+
-+ if (xenoprof_is_primary)
-+ ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
-+ if (!ret)
-+ xenoprof_arch_start();
-+ return ret;
-+}
-+
-+
-+static void xenoprof_stop(void)
-+{
-+ if (xenoprof_is_primary)
-+ HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL);
-+ xenoprof_arch_stop();
-+}
-+
-+
-+static int xenoprof_set_active(int * active_domains,
-+ unsigned int adomains)
-+{
-+ int ret = 0;
-+ int i;
-+ int set_dom0 = 0;
-+ domid_t domid;
-+
-+ if (!xenoprof_is_primary)
-+ return 0;
-+
-+ if (adomains > MAX_OPROF_DOMAINS)
-+ return -E2BIG;
-+
-+ ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
-+ if (ret)
-+ return ret;
-+
-+ for (i = 0; i < adomains; i++) {
-+ domid = active_domains[i];
-+ if (domid != active_domains[i]) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
-+ if (ret)
-+ goto out;
-+ if (active_domains[i] == 0)
-+ set_dom0 = 1;
-+ }
-+ /* dom0 must always be active but may not be in the list */
-+ if (!set_dom0) {
-+ domid = 0;
-+ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
-+ }
-+
-+out:
-+ if (ret)
-+ HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
-+ active_defined = !ret;
-+ return ret;
-+}
-+
-+static int xenoprof_set_passive(int * p_domains,
-+ unsigned int pdoms)
-+{
-+ int ret;
-+ int i, j;
-+ struct xenoprof_buf *buf;
-+
-+ if (!xenoprof_is_primary)
-+ return 0;
-+
-+ if (pdoms > MAX_OPROF_DOMAINS)
-+ return -E2BIG;
-+
-+ ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
-+ if (ret)
-+ return ret;
-+ unmap_passive_list();
-+
-+ for (i = 0; i < pdoms; i++) {
-+ passive_domains[i].domain_id = p_domains[i];
-+ passive_domains[i].max_samples = 2048;
-+ ret = xenoprof_arch_set_passive(&passive_domains[i],
-+ &p_shared_buffer[i]);
-+ if (ret)
-+ goto out;
-+ for (j = 0; j < passive_domains[i].nbuf; j++) {
-+ buf = (struct xenoprof_buf *)
-+ &p_shared_buffer[i].buffer[j * passive_domains[i].bufsize];
-+ BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
-+ p_xenoprof_buf[i][buf->vcpu_id] = buf;
-+ }
-+ }
-+
-+ pdomains = pdoms;
-+ return 0;
-+
-+out:
-+ for (j = 0; j < i; j++)
-+ xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[j]);
-+
-+ return ret;
-+}
-+
-+struct oprofile_operations xenoprof_ops = {
-+#ifdef HAVE_XENOPROF_CREATE_FILES
-+ .create_files = xenoprof_create_files,
-+#endif
-+ .set_active = xenoprof_set_active,
-+ .set_passive = xenoprof_set_passive,
-+ .setup = xenoprof_setup,
-+ .shutdown = xenoprof_shutdown,
-+ .start = xenoprof_start,
-+ .stop = xenoprof_stop
-+};
-+
-+
-+/* in order to get driverfs right */
-+static int using_xenoprof;
-+
-+int __init xenoprofile_init(struct oprofile_operations * ops)
-+{
-+ struct xenoprof_init init;
-+ int ret, i;
-+
-+ ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
-+ if (!ret) {
-+ xenoprof_arch_init_counter(&init);
-+ xenoprof_is_primary = init.is_primary;
-+
-+ /* cpu_type is detected by Xen */
-+ cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
-+ strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
-+ xenoprof_ops.cpu_type = cpu_type;
-+
-+ init_driverfs();
-+ using_xenoprof = 1;
-+ *ops = xenoprof_ops;
-+
-+ for (i = 0; i < NR_CPUS; i++)
-+ ovf_irq[i] = -1;
-+
-+ active_defined = 0;
-+ }
-+ printk(KERN_INFO "%s: ret %d, events %d, xenoprof_is_primary %d\n",
-+ __func__, ret, init.num_events, xenoprof_is_primary);
-+ return ret;
-+}
-+
-+
-+void xenoprofile_exit(void)
-+{
-+ if (using_xenoprof)
-+ exit_driverfs();
-+
-+ xenoprof_arch_unmap_shared_buffer(&shared_buffer);
-+ if (xenoprof_is_primary) {
-+ unmap_passive_list();
-+ HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL);
-+ }
-+}
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/fs/aio.c ubuntu-gutsy-xen/fs/aio.c
---- ubuntu-gutsy/fs/aio.c 2007-08-18 09:40:32.000000000 -0400
-+++ ubuntu-gutsy-xen/fs/aio.c 2007-08-18 12:38:02.000000000 -0400
-@@ -36,6 +36,11 @@
- #include <asm/uaccess.h>
- #include <asm/mmu_context.h>
-
-+#ifdef CONFIG_EPOLL
-+#include <linux/poll.h>
-+#include <linux/anon_inodes.h>
-+#endif
-+
- #if DEBUG > 1
- #define dprintk printk
- #else
-@@ -1009,6 +1014,11 @@
- if (waitqueue_active(&ctx->wait))
- wake_up(&ctx->wait);
-
-+#ifdef CONFIG_EPOLL
-+ if (ctx->file && waitqueue_active(&ctx->poll_wait))
-+ wake_up(&ctx->poll_wait);
-+#endif
-+
- spin_unlock_irqrestore(&ctx->ctx_lock, flags);
- return ret;
- }
-@@ -1016,6 +1026,8 @@
- /* aio_read_evt
- * Pull an event off of the ioctx's event ring. Returns the number of
- * events fetched (0 or 1 ;-)
-+ * If ent parameter is 0, just returns the number of events that would
-+ * be fetched.
- * FIXME: make this use cmpxchg.
- * TODO: make the ringbuffer user mmap()able (requires FIXME).
- */
-@@ -1038,13 +1050,18 @@
-
- head = ring->head % info->nr;
- if (head != ring->tail) {
-- struct io_event *evp = aio_ring_event(info, head, KM_USER1);
-- *ent = *evp;
-- head = (head + 1) % info->nr;
-- smp_mb(); /* finish reading the event before updatng the head */
-- ring->head = head;
-- ret = 1;
-- put_aio_ring_event(evp, KM_USER1);
-+ if (ent) { /* event requested */
-+ struct io_event *evp =
-+ aio_ring_event(info, head, KM_USER1);
-+ *ent = *evp;
-+ head = (head + 1) % info->nr;
-+ /* finish reading the event before updatng the head */
-+ smp_mb();
-+ ring->head = head;
-+ ret = 1;
-+ put_aio_ring_event(evp, KM_USER1);
-+ } else /* only need to know availability */
-+ ret = 1;
- }
- spin_unlock(&info->ring_lock);
-
-@@ -1227,9 +1244,78 @@
-
- aio_cancel_all(ioctx);
- wait_for_all_aios(ioctx);
-+#ifdef CONFIG_EPOLL
-+ /* forget the poll file, but it's up to the user to close it */
-+ if (ioctx->file) {
-+ ioctx->file->private_data = 0;
-+ ioctx->file = 0;
-+ }
-+#endif
- put_ioctx(ioctx); /* once for the lookup */
- }
-
-+#ifdef CONFIG_EPOLL
-+
-+static int aio_queue_fd_close(struct inode *inode, struct file *file)
-+{
-+ struct kioctx *ioctx = file->private_data;
-+ if (ioctx) {
-+ file->private_data = 0;
-+ spin_lock_irq(&ioctx->ctx_lock);
-+ ioctx->file = 0;
-+ spin_unlock_irq(&ioctx->ctx_lock);
-+ }
-+ return 0;
-+}
-+
-+static unsigned int aio_queue_fd_poll(struct file *file, poll_table *wait)
-+{ unsigned int pollflags = 0;
-+ struct kioctx *ioctx = file->private_data;
-+
-+ if (ioctx) {
-+
-+ spin_lock_irq(&ioctx->ctx_lock);
-+ /* Insert inside our poll wait queue */
-+ poll_wait(file, &ioctx->poll_wait, wait);
-+
-+ /* Check our condition */
-+ if (aio_read_evt(ioctx, 0))
-+ pollflags = POLLIN | POLLRDNORM;
-+ spin_unlock_irq(&ioctx->ctx_lock);
-+ }
-+
-+ return pollflags;
-+}
-+
-+static const struct file_operations aioq_fops = {
-+ .release = aio_queue_fd_close,
-+ .poll = aio_queue_fd_poll
-+};
-+
-+/* make_aio_fd:
-+ * Create a file descriptor that can be used to poll the event queue.
-+ * Based on the excellent epoll code.
-+ */
-+
-+static int make_aio_fd(struct kioctx *ioctx)
-+{
-+ int error, fd;
-+ struct inode *inode;
-+ struct file *file;
-+
-+ error = anon_inode_getfd(&fd, &inode, &file, "[aioq]",
-+ &aioq_fops, ioctx);
-+ if (error)
-+ return error;
-+
-+ /* associate the file with the IO context */
-+ ioctx->file = file;
-+ init_waitqueue_head(&ioctx->poll_wait);
-+ return fd;
-+}
-+#endif
-+
-+
- /* sys_io_setup:
- * Create an aio_context capable of receiving at least nr_events.
- * ctxp must not point to an aio_context that already exists, and
-@@ -1242,18 +1328,30 @@
- * resources are available. May fail with -EFAULT if an invalid
- * pointer is passed for ctxp. Will fail with -ENOSYS if not
- * implemented.
-+ *
-+ * To request a selectable fd, the user context has to be initialized
-+ * to 1, instead of 0, and the return value is the fd.
-+ * This keeps the system call compatible, since a non-zero value
-+ * was not allowed so far.
- */
- asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
- {
- struct kioctx *ioctx = NULL;
- unsigned long ctx;
- long ret;
-+ int make_fd = 0;
-
- ret = get_user(ctx, ctxp);
- if (unlikely(ret))
- goto out;
-
- ret = -EINVAL;
-+#ifdef CONFIG_EPOLL
-+ if (ctx == 1) {
-+ make_fd = 1;
-+ ctx = 0;
-+ }
-+#endif
- if (unlikely(ctx || nr_events == 0)) {
- pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
- ctx, nr_events);
-@@ -1264,8 +1362,12 @@
- ret = PTR_ERR(ioctx);
- if (!IS_ERR(ioctx)) {
- ret = put_user(ioctx->user_id, ctxp);
-- if (!ret)
-- return 0;
-+#ifdef CONFIG_EPOLL
-+ if (make_fd && ret >= 0)
-+ ret = make_aio_fd(ioctx);
-+#endif
-+ if (ret >= 0)
-+ return ret;
-
- get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
- io_destroy(ioctx);
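From userspace, the selectable-fd extension above would be used roughly like this (illustrative only; the raw syscall is shown so the returned fd is explicit):

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

/* Create an aio context and return a pollable fd for its event queue. */
static int aio_setup_pollable(unsigned nr_events, aio_context_t *ctx)
{
	long fd;

	*ctx = 1;			/* request the selectable fd */
	fd = syscall(SYS_io_setup, nr_events, ctx);
	if (fd < 0)
		return -1;		/* errno is set by the syscall */

	/* *ctx now holds the real context id; POLLIN on fd means that
	 * io_getevents() will find completed events without blocking. */
	return (int)fd;
}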
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/fs/Kconfig ubuntu-gutsy-xen/fs/Kconfig
---- ubuntu-gutsy/fs/Kconfig 2007-08-18 09:40:32.000000000 -0400
-+++ ubuntu-gutsy-xen/fs/Kconfig 2007-08-18 12:38:02.000000000 -0400
-@@ -992,6 +992,7 @@
- config HUGETLBFS
- bool "HugeTLB file system support"
- depends on X86 || IA64 || PPC64 || SPARC64 || SUPERH || BROKEN
-+ depends on !XEN
- help
- hugetlbfs is a filesystem backing for HugeTLB pages, based on
- ramfs. For architectures that support it, say Y here and read
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/acpi.h ubuntu-gutsy-xen/include/asm-i386/acpi.h
---- ubuntu-gutsy/include/asm-i386/acpi.h 2007-08-18 09:40:33.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-i386/acpi.h 2007-08-18 12:38:02.000000000 -0400
-@@ -145,7 +145,9 @@
-
- #endif /*CONFIG_ACPI_SLEEP*/
-
-+#ifndef CONFIG_XEN
- #define ARCH_HAS_POWER_INIT 1
-+#endif
-
- #endif /*__KERNEL__*/
-
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/apicdef.h ubuntu-gutsy-xen/include/asm-i386/apicdef.h
---- ubuntu-gutsy/include/asm-i386/apicdef.h 2007-08-18 09:40:33.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-i386/apicdef.h 2007-08-18 12:38:02.000000000 -0400
-@@ -1,6 +1,8 @@
- #ifndef __ASM_APICDEF_H
- #define __ASM_APICDEF_H
-
-+#ifndef CONFIG_XEN
-+
- /*
- * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
- *
-@@ -111,8 +113,20 @@
-
- #define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
-
-+#else /* CONFIG_XEN */
-+
-+enum {
-+ APIC_DEST_ALLBUT = 0x1,
-+ APIC_DEST_SELF,
-+ APIC_DEST_ALLINC
-+};
-+
-+#endif /* CONFIG_XEN */
-+
- #define MAX_IO_APICS 64
-
-+#ifndef CONFIG_XEN
-+
- /*
- * the local APIC register structure, memory mapped. Not terribly well
- * tested, but we might eventually use this one in the future - the
-@@ -372,4 +386,6 @@
-
- #undef u32
-
-+#endif /* CONFIG_XEN */
-+
- #endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/apic.h ubuntu-gutsy-xen/include/asm-i386/apic.h
---- ubuntu-gutsy/include/asm-i386/apic.h 2007-08-18 09:40:33.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-i386/apic.h 2007-08-18 12:38:02.000000000 -0400
-@@ -3,7 +3,9 @@
-
- #include <linux/pm.h>
- #include <linux/delay.h>
-+#ifndef CONFIG_XEN
- #include <asm/fixmap.h>
-+#endif
- #include <asm/apicdef.h>
- #include <asm/processor.h>
- #include <asm/system.h>
-@@ -33,7 +35,7 @@
-
- extern void generic_apic_probe(void);
-
--#ifdef CONFIG_X86_LOCAL_APIC
-+#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
-
- /*
- * Basic functions accessing APICs.
-@@ -111,7 +113,9 @@
-
- extern void enable_NMI_through_LVT0 (void * dummy);
-
-+#ifndef CONFIG_XEN
- #define ARCH_APICTIMER_STOPS_ON_C3 1
-+#endif
-
- extern int timer_over_8254;
- extern int local_apic_timer_c2_ok;
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/i8253.h ubuntu-gutsy-xen/include/asm-i386/i8253.h
---- ubuntu-gutsy/include/asm-i386/i8253.h 2007-08-18 09:40:33.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-i386/i8253.h 2007-08-18 12:38:02.000000000 -0400
-@@ -5,6 +5,8 @@
-
- extern spinlock_t i8253_lock;
-
-+#ifdef CONFIG_GENERIC_CLOCKEVENTS
-+
- extern struct clock_event_device *global_clock_event;
-
- /**
-@@ -18,4 +20,6 @@
- global_clock_event->event_handler(global_clock_event);
- }
-
-+#endif
-+
- #endif /* __ASM_I8253_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/irq.h ubuntu-gutsy-xen/include/asm-i386/irq.h
---- ubuntu-gutsy/include/asm-i386/irq.h 2007-08-18 09:40:33.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-i386/irq.h 2007-08-18 12:38:02.000000000 -0400
-@@ -20,7 +20,7 @@
- return ((irq == 2) ? 9 : irq);
- }
-
--#ifdef CONFIG_X86_LOCAL_APIC
-+#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
- # define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
- #endif
-
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/kexec.h ubuntu-gutsy-xen/include/asm-i386/kexec.h
---- ubuntu-gutsy/include/asm-i386/kexec.h 2007-08-18 09:40:33.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-i386/kexec.h 2007-08-18 12:38:02.000000000 -0400
-@@ -94,6 +94,20 @@
- unsigned long start_address,
- unsigned int has_pae) ATTRIB_NORET;
-
-+
-+/* Under Xen we need to work with machine addresses. These macros give the
-+ * machine address of a certain page to the generic kexec code instead of
-+ * the pseudo physical address which would be given by the default macros.
-+ */
-+
-+#ifdef CONFIG_XEN
-+#define KEXEC_ARCH_HAS_PAGE_MACROS
-+#define kexec_page_to_pfn(page) pfn_to_mfn(page_to_pfn(page))
-+#define kexec_pfn_to_page(pfn) pfn_to_page(mfn_to_pfn(pfn))
-+#define kexec_virt_to_phys(addr) virt_to_machine(addr)
-+#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr))
-+#endif
-+
- #endif /* __ASSEMBLY__ */
-
- #endif /* _I386_KEXEC_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-default/mach_traps.h ubuntu-gutsy-xen/include/asm-i386/mach-default/mach_traps.h
---- ubuntu-gutsy/include/asm-i386/mach-default/mach_traps.h 2007-08-18 09:40:33.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-default/mach_traps.h 2007-08-18 12:38:02.000000000 -0400
-@@ -15,6 +15,18 @@
- outb(reason, 0x61);
- }
-
-+static inline void clear_io_check_error(unsigned char reason)
-+{
-+ unsigned long i;
-+
-+ reason = (reason & 0xf) | 8;
-+ outb(reason, 0x61);
-+ i = 2000;
-+ while (--i) udelay(1000);
-+ reason &= ~8;
-+ outb(reason, 0x61);
-+}
-+
- static inline unsigned char get_nmi_reason(void)
- {
- return inb(0x61);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/agp.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/agp.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/agp.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/agp.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,44 @@
-+#ifndef AGP_H
-+#define AGP_H 1
-+
-+#include <asm/pgtable.h>
-+#include <asm/cacheflush.h>
-+#include <asm/system.h>
-+
-+/*
-+ * Functions to keep the agpgart mappings coherent with the MMU.
-+ * The GART gives the CPU a physical alias of pages in memory. The alias region is
-+ * mapped uncacheable. Make sure there are no conflicting mappings
-+ * with different cacheability attributes for the same page. This avoids
-+ * data corruption on some CPUs.
-+ */
-+
-+/* Caller's responsibility to call global_flush_tlb() for
-+ * performance reasons */
-+#define map_page_into_agp(page) ( \
-+ xen_create_contiguous_region((unsigned long)page_address(page), 0, 32) \
-+ ?: change_page_attr(page, 1, PAGE_KERNEL_NOCACHE))
-+#define unmap_page_from_agp(page) ( \
-+ xen_destroy_contiguous_region((unsigned long)page_address(page), 0), \
-+ /* only a fallback: xen_destroy_contiguous_region uses PAGE_KERNEL */ \
-+ change_page_attr(page, 1, PAGE_KERNEL))
-+#define flush_agp_mappings() global_flush_tlb()
-+
-+/* Could use CLFLUSH here if the cpu supports it. But then it would
-+ need to be called for each cacheline of the whole page so it may not be
-+ worth it. Would need a page for it. */
-+#define flush_agp_cache() wbinvd()
-+
-+/* Convert a physical address to an address suitable for the GART. */
-+#define phys_to_gart(x) phys_to_machine(x)
-+#define gart_to_phys(x) machine_to_phys(x)
-+
-+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
-+#define alloc_gatt_pages(order) ({ \
-+ char *_t; dma_addr_t _d; \
-+ _t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL); \
-+ _t; })
-+#define free_gatt_pages(table, order) \
-+ dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
-+
-+#endif
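
map_page_into_agp() above relies on the GNU C "a ?: b" conditional, which yields a when a is non-zero and b otherwise, so change_page_attr() only runs when xen_create_contiguous_region() returned 0. A minimal standalone demonstration of that operator, with invented stand-in functions:

#include <stdio.h>

/* Illustrative stand-ins; like the real calls, they return 0 on
 * success and a negative errno on failure. */
static int step_one(int fail) { return fail ? -12 /* -ENOMEM */ : 0; }
static int step_two(void)     { puts("step_two ran"); return 0; }

int main(void)
{
	/* GNU "a ?: b" evaluates to a if a is non-zero, otherwise to b,
	 * without evaluating a twice - so step_two() only runs when
	 * step_one() succeeded, mirroring map_page_into_agp() above. */
	int ok   = step_one(0) ?: step_two();
	int fail = step_one(1) ?: step_two();
	printf("ok=%d fail=%d\n", ok, fail);
	return 0;
}
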
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/desc.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/desc.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/desc.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/desc.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,261 @@
-+#ifndef __ARCH_DESC_H
-+#define __ARCH_DESC_H
-+
-+#include <asm/ldt.h>
-+#include <asm/segment.h>
-+
-+#ifndef __ASSEMBLY__
-+
-+#include <linux/preempt.h>
-+#include <linux/smp.h>
-+
-+#include <asm/mmu.h>
-+
-+struct Xgt_desc_struct {
-+ unsigned short size;
-+ unsigned long address __attribute__((packed));
-+ unsigned short pad;
-+} __attribute__ ((packed));
-+
-+struct gdt_page
-+{
-+ struct desc_struct gdt[GDT_ENTRIES];
-+} __attribute__((aligned(PAGE_SIZE)));
-+DECLARE_PER_CPU(struct gdt_page, gdt_page);
-+
-+static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
-+{
-+ return per_cpu(gdt_page, cpu).gdt;
-+}
-+
-+extern struct Xgt_desc_struct idt_descr;
-+extern struct desc_struct idt_table[];
-+extern void set_intr_gate(unsigned int irq, void * addr);
-+
-+static inline void pack_descriptor(__u32 *a, __u32 *b,
-+ unsigned long base, unsigned long limit, unsigned char type, unsigned char flags)
-+{
-+ *a = ((base & 0xffff) << 16) | (limit & 0xffff);
-+ *b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
-+ (limit & 0x000f0000) | ((type & 0xff) << 8) | ((flags & 0xf) << 20);
-+}
-+
-+static inline void pack_gate(__u32 *a, __u32 *b,
-+ unsigned long base, unsigned short seg, unsigned char type, unsigned char flags)
-+{
-+ *a = (seg << 16) | (base & 0xffff);
-+ *b = (base & 0xffff0000) | ((type & 0xff) << 8) | (flags & 0xff);
-+}
-+
-+#define DESCTYPE_LDT 0x82 /* present, system, DPL-0, LDT */
-+#define DESCTYPE_TSS 0x89 /* present, system, DPL-0, 32-bit TSS */
-+#define DESCTYPE_TASK 0x85 /* present, system, DPL-0, task gate */
-+#define DESCTYPE_INT 0x8e /* present, system, DPL-0, interrupt gate */
-+#define DESCTYPE_TRAP 0x8f /* present, system, DPL-0, trap gate */
-+#define DESCTYPE_DPL3 0x60 /* DPL-3 */
-+#define DESCTYPE_S 0x10 /* !system */
-+
-+#ifndef CONFIG_XEN
-+#define load_TR_desc() native_load_tr_desc()
-+#define load_gdt(dtr) native_load_gdt(dtr)
-+#define load_idt(dtr) native_load_idt(dtr)
-+#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
-+#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
-+
-+#define store_gdt(dtr) native_store_gdt(dtr)
-+#define store_idt(dtr) native_store_idt(dtr)
-+#define store_tr(tr) (tr = native_store_tr())
-+#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
-+
-+#define load_TLS(t, cpu) native_load_tls(t, cpu)
-+#define set_ldt native_set_ldt
-+
-+#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
-+#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
-+#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
-+
-+static inline void write_dt_entry(struct desc_struct *dt,
-+ int entry, u32 entry_low, u32 entry_high)
-+{
-+ dt[entry].a = entry_low;
-+ dt[entry].b = entry_high;
-+}
-+
-+static inline void native_set_ldt(const void *addr, unsigned int entries)
-+{
-+ if (likely(entries == 0))
-+ __asm__ __volatile__("lldt %w0"::"q" (0));
-+ else {
-+ unsigned cpu = smp_processor_id();
-+ __u32 a, b;
-+
-+ pack_descriptor(&a, &b, (unsigned long)addr,
-+ entries * sizeof(struct desc_struct) - 1,
-+ DESCTYPE_LDT, 0);
-+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
-+ __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
-+ }
-+}
-+
-+
-+static inline void native_load_tr_desc(void)
-+{
-+ asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
-+}
-+
-+static inline void native_load_gdt(const struct Xgt_desc_struct *dtr)
-+{
-+ asm volatile("lgdt %0"::"m" (*dtr));
-+}
-+
-+static inline void native_load_idt(const struct Xgt_desc_struct *dtr)
-+{
-+ asm volatile("lidt %0"::"m" (*dtr));
-+}
-+
-+static inline void native_store_gdt(struct Xgt_desc_struct *dtr)
-+{
-+ asm ("sgdt %0":"=m" (*dtr));
-+}
-+
-+static inline void native_store_idt(struct Xgt_desc_struct *dtr)
-+{
-+ asm ("sidt %0":"=m" (*dtr));
-+}
-+
-+static inline unsigned long native_store_tr(void)
-+{
-+ unsigned long tr;
-+ asm ("str %0":"=r" (tr));
-+ return tr;
-+}
-+
-+static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
-+{
-+ unsigned int i;
-+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
-+
-+ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
-+ gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
-+}
-+#else
-+#define load_TLS(t, cpu) xen_load_tls(t, cpu)
-+#define set_ldt(addr, entries) xen_set_ldt((unsigned long)(addr), entries)
-+
-+extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
-+extern int write_gdt_entry(void *gdt, int entry, __u32 entry_a, __u32 entry_b);
-+
-+static inline void xen_load_tls(struct thread_struct *t, unsigned int cpu)
-+{
-+ unsigned int i;
-+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
-+
-+ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
-+ HYPERVISOR_update_descriptor(virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN + i]),
-+ *(u64 *)&t->tls_array[i]);
-+}
-+#endif
-+
-+#ifndef CONFIG_X86_NO_IDT
-+static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
-+{
-+ __u32 a, b;
-+ pack_gate(&a, &b, (unsigned long)addr, seg, type, 0);
-+ write_idt_entry(idt_table, gate, a, b);
-+}
-+#endif
-+
-+#ifndef CONFIG_X86_NO_TSS
-+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr)
-+{
-+ __u32 a, b;
-+ pack_descriptor(&a, &b, (unsigned long)addr,
-+ offsetof(struct tss_struct, __cacheline_filler) - 1,
-+ DESCTYPE_TSS, 0);
-+ write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
-+}
-+#endif
-+
-+
-+#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
-+
-+#define LDT_entry_a(info) \
-+ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
-+
-+#define LDT_entry_b(info) \
-+ (((info)->base_addr & 0xff000000) | \
-+ (((info)->base_addr & 0x00ff0000) >> 16) | \
-+ ((info)->limit & 0xf0000) | \
-+ (((info)->read_exec_only ^ 1) << 9) | \
-+ ((info)->contents << 10) | \
-+ (((info)->seg_not_present ^ 1) << 15) | \
-+ ((info)->seg_32bit << 22) | \
-+ ((info)->limit_in_pages << 23) | \
-+ ((info)->useable << 20) | \
-+ 0x7000)
-+
-+#define LDT_empty(info) (\
-+ (info)->base_addr == 0 && \
-+ (info)->limit == 0 && \
-+ (info)->contents == 0 && \
-+ (info)->read_exec_only == 1 && \
-+ (info)->seg_32bit == 0 && \
-+ (info)->limit_in_pages == 0 && \
-+ (info)->seg_not_present == 1 && \
-+ (info)->useable == 0 )
-+
-+static inline void clear_LDT(void)
-+{
-+ set_ldt(NULL, 0);
-+}
-+
-+/*
-+ * load one particular LDT into the current CPU
-+ */
-+static inline void load_LDT_nolock(mm_context_t *pc)
-+{
-+ set_ldt(pc->ldt, pc->size);
-+}
-+
-+static inline void load_LDT(mm_context_t *pc)
-+{
-+ preempt_disable();
-+ load_LDT_nolock(pc);
-+ preempt_enable();
-+}
-+
-+static inline unsigned long get_desc_base(unsigned long *desc)
-+{
-+ unsigned long base;
-+ base = ((desc[0] >> 16) & 0x0000ffff) |
-+ ((desc[1] << 16) & 0x00ff0000) |
-+ (desc[1] & 0xff000000);
-+ return base;
-+}
-+
-+#else /* __ASSEMBLY__ */
-+
-+/*
-+ * GET_DESC_BASE reads the descriptor base of the specified segment.
-+ *
-+ * Args:
-+ * idx - descriptor index
-+ * gdt - GDT pointer
-+ * base - 32bit register to which the base will be written
-+ * lo_w - lo word of the "base" register
-+ * lo_b - lo byte of the "base" register
-+ * hi_b - hi byte of the low word of the "base" register
-+ *
-+ * Example:
-+ * GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
-+ * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
-+ */
-+#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
-+ movb idx*8+4(gdt), lo_b; \
-+ movb idx*8+7(gdt), hi_b; \
-+ shll $16, base; \
-+ movw idx*8+2(gdt), lo_w;
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#endif
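
As a quick sanity check of the bit layout used by pack_descriptor() and get_desc_base() above, the following standalone program packs a descriptor and recovers the base address with the same shifts and masks (the base, limit and type values are arbitrary test inputs):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same bit layout as pack_descriptor()/get_desc_base() above, done
 * with fixed-width types so it can run as a userspace check. */
static void pack_descriptor(uint32_t *a, uint32_t *b, uint32_t base,
			    uint32_t limit, uint8_t type, uint8_t flags)
{
	*a = ((base & 0xffff) << 16) | (limit & 0xffff);
	*b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
	     (limit & 0x000f0000) | ((uint32_t)(type & 0xff) << 8) |
	     ((uint32_t)(flags & 0xf) << 20);
}

static uint32_t get_desc_base(const uint32_t desc[2])
{
	return ((desc[0] >> 16) & 0x0000ffff) |
	       ((desc[1] << 16) & 0x00ff0000) |
	       (desc[1] & 0xff000000);
}

int main(void)
{
	uint32_t d[2];
	uint32_t base = 0x12345678, limit = 0xabcde;

	pack_descriptor(&d[0], &d[1], base, limit, 0x82 /* DESCTYPE_LDT */, 0);
	assert(get_desc_base(d) == base);			/* base round-trips */
	assert((d[0] & 0xffff) == (limit & 0xffff));		/* limit bits 0-15 */
	assert((d[1] & 0x000f0000) == (limit & 0x000f0000));	/* limit bits 16-19 */
	printf("a=%08x b=%08x base=%08x\n",
	       (unsigned)d[0], (unsigned)d[1], (unsigned)get_desc_base(d));
	return 0;
}
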
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/dma-mapping.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/dma-mapping.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/dma-mapping.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/dma-mapping.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,157 @@
-+#ifndef _ASM_I386_DMA_MAPPING_H
-+#define _ASM_I386_DMA_MAPPING_H
-+
-+/*
-+ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
-+ * documentation.
-+ */
-+
-+#include <linux/mm.h>
-+#include <asm/cache.h>
-+#include <asm/io.h>
-+#include <asm/scatterlist.h>
-+#include <asm/swiotlb.h>
-+
-+static inline int
-+address_needs_mapping(struct device *hwdev, dma_addr_t addr)
-+{
-+ dma_addr_t mask = 0xffffffff;
-+ /* If the device has a mask, use it, otherwise default to 32 bits */
-+ if (hwdev && hwdev->dma_mask)
-+ mask = *hwdev->dma_mask;
-+ return (addr & ~mask) != 0;
-+}
-+
-+static inline int
-+range_straddles_page_boundary(paddr_t p, size_t size)
-+{
-+ extern unsigned long *contiguous_bitmap;
-+ return ((((p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
-+ !test_bit(p >> PAGE_SHIFT, contiguous_bitmap));
-+}
-+
-+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-+
-+void *dma_alloc_coherent(struct device *dev, size_t size,
-+ dma_addr_t *dma_handle, gfp_t flag);
-+
-+void dma_free_coherent(struct device *dev, size_t size,
-+ void *vaddr, dma_addr_t dma_handle);
-+
-+extern dma_addr_t
-+dma_map_single(struct device *dev, void *ptr, size_t size,
-+ enum dma_data_direction direction);
-+
-+extern void
-+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-+ enum dma_data_direction direction);
-+
-+extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-+ int nents, enum dma_data_direction direction);
-+extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-+ int nents, enum dma_data_direction direction);
-+
-+#ifdef CONFIG_HIGHMEM
-+extern dma_addr_t
-+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-+ size_t size, enum dma_data_direction direction);
-+
-+extern void
-+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-+ enum dma_data_direction direction);
-+#else
-+#define dma_map_page(dev, page, offset, size, dir) \
-+ dma_map_single(dev, page_address(page) + (offset), (size), (dir))
-+#define dma_unmap_page dma_unmap_single
-+#endif
-+
-+extern void
-+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-+ enum dma_data_direction direction);
-+
-+extern void
-+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-+ enum dma_data_direction direction);
-+
-+static inline void
-+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-+ unsigned long offset, size_t size,
-+ enum dma_data_direction direction)
-+{
-+ dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-+}
-+
-+static inline void
-+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-+ unsigned long offset, size_t size,
-+ enum dma_data_direction direction)
-+{
-+ dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
-+}
-+
-+static inline void
-+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-+ enum dma_data_direction direction)
-+{
-+ if (swiotlb)
-+ swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
-+ flush_write_buffers();
-+}
-+
-+static inline void
-+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-+ enum dma_data_direction direction)
-+{
-+ if (swiotlb)
-+ swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
-+ flush_write_buffers();
-+}
-+
-+extern int
-+dma_mapping_error(dma_addr_t dma_addr);
-+
-+extern int
-+dma_supported(struct device *dev, u64 mask);
-+
-+static inline int
-+dma_set_mask(struct device *dev, u64 mask)
-+{
-+ if(!dev->dma_mask || !dma_supported(dev, mask))
-+ return -EIO;
-+
-+ *dev->dma_mask = mask;
-+
-+ return 0;
-+}
-+
-+static inline int
-+dma_get_cache_alignment(void)
-+{
-+ /* no easy way to get cache size on all x86, so return the
-+ * maximum possible, to be safe */
-+ return (1 << INTERNODE_CACHE_SHIFT);
-+}
-+
-+#define dma_is_consistent(d, h) (1)
-+
-+static inline void
-+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-+ enum dma_data_direction direction)
-+{
-+ flush_write_buffers();
-+}
-+
-+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-+extern int
-+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-+ dma_addr_t device_addr, size_t size, int flags);
-+
-+extern void
-+dma_release_declared_memory(struct device *dev);
-+
-+extern void *
-+dma_mark_declared_memory_occupied(struct device *dev,
-+ dma_addr_t device_addr, size_t size);
-+
-+#endif
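
range_straddles_page_boundary() above combines a simple offset check with a lookup in contiguous_bitmap. The offset arithmetic on its own can be exercised in userspace; the bitmap part is omitted in this sketch:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Offset arithmetic only; the kernel version above additionally lets a
 * buffer pass if contiguous_bitmap says the underlying machine frames
 * are contiguous. */
static int straddles_page(unsigned long p, unsigned long size)
{
	return ((p & ~PAGE_MASK) + size) > PAGE_SIZE;
}

int main(void)
{
	printf("%d\n", straddles_page(0x1000, 4096));	/* 0: exactly one page */
	printf("%d\n", straddles_page(0x1fa0, 200));	/* 1: crosses into the next page */
	printf("%d\n", straddles_page(0x1f00, 0x100));	/* 0: ends on the boundary */
	return 0;
}
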
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/fixmap.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/fixmap.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/fixmap.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/fixmap.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,160 @@
-+/*
-+ * fixmap.h: compile-time virtual memory allocation
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License. See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ *
-+ * Copyright (C) 1998 Ingo Molnar
-+ *
-+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
-+ */
-+
-+#ifndef _ASM_FIXMAP_H
-+#define _ASM_FIXMAP_H
-+
-+/* used by vmalloc.c, vsyscall.lds.S.
-+ *
-+ * Leave one empty page between vmalloc'ed areas and
-+ * the start of the fixmap.
-+ */
-+extern unsigned long __FIXADDR_TOP;
-+#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
-+#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
-+
-+#ifndef __ASSEMBLY__
-+#include <linux/kernel.h>
-+#include <asm/acpi.h>
-+#include <asm/apicdef.h>
-+#include <asm/page.h>
-+#ifdef CONFIG_HIGHMEM
-+#include <linux/threads.h>
-+#include <asm/kmap_types.h>
-+#endif
-+
-+/*
-+ * Here we define all the compile-time 'special' virtual
-+ * addresses. The point is to have a constant address at
-+ * compile time, but to set the physical address only
-+ * in the boot process. We allocate these special addresses
-+ * from the end of virtual memory (0xfffff000) backwards.
-+ * Also this lets us do fail-safe vmalloc(), we
-+ * can guarantee that these special addresses and
-+ * vmalloc()-ed addresses never overlap.
-+ *
-+ * these 'compile-time allocated' memory buffers are
-+ * fixed-size 4k pages. (or larger if used with an increment
-+ * higher than 1) use fixmap_set(idx,phys) to associate
-+ * physical memory with fixmap indices.
-+ *
-+ * TLB entries of such buffers will not be flushed across
-+ * task switches.
-+ */
-+enum fixed_addresses {
-+ FIX_HOLE,
-+ FIX_VDSO,
-+#ifndef CONFIG_XEN
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
-+#endif
-+#ifdef CONFIG_X86_IO_APIC
-+ FIX_IO_APIC_BASE_0,
-+ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
-+#endif
-+#endif
-+#ifdef CONFIG_X86_VISWS_APIC
-+ FIX_CO_CPU, /* Cobalt timer */
-+ FIX_CO_APIC, /* Cobalt APIC Redirection Table */
-+ FIX_LI_PCIA, /* Lithium PCI Bridge A */
-+ FIX_LI_PCIB, /* Lithium PCI Bridge B */
-+#endif
-+#ifdef CONFIG_X86_F00F_BUG
-+ FIX_F00F_IDT, /* Virtual mapping for IDT */
-+#endif
-+#ifdef CONFIG_X86_CYCLONE_TIMER
-+ FIX_CYCLONE_TIMER, /*cyclone timer register*/
-+#endif
-+#ifdef CONFIG_HIGHMEM
-+ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
-+ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-+#endif
-+#ifdef CONFIG_ACPI
-+ FIX_ACPI_BEGIN,
-+ FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-+#endif
-+#ifdef CONFIG_PCI_MMCONFIG
-+ FIX_PCIE_MCFG,
-+#endif
-+#ifdef CONFIG_PARAVIRT
-+ FIX_PARAVIRT_BOOTMAP,
-+#endif
-+ FIX_SHARED_INFO,
-+#define NR_FIX_ISAMAPS 256
-+ FIX_ISAMAP_END,
-+ FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
-+ __end_of_permanent_fixed_addresses,
-+ /* temporary boot-time mappings, used before ioremap() is functional */
-+#define NR_FIX_BTMAPS 16
-+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
-+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
-+ FIX_WP_TEST,
-+ __end_of_fixed_addresses
-+};
-+
-+extern void __set_fixmap(enum fixed_addresses idx,
-+ maddr_t phys, pgprot_t flags);
-+extern void reserve_top_address(unsigned long reserve);
-+
-+#define set_fixmap(idx, phys) \
-+ __set_fixmap(idx, phys, PAGE_KERNEL)
-+/*
-+ * Some hardware wants to get fixmapped without caching.
-+ */
-+#define set_fixmap_nocache(idx, phys) \
-+ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-+
-+#define clear_fixmap(idx) \
-+ __set_fixmap(idx, 0, __pgprot(0))
-+
-+#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
-+
-+#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
-+#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-+#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
-+#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
-+
-+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-+#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-+
-+extern void __this_fixmap_does_not_exist(void);
-+
-+/*
-+ * 'index to address' translation. If anyone tries to use the idx
-+ * directly without translation, we catch the bug with a NULL-dereference
-+ * kernel oops. Illegal ranges of incoming indices are caught too.
-+ */
-+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
-+{
-+ /*
-+ * this branch gets completely eliminated after inlining,
-+ * except when someone tries to use fixaddr indices in an
-+ * illegal way. (such as mixing up address types or using
-+ * out-of-range indices).
-+ *
-+ * If it doesn't get removed, the linker will complain
-+ * loudly with a reasonably clear error message..
-+ */
-+ if (idx >= __end_of_fixed_addresses)
-+ __this_fixmap_does_not_exist();
-+
-+ return __fix_to_virt(idx);
-+}
-+
-+static inline unsigned long virt_to_fix(const unsigned long vaddr)
-+{
-+ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
-+ return __virt_to_fix(vaddr);
-+}
-+
-+#endif /* !__ASSEMBLY__ */
-+#endif
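
fix_to_virt()/virt_to_fix() above are pure arithmetic on a compile-time index: each slot sits one page below the previous one, counting down from FIXADDR_TOP. A standalone sketch of that mapping, using the 0xfffff000 top mentioned in the comment purely for illustration (the real value is the runtime __FIXADDR_TOP, possibly lowered via reserve_top_address()):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define FIXADDR_TOP	0xfffff000UL	/* illustrative value only */

#define __fix_to_virt(x)	(FIXADDR_TOP - ((unsigned long)(x) << PAGE_SHIFT))
#define __virt_to_fix(x)	((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

int main(void)
{
	unsigned long idx;
	for (idx = 0; idx < 4; idx++) {
		unsigned long va = __fix_to_virt(idx);
		printf("fixmap %lu -> %#lx -> %lu\n", idx, va, __virt_to_fix(va));
	}
	return 0;
}
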
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/floppy.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/floppy.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/floppy.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/floppy.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,147 @@
-+/*
-+ * Architecture specific parts of the Floppy driver
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License. See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ *
-+ * Copyright (C) 1995
-+ *
-+ * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
-+ */
-+#ifndef __ASM_XEN_I386_FLOPPY_H
-+#define __ASM_XEN_I386_FLOPPY_H
-+
-+#include <linux/vmalloc.h>
-+
-+/* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. */
-+#include <asm/dma.h>
-+#undef MAX_DMA_ADDRESS
-+#define MAX_DMA_ADDRESS 0
-+#define CROSS_64KB(a,s) (0)
-+
-+#define fd_inb(port) inb_p(port)
-+#define fd_outb(value,port) outb_p(value,port)
-+
-+#define fd_request_dma() (0)
-+#define fd_free_dma() ((void)0)
-+#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
-+#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
-+#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
-+#define fd_get_dma_residue() (virtual_dma_count + virtual_dma_residue)
-+#define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io)
-+/*
-+ * Do not use vmalloc/vfree: floppy_release_irq_and_dma() gets called from
-+ * softirq context via motor_off_callback. A generic bug we happen to trigger.
-+ */
-+#define fd_dma_mem_alloc(size) __get_free_pages(GFP_KERNEL, get_order(size))
-+#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
-+
-+static int virtual_dma_count;
-+static int virtual_dma_residue;
-+static char *virtual_dma_addr;
-+static int virtual_dma_mode;
-+static int doing_pdma;
-+
-+static irqreturn_t floppy_hardint(int irq, void *dev_id)
-+{
-+ register unsigned char st;
-+ register int lcount;
-+ register char *lptr;
-+
-+ if (!doing_pdma)
-+ return floppy_interrupt(irq, dev_id);
-+
-+ st = 1;
-+ for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
-+ lcount; lcount--, lptr++) {
-+ st=inb(virtual_dma_port+4) & 0xa0 ;
-+ if(st != 0xa0)
-+ break;
-+ if(virtual_dma_mode)
-+ outb_p(*lptr, virtual_dma_port+5);
-+ else
-+ *lptr = inb_p(virtual_dma_port+5);
-+ }
-+ virtual_dma_count = lcount;
-+ virtual_dma_addr = lptr;
-+ st = inb(virtual_dma_port+4);
-+
-+ if(st == 0x20)
-+ return IRQ_HANDLED;
-+ if(!(st & 0x20)) {
-+ virtual_dma_residue += virtual_dma_count;
-+ virtual_dma_count=0;
-+ doing_pdma = 0;
-+ floppy_interrupt(irq, dev_id);
-+ return IRQ_HANDLED;
-+ }
-+ return IRQ_HANDLED;
-+}
-+
-+static void fd_disable_dma(void)
-+{
-+ doing_pdma = 0;
-+ virtual_dma_residue += virtual_dma_count;
-+ virtual_dma_count=0;
-+}
-+
-+static int fd_request_irq(void)
-+{
-+ return request_irq(FLOPPY_IRQ, floppy_hardint,
-+ IRQF_DISABLED, "floppy", NULL);
-+}
-+
-+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
-+{
-+ doing_pdma = 1;
-+ virtual_dma_port = io;
-+ virtual_dma_mode = (mode == DMA_MODE_WRITE);
-+ virtual_dma_addr = addr;
-+ virtual_dma_count = size;
-+ virtual_dma_residue = 0;
-+ return 0;
-+}
-+
-+/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
-+#define FDC1 xen_floppy_init()
-+static int FDC2 = -1;
-+
-+static int xen_floppy_init(void)
-+{
-+ use_virtual_dma = 1;
-+ can_use_virtual_dma = 1;
-+ return 0x3f0;
-+}
-+
-+/*
-+ * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
-+ * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
-+ * coincides with another rtc CMOS user. Paul G.
-+ */
-+#define FLOPPY0_TYPE ({ \
-+ unsigned long flags; \
-+ unsigned char val; \
-+ spin_lock_irqsave(&rtc_lock, flags); \
-+ val = (CMOS_READ(0x10) >> 4) & 15; \
-+ spin_unlock_irqrestore(&rtc_lock, flags); \
-+ val; \
-+})
-+
-+#define FLOPPY1_TYPE ({ \
-+ unsigned long flags; \
-+ unsigned char val; \
-+ spin_lock_irqsave(&rtc_lock, flags); \
-+ val = CMOS_READ(0x10) & 15; \
-+ spin_unlock_irqrestore(&rtc_lock, flags); \
-+ val; \
-+})
-+
-+#define N_FDC 2
-+#define N_DRIVE 8
-+
-+#define FLOPPY_MOTOR_MASK 0xf0
-+
-+#define EXTRA_FLOPPY_PARAMS
-+
-+#endif /* __ASM_XEN_I386_FLOPPY_H */
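
FLOPPY0_TYPE/FLOPPY1_TYPE above split one CMOS byte (register 0x10) into two drive-type nibbles, high nibble for drive 0 and low nibble for drive 1. The extraction itself can be checked in isolation; 0x40 below is an invented sample value (drive 0 present, drive 1 absent):

#include <stdio.h>

int main(void)
{
	unsigned char cmos_0x10 = 0x40;			/* invented sample byte */
	unsigned int drive0 = (cmos_0x10 >> 4) & 15;	/* as in FLOPPY0_TYPE */
	unsigned int drive1 = cmos_0x10 & 15;		/* as in FLOPPY1_TYPE */
	printf("drive0 type=%u drive1 type=%u\n", drive0, drive1);
	return 0;
}
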
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/gnttab_dma.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/gnttab_dma.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/gnttab_dma.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/gnttab_dma.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,41 @@
-+/*
-+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
-+ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
-+ * VA Linux Systems Japan K.K.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+
-+#ifndef _ASM_I386_GNTTAB_DMA_H
-+#define _ASM_I386_GNTTAB_DMA_H
-+
-+static inline int gnttab_dma_local_pfn(struct page *page)
-+{
-+ /* Has it become a local MFN? */
-+ return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page))));
-+}
-+
-+static inline maddr_t gnttab_dma_map_page(struct page *page)
-+{
-+ __gnttab_dma_map_page(page);
-+ return page_to_bus(page);
-+}
-+
-+static inline void gnttab_dma_unmap_page(maddr_t maddr)
-+{
-+ __gnttab_dma_unmap_page(virt_to_page(bus_to_virt(maddr)));
-+}
-+
-+#endif /* _ASM_I386_GNTTAB_DMA_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/highmem.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/highmem.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/highmem.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/highmem.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,86 @@
-+/*
-+ * highmem.h: virtual kernel memory mappings for high memory
-+ *
-+ * Used in CONFIG_HIGHMEM systems for memory pages which
-+ * are not addressable by direct kernel virtual addresses.
-+ *
-+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
-+ * Gerhard.Wichert@pdb.siemens.de
-+ *
-+ *
-+ * Redesigned the x86 32-bit VM architecture to deal with
-+ * up to 16 Terabyte physical memory. With current x86 CPUs
-+ * we now support up to 64 Gigabytes physical RAM.
-+ *
-+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
-+ */
-+
-+#ifndef _ASM_HIGHMEM_H
-+#define _ASM_HIGHMEM_H
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/interrupt.h>
-+#include <linux/threads.h>
-+#include <asm/kmap_types.h>
-+#include <asm/tlbflush.h>
-+
-+/* declarations for highmem.c */
-+extern unsigned long highstart_pfn, highend_pfn;
-+
-+extern pte_t *kmap_pte;
-+extern pgprot_t kmap_prot;
-+extern pte_t *pkmap_page_table;
-+
-+/*
-+ * Right now we initialize only a single pte table. It can be extended
-+ * easily, subsequent pte tables have to be allocated in one physical
-+ * chunk of RAM.
-+ */
-+#ifdef CONFIG_X86_PAE
-+#define LAST_PKMAP 512
-+#else
-+#define LAST_PKMAP 1024
-+#endif
-+/*
-+ * Ordering is:
-+ *
-+ * FIXADDR_TOP
-+ * fixed_addresses
-+ * FIXADDR_START
-+ * temp fixed addresses
-+ * FIXADDR_BOOT_START
-+ * Persistent kmap area
-+ * PKMAP_BASE
-+ * VMALLOC_END
-+ * Vmalloc area
-+ * VMALLOC_START
-+ * high_memory
-+ */
-+#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK )
-+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
-+#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
-+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-+
-+extern void * FASTCALL(kmap_high(struct page *page));
-+extern void FASTCALL(kunmap_high(struct page *page));
-+
-+void *kmap(struct page *page);
-+void kunmap(struct page *page);
-+void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-+void *kmap_atomic(struct page *page, enum km_type type);
-+void *kmap_atomic_pte(struct page *page, enum km_type type);
-+void kunmap_atomic(void *kvaddr, enum km_type type);
-+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-+struct page *kmap_atomic_to_page(void *ptr);
-+
-+#define kmap_atomic_pte(page, type) \
-+ kmap_atomic_prot(page, type, \
-+ test_bit(PG_pinned, &(page)->flags) \
-+ ? PAGE_KERNEL_RO : kmap_prot)
-+
-+#define flush_cache_kmaps() do { } while (0)
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /* _ASM_HIGHMEM_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/hw_irq.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/hw_irq.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/hw_irq.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/hw_irq.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,64 @@
-+#ifndef _ASM_HW_IRQ_H
-+#define _ASM_HW_IRQ_H
-+
-+/*
-+ * linux/include/asm/hw_irq.h
-+ *
-+ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
-+ *
-+ * moved some of the old arch/i386/kernel/irq.h to here. VY
-+ *
-+ * IRQ/IPI changes taken from work by Thomas Radke
-+ * <tomsoft@informatik.tu-chemnitz.de>
-+ */
-+
-+#include <linux/profile.h>
-+#include <asm/atomic.h>
-+#include <asm/irq.h>
-+#include <asm/sections.h>
-+
-+/*
-+ * Various low-level irq details needed by irq.c, process.c,
-+ * time.c, io_apic.c and smp.c
-+ *
-+ * Interrupt entry/exit code at both C and assembly level
-+ */
-+
-+extern void (*interrupt[NR_IRQS])(void);
-+
-+#ifdef CONFIG_SMP
-+fastcall void reschedule_interrupt(void);
-+fastcall void invalidate_interrupt(void);
-+fastcall void call_function_interrupt(void);
-+#endif
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+fastcall void apic_timer_interrupt(void);
-+fastcall void error_interrupt(void);
-+fastcall void spurious_interrupt(void);
-+fastcall void thermal_interrupt(void);
-+#define platform_legacy_irq(irq) ((irq) < 16)
-+#endif
-+
-+void disable_8259A_irq(unsigned int irq);
-+void enable_8259A_irq(unsigned int irq);
-+int i8259A_irq_pending(unsigned int irq);
-+void make_8259A_irq(unsigned int irq);
-+void init_8259A(int aeoi);
-+void FASTCALL(send_IPI_self(int vector));
-+void init_VISWS_APIC_irqs(void);
-+void setup_IO_APIC(void);
-+void disable_IO_APIC(void);
-+void print_IO_APIC(void);
-+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
-+void send_IPI(int dest, int vector);
-+void setup_ioapic_dest(void);
-+
-+extern unsigned long io_apic_irqs;
-+
-+extern atomic_t irq_err_count;
-+extern atomic_t irq_mis_count;
-+
-+#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
-+
-+#endif /* _ASM_HW_IRQ_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/hypercall.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/hypercall.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/hypercall.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/hypercall.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,407 @@
-+/******************************************************************************
-+ * hypercall.h
-+ *
-+ * Linux-specific hypervisor handling.
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __HYPERCALL_H__
-+#define __HYPERCALL_H__
-+
-+#include <linux/string.h> /* memcpy() */
-+
-+#ifndef __HYPERVISOR_H__
-+# error "please don't include this file directly"
-+#endif
-+
-+#define __STR(x) #x
-+#define STR(x) __STR(x)
-+
-+#ifdef CONFIG_XEN
-+#define HYPERCALL_STR(name) \
-+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"
-+#else
-+#define HYPERCALL_STR(name) \
-+ "mov hypercall_stubs,%%eax; " \
-+ "add $("STR(__HYPERVISOR_##name)" * 32),%%eax; " \
-+ "call *%%eax"
-+#endif
-+
-+#define _hypercall0(type, name) \
-+({ \
-+ long __res; \
-+ asm volatile ( \
-+ HYPERCALL_STR(name) \
-+ : "=a" (__res) \
-+ : \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall1(type, name, a1) \
-+({ \
-+ long __res, __ign1; \
-+ asm volatile ( \
-+ HYPERCALL_STR(name) \
-+ : "=a" (__res), "=b" (__ign1) \
-+ : "1" ((long)(a1)) \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall2(type, name, a1, a2) \
-+({ \
-+ long __res, __ign1, __ign2; \
-+ asm volatile ( \
-+ HYPERCALL_STR(name) \
-+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \
-+ : "1" ((long)(a1)), "2" ((long)(a2)) \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall3(type, name, a1, a2, a3) \
-+({ \
-+ long __res, __ign1, __ign2, __ign3; \
-+ asm volatile ( \
-+ HYPERCALL_STR(name) \
-+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
-+ "=d" (__ign3) \
-+ : "1" ((long)(a1)), "2" ((long)(a2)), \
-+ "3" ((long)(a3)) \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall4(type, name, a1, a2, a3, a4) \
-+({ \
-+ long __res, __ign1, __ign2, __ign3, __ign4; \
-+ asm volatile ( \
-+ HYPERCALL_STR(name) \
-+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
-+ "=d" (__ign3), "=S" (__ign4) \
-+ : "1" ((long)(a1)), "2" ((long)(a2)), \
-+ "3" ((long)(a3)), "4" ((long)(a4)) \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
-+({ \
-+ long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \
-+ asm volatile ( \
-+ HYPERCALL_STR(name) \
-+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
-+ "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \
-+ : "1" ((long)(a1)), "2" ((long)(a2)), \
-+ "3" ((long)(a3)), "4" ((long)(a4)), \
-+ "5" ((long)(a5)) \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+static inline int
-+HYPERVISOR_set_trap_table(
-+ trap_info_t *table)
-+{
-+ return _hypercall1(int, set_trap_table, table);
-+}
-+
-+static inline int
-+HYPERVISOR_mmu_update(
-+ mmu_update_t *req, int count, int *success_count, domid_t domid)
-+{
-+ return _hypercall4(int, mmu_update, req, count, success_count, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_mmuext_op(
-+ struct mmuext_op *op, int count, int *success_count, domid_t domid)
-+{
-+ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_set_gdt(
-+ unsigned long *frame_list, int entries)
-+{
-+ return _hypercall2(int, set_gdt, frame_list, entries);
-+}
-+
-+static inline int
-+HYPERVISOR_stack_switch(
-+ unsigned long ss, unsigned long esp)
-+{
-+ return _hypercall2(int, stack_switch, ss, esp);
-+}
-+
-+static inline int
-+HYPERVISOR_set_callbacks(
-+ unsigned long event_selector, unsigned long event_address,
-+ unsigned long failsafe_selector, unsigned long failsafe_address)
-+{
-+ return _hypercall4(int, set_callbacks,
-+ event_selector, event_address,
-+ failsafe_selector, failsafe_address);
-+}
-+
-+static inline int
-+HYPERVISOR_fpu_taskswitch(
-+ int set)
-+{
-+ return _hypercall1(int, fpu_taskswitch, set);
-+}
-+
-+static inline int
-+HYPERVISOR_sched_op_compat(
-+ int cmd, unsigned long arg)
-+{
-+ return _hypercall2(int, sched_op_compat, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_sched_op(
-+ int cmd, void *arg)
-+{
-+ return _hypercall2(int, sched_op, cmd, arg);
-+}
-+
-+static inline long
-+HYPERVISOR_set_timer_op(
-+ u64 timeout)
-+{
-+ unsigned long timeout_hi = (unsigned long)(timeout>>32);
-+ unsigned long timeout_lo = (unsigned long)timeout;
-+ return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
-+}
-+
-+static inline int
-+HYPERVISOR_platform_op(
-+ struct xen_platform_op *platform_op)
-+{
-+ platform_op->interface_version = XENPF_INTERFACE_VERSION;
-+ return _hypercall1(int, platform_op, platform_op);
-+}
-+
-+static inline int
-+HYPERVISOR_set_debugreg(
-+ int reg, unsigned long value)
-+{
-+ return _hypercall2(int, set_debugreg, reg, value);
-+}
-+
-+static inline unsigned long
-+HYPERVISOR_get_debugreg(
-+ int reg)
-+{
-+ return _hypercall1(unsigned long, get_debugreg, reg);
-+}
-+
-+static inline int
-+HYPERVISOR_update_descriptor(
-+ u64 ma, u64 desc)
-+{
-+ return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
-+}
-+
-+static inline int
-+HYPERVISOR_memory_op(
-+ unsigned int cmd, void *arg)
-+{
-+ return _hypercall2(int, memory_op, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_multicall(
-+ multicall_entry_t *call_list, int nr_calls)
-+{
-+ return _hypercall2(int, multicall, call_list, nr_calls);
-+}
-+
-+static inline int
-+HYPERVISOR_update_va_mapping(
-+ unsigned long va, pte_t new_val, unsigned long flags)
-+{
-+ unsigned long pte_hi = 0;
-+#ifdef CONFIG_X86_PAE
-+ pte_hi = new_val.pte_high;
-+#endif
-+ return _hypercall4(int, update_va_mapping, va,
-+ new_val.pte_low, pte_hi, flags);
-+}
-+
-+static inline int
-+HYPERVISOR_event_channel_op(
-+ int cmd, void *arg)
-+{
-+ int rc = _hypercall2(int, event_channel_op, cmd, arg);
-+
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (unlikely(rc == -ENOSYS)) {
-+ struct evtchn_op op;
-+ op.cmd = cmd;
-+ memcpy(&op.u, arg, sizeof(op.u));
-+ rc = _hypercall1(int, event_channel_op_compat, &op);
-+ memcpy(arg, &op.u, sizeof(op.u));
-+ }
-+#endif
-+
-+ return rc;
-+}
-+
-+static inline int
-+HYPERVISOR_acm_op(
-+ int cmd, void *arg)
-+{
-+ return _hypercall2(int, acm_op, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_xen_version(
-+ int cmd, void *arg)
-+{
-+ return _hypercall2(int, xen_version, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_console_io(
-+ int cmd, int count, char *str)
-+{
-+ return _hypercall3(int, console_io, cmd, count, str);
-+}
-+
-+static inline int
-+HYPERVISOR_physdev_op(
-+ int cmd, void *arg)
-+{
-+ int rc = _hypercall2(int, physdev_op, cmd, arg);
-+
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (unlikely(rc == -ENOSYS)) {
-+ struct physdev_op op;
-+ op.cmd = cmd;
-+ memcpy(&op.u, arg, sizeof(op.u));
-+ rc = _hypercall1(int, physdev_op_compat, &op);
-+ memcpy(arg, &op.u, sizeof(op.u));
-+ }
-+#endif
-+
-+ return rc;
-+}
-+
-+static inline int
-+HYPERVISOR_grant_table_op(
-+ unsigned int cmd, void *uop, unsigned int count)
-+{
-+ return _hypercall3(int, grant_table_op, cmd, uop, count);
-+}
-+
-+static inline int
-+HYPERVISOR_update_va_mapping_otherdomain(
-+ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
-+{
-+ unsigned long pte_hi = 0;
-+#ifdef CONFIG_X86_PAE
-+ pte_hi = new_val.pte_high;
-+#endif
-+ return _hypercall5(int, update_va_mapping_otherdomain, va,
-+ new_val.pte_low, pte_hi, flags, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_vm_assist(
-+ unsigned int cmd, unsigned int type)
-+{
-+ return _hypercall2(int, vm_assist, cmd, type);
-+}
-+
-+static inline int
-+HYPERVISOR_vcpu_op(
-+ int cmd, int vcpuid, void *extra_args)
-+{
-+ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
-+}
-+
-+static inline int
-+HYPERVISOR_suspend(
-+ unsigned long srec)
-+{
-+ struct sched_shutdown sched_shutdown = {
-+ .reason = SHUTDOWN_suspend
-+ };
-+
-+ int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
-+ &sched_shutdown, srec);
-+
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (rc == -ENOSYS)
-+ rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
-+ SHUTDOWN_suspend, srec);
-+#endif
-+
-+ return rc;
-+}
-+
-+static inline int
-+HYPERVISOR_nmi_op(
-+ unsigned long op, void *arg)
-+{
-+ return _hypercall2(int, nmi_op, op, arg);
-+}
-+
-+static inline unsigned long
-+HYPERVISOR_hvm_op(
-+ int op, void *arg)
-+{
-+ return _hypercall2(unsigned long, hvm_op, op, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_callback_op(
-+ int cmd, void *arg)
-+{
-+ return _hypercall2(int, callback_op, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_xenoprof_op(
-+ int op, void *arg)
-+{
-+ return _hypercall2(int, xenoprof_op, op, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_kexec_op(
-+ unsigned long op, void *args)
-+{
-+ return _hypercall2(int, kexec_op, op, args);
-+}
-+
-+
-+
-+#endif /* __HYPERCALL_H__ */
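
Several of the wrappers above (HYPERVISOR_set_timer_op(), HYPERVISOR_update_descriptor()) pass 64-bit values to the 32-bit hypercall interface as low/high register pairs. A standalone check that this split-and-recombine is lossless:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t timeout = 0x0123456789abcdefULL;	/* arbitrary test value */
	uint32_t lo = (uint32_t)timeout;		/* low word, passed first */
	uint32_t hi = (uint32_t)(timeout >> 32);	/* high word, passed second */

	uint64_t back = ((uint64_t)hi << 32) | lo;
	assert(back == timeout);
	printf("lo=%#x hi=%#x -> %#llx\n", lo, hi, (unsigned long long)back);
	return 0;
}
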
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/hypervisor.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/hypervisor.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/hypervisor.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/hypervisor.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,251 @@
-+/******************************************************************************
-+ * hypervisor.h
-+ *
-+ * Linux-specific hypervisor handling.
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __HYPERVISOR_H__
-+#define __HYPERVISOR_H__
-+
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/version.h>
-+#include <linux/errno.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/platform.h>
-+#include <xen/interface/event_channel.h>
-+#include <xen/interface/physdev.h>
-+#include <xen/interface/sched.h>
-+#include <xen/interface/nmi.h>
-+#include <asm/ptrace.h>
-+#include <asm/page.h>
-+
-+extern shared_info_t *HYPERVISOR_shared_info;
-+
-+#define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu))
-+#ifdef CONFIG_SMP
-+#define current_vcpu_info() vcpu_info(smp_processor_id())
-+#else
-+#define current_vcpu_info() vcpu_info(0)
-+#endif
-+
-+#ifdef CONFIG_X86_32
-+extern unsigned long hypervisor_virt_start;
-+#endif
-+
-+/* arch/xen/i386/kernel/setup.c */
-+extern start_info_t *xen_start_info;
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
-+#else
-+#define is_initial_xendomain() 0
-+#endif
-+
-+/* arch/xen/kernel/evtchn.c */
-+/* Force a proper event-channel callback from Xen. */
-+void force_evtchn_callback(void);
-+
-+/* arch/xen/kernel/process.c */
-+void xen_cpu_idle (void);
-+
-+/* arch/xen/i386/kernel/hypervisor.c */
-+void do_hypervisor_callback(struct pt_regs *regs);
-+
-+/* arch/xen/i386/mm/hypervisor.c */
-+/*
-+ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should be already
-+ * be MACHINE addresses.
-+ */
-+
-+void xen_pt_switch(unsigned long ptr);
-+void xen_new_user_pt(unsigned long ptr); /* x86_64 only */
-+void xen_load_gs(unsigned int selector); /* x86_64 only */
-+void xen_tlb_flush(void);
-+void xen_invlpg(unsigned long ptr);
-+
-+void xen_l1_entry_update(pte_t *ptr, pte_t val);
-+void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
-+void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */
-+void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */
-+void xen_pgd_pin(unsigned long ptr);
-+void xen_pgd_unpin(unsigned long ptr);
-+
-+void xen_set_ldt(unsigned long ptr, unsigned long bytes);
-+
-+#ifdef CONFIG_SMP
-+#include <linux/cpumask.h>
-+void xen_tlb_flush_all(void);
-+void xen_invlpg_all(unsigned long ptr);
-+void xen_tlb_flush_mask(cpumask_t *mask);
-+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr);
-+#endif
-+
-+/* Returns zero on success else negative errno. */
-+int xen_create_contiguous_region(
-+ unsigned long vstart, unsigned int order, unsigned int address_bits);
-+void xen_destroy_contiguous_region(
-+ unsigned long vstart, unsigned int order);
-+
-+/* Turn jiffies into Xen system time. */
-+u64 jiffies_to_st(unsigned long jiffies);
-+
-+#ifdef CONFIG_XEN_SCRUB_PAGES
-+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
-+#else
-+#define scrub_pages(_p,_n) ((void)0)
-+#endif
-+
-+#include <xen/hypercall.h>
-+
-+#if defined(CONFIG_X86_64)
-+#define MULTI_UVMFLAGS_INDEX 2
-+#define MULTI_UVMDOMID_INDEX 3
-+#else
-+#define MULTI_UVMFLAGS_INDEX 3
-+#define MULTI_UVMDOMID_INDEX 4
-+#endif
-+
-+#define is_running_on_xen() 1
-+
-+static inline int
-+HYPERVISOR_yield(
-+ void)
-+{
-+ int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
-+
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (rc == -ENOSYS)
-+ rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
-+#endif
-+
-+ return rc;
-+}
-+
-+static inline int
-+HYPERVISOR_block(
-+ void)
-+{
-+ int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL);
-+
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (rc == -ENOSYS)
-+ rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0);
-+#endif
-+
-+ return rc;
-+}
-+
-+static inline int
-+HYPERVISOR_shutdown(
-+ unsigned int reason)
-+{
-+ struct sched_shutdown sched_shutdown = {
-+ .reason = reason
-+ };
-+
-+ int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
-+
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (rc == -ENOSYS)
-+ rc = HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason);
-+#endif
-+
-+ return rc;
-+}
-+
-+static inline int
-+HYPERVISOR_poll(
-+ evtchn_port_t *ports, unsigned int nr_ports, u64 timeout)
-+{
-+ int rc;
-+ struct sched_poll sched_poll = {
-+ .nr_ports = nr_ports,
-+ .timeout = jiffies_to_st(timeout)
-+ };
-+ set_xen_guest_handle(sched_poll.ports, ports);
-+
-+ rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (rc == -ENOSYS)
-+ rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
-+#endif
-+
-+ return rc;
-+}
-+
-+static inline void
-+MULTI_update_va_mapping(
-+ multicall_entry_t *mcl, unsigned long va,
-+ pte_t new_val, unsigned long flags)
-+{
-+ mcl->op = __HYPERVISOR_update_va_mapping;
-+ mcl->args[0] = va;
-+#if defined(CONFIG_X86_64)
-+ mcl->args[1] = new_val.pte;
-+#elif defined(CONFIG_X86_PAE)
-+ mcl->args[1] = new_val.pte_low;
-+ mcl->args[2] = new_val.pte_high;
-+#else
-+ mcl->args[1] = new_val.pte_low;
-+ mcl->args[2] = 0;
-+#endif
-+ mcl->args[MULTI_UVMFLAGS_INDEX] = flags;
-+}
-+
-+static inline void
-+MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd,
-+ void *uop, unsigned int count)
-+{
-+ mcl->op = __HYPERVISOR_grant_table_op;
-+ mcl->args[0] = cmd;
-+ mcl->args[1] = (unsigned long)uop;
-+ mcl->args[2] = count;
-+}
-+
-+static inline void
-+MULTI_update_va_mapping_otherdomain(
-+ multicall_entry_t *mcl, unsigned long va,
-+ pte_t new_val, unsigned long flags, domid_t domid)
-+{
-+ mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
-+ mcl->args[0] = va;
-+#if defined(CONFIG_X86_64)
-+ mcl->args[1] = new_val.pte;
-+#elif defined(CONFIG_X86_PAE)
-+ mcl->args[1] = new_val.pte_low;
-+ mcl->args[2] = new_val.pte_high;
-+#else
-+ mcl->args[1] = new_val.pte_low;
-+ mcl->args[2] = 0;
-+#endif
-+ mcl->args[MULTI_UVMFLAGS_INDEX] = flags;
-+ mcl->args[MULTI_UVMDOMID_INDEX] = domid;
-+}
-+
-+#endif /* __HYPERVISOR_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/io.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/io.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/io.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/io.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,356 @@
-+#ifndef _ASM_IO_H
-+#define _ASM_IO_H
-+
-+#include <linux/string.h>
-+#include <linux/compiler.h>
-+
-+/*
-+ * This file contains the definitions for the x86 IO instructions
-+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
-+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
-+ * versions of the single-IO instructions (inb_p/inw_p/..).
-+ *
-+ * This file is not meant to be obfuscating: it's just complicated
-+ * to (a) handle it all in a way that makes gcc able to optimize it
-+ * as well as possible and (b) trying to avoid writing the same thing
-+ * over and over again with slight variations and possibly making a
-+ * mistake somewhere.
-+ */
-+
-+/*
-+ * Thanks to James van Artsdalen for a better timing-fix than
-+ * the two short jumps: using outb's to a nonexistent port seems
-+ * to guarantee better timings even on fast machines.
-+ *
-+ * On the other hand, I'd like to be sure of a non-existent port:
-+ * I feel a bit unsafe about using 0x80 (should be safe, though)
-+ *
-+ * Linus
-+ */
-+
-+ /*
-+ * Bit simplified and optimized by Jan Hubicka
-+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
-+ *
-+ * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
-+ * isa_read[wl] and isa_write[wl] fixed
-+ * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
-+ */
-+
-+#define IO_SPACE_LIMIT 0xffff
-+
-+#define XQUAD_PORTIO_BASE 0xfe400000
-+#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
-+
-+#ifdef __KERNEL__
-+
-+#include <asm-generic/iomap.h>
-+
-+#include <linux/vmalloc.h>
-+#include <asm/fixmap.h>
-+
-+/*
-+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
-+ * access
-+ */
-+#define xlate_dev_mem_ptr(p, sz) ioremap(p, sz)
-+#define xlate_dev_mem_ptr_unmap(p) iounmap(p)
-+
-+/*
-+ * Convert a virtual cached pointer to an uncached pointer
-+ */
-+#define xlate_dev_kmem_ptr(p) p
-+
-+/**
-+ * virt_to_phys - map virtual addresses to physical
-+ * @address: address to remap
-+ *
-+ * The returned physical address is the physical (CPU) mapping for
-+ * the memory address given. It is only valid to use this function on
-+ * addresses directly mapped or allocated via kmalloc.
-+ *
-+ * This function does not give bus mappings for DMA transfers. In
-+ * almost all conceivable cases a device driver should not be using
-+ * this function
-+ */
-+
-+static inline unsigned long virt_to_phys(volatile void * address)
-+{
-+ return __pa(address);
-+}
-+
-+/**
-+ * phys_to_virt - map physical address to virtual
-+ * @address: address to remap
-+ *
-+ * The returned virtual address is a current CPU mapping for
-+ * the memory address given. It is only valid to use this function on
-+ * addresses that have a kernel mapping
-+ *
-+ * This function does not handle bus mappings for DMA transfers. In
-+ * almost all conceivable cases a device driver should not be using
-+ * this function
-+ */
-+
-+static inline void * phys_to_virt(unsigned long address)
-+{
-+ return __va(address);
-+}
-+
-+/*
-+ * Change "struct page" to physical address.
-+ */
-+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-+#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
-+#define page_to_bus(page) (phys_to_machine(page_to_pseudophys(page)))
-+
-+#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
-+ (unsigned long) bio_offset((bio)))
-+#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
-+ (unsigned long) (bv)->bv_offset)
-+
-+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
-+ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
-+ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
-+ bvec_to_pseudophys((vec2))))
-+
-+extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-+
-+/**
-+ * ioremap - map bus memory into CPU space
-+ * @offset: bus address of the memory
-+ * @size: size of the resource to map
-+ *
-+ * ioremap performs a platform specific sequence of operations to
-+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
-+ * writew/writel functions and the other mmio helpers. The returned
-+ * address is not guaranteed to be usable directly as a virtual
-+ * address.
-+ */
-+
-+static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
-+{
-+ return __ioremap(offset, size, 0);
-+}
-+
-+extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
-+extern void iounmap(volatile void __iomem *addr);
-+
-+/*
-+ * bt_ioremap() and bt_iounmap() are for temporary early boot-time
-+ * mappings, before the real ioremap() is functional.
-+ * A boot-time mapping is currently limited to at most 16 pages.
-+ */
-+extern void *bt_ioremap(unsigned long offset, unsigned long size);
-+extern void bt_iounmap(void *addr, unsigned long size);
-+
-+/* Use early IO mappings for DMI because it's initialized early */
-+#define dmi_ioremap bt_ioremap
-+#define dmi_iounmap bt_iounmap
-+#define dmi_alloc alloc_bootmem
-+
-+/*
-+ * ISA I/O bus memory addresses are 1:1 with the physical address.
-+ */
-+#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
-+#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
-+#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
-+
-+/*
-+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
-+ * are forbidden in portable PCI drivers.
-+ *
-+ * Allow them on x86 for legacy drivers, though.
-+ */
-+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
-+#define bus_to_virt(_x) __va(machine_to_phys(_x))
-+
-+/*
-+ * readX/writeX() are used to access memory mapped devices. On some
-+ * architectures the memory mapped IO stuff needs to be accessed
-+ * differently. On the x86 architecture, we just read/write the
-+ * memory location directly.
-+ */
-+
-+static inline unsigned char readb(const volatile void __iomem *addr)
-+{
-+ return *(volatile unsigned char __force *) addr;
-+}
-+static inline unsigned short readw(const volatile void __iomem *addr)
-+{
-+ return *(volatile unsigned short __force *) addr;
-+}
-+static inline unsigned int readl(const volatile void __iomem *addr)
-+{
-+ return *(volatile unsigned int __force *) addr;
-+}
-+#define readb_relaxed(addr) readb(addr)
-+#define readw_relaxed(addr) readw(addr)
-+#define readl_relaxed(addr) readl(addr)
-+#define __raw_readb readb
-+#define __raw_readw readw
-+#define __raw_readl readl
-+
-+static inline void writeb(unsigned char b, volatile void __iomem *addr)
-+{
-+ *(volatile unsigned char __force *) addr = b;
-+}
-+static inline void writew(unsigned short b, volatile void __iomem *addr)
-+{
-+ *(volatile unsigned short __force *) addr = b;
-+}
-+static inline void writel(unsigned int b, volatile void __iomem *addr)
-+{
-+ *(volatile unsigned int __force *) addr = b;
-+}
-+#define __raw_writeb writeb
-+#define __raw_writew writew
-+#define __raw_writel writel
-+
-+#define mmiowb()
-+
-+static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
-+{
-+ memset((void __force *) addr, val, count);
-+}
-+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
-+{
-+ __memcpy(dst, (void __force *) src, count);
-+}
-+static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
-+{
-+ __memcpy((void __force *) dst, src, count);
-+}
-+
-+/*
-+ * ISA space is 'always mapped' on a typical x86 system, no need to
-+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
-+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
-+ * are physical addresses. The following constant pointer can be
-+ * used as the IO-area pointer (it can be iounmapped as well, so the
-+ * analogy with PCI is quite large):
-+ */
-+#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
-+
-+/*
-+ * Cache management
-+ *
-+ * This needed for two cases
-+ * 1. Out of order aware processors
-+ * 2. Accidentally out of order processors (PPro errata #51)
-+ */
-+
-+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
-+
-+static inline void flush_write_buffers(void)
-+{
-+ __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
-+}
-+
-+#define dma_cache_inv(_start,_size) flush_write_buffers()
-+#define dma_cache_wback(_start,_size) flush_write_buffers()
-+#define dma_cache_wback_inv(_start,_size) flush_write_buffers()
-+
-+#else
-+
-+/* Nothing to do */
-+
-+#define dma_cache_inv(_start,_size) do { } while (0)
-+#define dma_cache_wback(_start,_size) do { } while (0)
-+#define dma_cache_wback_inv(_start,_size) do { } while (0)
-+#define flush_write_buffers()
-+
-+#endif
-+
-+#endif /* __KERNEL__ */
-+
-+static inline void xen_io_delay(void)
-+{
-+ asm volatile("outb %%al,$0x80" : : : "memory");
-+}
-+
-+static inline void slow_down_io(void) {
-+ xen_io_delay();
-+#ifdef REALLY_SLOW_IO
-+ xen_io_delay();
-+ xen_io_delay();
-+ xen_io_delay();
-+#endif
-+}
-+
-+#ifdef CONFIG_X86_NUMAQ
-+extern void *xquad_portio; /* Where the IO area was mapped */
-+#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
-+#define __BUILDIO(bwl,bw,type) \
-+static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
-+ if (xquad_portio) \
-+ write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
-+ else \
-+ out##bwl##_local(value, port); \
-+} \
-+static inline void out##bwl(unsigned type value, int port) { \
-+ out##bwl##_quad(value, port, 0); \
-+} \
-+static inline unsigned type in##bwl##_quad(int port, int quad) { \
-+ if (xquad_portio) \
-+ return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
-+ else \
-+ return in##bwl##_local(port); \
-+} \
-+static inline unsigned type in##bwl(int port) { \
-+ return in##bwl##_quad(port, 0); \
-+}
-+#else
-+#define __BUILDIO(bwl,bw,type) \
-+static inline void out##bwl(unsigned type value, int port) { \
-+ out##bwl##_local(value, port); \
-+} \
-+static inline unsigned type in##bwl(int port) { \
-+ return in##bwl##_local(port); \
-+}
-+#endif
-+
-+
-+#define BUILDIO(bwl,bw,type) \
-+static inline void out##bwl##_local(unsigned type value, int port) { \
-+ __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
-+} \
-+static inline unsigned type in##bwl##_local(int port) { \
-+ unsigned type value; \
-+ __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
-+ return value; \
-+} \
-+static inline void out##bwl##_local_p(unsigned type value, int port) { \
-+ out##bwl##_local(value, port); \
-+ slow_down_io(); \
-+} \
-+static inline unsigned type in##bwl##_local_p(int port) { \
-+ unsigned type value = in##bwl##_local(port); \
-+ slow_down_io(); \
-+ return value; \
-+} \
-+__BUILDIO(bwl,bw,type) \
-+static inline void out##bwl##_p(unsigned type value, int port) { \
-+ out##bwl(value, port); \
-+ slow_down_io(); \
-+} \
-+static inline unsigned type in##bwl##_p(int port) { \
-+ unsigned type value = in##bwl(port); \
-+ slow_down_io(); \
-+ return value; \
-+} \
-+static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
-+ __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
-+} \
-+static inline void ins##bwl(int port, void *addr, unsigned long count) { \
-+ __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
-+}
-+
-+BUILDIO(b,b,char)
-+BUILDIO(w,w,short)
-+BUILDIO(l,,int)
-+
-+/* We will be supplying our own /dev/mem implementation */
-+#define ARCH_HAS_DEV_MEM
-+
-+#endif
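The BUILDIO()/__BUILDIO() macros above generate the whole family of port accessors from one template. As a rough illustration (a sketch, not part of the patch), BUILDIO(b,b,char) in the non-NUMAQ case expands to approximately the following byte-sized accessors; the _p (paused) and string (ins/outs) variants are produced the same way:

static inline void outb_local(unsigned char value, int port)
{
	/* "out" #bwl " %" #bw "0, %w1" with bwl=b, bw=b */
	__asm__ __volatile__("outb %b0, %w1" : : "a"(value), "Nd"(port));
}
static inline unsigned char inb_local(int port)
{
	unsigned char value;
	__asm__ __volatile__("inb %w1, %b0" : "=a"(value) : "Nd"(port));
	return value;
}
/* Non-NUMAQ __BUILDIO(): the public outb()/inb() just call the _local forms. */
static inline void outb(unsigned char value, int port)
{
	outb_local(value, port);
}
static inline unsigned char inb(int port)
{
	return inb_local(port);
}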
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/irqflags.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/irqflags.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/irqflags.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/irqflags.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,185 @@
-+/*
-+ * include/asm-i386/irqflags.h
-+ *
-+ * IRQ flags handling
-+ *
-+ * This file gets included from lowlevel asm headers too, to provide
-+ * wrapped versions of the local_irq_*() APIs, based on the
-+ * raw_local_irq_*() functions from the lowlevel headers.
-+ */
-+#ifndef _ASM_IRQFLAGS_H
-+#define _ASM_IRQFLAGS_H
-+
-+#ifndef __ASSEMBLY__
-+#define xen_save_fl(void) (current_vcpu_info()->evtchn_upcall_mask)
-+
-+#define xen_restore_fl(f) \
-+do { \
-+ vcpu_info_t *_vcpu; \
-+ barrier(); \
-+ _vcpu = current_vcpu_info(); \
-+ if ((_vcpu->evtchn_upcall_mask = (f)) == 0) { \
-+ barrier(); /* unmask then check (avoid races) */\
-+ if (unlikely(_vcpu->evtchn_upcall_pending)) \
-+ force_evtchn_callback(); \
-+ } \
-+} while (0)
-+
-+#define xen_irq_disable() \
-+do { \
-+ current_vcpu_info()->evtchn_upcall_mask = 1; \
-+ barrier(); \
-+} while (0)
-+
-+#define xen_irq_enable() \
-+do { \
-+ vcpu_info_t *_vcpu; \
-+ barrier(); \
-+ _vcpu = current_vcpu_info(); \
-+ _vcpu->evtchn_upcall_mask = 0; \
-+ barrier(); /* unmask then check (avoid races) */ \
-+ if (unlikely(_vcpu->evtchn_upcall_pending)) \
-+ force_evtchn_callback(); \
-+} while (0)
-+
-+void xen_safe_halt(void);
-+
-+void xen_halt(void);
-+#endif /* __ASSEMBLY__ */
-+
-+#ifndef __ASSEMBLY__
-+
-+/*
-+ * The use of 'barrier' in the following reflects their use as local-lock
-+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
-+ * critical operations are executed. All critical operations must complete
-+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
-+ * includes these barriers, for example.
-+ */
-+
-+#define __raw_local_save_flags(void) xen_save_fl()
-+
-+#define raw_local_irq_restore(flags) xen_restore_fl(flags)
-+
-+#define raw_local_irq_disable() xen_irq_disable()
-+
-+#define raw_local_irq_enable() xen_irq_enable()
-+
-+/*
-+ * Used in the idle loop; sti takes one instruction cycle
-+ * to complete:
-+ */
-+static inline void raw_safe_halt(void)
-+{
-+ xen_safe_halt();
-+}
-+
-+/*
-+ * Used when interrupts are already enabled or to
-+ * shutdown the processor:
-+ */
-+static inline void halt(void)
-+{
-+ xen_halt();
-+}
-+
-+/*
-+ * For spinlocks, etc:
-+ */
-+#define __raw_local_irq_save() \
-+({ \
-+ unsigned long flags = __raw_local_save_flags(); \
-+ \
-+ raw_local_irq_disable(); \
-+ \
-+ flags; \
-+})
-+
-+#else
-+/* Offsets into shared_info_t. */
-+#define evtchn_upcall_pending /* 0 */
-+#define evtchn_upcall_mask 1
-+
-+#define sizeof_vcpu_shift 6
-+
-+#ifdef CONFIG_SMP
-+#define GET_VCPU_INFO movl TI_cpu(%ebp),%esi ; \
-+ shl $sizeof_vcpu_shift,%esi ; \
-+ addl HYPERVISOR_shared_info,%esi
-+#else
-+#define GET_VCPU_INFO movl HYPERVISOR_shared_info,%esi
-+#endif
-+
-+#define __DISABLE_INTERRUPTS movb $1,evtchn_upcall_mask(%esi)
-+#define __ENABLE_INTERRUPTS movb $0,evtchn_upcall_mask(%esi)
-+#define __TEST_PENDING testb $0xFF,evtchn_upcall_pending(%esi)
-+#define DISABLE_INTERRUPTS(clb) GET_VCPU_INFO ; \
-+ __DISABLE_INTERRUPTS
-+#define ENABLE_INTERRUPTS(clb) GET_VCPU_INFO ; \
-+ __ENABLE_INTERRUPTS
-+#define ENABLE_INTERRUPTS_SYSEXIT __ENABLE_INTERRUPTS ; \
-+sysexit_scrit: /**** START OF SYSEXIT CRITICAL REGION ****/ ; \
-+ __TEST_PENDING ; \
-+ jnz 14f /* process more events if necessary... */ ; \
-+ movl PT_ESI(%esp), %esi ; \
-+ sysexit ; \
-+14: __DISABLE_INTERRUPTS ; \
-+ TRACE_IRQS_OFF ; \
-+sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/ ; \
-+ push %esp ; \
-+ call evtchn_do_upcall ; \
-+ add $4,%esp ; \
-+ jmp ret_from_intr
-+#define INTERRUPT_RETURN iret
-+#endif /* __ASSEMBLY__ */
-+
-+#ifndef __ASSEMBLY__
-+#define raw_local_save_flags(flags) \
-+ do { (flags) = __raw_local_save_flags(); } while (0)
-+
-+#define raw_local_irq_save(flags) \
-+ do { (flags) = __raw_local_irq_save(); } while (0)
-+
-+static inline int raw_irqs_disabled_flags(unsigned long flags)
-+{
-+ return (flags != 0);
-+}
-+
-+#define raw_irqs_disabled() \
-+({ \
-+ unsigned long flags = __raw_local_save_flags(); \
-+ \
-+ raw_irqs_disabled_flags(flags); \
-+})
-+#endif /* __ASSEMBLY__ */
-+
-+/*
-+ * Do the CPU's IRQ-state tracing from assembly code. We call a
-+ * C function, so save all the C-clobbered registers:
-+ */
-+#ifdef CONFIG_TRACE_IRQFLAGS
-+
-+# define TRACE_IRQS_ON \
-+ pushl %eax; \
-+ pushl %ecx; \
-+ pushl %edx; \
-+ call trace_hardirqs_on; \
-+ popl %edx; \
-+ popl %ecx; \
-+ popl %eax;
-+
-+# define TRACE_IRQS_OFF \
-+ pushl %eax; \
-+ pushl %ecx; \
-+ pushl %edx; \
-+ call trace_hardirqs_off; \
-+ popl %edx; \
-+ popl %ecx; \
-+ popl %eax;
-+
-+#else
-+# define TRACE_IRQS_ON
-+# define TRACE_IRQS_OFF
-+#endif
-+
-+#endif
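The header above replaces the x86 IF flag with the per-VCPU evtchn_upcall_mask. As a minimal usage sketch (not part of the patch; the wrapper function name is hypothetical), a critical section protected by these macros looks like this:

static inline void example_critical_section(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* save evtchn_upcall_mask, then set it to 1 */
	/* ... work that must not be interrupted by event-channel upcalls ... */
	raw_local_irq_restore(flags);	/* restore the mask; if it drops to 0 and an
					 * upcall is pending, force_evtchn_callback()
					 * delivers it immediately */
}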
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/maddr.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/maddr.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/maddr.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/maddr.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,193 @@
-+#ifndef _I386_MADDR_H
-+#define _I386_MADDR_H
-+
-+#include <xen/features.h>
-+#include <xen/interface/xen.h>
-+
-+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-+#define INVALID_P2M_ENTRY (~0UL)
-+#define FOREIGN_FRAME_BIT (1UL<<31)
-+#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
-+
-+/* Definitions for machine and pseudophysical addresses. */
-+#ifdef CONFIG_X86_PAE
-+typedef unsigned long long paddr_t;
-+typedef unsigned long long maddr_t;
-+#else
-+typedef unsigned long paddr_t;
-+typedef unsigned long maddr_t;
-+#endif
-+
-+#ifdef CONFIG_XEN
-+
-+extern unsigned long *phys_to_machine_mapping;
-+extern unsigned long max_mapnr;
-+
-+#undef machine_to_phys_mapping
-+extern unsigned long *machine_to_phys_mapping;
-+extern unsigned int machine_to_phys_order;
-+
-+static inline unsigned long pfn_to_mfn(unsigned long pfn)
-+{
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return pfn;
-+ BUG_ON(max_mapnr && pfn >= max_mapnr);
-+ return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT;
-+}
-+
-+static inline int phys_to_machine_mapping_valid(unsigned long pfn)
-+{
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return 1;
-+ BUG_ON(max_mapnr && pfn >= max_mapnr);
-+ return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
-+}
-+
-+static inline unsigned long mfn_to_pfn(unsigned long mfn)
-+{
-+ unsigned long pfn;
-+
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return mfn;
-+
-+ if (unlikely((mfn >> machine_to_phys_order) != 0))
-+ return max_mapnr;
-+
-+ /* The array access can fail (e.g., device space beyond end of RAM). */
-+ asm (
-+ "1: movl %1,%0\n"
-+ "2:\n"
-+ ".section .fixup,\"ax\"\n"
-+ "3: movl %2,%0\n"
-+ " jmp 2b\n"
-+ ".previous\n"
-+ ".section __ex_table,\"a\"\n"
-+ " .align 4\n"
-+ " .long 1b,3b\n"
-+ ".previous"
-+ : "=r" (pfn)
-+ : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) );
-+
-+ return pfn;
-+}
-+
-+/*
-+ * We detect special mappings in one of two ways:
-+ * 1. If the MFN is an I/O page then Xen will set the m2p entry
-+ * to be outside our maximum possible pseudophys range.
-+ * 2. If the MFN belongs to a different domain then we will certainly
-+ * not have MFN in our p2m table. Conversely, if the page is ours,
-+ * then we'll have p2m(m2p(MFN))==MFN.
-+ * If we detect a special mapping then it doesn't have a 'struct page'.
-+ * We force !pfn_valid() by returning an out-of-range pointer.
-+ *
-+ * NB. These checks require that, for any MFN that is not in our reservation,
-+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
-+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
-+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
-+ *
-+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
-+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
-+ * require. In all the cases we care about, the FOREIGN_FRAME bit is
-+ * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
-+ */
-+static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
-+{
-+ unsigned long pfn = mfn_to_pfn(mfn);
-+ if ((pfn < max_mapnr)
-+ && !xen_feature(XENFEAT_auto_translated_physmap)
-+ && (phys_to_machine_mapping[pfn] != mfn))
-+ return max_mapnr; /* force !pfn_valid() */
-+ return pfn;
-+}
-+
-+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-+{
-+ BUG_ON(max_mapnr && pfn >= max_mapnr);
-+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
-+ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-+ return;
-+ }
-+ phys_to_machine_mapping[pfn] = mfn;
-+}
-+
-+static inline maddr_t phys_to_machine(paddr_t phys)
-+{
-+ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
-+ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
-+ return machine;
-+}
-+
-+static inline paddr_t machine_to_phys(maddr_t machine)
-+{
-+ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
-+ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
-+ return phys;
-+}
-+
-+#ifdef CONFIG_X86_PAE
-+static inline paddr_t pte_phys_to_machine(paddr_t phys)
-+{
-+ /*
-+ * In PAE mode, the NX bit needs to be dealt with in the value
-+ * passed to pfn_to_mfn(). On x86_64, we need to mask it off,
-+ * but for i386 the conversion to ulong for the argument will
-+ * clip it off.
-+ */
-+ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
-+ machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK);
-+ return machine;
-+}
-+
-+static inline paddr_t pte_machine_to_phys(maddr_t machine)
-+{
-+ /*
-+ * In PAE mode, the NX bit needs to be dealt with in the value
-+ * passed to mfn_to_pfn(). On x86_64, we need to mask it off,
-+ * but for i386 the conversion to ulong for the argument will
-+ * clip it off.
-+ */
-+ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
-+ phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK);
-+ return phys;
-+}
-+#endif
-+
-+#ifdef CONFIG_X86_PAE
-+static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot)
-+{
-+ pte_t pte;
-+
-+ pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
-+ (pgprot_val(pgprot) >> 32);
-+ pte.pte_high &= (__supported_pte_mask >> 32);
-+ pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
-+ __supported_pte_mask;
-+ return pte;
-+}
-+#else
-+#define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-+#endif
-+
-+#define __pte_ma(x) ((pte_t) { (x) } )
-+
-+#else /* !CONFIG_XEN */
-+
-+#define pfn_to_mfn(pfn) (pfn)
-+#define mfn_to_pfn(mfn) (mfn)
-+#define mfn_to_local_pfn(mfn) (mfn)
-+#define set_phys_to_machine(pfn, mfn) ((void)0)
-+#define phys_to_machine_mapping_valid(pfn) (1)
-+#define phys_to_machine(phys) ((maddr_t)(phys))
-+#define machine_to_phys(mach) ((paddr_t)(mach))
-+#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot)
-+#define __pte_ma(x) __pte(x)
-+
-+#endif /* !CONFIG_XEN */
-+
-+/* VIRT <-> MACHINE conversion */
-+#define virt_to_machine(v) (phys_to_machine(__pa(v)))
-+#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
-+#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
-+
-+#endif /* _I386_MADDR_H */
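The conversions above compose into the virt_to_machine() macro: __pa() yields the pseudo-physical address, pfn_to_mfn() looks up the machine frame in the p2m table, and the in-page offset is carried over unchanged. A minimal sketch of that composition (not part of the patch; example_virt_to_machine is a hypothetical name):

static inline maddr_t example_virt_to_machine(const void *vaddr)
{
	paddr_t phys = __pa(vaddr);				/* pseudo-physical address */
	unsigned long mfn = pfn_to_mfn(phys >> PAGE_SHIFT);	/* machine frame number */

	/* Equivalent to phys_to_machine(__pa(vaddr)), i.e. virt_to_machine(vaddr). */
	return ((maddr_t)mfn << PAGE_SHIFT) | (phys & ~PAGE_MASK);
}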
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/mmu_context.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/mmu_context.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/mmu_context.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/mmu_context.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,116 @@
-+#ifndef __I386_SCHED_H
-+#define __I386_SCHED_H
-+
-+#include <asm/desc.h>
-+#include <asm/atomic.h>
-+#include <asm/pgalloc.h>
-+#include <asm/tlbflush.h>
-+
-+void arch_exit_mmap(struct mm_struct *mm);
-+void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
-+
-+void mm_pin(struct mm_struct *mm);
-+void mm_unpin(struct mm_struct *mm);
-+void mm_pin_all(void);
-+
-+static inline void xen_activate_mm(struct mm_struct *prev,
-+ struct mm_struct *next)
-+{
-+ if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
-+ mm_pin(next);
-+}
-+
-+/*
-+ * Used for LDT copy/destruction.
-+ */
-+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-+void destroy_context(struct mm_struct *mm);
-+
-+
-+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-+{
-+#if 0 /* XEN: no lazy tlb */
-+ unsigned cpu = smp_processor_id();
-+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-+ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
-+#endif
-+}
-+
-+#define prepare_arch_switch(next) __prepare_arch_switch()
-+
-+static inline void __prepare_arch_switch(void)
-+{
-+ /*
-+ * Save away %gs. No need to save %fs, as it was saved on the
-+ * stack on entry. No need to save %es and %ds, as those are
-+ * always kernel segments while inside the kernel.
-+ */
-+ asm volatile ( "mov %%gs,%0"
-+ : "=m" (current->thread.gs));
-+ asm volatile ( "movl %0,%%gs"
-+ : : "r" (0) );
-+}
-+
-+static inline void switch_mm(struct mm_struct *prev,
-+ struct mm_struct *next,
-+ struct task_struct *tsk)
-+{
-+ int cpu = smp_processor_id();
-+ struct mmuext_op _op[2], *op = _op;
-+
-+ if (likely(prev != next)) {
-+ BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
-+ !test_bit(PG_pinned, &virt_to_page(next->pgd)->flags));
-+
-+ /* stop flush ipis for the previous mm */
-+ cpu_clear(cpu, prev->cpu_vm_mask);
-+#if 0 /* XEN: no lazy tlb */
-+ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-+ per_cpu(cpu_tlbstate, cpu).active_mm = next;
-+#endif
-+ cpu_set(cpu, next->cpu_vm_mask);
-+
-+ /* Re-load page tables: load_cr3(next->pgd) */
-+ op->cmd = MMUEXT_NEW_BASEPTR;
-+ op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
-+ op++;
-+
-+ /*
-+ * load the LDT, if the LDT is different:
-+ */
-+ if (unlikely(prev->context.ldt != next->context.ldt)) {
-+ /* load_LDT_nolock(&next->context, cpu) */
-+ op->cmd = MMUEXT_SET_LDT;
-+ op->arg1.linear_addr = (unsigned long)next->context.ldt;
-+ op->arg2.nr_ents = next->context.size;
-+ op++;
-+ }
-+
-+ BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
-+ }
-+#if 0 /* XEN: no lazy tlb */
-+ else {
-+ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-+ BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
-+
-+ if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-+ /* We were in lazy tlb mode and leave_mm disabled
-+ * tlb flush IPI delivery. We must reload %cr3.
-+ */
-+ load_cr3(next->pgd);
-+ load_LDT_nolock(&next->context);
-+ }
-+ }
-+#endif
-+}
-+
-+#define deactivate_mm(tsk, mm) \
-+ asm("movl %0,%%gs": :"r" (0));
-+
-+#define activate_mm(prev, next) \
-+ do { \
-+ xen_activate_mm(prev, next); \
-+ switch_mm((prev),(next),NULL); \
-+ } while(0)
-+
-+#endif
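switch_mm() above batches up to two MMUEXT operations (new base pointer plus an optional LDT reload) into a single HYPERVISOR_mmuext_op() hypercall. For comparison, an unbatched and slightly simplified sketch (not part of the patch; it reloads the LDT whenever one exists instead of comparing against the previous mm) would issue one hypercall per operation:

static inline void example_switch_mm_unbatched(struct mm_struct *next)
{
	struct mmuext_op op;

	op.cmd = MMUEXT_NEW_BASEPTR;		/* equivalent of load_cr3(next->pgd) */
	op.arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF));

	if (next->context.ldt) {		/* equivalent of load_LDT_nolock() */
		op.cmd = MMUEXT_SET_LDT;
		op.arg1.linear_addr = (unsigned long)next->context.ldt;
		op.arg2.nr_ents = next->context.size;
		BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF));
	}
}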
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/mmu.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/mmu.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/mmu.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/mmu.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,21 @@
-+#ifndef __i386_MMU_H
-+#define __i386_MMU_H
-+
-+#include <asm/semaphore.h>
-+/*
-+ * The i386 doesn't have a mmu context, but
-+ * we put the segment information here.
-+ *
-+ * cpu_vm_mask is used to optimize ldt flushing.
-+ */
-+typedef struct {
-+ int size;
-+ struct semaphore sem;
-+ void *ldt;
-+ void *vdso;
-+#ifdef CONFIG_XEN
-+ int has_foreign_mappings;
-+#endif
-+} mm_context_t;
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/page.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/page.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/page.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/page.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,281 @@
-+#ifndef _I386_PAGE_H
-+#define _I386_PAGE_H
-+
-+/* PAGE_SHIFT determines the page size */
-+#define PAGE_SHIFT 12
-+#define PAGE_SIZE (1UL << PAGE_SHIFT)
-+#define PAGE_MASK (~(PAGE_SIZE-1))
-+
-+#ifdef CONFIG_X86_PAE
-+#define __PHYSICAL_MASK_SHIFT 40
-+#define __PHYSICAL_MASK ((1ULL << __PHYSICAL_MASK_SHIFT) - 1)
-+#define PHYSICAL_PAGE_MASK (~((1ULL << PAGE_SHIFT) - 1) & __PHYSICAL_MASK)
-+#else
-+#define __PHYSICAL_MASK_SHIFT 32
-+#define __PHYSICAL_MASK (~0UL)
-+#define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)
-+#endif
-+
-+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
-+#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
-+
-+#ifdef __KERNEL__
-+
-+/*
-+ * Need to repeat this here in order to not include pgtable.h (which in turn
-+ * depends on definitions made here), but to be able to use the symbolic
-+ * below. The preprocessor will warn if the two definitions aren't identical.
-+ */
-+#define _PAGE_PRESENT 0x001
-+
-+#ifndef __ASSEMBLY__
-+
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <asm/bug.h>
-+#include <xen/interface/xen.h>
-+#include <xen/features.h>
-+
-+#ifdef CONFIG_X86_USE_3DNOW
-+
-+#include <asm/mmx.h>
-+
-+#define clear_page(page) mmx_clear_page((void *)(page))
-+#define copy_page(to,from) mmx_copy_page(to,from)
-+
-+#else
-+
-+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
-+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-+
-+/*
-+ * On older X86 processors it's not a win to use MMX here it seems.
-+ * Maybe the K6-III ?
-+ */
-+
-+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
-+#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
-+
-+#endif
-+
-+#define clear_user_page(page, vaddr, pg) clear_page(page)
-+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-+
-+/*
-+ * These are used to make use of C type-checking..
-+ */
-+extern int nx_enabled;
-+
-+#ifdef CONFIG_X86_PAE
-+extern unsigned long long __supported_pte_mask;
-+typedef struct { unsigned long pte_low, pte_high; } pte_t;
-+typedef struct { unsigned long long pmd; } pmd_t;
-+typedef struct { unsigned long long pgd; } pgd_t;
-+typedef struct { unsigned long long pgprot; } pgprot_t;
-+#define pgprot_val(x) ((x).pgprot)
-+#include <asm/maddr.h>
-+
-+#define __pgd_val(x) ((x).pgd)
-+static inline unsigned long long xen_pgd_val(pgd_t pgd)
-+{
-+ unsigned long long ret = __pgd_val(pgd);
-+ if (ret & _PAGE_PRESENT)
-+ ret = pte_machine_to_phys(ret);
-+ return ret;
-+}
-+
-+#define __pud_val(x) __pgd_val((x).pgd)
-+
-+#define __pmd_val(x) ((x).pmd)
-+static inline unsigned long long xen_pmd_val(pmd_t pmd)
-+{
-+ unsigned long long ret = __pmd_val(pmd);
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (ret)
-+ ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
-+#else
-+ if (ret & _PAGE_PRESENT)
-+ ret = pte_machine_to_phys(ret);
-+#endif
-+ return ret;
-+}
-+
-+static inline unsigned long long __pte_val(pte_t pte)
-+{
-+ return ((unsigned long long)pte.pte_high << 32) | pte.pte_low;
-+}
-+static inline unsigned long long xen_pte_val(pte_t pte)
-+{
-+ unsigned long long ret = __pte_val(pte);
-+ if (pte.pte_low & _PAGE_PRESENT)
-+ ret = pte_machine_to_phys(ret);
-+ return ret;
-+}
-+
-+static inline pgd_t xen_make_pgd(unsigned long long val)
-+{
-+ if (val & _PAGE_PRESENT)
-+ val = pte_phys_to_machine(val);
-+ return (pgd_t) { val };
-+}
-+
-+static inline pmd_t xen_make_pmd(unsigned long long val)
-+{
-+ if (val & _PAGE_PRESENT)
-+ val = pte_phys_to_machine(val);
-+ return (pmd_t) { val };
-+}
-+
-+static inline pte_t xen_make_pte(unsigned long long val)
-+{
-+ if (val & _PAGE_PRESENT)
-+ val = pte_phys_to_machine(val);
-+ return (pte_t) { .pte_low = val, .pte_high = (val >> 32) } ;
-+}
-+
-+#define pmd_val(x) xen_pmd_val(x)
-+#define __pmd(x) xen_make_pmd(x)
-+
-+#define HPAGE_SHIFT 21
-+#include <asm-generic/pgtable-nopud.h>
-+#else /* !CONFIG_X86_PAE */
-+typedef struct { unsigned long pte_low; } pte_t;
-+typedef struct { unsigned long pgd; } pgd_t;
-+typedef struct { unsigned long pgprot; } pgprot_t;
-+#define pgprot_val(x) ((x).pgprot)
-+#define boot_pte_t pte_t /* or would you rather have a typedef */
-+#include <asm/maddr.h>
-+
-+#define __pgd_val(x) ((x).pgd)
-+static inline unsigned long xen_pgd_val(pgd_t pgd)
-+{
-+ unsigned long ret = __pgd_val(pgd);
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (ret)
-+ ret = machine_to_phys(ret) | _PAGE_PRESENT;
-+#else
-+ if (ret & _PAGE_PRESENT)
-+ ret = machine_to_phys(ret);
-+#endif
-+ return ret;
-+}
-+
-+#define __pud_val(x) __pgd_val((x).pgd)
-+#define __pmd_val(x) __pud_val((x).pud)
-+
-+static inline unsigned long __pte_val(pte_t pte)
-+{
-+ return pte.pte_low;
-+}
-+static inline unsigned long xen_pte_val(pte_t pte)
-+{
-+ unsigned long ret = __pte_val(pte);
-+ if (ret & _PAGE_PRESENT)
-+ ret = machine_to_phys(ret);
-+ return ret;
-+}
-+
-+static inline pgd_t xen_make_pgd(unsigned long val)
-+{
-+ if (val & _PAGE_PRESENT)
-+ val = phys_to_machine(val);
-+ return (pgd_t) { val };
-+}
-+
-+static inline pte_t xen_make_pte(unsigned long val)
-+{
-+ if (val & _PAGE_PRESENT)
-+ val = phys_to_machine(val);
-+ return (pte_t) { .pte_low = val };
-+}
-+
-+#define HPAGE_SHIFT 22
-+#include <asm-generic/pgtable-nopmd.h>
-+#endif /* CONFIG_X86_PAE */
-+
-+#define PTE_MASK PHYSICAL_PAGE_MASK
-+
-+#ifdef CONFIG_HUGETLB_PAGE
-+#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
-+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
-+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-+#endif
-+
-+#define __pgprot(x) ((pgprot_t) { (x) } )
-+
-+#define pgd_val(x) xen_pgd_val(x)
-+#define __pgd(x) xen_make_pgd(x)
-+#define pte_val(x) xen_pte_val(x)
-+#define __pte(x) xen_make_pte(x)
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+/* to align the pointer to the (next) page boundary */
-+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-+
-+/*
-+ * This handles the memory map.. We could make this a config
-+ * option, but too many people screw it up, and too few need
-+ * it.
-+ *
-+ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
-+ * a virtual address space of one gigabyte, which limits the
-+ * amount of physical memory you can use to about 950MB.
-+ *
-+ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
-+ * and CONFIG_HIGHMEM64G options in the kernel configuration.
-+ */
-+
-+#ifndef __ASSEMBLY__
-+
-+struct vm_area_struct;
-+
-+/*
-+ * This much address space is reserved for vmalloc() and iomap()
-+ * as well as fixmap mappings.
-+ */
-+extern unsigned int __VMALLOC_RESERVE;
-+
-+extern int sysctl_legacy_va_layout;
-+
-+extern int page_is_ram(unsigned long pagenr);
-+
-+#endif /* __ASSEMBLY__ */
-+
-+#ifdef __ASSEMBLY__
-+#define __PAGE_OFFSET CONFIG_PAGE_OFFSET
-+#else
-+#define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET)
-+#endif
-+
-+
-+#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
-+#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
-+#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
-+#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
-+/* __pa_symbol should be used for C visible symbols.
-+ This seems to be the official gcc blessed way to do such arithmetic. */
-+#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0))
-+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-+#ifdef CONFIG_FLATMEM
-+#define pfn_valid(pfn) ((pfn) < max_mapnr)
-+#endif /* CONFIG_FLATMEM */
-+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-+
-+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-+
-+#define VM_DATA_DEFAULT_FLAGS \
-+ (VM_READ | VM_WRITE | \
-+ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
-+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-+
-+#include <asm-generic/memory_model.h>
-+#include <asm-generic/page.h>
-+
-+#define __HAVE_ARCH_GATE_AREA 1
-+#endif /* __KERNEL__ */
-+
-+#endif /* _I386_PAGE_H */
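For present entries the __pte()/pte_val() pair above translates between pseudo-physical and machine frame numbers in both directions, so for page frames in this domain's own reservation the conversion round-trips. A minimal sketch of that property (not part of the patch; the function name is hypothetical, and the check does not hold for foreign or I/O frames):

static inline int example_pte_value_roundtrips(unsigned long val)
{
	pte_t pte = __pte(val);		/* present values are stored with the machine frame */
	return pte_val(pte) == val;	/* and read back as pseudo-physical again */
}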
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/param.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/param.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/param.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/param.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,22 @@
-+#ifndef _ASMi386_PARAM_H
-+#define _ASMi386_PARAM_H
-+
-+#ifdef __KERNEL__
-+# define HZ CONFIG_HZ /* Internal kernel timer frequency */
-+# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
-+# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
-+#endif
-+
-+#ifndef HZ
-+#define HZ 100
-+#endif
-+
-+#define EXEC_PAGESIZE 4096
-+
-+#ifndef NOGROUP
-+#define NOGROUP (-1)
-+#endif
-+
-+#define MAXHOSTNAMELEN 64 /* max length of hostname */
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/pci.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/pci.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/pci.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/pci.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,146 @@
-+#ifndef __i386_PCI_H
-+#define __i386_PCI_H
-+
-+
-+#ifdef __KERNEL__
-+#include <linux/mm.h> /* for struct page */
-+
-+/* Can be used to override the logic in pci_scan_bus for skipping
-+ already-configured bus numbers - to be used for buggy BIOSes
-+ or architectures with incomplete PCI setup by the loader */
-+
-+#ifdef CONFIG_PCI
-+extern unsigned int pcibios_assign_all_busses(void);
-+#else
-+#define pcibios_assign_all_busses() 0
-+#endif
-+#define pcibios_scan_all_fns(a, b) 0
-+
-+extern unsigned long pci_mem_start;
-+#define PCIBIOS_MIN_IO 0x1000
-+#define PCIBIOS_MIN_MEM (pci_mem_start)
-+
-+#define PCIBIOS_MIN_CARDBUS_IO 0x4000
-+
-+void pcibios_config_init(void);
-+struct pci_bus * pcibios_scan_root(int bus);
-+
-+void pcibios_set_master(struct pci_dev *dev);
-+void pcibios_penalize_isa_irq(int irq, int active);
-+struct irq_routing_table *pcibios_get_irq_routing_table(void);
-+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
-+
-+/* Dynamic DMA mapping stuff.
-+ * i386 has everything mapped statically.
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/slab.h>
-+#include <asm/scatterlist.h>
-+#include <linux/string.h>
-+#include <asm/io.h>
-+
-+struct pci_dev;
-+
-+#ifdef CONFIG_SWIOTLB
-+
-+
-+/* On Xen we use SWIOTLB instead of blk-specific bounce buffers. */
-+#define PCI_DMA_BUS_IS_PHYS (0)
-+
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
-+ dma_addr_t ADDR_NAME;
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
-+ __u32 LEN_NAME;
-+#define pci_unmap_addr(PTR, ADDR_NAME) \
-+ ((PTR)->ADDR_NAME)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
-+ (((PTR)->ADDR_NAME) = (VAL))
-+#define pci_unmap_len(PTR, LEN_NAME) \
-+ ((PTR)->LEN_NAME)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
-+ (((PTR)->LEN_NAME) = (VAL))
-+
-+#else
-+
-+/* The PCI address space does equal the physical memory
-+ * address space. The networking and block device layers use
-+ * this boolean for bounce buffer decisions.
-+ */
-+#define PCI_DMA_BUS_IS_PHYS (1)
-+
-+/* pci_unmap_{page,single} is a nop so... */
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-+#define pci_unmap_addr(PTR, ADDR_NAME) (0)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
-+#define pci_unmap_len(PTR, LEN_NAME) (0)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
-+
-+#endif
-+
-+/* This is always fine. */
-+#define pci_dac_dma_supported(pci_dev, mask) (1)
-+
-+static inline dma64_addr_t
-+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
-+{
-+ return ((dma64_addr_t) page_to_phys(page) +
-+ (dma64_addr_t) offset);
-+}
-+
-+static inline struct page *
-+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
-+{
-+ return pfn_to_page(dma_addr >> PAGE_SHIFT);
-+}
-+
-+static inline unsigned long
-+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
-+{
-+ return (dma_addr & ~PAGE_MASK);
-+}
-+
-+static inline void
-+pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-+{
-+}
-+
-+static inline void
-+pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-+{
-+ flush_write_buffers();
-+}
-+
-+#define HAVE_PCI_MMAP
-+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-+ enum pci_mmap_state mmap_state, int write_combine);
-+
-+
-+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
-+{
-+}
-+
-+#ifdef CONFIG_PCI
-+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-+ enum pci_dma_burst_strategy *strat,
-+ unsigned long *strategy_parameter)
-+{
-+ *strat = PCI_DMA_BURST_INFINITY;
-+ *strategy_parameter = ~0UL;
-+}
-+#endif
-+
-+#endif /* __KERNEL__ */
-+
-+#ifdef CONFIG_XEN_PCIDEV_FRONTEND
-+#include <xen/pcifront.h>
-+#endif /* CONFIG_XEN_PCIDEV_FRONTEND */
-+
-+/* implement the pci_ DMA API in terms of the generic device dma_ one */
-+#include <asm-generic/pci-dma-compat.h>
-+
-+/* generic pci stuff */
-+#include <asm-generic/pci.h>
-+
-+#endif /* __i386_PCI_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/pgalloc.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/pgalloc.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/pgalloc.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/pgalloc.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,71 @@
-+#ifndef _I386_PGALLOC_H
-+#define _I386_PGALLOC_H
-+
-+#include <linux/threads.h>
-+#include <linux/mm.h> /* for struct page */
-+#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
-+
-+#define paravirt_alloc_pt(pfn) do { } while (0)
-+#define paravirt_alloc_pd(pfn) do { } while (0)
-+#define paravirt_alloc_pd(pfn) do { } while (0)
-+#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
-+#define paravirt_release_pt(pfn) do { } while (0)
-+#define paravirt_release_pd(pfn) do { } while (0)
-+
-+#define pmd_populate_kernel(mm, pmd, pte) \
-+do { \
-+ paravirt_alloc_pt(__pa(pte) >> PAGE_SHIFT); \
-+ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \
-+} while (0)
-+
-+#define pmd_populate(mm, pmd, pte) \
-+do { \
-+ unsigned long pfn = page_to_pfn(pte); \
-+ paravirt_alloc_pt(pfn); \
-+ if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) { \
-+ if (!PageHighMem(pte)) \
-+ BUG_ON(HYPERVISOR_update_va_mapping( \
-+ (unsigned long)__va(pfn << PAGE_SHIFT), \
-+ pfn_pte(pfn, PAGE_KERNEL_RO), 0)); \
-+ else if (!test_and_set_bit(PG_pinned, &pte->flags)) \
-+ kmap_flush_unused(); \
-+ set_pmd(pmd, \
-+ __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT))); \
-+ } else \
-+ *(pmd) = __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT)); \
-+} while (0)
-+
-+/*
-+ * Allocate and free page tables.
-+ */
-+extern pgd_t *pgd_alloc(struct mm_struct *);
-+extern void pgd_free(pgd_t *pgd);
-+
-+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-+extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
-+
-+static inline void pte_free_kernel(pte_t *pte)
-+{
-+ free_page((unsigned long)pte);
-+ make_lowmem_page_writable(pte, XENFEAT_writable_page_tables);
-+}
-+
-+extern void pte_free(struct page *pte);
-+
-+#define __pte_free_tlb(tlb,pte) \
-+do { \
-+ paravirt_release_pt(page_to_pfn(pte)); \
-+ tlb_remove_page((tlb),(pte)); \
-+} while (0)
-+
-+#ifdef CONFIG_X86_PAE
-+/*
-+ * In the PAE case we free the pmds as part of the pgd.
-+ */
-+#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
-+#define pmd_free(x) do { } while (0)
-+#define __pmd_free_tlb(tlb,x) do { } while (0)
-+#define pud_populate(mm, pmd, pte) BUG()
-+#endif
-+
-+#endif /* _I386_PGALLOC_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/pgtable-2level.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/pgtable-2level.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/pgtable-2level.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/pgtable-2level.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,117 @@
-+#ifndef _I386_PGTABLE_2LEVEL_H
-+#define _I386_PGTABLE_2LEVEL_H
-+
-+#define pte_ERROR(e) \
-+ printk("%s:%d: bad pte %08lx (pfn %05lx).\n", __FILE__, __LINE__, \
-+ __pte_val(e), pte_pfn(e))
-+#define pgd_ERROR(e) \
-+ printk("%s:%d: bad pgd %08lx (pfn %05lx).\n", __FILE__, __LINE__, \
-+ __pgd_val(e), pgd_val(e) >> PAGE_SHIFT)
-+
-+/*
-+ * Certain architectures need to do special things when PTEs
-+ * within a page table are directly modified. Thus, the following
-+ * hook is made available.
-+ */
-+static inline void xen_set_pte(pte_t *ptep , pte_t pte)
-+{
-+ *ptep = pte;
-+}
-+static inline void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
-+ pte_t *ptep , pte_t pte)
-+{
-+ if ((mm != current->mm && mm != &init_mm) ||
-+ HYPERVISOR_update_va_mapping(addr, pte, 0))
-+ xen_set_pte(ptep, pte);
-+}
-+static inline void xen_set_pmd(pmd_t *pmdp, pmd_t pmd)
-+{
-+ xen_l2_entry_update(pmdp, pmd);
-+}
-+#define set_pte(pteptr, pteval) xen_set_pte(pteptr, pteval)
-+#define set_pte_at(mm,addr,ptep,pteval) xen_set_pte_at(mm, addr, ptep, pteval)
-+#define set_pmd(pmdptr, pmdval) xen_set_pmd(pmdptr, pmdval)
-+
-+#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
-+
-+#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
-+#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
-+
-+static inline void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
-+{
-+ xen_set_pte_at(mm, addr, xp, __pte(0));
-+}
-+
-+#ifdef CONFIG_SMP
-+static inline pte_t xen_ptep_get_and_clear(pte_t *xp, pte_t res)
-+{
-+ return __pte_ma(xchg(&xp->pte_low, 0));
-+}
-+#else
-+#define xen_ptep_get_and_clear(xp, res) xen_local_ptep_get_and_clear(xp, res)
-+#endif
-+
-+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
-+#define ptep_clear_flush(vma, addr, ptep) \
-+({ \
-+ pte_t *__ptep = (ptep); \
-+ pte_t __res = *__ptep; \
-+ if (!pte_none(__res) && \
-+ ((vma)->vm_mm != current->mm || \
-+ HYPERVISOR_update_va_mapping(addr, __pte(0), \
-+ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
-+ UVMF_INVLPG|UVMF_MULTI))) { \
-+ __ptep->pte_low = 0; \
-+ flush_tlb_page(vma, addr); \
-+ } \
-+ __res; \
-+})
-+
-+#define __pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
-+#define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
-+ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
-+#define pte_pfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
-+ mfn_to_local_pfn(__pte_mfn(_pte)) : __pte_mfn(_pte))
-+
-+#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
-+#define pte_none(x) (!(x).pte_low)
-+
-+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-+#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-+
-+/*
-+ * All present user pages are user-executable:
-+ */
-+static inline int pte_exec(pte_t pte)
-+{
-+ return pte_user(pte);
-+}
-+
-+/*
-+ * All present pages are kernel-executable:
-+ */
-+static inline int pte_exec_kernel(pte_t pte)
-+{
-+ return 1;
-+}
-+
-+/*
-+ * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
-+ * into this range:
-+ */
-+#define PTE_FILE_MAX_BITS 29
-+
-+#define pte_to_pgoff(pte) \
-+ ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
-+
-+#define pgoff_to_pte(off) \
-+ ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
-+
-+/* Encode and de-code a swap entry */
-+#define __swp_type(x) (((x).val >> 1) & 0x1f)
-+#define __swp_offset(x) ((x).val >> 8)
-+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-+#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
-+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-+
-+#endif /* _I386_PGTABLE_2LEVEL_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,24 @@
-+#ifndef _I386_PGTABLE_3LEVEL_DEFS_H
-+#define _I386_PGTABLE_3LEVEL_DEFS_H
-+
-+#define SHARED_KERNEL_PMD 0
-+
-+/*
-+ * PGDIR_SHIFT determines what a top-level page table entry can map
-+ */
-+#define PGDIR_SHIFT 30
-+#define PTRS_PER_PGD 4
-+
-+/*
-+ * PMD_SHIFT determines the size of the area a middle-level
-+ * page table can map
-+ */
-+#define PMD_SHIFT 21
-+#define PTRS_PER_PMD 512
-+
-+/*
-+ * entries per page directory level
-+ */
-+#define PTRS_PER_PTE 512
-+
-+#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/pgtable-3level.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/pgtable-3level.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/pgtable-3level.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/pgtable-3level.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,215 @@
-+#ifndef _I386_PGTABLE_3LEVEL_H
-+#define _I386_PGTABLE_3LEVEL_H
-+
-+/*
-+ * Intel Physical Address Extension (PAE) Mode - three-level page
-+ * tables on PPro+ CPUs.
-+ *
-+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
-+ */
-+
-+#define pte_ERROR(e) \
-+ printk("%s:%d: bad pte %p(%016Lx pfn %08lx).\n", __FILE__, __LINE__, \
-+ &(e), __pte_val(e), pte_pfn(e))
-+#define pmd_ERROR(e) \
-+ printk("%s:%d: bad pmd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \
-+ &(e), __pmd_val(e), (pmd_val(e) & PTE_MASK) >> PAGE_SHIFT)
-+#define pgd_ERROR(e) \
-+ printk("%s:%d: bad pgd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \
-+ &(e), __pgd_val(e), (pgd_val(e) & PTE_MASK) >> PAGE_SHIFT)
-+
-+#define pud_none(pud) 0
-+#define pud_bad(pud) 0
-+#define pud_present(pud) 1
-+
-+/*
-+ * Is the pte executable?
-+ */
-+static inline int pte_x(pte_t pte)
-+{
-+ return !(__pte_val(pte) & _PAGE_NX);
-+}
-+
-+/*
-+ * All present user-pages with !NX bit are user-executable:
-+ */
-+static inline int pte_exec(pte_t pte)
-+{
-+ return pte_user(pte) && pte_x(pte);
-+}
-+/*
-+ * All present pages with !NX bit are kernel-executable:
-+ */
-+static inline int pte_exec_kernel(pte_t pte)
-+{
-+ return pte_x(pte);
-+}
-+
-+/* Rules for using set_pte: the pte being assigned *must* be
-+ * either not present or in a state where the hardware will
-+ * not attempt to update the pte. In places where this is
-+ * not possible, use pte_get_and_clear to obtain the old pte
-+ * value and then use set_pte to update it. -ben
-+ */
-+
-+static inline void xen_set_pte(pte_t *ptep, pte_t pte)
-+{
-+ ptep->pte_high = pte.pte_high;
-+ smp_wmb();
-+ ptep->pte_low = pte.pte_low;
-+}
-+
-+static inline void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
-+ pte_t *ptep , pte_t pte)
-+{
-+ if ((mm != current->mm && mm != &init_mm) ||
-+ HYPERVISOR_update_va_mapping(addr, pte, 0))
-+ xen_set_pte(ptep, pte);
-+}
-+
-+static inline void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
-+{
-+ set_64bit((unsigned long long *)(ptep),__pte_val(pte));
-+}
-+static inline void xen_set_pmd(pmd_t *pmdp, pmd_t pmd)
-+{
-+ xen_l2_entry_update(pmdp, pmd);
-+}
-+static inline void xen_set_pud(pud_t *pudp, pud_t pud)
-+{
-+ xen_l3_entry_update(pudp, pud);
-+}
-+
-+/*
-+ * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
-+ * entry, so clear the bottom half first and enforce ordering with a compiler
-+ * barrier.
-+ */
-+static inline void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-+{
-+ if ((mm != current->mm && mm != &init_mm)
-+ || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
-+ ptep->pte_low = 0;
-+ smp_wmb();
-+ ptep->pte_high = 0;
-+ }
-+}
-+
-+static inline void xen_pmd_clear(pmd_t *pmd)
-+{
-+ xen_l2_entry_update(pmd, __pmd(0));
-+}
-+
-+#define set_pte(ptep, pte) xen_set_pte(ptep, pte)
-+#define set_pte_at(mm, addr, ptep, pte) xen_set_pte_at(mm, addr, ptep, pte)
-+#define set_pte_atomic(ptep, pte) xen_set_pte_atomic(ptep, pte)
-+#define set_pmd(pmdp, pmd) xen_set_pmd(pmdp, pmd)
-+#define set_pud(pudp, pud) xen_set_pud(pudp, pud)
-+#define pte_clear(mm, addr, ptep) xen_pte_clear(mm, addr, ptep)
-+#define pmd_clear(pmd) xen_pmd_clear(pmd)
-+
-+/*
-+ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
-+ * the TLB via cr3 if the top-level pgd is changed...
-+ * We do not let the generic code free and clear pgd entries due to
-+ * this erratum.
-+ */
-+static inline void pud_clear (pud_t * pud) { }
-+
-+#define pud_page(pud) \
-+((struct page *) __va(pud_val(pud) & PAGE_MASK))
-+
-+#define pud_page_vaddr(pud) \
-+((unsigned long) __va(pud_val(pud) & PAGE_MASK))
-+
-+
-+/* Find an entry in the second-level page table.. */
-+#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
-+ pmd_index(address))
-+
-+#ifdef CONFIG_SMP
-+static inline pte_t xen_ptep_get_and_clear(pte_t *ptep, pte_t res)
-+{
-+ uint64_t val = __pte_val(res);
-+ if (__cmpxchg64(ptep, val, 0) != val) {
-+ /* xchg acts as a barrier before the setting of the high bits */
-+ res.pte_low = xchg(&ptep->pte_low, 0);
-+ res.pte_high = ptep->pte_high;
-+ ptep->pte_high = 0;
-+ }
-+ return res;
-+}
-+#else
-+#define xen_ptep_get_and_clear(xp, pte) xen_local_ptep_get_and_clear(xp, pte)
-+#endif
-+
-+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
-+#define ptep_clear_flush(vma, addr, ptep) \
-+({ \
-+ pte_t *__ptep = (ptep); \
-+ pte_t __res = *__ptep; \
-+ if (!pte_none(__res) && \
-+ ((vma)->vm_mm != current->mm || \
-+ HYPERVISOR_update_va_mapping(addr, __pte(0), \
-+ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
-+ UVMF_INVLPG|UVMF_MULTI))) { \
-+ __ptep->pte_low = 0; \
-+ smp_wmb(); \
-+ __ptep->pte_high = 0; \
-+ flush_tlb_page(vma, addr); \
-+ } \
-+ __res; \
-+})
-+
-+#define __HAVE_ARCH_PTE_SAME
-+static inline int pte_same(pte_t a, pte_t b)
-+{
-+ return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
-+}
-+
-+#define pte_page(x) pfn_to_page(pte_pfn(x))
-+
-+static inline int pte_none(pte_t pte)
-+{
-+ return !(pte.pte_low | pte.pte_high);
-+}
-+
-+#define __pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \
-+ ((_pte).pte_high << (32-PAGE_SHIFT)))
-+#define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
-+ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
-+#define pte_pfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
-+ mfn_to_local_pfn(__pte_mfn(_pte)) : __pte_mfn(_pte))
-+
-+extern unsigned long long __supported_pte_mask;
-+
-+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-+{
-+ return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
-+ pgprot_val(pgprot)) & __supported_pte_mask);
-+}
-+
-+static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
-+{
-+ return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
-+ pgprot_val(pgprot)) & __supported_pte_mask);
-+}
-+
-+/*
-+ * Bits 0, 6 and 7 are taken in the low part of the pte,
-+ * put the 32 bits of offset into the high part.
-+ */
-+#define pte_to_pgoff(pte) ((pte).pte_high)
-+#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
-+#define PTE_FILE_MAX_BITS 32
-+
-+/* Encode and de-code a swap entry */
-+#define __swp_type(x) (((x).val) & 0x1f)
-+#define __swp_offset(x) ((x).val >> 5)
-+#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
-+#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
-+#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val })
-+
-+#define __pmd_free_tlb(tlb, x) do { } while (0)
-+
-+#endif /* _I386_PGTABLE_3LEVEL_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/pgtable.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/pgtable.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/pgtable.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/pgtable.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,591 @@
-+#ifndef _I386_PGTABLE_H
-+#define _I386_PGTABLE_H
-+
-+#include <asm/hypervisor.h>
-+
-+/*
-+ * The Linux memory management assumes a three-level page table setup. On
-+ * the i386, we use that, but "fold" the mid level into the top-level page
-+ * table, so that we physically have the same two-level page table as the
-+ * i386 mmu expects.
-+ *
-+ * This file contains the functions and defines necessary to modify and use
-+ * the i386 page table tree.
-+ */
-+#ifndef __ASSEMBLY__
-+#include <asm/processor.h>
-+#include <asm/fixmap.h>
-+#include <linux/threads.h>
-+
-+#ifndef _I386_BITOPS_H
-+#include <asm/bitops.h>
-+#endif
-+
-+#include <linux/slab.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/sched.h>
-+
-+/* Is this pagetable pinned? */
-+#define PG_pinned PG_arch_1
-+
-+struct vm_area_struct;
-+
-+/*
-+ * ZERO_PAGE is a global shared page that is always zero: used
-+ * for zero-mapped memory areas etc..
-+ */
-+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-+extern unsigned long empty_zero_page[1024];
-+extern pgd_t *swapper_pg_dir;
-+extern struct kmem_cache *pmd_cache;
-+extern spinlock_t pgd_lock;
-+extern struct page *pgd_list;
-+void check_pgt_cache(void);
-+
-+void pmd_ctor(void *, struct kmem_cache *, unsigned long);
-+void pgtable_cache_init(void);
-+void paging_init(void);
-+
-+
-+/*
-+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
-+ * implements both the traditional 2-level x86 page tables and the
-+ * newer 3-level PAE-mode page tables.
-+ */
-+#ifdef CONFIG_X86_PAE
-+# include <asm/pgtable-3level-defs.h>
-+# define PMD_SIZE (1UL << PMD_SHIFT)
-+# define PMD_MASK (~(PMD_SIZE-1))
-+#else
-+# include <asm/pgtable-2level-defs.h>
-+#endif
-+
-+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-+#define PGDIR_MASK (~(PGDIR_SIZE-1))
-+
-+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-+#define FIRST_USER_ADDRESS 0
-+
-+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
-+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
-+
-+#define TWOLEVEL_PGDIR_SHIFT 22
-+#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
-+#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
-+
-+/* Just any arbitrary offset to the start of the vmalloc VM area: the
-+ * current 8MB value just means that there will be a 8MB "hole" after the
-+ * physical memory until the kernel virtual memory starts. That means that
-+ * any out-of-bounds memory accesses will hopefully be caught.
-+ * The vmalloc() routines leaves a hole of 4kB between each vmalloced
-+ * area for the same reason. ;)
-+ */
-+#define VMALLOC_OFFSET (8*1024*1024)
-+#define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \
-+ 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
-+#ifdef CONFIG_HIGHMEM
-+# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
-+#else
-+# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
-+#endif
-+
-+/*
-+ * _PAGE_PSE set in the page directory entry just means that
-+ * the page directory entry points directly to a 4MB-aligned block of
-+ * memory.
-+ */
-+#define _PAGE_BIT_PRESENT 0
-+#define _PAGE_BIT_RW 1
-+#define _PAGE_BIT_USER 2
-+#define _PAGE_BIT_PWT 3
-+#define _PAGE_BIT_PCD 4
-+#define _PAGE_BIT_ACCESSED 5
-+#define _PAGE_BIT_DIRTY 6
-+#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */
-+#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
-+#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
-+#define _PAGE_BIT_UNUSED2 10
-+#define _PAGE_BIT_UNUSED3 11
-+#define _PAGE_BIT_NX 63
-+
-+#define _PAGE_PRESENT 0x001
-+#define _PAGE_RW 0x002
-+#define _PAGE_USER 0x004
-+#define _PAGE_PWT 0x008
-+#define _PAGE_PCD 0x010
-+#define _PAGE_ACCESSED 0x020
-+#define _PAGE_DIRTY 0x040
-+#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
-+#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
-+#define _PAGE_UNUSED1 0x200 /* available for programmer */
-+#define _PAGE_UNUSED2 0x400
-+#define _PAGE_UNUSED3 0x800
-+
-+/* If _PAGE_PRESENT is clear, we use these: */
-+#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
-+#define _PAGE_PROTNONE 0x080 /* if the user mapped it with PROT_NONE;
-+ pte_present gives true */
-+#ifdef CONFIG_X86_PAE
-+#define _PAGE_NX (1ULL<<_PAGE_BIT_NX)
-+#else
-+#define _PAGE_NX 0
-+#endif
-+
-+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-+
-+#define PAGE_NONE \
-+ __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-+#define PAGE_SHARED \
-+ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-+
-+#define PAGE_SHARED_EXEC \
-+ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-+#define PAGE_COPY_NOEXEC \
-+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-+#define PAGE_COPY_EXEC \
-+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-+#define PAGE_COPY \
-+ PAGE_COPY_NOEXEC
-+#define PAGE_READONLY \
-+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-+#define PAGE_READONLY_EXEC \
-+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-+
-+#define _PAGE_KERNEL \
-+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
-+#define _PAGE_KERNEL_EXEC \
-+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-+
-+extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
-+#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
-+#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
-+#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
-+#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
-+#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
-+
-+#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
-+#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
-+#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
-+#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
-+#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
-+#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
-+#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
-+
-+/*
-+ * The i386 can't do page protection for execute, and considers that
-+ * the same are read. Also, write permissions imply read permissions.
-+ * This is the closest we can get..
-+ */
-+#define __P000 PAGE_NONE
-+#define __P001 PAGE_READONLY
-+#define __P010 PAGE_COPY
-+#define __P011 PAGE_COPY
-+#define __P100 PAGE_READONLY_EXEC
-+#define __P101 PAGE_READONLY_EXEC
-+#define __P110 PAGE_COPY_EXEC
-+#define __P111 PAGE_COPY_EXEC
-+
-+#define __S000 PAGE_NONE
-+#define __S001 PAGE_READONLY
-+#define __S010 PAGE_SHARED
-+#define __S011 PAGE_SHARED
-+#define __S100 PAGE_READONLY_EXEC
-+#define __S101 PAGE_READONLY_EXEC
-+#define __S110 PAGE_SHARED_EXEC
-+#define __S111 PAGE_SHARED_EXEC
-+
-+/*
-+ * Define this if things work differently on an i386 and an i486:
-+ * it will (on an i486) warn about kernel memory accesses that are
-+ * done without a 'access_ok(VERIFY_WRITE,..)'
-+ */
-+#undef TEST_ACCESS_OK
-+
-+/* The boot page tables (all created as a single array) */
-+extern unsigned long pg0[];
-+
-+#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
-+
-+/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
-+#define pmd_none(x) (!(unsigned long)__pmd_val(x))
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
-+ can temporarily clear it. */
-+#define pmd_present(x) (__pmd_val(x))
-+#define pmd_bad(x) ((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
-+#else
-+#define pmd_present(x) (__pmd_val(x) & _PAGE_PRESENT)
-+#define pmd_bad(x) ((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-+#endif
-+
-+
-+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-+
-+/*
-+ * The following only work if pte_present() is true.
-+ * Undefined behaviour if not..
-+ */
-+static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
-+static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
-+static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
-+static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
-+static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
-+static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; }
-+
-+/*
-+ * The following only works if pte_present() is not true.
-+ */
-+static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }
-+
-+static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
-+static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
-+static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
-+static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
-+static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
-+static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
-+static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
-+static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
-+static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
-+static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
-+static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; }
-+
-+#ifdef CONFIG_X86_PAE
-+# include <asm/pgtable-3level.h>
-+#else
-+# include <asm/pgtable-2level.h>
-+#endif
-+
-+/*
-+ * Rules for using pte_update - it must be called after any PTE update which
-+ * has not been done using the set_pte / clear_pte interfaces. It is used by
-+ * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
-+ * updates should either be sets, clears, or set_pte_atomic for P->P
-+ * transitions, which means this hook should only be called for user PTEs.
-+ * This hook implies a P->P protection or access change has taken place, which
-+ * requires a subsequent TLB flush. The notification can optionally be delayed
-+ * until the TLB flush event by using the pte_update_defer form of the
-+ * interface, but care must be taken to assure that the flush happens while
-+ * still holding the same page table lock so that the shadow and primary pages
-+ * do not become out of sync on SMP.
-+ */
-+#define pte_update(mm, addr, ptep) do { } while (0)
-+#define pte_update_defer(mm, addr, ptep) do { } while (0)
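A minimal sketch (not part of the patch) of the call pattern the comment above describes: a raw in-place modification of a user PTE, done under the page table lock, followed by the pte_update notification and a TLB flush. The helper name example_clear_dirty is hypothetical.

    static inline void example_clear_dirty(struct vm_area_struct *vma,
                                           unsigned long addr, pte_t *ptep)
    {
            pte_t pte = *ptep;

            if (pte_dirty(pte)) {
                    /* raw P->P update of a user PTE, not done via
                     * set_pte()/clear_pte() ... */
                    *ptep = pte_mkclean(pte);
                    /* ... so notify shadow-mode hypervisors, then do the
                     * required TLB flush */
                    pte_update(vma->vm_mm, addr, ptep);
                    flush_tlb_page(vma, addr);
            }
    }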
-+
-+/* local pte updates need not use xchg for locking */
-+static inline pte_t xen_local_ptep_get_and_clear(pte_t *ptep, pte_t res)
-+{
-+ xen_set_pte(ptep, __pte(0));
-+ return res;
-+}
-+
-+/*
-+ * We only update the dirty/accessed state if we set
-+ * the dirty bit by hand in the kernel, since the hardware
-+ * will do the accessed bit for us, and we don't want to
-+ * race with other CPUs that might be updating the dirty
-+ * bit at the same time.
-+ */
-+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-+#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
-+({ \
-+ int __changed = !pte_same(*(ptep), entry); \
-+ if (__changed && (dirty)) \
-+ ptep_establish(vma, address, ptep, entry); \
-+ __changed; \
-+})
-+
-+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-+#define ptep_test_and_clear_dirty(vma, addr, ptep) ({ \
-+ int __ret = 0; \
-+ if (pte_dirty(*(ptep))) \
-+ __ret = test_and_clear_bit(_PAGE_BIT_DIRTY, \
-+ &(ptep)->pte_low); \
-+ if (__ret) \
-+ pte_update((vma)->vm_mm, addr, ptep); \
-+ __ret; \
-+})
-+
-+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-+#define ptep_test_and_clear_young(vma, addr, ptep) ({ \
-+ int __ret = 0; \
-+ if (pte_young(*(ptep))) \
-+ __ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, \
-+ &(ptep)->pte_low); \
-+ if (__ret) \
-+ pte_update((vma)->vm_mm, addr, ptep); \
-+ __ret; \
-+})
-+
-+/*
-+ * Rules for using ptep_establish: the pte MUST be a user pte, and
-+ * must be a present->present transition.
-+ */
-+#define __HAVE_ARCH_PTEP_ESTABLISH
-+#define ptep_establish(vma, address, ptep, pteval) \
-+do { \
-+ if ( likely((vma)->vm_mm == current->mm) ) { \
-+ BUG_ON(HYPERVISOR_update_va_mapping(address, \
-+ pteval, \
-+ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
-+ UVMF_INVLPG|UVMF_MULTI)); \
-+ } else { \
-+ xen_l1_entry_update(ptep, pteval); \
-+ flush_tlb_page(vma, address); \
-+ } \
-+} while (0)
-+
-+#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-+#define ptep_clear_flush_dirty(vma, address, ptep) \
-+({ \
-+ pte_t __pte = *(ptep); \
-+ int __dirty = pte_dirty(__pte); \
-+ __pte = pte_mkclean(__pte); \
-+ if (test_bit(PG_pinned, &virt_to_page((vma)->vm_mm->pgd)->flags)) \
-+ (void)ptep_set_access_flags(vma, address, ptep, __pte, __dirty); \
-+ else if (__dirty) \
-+ (ptep)->pte_low = __pte.pte_low; \
-+ __dirty; \
-+})
-+
-+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-+#define ptep_clear_flush_young(vma, address, ptep) \
-+({ \
-+ pte_t __pte = *(ptep); \
-+ int __young = pte_young(__pte); \
-+ __pte = pte_mkold(__pte); \
-+ if (test_bit(PG_pinned, &virt_to_page((vma)->vm_mm->pgd)->flags)) \
-+ (void)ptep_set_access_flags(vma, address, ptep, __pte, __young); \
-+ else if (__young) \
-+ (ptep)->pte_low = __pte.pte_low; \
-+ __young; \
-+})
-+
-+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-+{
-+ pte_t pte = *ptep;
-+ if (!pte_none(pte)) {
-+ if (mm != &init_mm) {
-+ pte = xen_ptep_get_and_clear(ptep, pte);
-+ pte_update(mm, addr, ptep);
-+ } else
-+ HYPERVISOR_update_va_mapping(addr, __pte(0), 0);
-+ }
-+ return pte;
-+}
-+
-+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-+#define ptep_get_and_clear_full(mm, addr, ptep, full) \
-+ ((full) ? ({ \
-+ pte_t __res = *(ptep); \
-+ if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) \
-+ xen_l1_entry_update(ptep, __pte(0)); \
-+ else \
-+ *(ptep) = __pte(0); \
-+ __res; \
-+ }) : \
-+ ptep_get_and_clear(mm, addr, ptep))
-+
-+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-+{
-+ pte_t pte = *ptep;
-+ if (pte_write(pte))
-+ set_pte_at(mm, addr, ptep, pte_wrprotect(pte));
-+}
-+
-+/*
-+ * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
-+ *
-+ * dst - pointer to pgd range anywhere on a pgd page
-+ * src - ""
-+ * count - the number of pgds to copy.
-+ *
-+ * dst and src can be on the same page, but the range must not overlap,
-+ * and must not cross a page boundary.
-+ */
-+static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
-+{
-+ memcpy(dst, src, count * sizeof(pgd_t));
-+}
-+
-+/*
-+ * Macro to mark a page protection value as "uncacheable". On processors which do not support
-+ * it, this is a no-op.
-+ */
-+#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \
-+ ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
-+
-+/*
-+ * Conversion functions: convert a page and protection to a page entry,
-+ * and a page entry and page directory to the page they refer to.
-+ */
-+
-+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-+
-+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-+{
-+ /*
-+ * Since this might change the present bit (which controls whether
-+ * a pte_t object has undergone p2m translation), we must use
-+ * pte_val() on the input pte and __pte() for the return value.
-+ */
-+ paddr_t pteval = pte_val(pte);
-+
-+ pteval &= _PAGE_CHG_MASK;
-+ pteval |= pgprot_val(newprot);
-+#ifdef CONFIG_X86_PAE
-+ pteval &= __supported_pte_mask;
-+#endif
-+ return __pte(pteval);
-+}
-+
-+#define pmd_large(pmd) \
-+((__pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
-+
-+/*
-+ * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
-+ *
-+ * this macro returns the index of the entry in the pgd page which would
-+ * control the given virtual address
-+ */
-+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-+#define pgd_index_k(addr) pgd_index(addr)
-+
-+/*
-+ * pgd_offset() returns a (pgd_t *)
-+ * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
-+ */
-+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-+
-+/*
-+ * a shortcut which implies the use of the kernel's pgd, instead
-+ * of a process's
-+ */
-+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-+
-+/*
-+ * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
-+ *
-+ * this macro returns the index of the entry in the pmd page which would
-+ * control the given virtual address
-+ */
-+#define pmd_index(address) \
-+ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-+
-+/*
-+ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
-+ *
-+ * this macro returns the index of the entry in the pte page which would
-+ * control the given virtual address
-+ */
-+#define pte_index(address) \
-+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-+#define pte_offset_kernel(dir, address) \
-+ ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
-+
-+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-+
-+#define pmd_page_vaddr(pmd) \
-+ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-+
-+/*
-+ * Helper function that returns the kernel pagetable entry controlling
-+ * the virtual address 'address'. NULL means no pagetable entry present.
-+ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
-+ * as a pte too.
-+ */
-+extern pte_t *lookup_address(unsigned long address);
-+
-+/*
-+ * Make a given kernel text page executable/non-executable.
-+ * Returns the previous executability setting of that page (which
-+ * is used to restore the previous state). Used by the SMP bootup code.
-+ * NOTE: this is an __init function for security reasons.
-+ */
-+#ifdef CONFIG_X86_PAE
-+ extern int set_kernel_exec(unsigned long vaddr, int enable);
-+#else
-+ static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
-+#endif
-+
-+#if defined(CONFIG_HIGHPTE)
-+#define pte_offset_map(dir, address) \
-+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-+#define pte_offset_map_nested(dir, address) \
-+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
-+#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-+#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
-+#else
-+#define pte_offset_map(dir, address) \
-+ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-+#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
-+#define pte_unmap(pte) do { } while (0)
-+#define pte_unmap_nested(pte) do { } while (0)
-+#endif
-+
-+/* Clear a kernel PTE and flush it from the TLB */
-+#define kpte_clear_flush(ptep, vaddr) \
-+ HYPERVISOR_update_va_mapping(vaddr, __pte(0), UVMF_INVLPG)
-+
-+/*
-+ * The i386 doesn't have any external MMU info: the kernel page
-+ * tables contain all the necessary information.
-+ */
-+#define update_mmu_cache(vma,address,pte) do { } while (0)
-+
-+#include <xen/features.h>
-+void make_lowmem_page_readonly(void *va, unsigned int feature);
-+void make_lowmem_page_writable(void *va, unsigned int feature);
-+void make_page_readonly(void *va, unsigned int feature);
-+void make_page_writable(void *va, unsigned int feature);
-+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
-+void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
-+
-+#define virt_to_ptep(__va) \
-+({ \
-+ pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
-+ pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va)); \
-+ pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va)); \
-+ pte_offset_kernel(__pmd, (unsigned long)(__va)); \
-+})
-+
-+#define arbitrary_virt_to_machine(__va) \
-+({ \
-+ maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
-+ m | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
-+})
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#ifdef CONFIG_FLATMEM
-+#define kern_addr_valid(addr) (1)
-+#endif /* CONFIG_FLATMEM */
-+
-+int direct_remap_pfn_range(struct vm_area_struct *vma,
-+ unsigned long address,
-+ unsigned long mfn,
-+ unsigned long size,
-+ pgprot_t prot,
-+ domid_t domid);
-+int direct_kernel_remap_pfn_range(unsigned long address,
-+ unsigned long mfn,
-+ unsigned long size,
-+ pgprot_t prot,
-+ domid_t domid);
-+int create_lookup_pte_addr(struct mm_struct *mm,
-+ unsigned long address,
-+ uint64_t *ptep);
-+int touch_pte_range(struct mm_struct *mm,
-+ unsigned long address,
-+ unsigned long size);
-+
-+#define io_remap_pfn_range(vma,from,pfn,size,prot) \
-+direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO)
-+
-+#include <asm-generic/pgtable.h>
-+
-+#endif /* _I386_PGTABLE_H */
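The __P000..__P111 and __S000..__S111 tables defined in this header are consumed by generic mm code, which builds protection_map[16] from exactly these sixteen macros and indexes it with the low vm_flags bits. A hedged sketch of that lookup (the function name is hypothetical):

    static pgprot_t example_prot_for(unsigned long vm_flags)
    {
            /* low four bits: VM_READ, VM_WRITE, VM_EXEC, VM_SHARED */
            return protection_map[vm_flags &
                                  (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
    }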
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/processor.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/processor.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/processor.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/processor.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,725 @@
-+/*
-+ * include/asm-i386/processor.h
-+ *
-+ * Copyright (C) 1994 Linus Torvalds
-+ */
-+
-+#ifndef __ASM_I386_PROCESSOR_H
-+#define __ASM_I386_PROCESSOR_H
-+
-+#include <asm/vm86.h>
-+#include <asm/math_emu.h>
-+#include <asm/segment.h>
-+#include <asm/page.h>
-+#include <asm/types.h>
-+#include <asm/sigcontext.h>
-+#include <asm/cpufeature.h>
-+#include <asm/msr.h>
-+#include <asm/system.h>
-+#include <linux/cache.h>
-+#include <linux/threads.h>
-+#include <asm/percpu.h>
-+#include <linux/cpumask.h>
-+#include <linux/init.h>
-+#include <asm/processor-flags.h>
-+#include <xen/interface/physdev.h>
-+
-+/* flag for disabling the tsc */
-+#define tsc_disable 0
-+
-+struct desc_struct {
-+ unsigned long a,b;
-+};
-+
-+#define desc_empty(desc) \
-+ (!((desc)->a | (desc)->b))
-+
-+#define desc_equal(desc1, desc2) \
-+ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
-+/*
-+ * Default implementation of macro that returns current
-+ * instruction pointer ("program counter").
-+ */
-+#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
-+
-+/*
-+ * CPU type and hardware bug flags. Kept separately for each CPU.
-+ * Members of this structure are referenced in head.S, so think twice
-+ * before touching them. [mj]
-+ */
-+
-+struct cpuinfo_x86 {
-+ __u8 x86; /* CPU family */
-+ __u8 x86_vendor; /* CPU vendor */
-+ __u8 x86_model;
-+ __u8 x86_mask;
-+ char wp_works_ok; /* It doesn't on 386's */
-+ char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
-+ char hard_math;
-+ char rfu;
-+ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
-+ unsigned long x86_capability[NCAPINTS];
-+ char x86_vendor_id[16];
-+ char x86_model_id[64];
-+ int x86_cache_size; /* in KB - valid for CPUS which support this
-+ call */
-+ int x86_cache_alignment; /* In bytes */
-+ char fdiv_bug;
-+ char f00f_bug;
-+ char coma_bug;
-+ char pad0;
-+ int x86_power;
-+ unsigned long loops_per_jiffy;
-+#ifdef CONFIG_SMP
-+ cpumask_t llc_shared_map; /* cpus sharing the last level cache */
-+#endif
-+ unsigned char x86_max_cores; /* cpuid returned max cores value */
-+ unsigned char apicid;
-+ unsigned short x86_clflush_size;
-+#ifdef CONFIG_SMP
-+ unsigned char booted_cores; /* number of cores as seen by OS */
-+ __u8 phys_proc_id; /* Physical processor id. */
-+ __u8 cpu_core_id; /* Core id */
-+#endif
-+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-+
-+#define X86_VENDOR_INTEL 0
-+#define X86_VENDOR_CYRIX 1
-+#define X86_VENDOR_AMD 2
-+#define X86_VENDOR_UMC 3
-+#define X86_VENDOR_NEXGEN 4
-+#define X86_VENDOR_CENTAUR 5
-+#define X86_VENDOR_RISE 6
-+#define X86_VENDOR_TRANSMETA 7
-+#define X86_VENDOR_NSC 8
-+#define X86_VENDOR_NUM 9
-+#define X86_VENDOR_UNKNOWN 0xff
-+
-+/*
-+ * capabilities of CPUs
-+ */
-+
-+extern struct cpuinfo_x86 boot_cpu_data;
-+extern struct cpuinfo_x86 new_cpu_data;
-+#ifndef CONFIG_X86_NO_TSS
-+extern struct tss_struct doublefault_tss;
-+DECLARE_PER_CPU(struct tss_struct, init_tss);
-+#endif
-+
-+#ifdef CONFIG_SMP
-+extern struct cpuinfo_x86 cpu_data[];
-+#define current_cpu_data cpu_data[smp_processor_id()]
-+#else
-+#define cpu_data (&boot_cpu_data)
-+#define current_cpu_data boot_cpu_data
-+#endif
-+
-+extern int cpu_llc_id[NR_CPUS];
-+extern char ignore_fpu_irq;
-+
-+void __init cpu_detect(struct cpuinfo_x86 *c);
-+
-+extern void identify_boot_cpu(void);
-+extern void identify_secondary_cpu(struct cpuinfo_x86 *);
-+extern void print_cpu_info(struct cpuinfo_x86 *);
-+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
-+extern unsigned short num_cache_leaves;
-+
-+#ifdef CONFIG_X86_HT
-+extern void detect_ht(struct cpuinfo_x86 *c);
-+#else
-+static inline void detect_ht(struct cpuinfo_x86 *c) {}
-+#endif
-+
-+static inline void xen_cpuid(unsigned int *eax, unsigned int *ebx,
-+ unsigned int *ecx, unsigned int *edx)
-+{
-+ /* ecx is often an input as well as an output. */
-+ __asm__(XEN_CPUID
-+ : "=a" (*eax),
-+ "=b" (*ebx),
-+ "=c" (*ecx),
-+ "=d" (*edx)
-+ : "0" (*eax), "2" (*ecx));
-+}
-+
-+#define load_cr3(pgdir) write_cr3(__pa(pgdir))
-+
-+/*
-+ * Save the cr4 feature set we're using (ie
-+ * Pentium 4MB enable and PPro Global page
-+ * enable), so that any CPU's that boot up
-+ * after us can get the correct flags.
-+ */
-+extern unsigned long mmu_cr4_features;
-+
-+static inline void set_in_cr4 (unsigned long mask)
-+{
-+ unsigned cr4;
-+ mmu_cr4_features |= mask;
-+ cr4 = read_cr4();
-+ cr4 |= mask;
-+ write_cr4(cr4);
-+}
-+
-+static inline void clear_in_cr4 (unsigned long mask)
-+{
-+ unsigned cr4;
-+ mmu_cr4_features &= ~mask;
-+ cr4 = read_cr4();
-+ cr4 &= ~mask;
-+ write_cr4(cr4);
-+}
-+
-+/*
-+ * NSC/Cyrix CPU indexed register access macros
-+ */
-+
-+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
-+
-+#define setCx86(reg, data) do { \
-+ outb((reg), 0x22); \
-+ outb((data), 0x23); \
-+} while (0)
-+
-+/* Stop speculative execution */
-+static inline void sync_core(void)
-+{
-+ int tmp;
-+ asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
-+}
-+
-+static inline void __monitor(const void *eax, unsigned long ecx,
-+ unsigned long edx)
-+{
-+ /* "monitor %eax,%ecx,%edx;" */
-+ asm volatile(
-+ ".byte 0x0f,0x01,0xc8;"
-+ : :"a" (eax), "c" (ecx), "d"(edx));
-+}
-+
-+static inline void __mwait(unsigned long eax, unsigned long ecx)
-+{
-+ /* "mwait %eax,%ecx;" */
-+ asm volatile(
-+ ".byte 0x0f,0x01,0xc9;"
-+ : :"a" (eax), "c" (ecx));
-+}
-+
-+extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
-+
-+/* from system description table in BIOS. Mostly for MCA use, but
-+others may find it useful. */
-+extern unsigned int machine_id;
-+extern unsigned int machine_submodel_id;
-+extern unsigned int BIOS_revision;
-+extern unsigned int mca_pentium_flag;
-+
-+/* Boot loader type from the setup header */
-+extern int bootloader_type;
-+
-+/*
-+ * User space process size: 3GB (default).
-+ */
-+#define TASK_SIZE (PAGE_OFFSET)
-+
-+/* This decides where the kernel will search for a free chunk of vm
-+ * space during mmap's.
-+ */
-+#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
-+
-+#define HAVE_ARCH_PICK_MMAP_LAYOUT
-+
-+/*
-+ * Size of io_bitmap.
-+ */
-+#define IO_BITMAP_BITS 65536
-+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
-+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-+#ifndef CONFIG_X86_NO_TSS
-+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-+#endif
-+#define INVALID_IO_BITMAP_OFFSET 0x8000
-+#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
-+
-+struct i387_fsave_struct {
-+ long cwd;
-+ long swd;
-+ long twd;
-+ long fip;
-+ long fcs;
-+ long foo;
-+ long fos;
-+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
-+ long status; /* software status information */
-+};
-+
-+struct i387_fxsave_struct {
-+ unsigned short cwd;
-+ unsigned short swd;
-+ unsigned short twd;
-+ unsigned short fop;
-+ long fip;
-+ long fcs;
-+ long foo;
-+ long fos;
-+ long mxcsr;
-+ long mxcsr_mask;
-+ long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
-+ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
-+ long padding[56];
-+} __attribute__ ((aligned (16)));
-+
-+struct i387_soft_struct {
-+ long cwd;
-+ long swd;
-+ long twd;
-+ long fip;
-+ long fcs;
-+ long foo;
-+ long fos;
-+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
-+ unsigned char ftop, changed, lookahead, no_update, rm, alimit;
-+ struct info *info;
-+ unsigned long entry_eip;
-+};
-+
-+union i387_union {
-+ struct i387_fsave_struct fsave;
-+ struct i387_fxsave_struct fxsave;
-+ struct i387_soft_struct soft;
-+};
-+
-+typedef struct {
-+ unsigned long seg;
-+} mm_segment_t;
-+
-+struct thread_struct;
-+
-+#ifndef CONFIG_X86_NO_TSS
-+/* This is the TSS defined by the hardware. */
-+struct i386_hw_tss {
-+ unsigned short back_link,__blh;
-+ unsigned long esp0;
-+ unsigned short ss0,__ss0h;
-+ unsigned long esp1;
-+ unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */
-+ unsigned long esp2;
-+ unsigned short ss2,__ss2h;
-+ unsigned long __cr3;
-+ unsigned long eip;
-+ unsigned long eflags;
-+ unsigned long eax,ecx,edx,ebx;
-+ unsigned long esp;
-+ unsigned long ebp;
-+ unsigned long esi;
-+ unsigned long edi;
-+ unsigned short es, __esh;
-+ unsigned short cs, __csh;
-+ unsigned short ss, __ssh;
-+ unsigned short ds, __dsh;
-+ unsigned short fs, __fsh;
-+ unsigned short gs, __gsh;
-+ unsigned short ldt, __ldth;
-+ unsigned short trace, io_bitmap_base;
-+} __attribute__((packed));
-+
-+struct tss_struct {
-+ struct i386_hw_tss x86_tss;
-+
-+ /*
-+ * The extra 1 is there because the CPU will access an
-+ * additional byte beyond the end of the IO permission
-+ * bitmap. The extra byte must be all 1 bits, and must
-+ * be within the limit.
-+ */
-+ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
-+ /*
-+ * Cache the current maximum and the last task that used the bitmap:
-+ */
-+ unsigned long io_bitmap_max;
-+ struct thread_struct *io_bitmap_owner;
-+ /*
-+ * pads the TSS to be cacheline-aligned (size is 0x100)
-+ */
-+ unsigned long __cacheline_filler[35];
-+ /*
-+ * .. and then another 0x100 bytes for emergency kernel stack
-+ */
-+ unsigned long stack[64];
-+} __attribute__((packed));
-+#endif
-+
-+#define ARCH_MIN_TASKALIGN 16
-+
-+struct thread_struct {
-+/* cached TLS descriptors. */
-+ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
-+ unsigned long esp0;
-+ unsigned long sysenter_cs;
-+ unsigned long eip;
-+ unsigned long esp;
-+ unsigned long fs;
-+ unsigned long gs;
-+/* Hardware debugging registers */
-+ unsigned long debugreg[8]; /* %%db0-7 debug registers */
-+/* fault info */
-+ unsigned long cr2, trap_no, error_code;
-+/* floating point info */
-+ union i387_union i387;
-+/* virtual 86 mode info */
-+ struct vm86_struct __user * vm86_info;
-+ unsigned long screen_bitmap;
-+ unsigned long v86flags, v86mask, saved_esp0;
-+ unsigned int saved_fs, saved_gs;
-+/* IO permissions */
-+ unsigned long *io_bitmap_ptr;
-+ unsigned long iopl;
-+/* max allowed port in the bitmap, in bytes: */
-+ unsigned long io_bitmap_max;
-+};
-+
-+#define INIT_THREAD { \
-+ .esp0 = sizeof(init_stack) + (long)&init_stack, \
-+ .vm86_info = NULL, \
-+ .sysenter_cs = __KERNEL_CS, \
-+ .io_bitmap_ptr = NULL, \
-+ .fs = __KERNEL_PERCPU, \
-+}
-+
-+/*
-+ * Note that the .io_bitmap member must be extra-big. This is because
-+ * the CPU will access an additional byte beyond the end of the IO
-+ * permission bitmap. The extra byte must be all 1 bits, and must
-+ * be within the limit.
-+ */
-+#define INIT_TSS { \
-+ .x86_tss = { \
-+ .esp0 = sizeof(init_stack) + (long)&init_stack, \
-+ .ss0 = __KERNEL_DS, \
-+ .ss1 = __KERNEL_CS, \
-+ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
-+ }, \
-+ .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
-+}
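A quick worked example (not in the patch) of the sizes implied by the IO_BITMAP_* constants above, assuming 32-bit longs:

    IO_BITMAP_BITS  = 65536 ports
    IO_BITMAP_BYTES = 65536 / 8           = 8192 bytes
    IO_BITMAP_LONGS = 8192 / sizeof(long) = 2048 longs

so io_bitmap[IO_BITMAP_LONGS + 1] reserves 2049 longs (8196 bytes); the extra long supplies the all-ones terminator byte the CPU reads one byte past the end of the bitmap, and INIT_TSS initialises every entry, including that terminator, to ~0.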
-+
-+#define start_thread(regs, new_eip, new_esp) do { \
-+ __asm__("movl %0,%%gs": :"r" (0)); \
-+ regs->xfs = 0; \
-+ set_fs(USER_DS); \
-+ regs->xds = __USER_DS; \
-+ regs->xes = __USER_DS; \
-+ regs->xss = __USER_DS; \
-+ regs->xcs = __USER_CS; \
-+ regs->eip = new_eip; \
-+ regs->esp = new_esp; \
-+} while (0)
-+
-+/* Forward declaration, a strange C thing */
-+struct task_struct;
-+struct mm_struct;
-+
-+/* Free all resources held by a thread. */
-+extern void release_thread(struct task_struct *);
-+
-+/* Prepare to copy thread state - unlazy all lazy status */
-+extern void prepare_to_copy(struct task_struct *tsk);
-+
-+/*
-+ * create a kernel thread without removing it from tasklists
-+ */
-+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-+
-+extern unsigned long thread_saved_pc(struct task_struct *tsk);
-+void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);
-+
-+unsigned long get_wchan(struct task_struct *p);
-+
-+#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
-+#define KSTK_TOP(info) \
-+({ \
-+ unsigned long *__ptr = (unsigned long *)(info); \
-+ (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
-+})
-+
-+/*
-+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
-+ * This is necessary to guarantee that the entire "struct pt_regs"
-+ * is accessible even if the CPU hasn't stored the SS/ESP registers
-+ * on the stack (interrupt gate does not save these registers
-+ * when switching to the same priv ring).
-+ * Therefore beware: accessing the xss/esp fields of the
-+ * "struct pt_regs" is possible, but they may contain the
-+ * completely wrong values.
-+ */
-+#define task_pt_regs(task) \
-+({ \
-+ struct pt_regs *__regs__; \
-+ __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
-+ __regs__ - 1; \
-+})
-+
-+#define KSTK_EIP(task) (task_pt_regs(task)->eip)
-+#define KSTK_ESP(task) (task_pt_regs(task)->esp)
-+
-+
-+struct microcode_header {
-+ unsigned int hdrver;
-+ unsigned int rev;
-+ unsigned int date;
-+ unsigned int sig;
-+ unsigned int cksum;
-+ unsigned int ldrver;
-+ unsigned int pf;
-+ unsigned int datasize;
-+ unsigned int totalsize;
-+ unsigned int reserved[3];
-+};
-+
-+struct microcode {
-+ struct microcode_header hdr;
-+ unsigned int bits[0];
-+};
-+
-+typedef struct microcode microcode_t;
-+typedef struct microcode_header microcode_header_t;
-+
-+/* microcode format is extended from prescott processors */
-+struct extended_signature {
-+ unsigned int sig;
-+ unsigned int pf;
-+ unsigned int cksum;
-+};
-+
-+struct extended_sigtable {
-+ unsigned int count;
-+ unsigned int cksum;
-+ unsigned int reserved[3];
-+ struct extended_signature sigs[0];
-+};
-+
-+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-+static inline void rep_nop(void)
-+{
-+ __asm__ __volatile__("rep;nop": : :"memory");
-+}
-+
-+#define cpu_relax() rep_nop()
-+
-+#ifndef CONFIG_X86_NO_TSS
-+static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread)
-+{
-+ tss->x86_tss.esp0 = thread->esp0;
-+ /* This can only happen when SEP is enabled, no need to test "SEP"arately */
-+ if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
-+ tss->x86_tss.ss1 = thread->sysenter_cs;
-+ wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
-+ }
-+}
-+#else
-+#define xen_load_esp0(tss, thread) \
-+ HYPERVISOR_stack_switch(__KERNEL_DS, (thread)->esp0)
-+#endif
-+
-+
-+static inline unsigned long xen_get_debugreg(int regno)
-+{
-+ return HYPERVISOR_get_debugreg(regno);
-+}
-+
-+static inline void xen_set_debugreg(int regno, unsigned long value)
-+{
-+ HYPERVISOR_set_debugreg(regno, value);
-+}
-+
-+/*
-+ * Set IOPL bits in EFLAGS from given mask
-+ */
-+static inline void xen_set_iopl_mask(unsigned mask)
-+{
-+ struct physdev_set_iopl set_iopl;
-+
-+ /* Force the change at ring 0. */
-+ set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
-+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
-+}
-+
-+
-+#define paravirt_enabled() 0
-+#define __cpuid xen_cpuid
-+
-+#define load_esp0 xen_load_esp0
-+
-+/*
-+ * These special macros can be used to get or set a debugging register
-+ */
-+#define get_debugreg(var, register) \
-+ (var) = xen_get_debugreg(register)
-+#define set_debugreg(value, register) \
-+ xen_set_debugreg(register, value)
-+
-+#define set_iopl_mask xen_set_iopl_mask
-+
-+/*
-+ * Generic CPUID function
-+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
-+ * resulting in stale register contents being returned.
-+ */
-+static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
-+{
-+ *eax = op;
-+ *ecx = 0;
-+ __cpuid(eax, ebx, ecx, edx);
-+}
-+
-+/* Some CPUID calls want 'count' to be placed in ecx */
-+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
-+ int *edx)
-+{
-+ *eax = op;
-+ *ecx = count;
-+ __cpuid(eax, ebx, ecx, edx);
-+}
-+
-+/*
-+ * CPUID functions returning a single datum
-+ */
-+static inline unsigned int cpuid_eax(unsigned int op)
-+{
-+ unsigned int eax, ebx, ecx, edx;
-+
-+ cpuid(op, &eax, &ebx, &ecx, &edx);
-+ return eax;
-+}
-+static inline unsigned int cpuid_ebx(unsigned int op)
-+{
-+ unsigned int eax, ebx, ecx, edx;
-+
-+ cpuid(op, &eax, &ebx, &ecx, &edx);
-+ return ebx;
-+}
-+static inline unsigned int cpuid_ecx(unsigned int op)
-+{
-+ unsigned int eax, ebx, ecx, edx;
-+
-+ cpuid(op, &eax, &ebx, &ecx, &edx);
-+ return ecx;
-+}
-+static inline unsigned int cpuid_edx(unsigned int op)
-+{
-+ unsigned int eax, ebx, ecx, edx;
-+
-+ cpuid(op, &eax, &ebx, &ecx, &edx);
-+ return edx;
-+}
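An illustrative use of the cpuid() wrapper above (sketch only, not part of the patch): CPUID leaf 0 returns the vendor string in EBX, EDX, ECX, so the output registers are stored in that order to reassemble it.

    static void example_print_vendor(void)
    {
            unsigned int eax, vendor[4] = { 0 };

            /* ebx -> vendor[0], edx -> vendor[1], ecx -> vendor[2] */
            cpuid(0, &eax, &vendor[0], &vendor[2], &vendor[1]);
            printk(KERN_INFO "CPU vendor: %s\n", (char *)vendor);
    }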
-+
-+/* generic versions from gas */
-+#define GENERIC_NOP1 ".byte 0x90\n"
-+#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
-+#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
-+#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
-+#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
-+#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
-+#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
-+#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
-+
-+/* Opteron nops */
-+#define K8_NOP1 GENERIC_NOP1
-+#define K8_NOP2 ".byte 0x66,0x90\n"
-+#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
-+#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
-+#define K8_NOP5 K8_NOP3 K8_NOP2
-+#define K8_NOP6 K8_NOP3 K8_NOP3
-+#define K8_NOP7 K8_NOP4 K8_NOP3
-+#define K8_NOP8 K8_NOP4 K8_NOP4
-+
-+/* K7 nops */
-+/* uses eax dependencies (arbitrary choice) */
-+#define K7_NOP1 GENERIC_NOP1
-+#define K7_NOP2 ".byte 0x8b,0xc0\n"
-+#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
-+#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
-+#define K7_NOP5 K7_NOP4 ASM_NOP1
-+#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
-+#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
-+#define K7_NOP8 K7_NOP7 ASM_NOP1
-+
-+#ifdef CONFIG_MK8
-+#define ASM_NOP1 K8_NOP1
-+#define ASM_NOP2 K8_NOP2
-+#define ASM_NOP3 K8_NOP3
-+#define ASM_NOP4 K8_NOP4
-+#define ASM_NOP5 K8_NOP5
-+#define ASM_NOP6 K8_NOP6
-+#define ASM_NOP7 K8_NOP7
-+#define ASM_NOP8 K8_NOP8
-+#elif defined(CONFIG_MK7)
-+#define ASM_NOP1 K7_NOP1
-+#define ASM_NOP2 K7_NOP2
-+#define ASM_NOP3 K7_NOP3
-+#define ASM_NOP4 K7_NOP4
-+#define ASM_NOP5 K7_NOP5
-+#define ASM_NOP6 K7_NOP6
-+#define ASM_NOP7 K7_NOP7
-+#define ASM_NOP8 K7_NOP8
-+#else
-+#define ASM_NOP1 GENERIC_NOP1
-+#define ASM_NOP2 GENERIC_NOP2
-+#define ASM_NOP3 GENERIC_NOP3
-+#define ASM_NOP4 GENERIC_NOP4
-+#define ASM_NOP5 GENERIC_NOP5
-+#define ASM_NOP6 GENERIC_NOP6
-+#define ASM_NOP7 GENERIC_NOP7
-+#define ASM_NOP8 GENERIC_NOP8
-+#endif
-+
-+#define ASM_NOP_MAX 8
-+
-+/* Prefetch instructions for Pentium III and AMD Athlon */
-+/* It's not worth caring about 3dnow! prefetches for the K6
-+ because they are microcoded there and very slow.
-+ However, we currently don't do prefetches for pre-XP Athlons;
-+ that should be fixed. */
-+#define ARCH_HAS_PREFETCH
-+static inline void prefetch(const void *x)
-+{
-+ alternative_input(ASM_NOP4,
-+ "prefetchnta (%1)",
-+ X86_FEATURE_XMM,
-+ "r" (x));
-+}
-+
-+#define ARCH_HAS_PREFETCH
-+#define ARCH_HAS_PREFETCHW
-+#define ARCH_HAS_SPINLOCK_PREFETCH
-+
-+/* 3dnow! prefetch to get an exclusive cache line. Useful for
-+ spinlocks to avoid one state transition in the cache coherency protocol. */
-+static inline void prefetchw(const void *x)
-+{
-+ alternative_input(ASM_NOP4,
-+ "prefetchw (%1)",
-+ X86_FEATURE_3DNOW,
-+ "r" (x));
-+}
-+#define spin_lock_prefetch(x) prefetchw(x)
-+
-+extern void select_idle_routine(const struct cpuinfo_x86 *c);
-+
-+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
-+
-+extern unsigned long boot_option_idle_override;
-+extern void enable_sep_cpu(void);
-+extern int sysenter_setup(void);
-+
-+/* Defined in head.S */
-+extern struct Xgt_desc_struct early_gdt_descr;
-+
-+extern void cpu_set_gdt(int);
-+extern void switch_to_new_gdt(void);
-+extern void cpu_init(void);
-+extern void init_gdt(int cpu);
-+
-+extern int force_mwait;
-+
-+#endif /* __ASM_I386_PROCESSOR_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/ptrace.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/ptrace.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/ptrace.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/ptrace.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,63 @@
-+#ifndef _I386_PTRACE_H
-+#define _I386_PTRACE_H
-+
-+#include <asm/ptrace-abi.h>
-+
-+/* this struct defines the way the registers are stored on the
-+ stack during a system call. */
-+
-+struct pt_regs {
-+ long ebx;
-+ long ecx;
-+ long edx;
-+ long esi;
-+ long edi;
-+ long ebp;
-+ long eax;
-+ int xds;
-+ int xes;
-+ int xfs;
-+ /* int xgs; */
-+ long orig_eax;
-+ long eip;
-+ int xcs;
-+ long eflags;
-+ long esp;
-+ int xss;
-+};
-+
-+#ifdef __KERNEL__
-+
-+#include <asm/vm86.h>
-+#include <asm/segment.h>
-+
-+struct task_struct;
-+extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
-+
-+/*
-+ * user_mode_vm(regs) determines whether a register set came from user mode.
-+ * This is true if V8086 mode was enabled OR if the register set was from
-+ * protected mode with RPL-3 CS value. This tricky test checks that with
-+ * one comparison. Many places in the kernel can bypass this full check
-+ * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
-+ */
-+static inline int user_mode(struct pt_regs *regs)
-+{
-+ return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL;
-+}
-+static inline int user_mode_vm(struct pt_regs *regs)
-+{
-+ return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
-+}
-+static inline int v8086_mode(struct pt_regs *regs)
-+{
-+ return (regs->eflags & VM_MASK);
-+}
-+
-+#define instruction_pointer(regs) ((regs)->eip)
-+#define regs_return_value(regs) ((regs)->eax)
-+
-+extern unsigned long profile_pc(struct pt_regs *regs);
-+#endif /* __KERNEL__ */
-+
-+#endif
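A minimal sketch (not from the patch) of how the helpers above are meant to be chosen in trap/interrupt paths, per the comment on user_mode_vm(); the function name is hypothetical:

    static int example_fault_from_userspace(struct pt_regs *regs)
    {
            /* vm86 has not been ruled out here, so use the full check */
            if (user_mode_vm(regs))
                    return 1;
            /* kernel-mode fault */
            return 0;
    }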
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/scatterlist.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/scatterlist.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/scatterlist.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/scatterlist.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,24 @@
-+#ifndef _I386_SCATTERLIST_H
-+#define _I386_SCATTERLIST_H
-+
-+#include <asm/types.h>
-+
-+struct scatterlist {
-+ struct page *page;
-+ unsigned int offset;
-+ unsigned int length;
-+ dma_addr_t dma_address;
-+ unsigned int dma_length;
-+};
-+
-+/* These macros should be used after a pci_map_sg call has been done
-+ * to get bus addresses of each of the SG entries and their lengths.
-+ * You should only work with the number of sg entries pci_map_sg
-+ * returns.
-+ */
-+#define sg_dma_address(sg) ((sg)->dma_address)
-+#define sg_dma_len(sg) ((sg)->dma_length)
-+
-+#define ISA_DMA_THRESHOLD (0x00ffffff)
-+
-+#endif /* !(_I386_SCATTERLIST_H) */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/segment.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/segment.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/segment.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/segment.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,150 @@
-+#ifndef _ASM_SEGMENT_H
-+#define _ASM_SEGMENT_H
-+
-+/*
-+ * The layout of the per-CPU GDT under Linux:
-+ *
-+ * 0 - null
-+ * 1 - reserved
-+ * 2 - reserved
-+ * 3 - reserved
-+ *
-+ * 4 - unused <==== new cacheline
-+ * 5 - unused
-+ *
-+ * ------- start of TLS (Thread-Local Storage) segments:
-+ *
-+ * 6 - TLS segment #1 [ glibc's TLS segment ]
-+ * 7 - TLS segment #2 [ Wine's %fs Win32 segment ]
-+ * 8 - TLS segment #3
-+ * 9 - reserved
-+ * 10 - reserved
-+ * 11 - reserved
-+ *
-+ * ------- start of kernel segments:
-+ *
-+ * 12 - kernel code segment <==== new cacheline
-+ * 13 - kernel data segment
-+ * 14 - default user CS
-+ * 15 - default user DS
-+ * 16 - TSS
-+ * 17 - LDT
-+ * 18 - PNPBIOS support (16->32 gate)
-+ * 19 - PNPBIOS support
-+ * 20 - PNPBIOS support
-+ * 21 - PNPBIOS support
-+ * 22 - PNPBIOS support
-+ * 23 - APM BIOS support
-+ * 24 - APM BIOS support
-+ * 25 - APM BIOS support
-+ *
-+ * 26 - ESPFIX small SS
-+ * 27 - per-cpu [ offset to per-cpu data area ]
-+ * 28 - unused
-+ * 29 - unused
-+ * 30 - unused
-+ * 31 - TSS for double fault handler
-+ */
-+#define GDT_ENTRY_TLS_ENTRIES 3
-+#define GDT_ENTRY_TLS_MIN 6
-+#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
-+
-+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
-+
-+#define GDT_ENTRY_DEFAULT_USER_CS 14
-+#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
-+
-+#define GDT_ENTRY_DEFAULT_USER_DS 15
-+#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
-+
-+#define GDT_ENTRY_KERNEL_BASE 12
-+
-+#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
-+#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
-+
-+#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
-+#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
-+
-+#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
-+#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5)
-+
-+#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6)
-+#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11)
-+
-+#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
-+#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
-+
-+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
-+#ifdef CONFIG_SMP
-+#define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
-+#else
-+#define __KERNEL_PERCPU 0
-+#endif
-+
-+#define GDT_ENTRY_DOUBLEFAULT_TSS 31
-+
-+/*
-+ * The GDT has 32 entries
-+ */
-+#define GDT_ENTRIES 32
-+#define GDT_SIZE (GDT_ENTRIES * 8)
-+
-+/* Simple and small GDT entries for booting only */
-+
-+#define GDT_ENTRY_BOOT_CS 2
-+#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8)
-+
-+#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1)
-+#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8)
-+
-+/* The PnP BIOS entries in the GDT */
-+#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0)
-+#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1)
-+#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2)
-+#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3)
-+#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4)
-+
-+/* The PnP BIOS selectors */
-+#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */
-+#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */
-+#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */
-+#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */
-+#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */
-+
-+/*
-+ * The interrupt descriptor table has room for 256 entries;
-+ * the size of the global descriptor table depends on the number
-+ * of tasks we can have.
-+ */
-+#define IDT_ENTRIES 256
-+
-+/* Bottom two bits of selector give the ring privilege level */
-+#define SEGMENT_RPL_MASK 0x3
-+/* Bit 2 is table indicator (LDT/GDT) */
-+#define SEGMENT_TI_MASK 0x4
-+
-+/* User mode is privilege level 3 */
-+#define USER_RPL 0x3
-+/* LDT segment has TI set, GDT has it cleared */
-+#define SEGMENT_LDT 0x4
-+#define SEGMENT_GDT 0x0
-+
-+#define get_kernel_rpl() (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1)
-+
-+/*
-+ * Matching rules for certain types of segments.
-+ */
-+
-+/* Matches only __KERNEL_CS, ignoring PnP / USER / APM segments */
-+#define SEGMENT_IS_KERNEL_CODE(x) (((x) & ~3) == GDT_ENTRY_KERNEL_CS * 8 \
-+ || ((x) & ~3) == (FLAT_KERNEL_CS & ~3))
-+
-+/* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */
-+#define SEGMENT_IS_FLAT_CODE(x) (((x) & ~0x13) == GDT_ENTRY_KERNEL_CS * 8 \
-+ || ((x) & ~3) == (FLAT_KERNEL_CS & ~3) \
-+ || ((x) & ~3) == (FLAT_USER_CS & ~3))
-+
-+/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
-+#define SEGMENT_IS_PNP_CODE(x) (((x) & ~0x0b) == GDT_ENTRY_PNPBIOS_BASE * 8)
-+
-+#endif
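Worked selector values for the definitions above (illustrative, not in the patch):

    __KERNEL_CS = 12 * 8     = 0x60
    __KERNEL_DS = 13 * 8     = 0x68
    __USER_CS   = 14 * 8 + 3 = 0x73
    __USER_DS   = 15 * 8 + 3 = 0x7b

so (__USER_CS & SEGMENT_RPL_MASK) == USER_RPL, i.e. ring 3, while the kernel selectors carry RPL 0; under Xen the kernel actually runs in ring 1 unless XENFEAT_supervisor_mode_kernel is set, which is what get_kernel_rpl() reports.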
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/setup.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/setup.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/setup.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/setup.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,86 @@
-+/*
-+ * Just a placeholder. We don't want to have to test x86 before
-+ * we include stuff.
-+ */
-+
-+#ifndef _i386_SETUP_H
-+#define _i386_SETUP_H
-+
-+#define COMMAND_LINE_SIZE 2048
-+
-+#ifdef __KERNEL__
-+#include <linux/pfn.h>
-+
-+/*
-+ * Reserved space for vmalloc and iomap - defined in asm/page.h
-+ */
-+#define MAXMEM_PFN PFN_DOWN(MAXMEM)
-+#define MAX_NONPAE_PFN (1 << 20)
-+
-+#define PARAM_SIZE 4096
-+
-+#define OLD_CL_MAGIC_ADDR 0x90020
-+#define OLD_CL_MAGIC 0xA33F
-+#define OLD_CL_BASE_ADDR 0x90000
-+#define OLD_CL_OFFSET 0x90022
-+#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
-+
-+#ifndef __ASSEMBLY__
-+/*
-+ * This is set up by the setup-routine at boot-time
-+ */
-+extern unsigned char boot_params[PARAM_SIZE];
-+
-+#define PARAM (boot_params)
-+#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
-+#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
-+#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
-+#define E820_MAP_NR (*(char*) (PARAM+E820NR))
-+#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
-+#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
-+#define IST_INFO (*(struct ist_info *) (PARAM+0x60))
-+#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
-+#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
-+#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
-+#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
-+#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
-+#define EFI_MEMMAP ((void *) *((unsigned long *)(PARAM+0x1d0)))
-+#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
-+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
-+#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
-+#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
-+#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
-+#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
-+#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
-+#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
-+#define INITRD_START (__pa(xen_start_info->mod_start))
-+#define INITRD_SIZE (xen_start_info->mod_len)
-+#define EDID_INFO (*(struct edid_info *) (PARAM+0x440))
-+#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
-+#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
-+#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
-+#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
-+
-+/*
-+ * Do NOT EVER look at the BIOS memory size location.
-+ * It does not work on many machines.
-+ */
-+#define LOWMEMSIZE() (0x9f000)
-+
-+struct e820entry;
-+
-+char * __init machine_specific_memory_setup(void);
-+char *memory_setup(void);
-+
-+int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
-+int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
-+void __init add_memory_region(unsigned long long start,
-+ unsigned long long size, int type);
-+
-+extern unsigned long init_pg_tables_end;
-+
-+#endif /* __ASSEMBLY__ */
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /* _i386_SETUP_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/smp.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/smp.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/smp.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/smp.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,191 @@
-+#ifndef __ASM_SMP_H
-+#define __ASM_SMP_H
-+
-+/*
-+ * We need the APIC definitions automatically as part of 'smp.h'
-+ */
-+#ifndef __ASSEMBLY__
-+#include <linux/kernel.h>
-+#include <linux/threads.h>
-+#include <linux/cpumask.h>
-+#endif
-+
-+#if defined(CONFIG_X86_LOCAL_APIC) && !defined(__ASSEMBLY__)
-+#include <asm/bitops.h>
-+#include <asm/mpspec.h>
-+#include <asm/apic.h>
-+#ifdef CONFIG_X86_IO_APIC
-+#include <asm/io_apic.h>
-+#endif
-+#endif
-+
-+#define BAD_APICID 0xFFu
-+#ifdef CONFIG_SMP
-+#ifndef __ASSEMBLY__
-+
-+/*
-+ * Private routines/data
-+ */
-+
-+extern void smp_alloc_memory(void);
-+extern int pic_mode;
-+extern int smp_num_siblings;
-+extern cpumask_t cpu_sibling_map[];
-+extern cpumask_t cpu_core_map[];
-+
-+extern void (*mtrr_hook) (void);
-+extern void zap_low_mappings (void);
-+extern void lock_ipi_call_lock(void);
-+extern void unlock_ipi_call_lock(void);
-+
-+#define MAX_APICID 256
-+extern u8 x86_cpu_to_apicid[];
-+
-+#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+extern void cpu_exit_clear(void);
-+extern void cpu_uninit(void);
-+#endif
-+
-+#ifndef CONFIG_XEN
-+struct smp_ops
-+{
-+ void (*smp_prepare_boot_cpu)(void);
-+ void (*smp_prepare_cpus)(unsigned max_cpus);
-+ int (*cpu_up)(unsigned cpu);
-+ void (*smp_cpus_done)(unsigned max_cpus);
-+
-+ void (*smp_send_stop)(void);
-+ void (*smp_send_reschedule)(int cpu);
-+ int (*smp_call_function_mask)(cpumask_t mask,
-+ void (*func)(void *info), void *info,
-+ int wait);
-+};
-+
-+extern struct smp_ops smp_ops;
-+
-+static inline void smp_prepare_boot_cpu(void)
-+{
-+ smp_ops.smp_prepare_boot_cpu();
-+}
-+static inline void smp_prepare_cpus(unsigned int max_cpus)
-+{
-+ smp_ops.smp_prepare_cpus(max_cpus);
-+}
-+static inline int __cpu_up(unsigned int cpu)
-+{
-+ return smp_ops.cpu_up(cpu);
-+}
-+static inline void smp_cpus_done(unsigned int max_cpus)
-+{
-+ smp_ops.smp_cpus_done(max_cpus);
-+}
-+
-+static inline void smp_send_stop(void)
-+{
-+ smp_ops.smp_send_stop();
-+}
-+static inline void smp_send_reschedule(int cpu)
-+{
-+ smp_ops.smp_send_reschedule(cpu);
-+}
-+static inline int smp_call_function_mask(cpumask_t mask,
-+ void (*func) (void *info), void *info,
-+ int wait)
-+{
-+ return smp_ops.smp_call_function_mask(mask, func, info, wait);
-+}
-+
-+void native_smp_prepare_boot_cpu(void);
-+void native_smp_prepare_cpus(unsigned int max_cpus);
-+int native_cpu_up(unsigned int cpunum);
-+void native_smp_cpus_done(unsigned int max_cpus);
-+
-+#define startup_ipi_hook(phys_apicid, start_eip, start_esp) \
-+do { } while (0)
-+
-+#else
-+
-+
-+void xen_smp_send_stop(void);
-+void xen_smp_send_reschedule(int cpu);
-+int xen_smp_call_function_mask(cpumask_t mask,
-+ void (*func) (void *info), void *info,
-+ int wait);
-+
-+#define smp_send_stop xen_smp_send_stop
-+#define smp_send_reschedule xen_smp_send_reschedule
-+#define smp_call_function_mask xen_smp_call_function_mask
-+
-+#endif
-+
-+/*
-+ * This function is needed by all SMP systems. It must _always_ be valid
-+ * from the initial startup. We map APIC_BASE very early in page_setup(),
-+ * so this is correct in the x86 case.
-+ */
-+DECLARE_PER_CPU(int, cpu_number);
-+#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
-+
-+extern cpumask_t cpu_possible_map;
-+#define cpu_callin_map cpu_possible_map
-+
-+/* We don't mark CPUs online until __cpu_up(), so we need another measure */
-+static inline int num_booting_cpus(void)
-+{
-+ return cpus_weight(cpu_possible_map);
-+}
-+
-+extern int safe_smp_processor_id(void);
-+extern int __cpu_disable(void);
-+extern void __cpu_die(unsigned int cpu);
-+extern void prefill_possible_map(void);
-+extern unsigned int num_processors;
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#else /* CONFIG_SMP */
-+
-+#define safe_smp_processor_id() 0
-+#define cpu_physical_id(cpu) boot_cpu_physical_apicid
-+
-+#define NO_PROC_ID 0xFF /* No processor magic marker */
-+
-+#endif /* CONFIG_SMP */
-+
-+#ifndef __ASSEMBLY__
-+
-+#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
-+
-+#ifdef APIC_DEFINITION
-+extern int hard_smp_processor_id(void);
-+#else
-+#include <mach_apicdef.h>
-+static inline int hard_smp_processor_id(void)
-+{
-+ /* we don't want to mark this access volatile - bad code generation */
-+ return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
-+}
-+#endif /* APIC_DEFINITION */
-+
-+#else /* CONFIG_X86_LOCAL_APIC */
-+
-+#ifndef CONFIG_SMP
-+#define hard_smp_processor_id() 0
-+#endif
-+
-+#endif /* CONFIG_X86_LOCAL_APIC */
-+
-+extern u8 apicid_2_node[];
-+
-+#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
-+static __inline int logical_smp_processor_id(void)
-+{
-+ /* we don't want to mark this access volatile - bad code generation */
-+ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
-+}
-+#endif
-+#endif
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/spinlock.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/spinlock.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/spinlock.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/spinlock.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,217 @@
-+#ifndef __ASM_SPINLOCK_H
-+#define __ASM_SPINLOCK_H
-+
-+#include <asm/atomic.h>
-+#include <asm/rwlock.h>
-+#include <asm/page.h>
-+#include <asm/processor.h>
-+#include <linux/compiler.h>
-+
-+#define CLI_STRING "#cli"
-+#define STI_STRING "#sti"
-+#define CLI_STI_CLOBBERS
-+#define CLI_STI_INPUT_ARGS
-+
-+/*
-+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
-+ *
-+ * Simple spin lock operations. There are two variants, one clears IRQ's
-+ * on the local processor, one does not.
-+ *
-+ * We make no fairness assumptions. They have a cost.
-+ *
-+ * (the type definitions are in asm/spinlock_types.h)
-+ */
-+
-+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
-+{
-+ return *(volatile signed char *)(&(x)->slock) <= 0;
-+}
-+
-+static inline void __raw_spin_lock(raw_spinlock_t *lock)
-+{
-+ asm volatile("\n1:\n" \
-+ LOCK_PREFIX "decb %0\n\t"
-+ "jns 3f\n"
-+ "2:\t"
-+ "rep;nop\n\t"
-+ "cmpb $0,%0\n\t"
-+ "jle 2b\n\t"
-+ "jmp 1b\n"
-+ "3:\n\t"
-+ : "+m" (lock->slock) : : "memory");
-+}
-+
-+/*
-+ * It is easier for the lock validator if interrupts are not re-enabled
-+ * in the middle of a lock-acquire. This is a performance feature anyway
-+ * so we turn it off:
-+ *
-+ * NOTE: there's an irqs-on section here, which normally would have to be
-+ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
-+ */
-+#ifndef CONFIG_PROVE_LOCKING
-+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
-+{
-+ asm volatile(
-+ "\n1:\t"
-+ LOCK_PREFIX "decb %[slock]\n\t"
-+ "jns 5f\n"
-+ "2:\t"
-+ "testl $0x200, %[flags]\n\t"
-+ "jz 4f\n\t"
-+ STI_STRING "\n"
-+ "3:\t"
-+ "rep;nop\n\t"
-+ "cmpb $0, %[slock]\n\t"
-+ "jle 3b\n\t"
-+ CLI_STRING "\n\t"
-+ "jmp 1b\n"
-+ "4:\t"
-+ "rep;nop\n\t"
-+ "cmpb $0, %[slock]\n\t"
-+ "jg 1b\n\t"
-+ "jmp 4b\n"
-+ "5:\n\t"
-+ : [slock] "+m" (lock->slock)
-+ : [flags] "r" (flags)
-+ CLI_STI_INPUT_ARGS
-+ : "memory" CLI_STI_CLOBBERS);
-+}
-+#endif
-+
-+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
-+{
-+ char oldval;
-+ asm volatile(
-+ "xchgb %b0,%1"
-+ :"=q" (oldval), "+m" (lock->slock)
-+ :"0" (0) : "memory");
-+ return oldval > 0;
-+}
-+
-+/*
-+ * __raw_spin_unlock based on writing $1 to the low byte.
-+ * This method works. Despite all the confusion.
-+ * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
-+ * (PPro errata 66, 92)
-+ */
-+
-+#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
-+
-+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
-+{
-+ asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory");
-+}
-+
-+#else
-+
-+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
-+{
-+ char oldval = 1;
-+
-+ asm volatile("xchgb %b0, %1"
-+ : "=q" (oldval), "+m" (lock->slock)
-+ : "0" (oldval) : "memory");
-+}
-+
-+#endif
-+
-+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
-+{
-+ while (__raw_spin_is_locked(lock))
-+ cpu_relax();
-+}
-+
-+/*
-+ * Read-write spinlocks, allowing multiple readers
-+ * but only one writer.
-+ *
-+ * NOTE! it is quite common to have readers in interrupts
-+ * but no interrupt writers. For those circumstances we
-+ * can "mix" irq-safe locks - any writer needs to get a
-+ * irq-safe write-lock, but readers can get non-irqsafe
-+ * read-locks.
-+ *
-+ * On x86, we implement read-write locks as a 32-bit counter
-+ * with the high bit (sign) being the "contended" bit.
-+ *
-+ * The inline assembly is non-obvious. Think about it.
-+ *
-+ * Changed to use the same technique as rw semaphores. See
-+ * semaphore.h for details. -ben
-+ *
-+ * the helpers are in arch/i386/kernel/semaphore.c
-+ */
-+
-+/**
-+ * read_can_lock - would read_trylock() succeed?
-+ * @lock: the rwlock in question.
-+ */
-+static inline int __raw_read_can_lock(raw_rwlock_t *x)
-+{
-+ return (int)(x)->lock > 0;
-+}
-+
-+/**
-+ * write_can_lock - would write_trylock() succeed?
-+ * @lock: the rwlock in question.
-+ */
-+static inline int __raw_write_can_lock(raw_rwlock_t *x)
-+{
-+ return (x)->lock == RW_LOCK_BIAS;
-+}
-+
-+static inline void __raw_read_lock(raw_rwlock_t *rw)
-+{
-+ asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
-+ "jns 1f\n"
-+ "call __read_lock_failed\n\t"
-+ "1:\n"
-+ ::"a" (rw) : "memory");
-+}
-+
-+static inline void __raw_write_lock(raw_rwlock_t *rw)
-+{
-+ asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t"
-+ "jz 1f\n"
-+ "call __write_lock_failed\n\t"
-+ "1:\n"
-+ ::"a" (rw) : "memory");
-+}
-+
-+static inline int __raw_read_trylock(raw_rwlock_t *lock)
-+{
-+ atomic_t *count = (atomic_t *)lock;
-+ atomic_dec(count);
-+ if (atomic_read(count) >= 0)
-+ return 1;
-+ atomic_inc(count);
-+ return 0;
-+}
-+
-+static inline int __raw_write_trylock(raw_rwlock_t *lock)
-+{
-+ atomic_t *count = (atomic_t *)lock;
-+ if (atomic_sub_and_test(RW_LOCK_BIAS, count))
-+ return 1;
-+ atomic_add(RW_LOCK_BIAS, count);
-+ return 0;
-+}
-+
-+static inline void __raw_read_unlock(raw_rwlock_t *rw)
-+{
-+ asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
-+}
-+
-+static inline void __raw_write_unlock(raw_rwlock_t *rw)
-+{
-+ asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
-+ : "+m" (rw->lock) : : "memory");
-+}
-+
-+#define _raw_spin_relax(lock) cpu_relax()
-+#define _raw_read_relax(lock) cpu_relax()
-+#define _raw_write_relax(lock) cpu_relax()
-+
-+#endif /* __ASM_SPINLOCK_H */
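A worked illustration (not in the patch) of the read-write lock counter described in the comment above, assuming RW_LOCK_BIAS is 0x01000000 as defined in asm/rwlock.h:

    unlocked:           lock == RW_LOCK_BIAS
    N readers:          lock == RW_LOCK_BIAS - N   (still positive)
    writer holds lock:  lock == 0                  (bias subtracted)
    contended:          lock <  0                  (sign bit set, e.g. a
                                                    reader decremented while
                                                    a writer holds the lock)

which matches __raw_read_can_lock() testing "> 0" and __raw_write_can_lock() testing "== RW_LOCK_BIAS".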
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/swiotlb.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/swiotlb.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/swiotlb.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/swiotlb.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,43 @@
-+#ifndef _ASM_SWIOTLB_H
-+#define _ASM_SWIOTLB_H 1
-+
-+/* SWIOTLB interface */
-+
-+extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
-+ int dir);
-+extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-+ size_t size, int dir);
-+extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
-+ dma_addr_t dev_addr,
-+ size_t size, int dir);
-+extern void swiotlb_sync_single_for_device(struct device *hwdev,
-+ dma_addr_t dev_addr,
-+ size_t size, int dir);
-+extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
-+ struct scatterlist *sg, int nelems,
-+ int dir);
-+extern void swiotlb_sync_sg_for_device(struct device *hwdev,
-+ struct scatterlist *sg, int nelems,
-+ int dir);
-+extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
-+ int nents, int direction);
-+extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-+ int nents, int direction);
-+extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
-+#ifdef CONFIG_HIGHMEM
-+extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page,
-+ unsigned long offset, size_t size,
-+ enum dma_data_direction direction);
-+extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
-+ size_t size, enum dma_data_direction direction);
-+#endif
-+extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
-+extern void swiotlb_init(void);
-+
-+#ifdef CONFIG_SWIOTLB
-+extern int swiotlb;
-+#else
-+#define swiotlb 0
-+#endif
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/synch_bitops.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/synch_bitops.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/synch_bitops.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/synch_bitops.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,145 @@
-+#ifndef __XEN_SYNCH_BITOPS_H__
-+#define __XEN_SYNCH_BITOPS_H__
-+
-+/*
-+ * Copyright 1992, Linus Torvalds.
-+ * Heavily modified to provide guaranteed strong synchronisation
-+ * when communicating with Xen or other guest OSes running on other CPUs.
-+ */
-+
-+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-+#include <xen/platform-compat.h>
-+#endif
-+
-+#define ADDR (*(volatile long *) addr)
-+
-+static __inline__ void synch_set_bit(int nr, volatile void * addr)
-+{
-+ __asm__ __volatile__ (
-+ "lock btsl %1,%0"
-+ : "+m" (ADDR) : "Ir" (nr) : "memory" );
-+}
-+
-+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-+{
-+ __asm__ __volatile__ (
-+ "lock btrl %1,%0"
-+ : "+m" (ADDR) : "Ir" (nr) : "memory" );
-+}
-+
-+static __inline__ void synch_change_bit(int nr, volatile void * addr)
-+{
-+ __asm__ __volatile__ (
-+ "lock btcl %1,%0"
-+ : "+m" (ADDR) : "Ir" (nr) : "memory" );
-+}
-+
-+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
-+{
-+ int oldbit;
-+ __asm__ __volatile__ (
-+ "lock btsl %2,%1\n\tsbbl %0,%0"
-+ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
-+ return oldbit;
-+}
-+
-+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-+{
-+ int oldbit;
-+ __asm__ __volatile__ (
-+ "lock btrl %2,%1\n\tsbbl %0,%0"
-+ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
-+ return oldbit;
-+}
-+
-+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
-+{
-+ int oldbit;
-+
-+ __asm__ __volatile__ (
-+ "lock btcl %2,%1\n\tsbbl %0,%0"
-+ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
-+ return oldbit;
-+}
-+
-+struct __synch_xchg_dummy { unsigned long a[100]; };
-+#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
-+
-+#define synch_cmpxchg(ptr, old, new) \
-+((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
-+ (unsigned long)(old), \
-+ (unsigned long)(new), \
-+ sizeof(*(ptr))))
-+
-+static inline unsigned long __synch_cmpxchg(volatile void *ptr,
-+ unsigned long old,
-+ unsigned long new, int size)
-+{
-+ unsigned long prev;
-+ switch (size) {
-+ case 1:
-+ __asm__ __volatile__("lock; cmpxchgb %b1,%2"
-+ : "=a"(prev)
-+ : "q"(new), "m"(*__synch_xg(ptr)),
-+ "0"(old)
-+ : "memory");
-+ return prev;
-+ case 2:
-+ __asm__ __volatile__("lock; cmpxchgw %w1,%2"
-+ : "=a"(prev)
-+ : "r"(new), "m"(*__synch_xg(ptr)),
-+ "0"(old)
-+ : "memory");
-+ return prev;
-+#ifdef CONFIG_X86_64
-+ case 4:
-+ __asm__ __volatile__("lock; cmpxchgl %k1,%2"
-+ : "=a"(prev)
-+ : "r"(new), "m"(*__synch_xg(ptr)),
-+ "0"(old)
-+ : "memory");
-+ return prev;
-+ case 8:
-+ __asm__ __volatile__("lock; cmpxchgq %1,%2"
-+ : "=a"(prev)
-+ : "r"(new), "m"(*__synch_xg(ptr)),
-+ "0"(old)
-+ : "memory");
-+ return prev;
-+#else
-+ case 4:
-+ __asm__ __volatile__("lock; cmpxchgl %1,%2"
-+ : "=a"(prev)
-+ : "r"(new), "m"(*__synch_xg(ptr)),
-+ "0"(old)
-+ : "memory");
-+ return prev;
-+#endif
-+ }
-+ return old;
-+}
-+
-+static __always_inline int synch_const_test_bit(int nr,
-+ const volatile void * addr)
-+{
-+ return ((1UL << (nr & 31)) &
-+ (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-+}
-+
-+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
-+{
-+ int oldbit;
-+ __asm__ __volatile__ (
-+ "btl %2,%1\n\tsbbl %0,%0"
-+ : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
-+ return oldbit;
-+}
-+
-+#define synch_test_bit(nr,addr) \
-+(__builtin_constant_p(nr) ? \
-+ synch_const_test_bit((nr),(addr)) : \
-+ synch_var_test_bit((nr),(addr)))
-+
-+#define synch_cmpxchg_subword synch_cmpxchg
-+
-+#endif /* __XEN_SYNCH_BITOPS_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/system.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/system.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/system.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/system.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,318 @@
-+#ifndef __ASM_SYSTEM_H
-+#define __ASM_SYSTEM_H
-+
-+#include <linux/kernel.h>
-+#include <asm/segment.h>
-+#include <asm/cpufeature.h>
-+#include <asm/cmpxchg.h>
-+#include <asm/synch_bitops.h>
-+#include <asm/hypervisor.h>
-+
-+#ifdef __KERNEL__
-+
-+struct task_struct; /* one of the stranger aspects of C forward declarations.. */
-+extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
-+
-+/*
-+ * Saving eflags is important. Not only does it switch IOPL between tasks,
-+ * it also protects other tasks from NT leaking through sysenter etc.
-+ */
-+#define switch_to(prev,next,last) do { \
-+ unsigned long esi,edi; \
-+ asm volatile("pushfl\n\t" /* Save flags */ \
-+ "pushl %%ebp\n\t" \
-+ "movl %%esp,%0\n\t" /* save ESP */ \
-+ "movl %5,%%esp\n\t" /* restore ESP */ \
-+ "movl $1f,%1\n\t" /* save EIP */ \
-+ "pushl %6\n\t" /* restore EIP */ \
-+ "jmp __switch_to\n" \
-+ "1:\t" \
-+ "popl %%ebp\n\t" \
-+ "popfl" \
-+ :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
-+ "=a" (last),"=S" (esi),"=D" (edi) \
-+ :"m" (next->thread.esp),"m" (next->thread.eip), \
-+ "2" (prev), "d" (next)); \
-+} while (0)
-+
-+#define _set_base(addr,base) do { unsigned long __pr; \
-+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-+ "rorl $16,%%edx\n\t" \
-+ "movb %%dl,%2\n\t" \
-+ "movb %%dh,%3" \
-+ :"=&d" (__pr) \
-+ :"m" (*((addr)+2)), \
-+ "m" (*((addr)+4)), \
-+ "m" (*((addr)+7)), \
-+ "0" (base) \
-+ ); } while(0)
-+
-+#define _set_limit(addr,limit) do { unsigned long __lr; \
-+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-+ "rorl $16,%%edx\n\t" \
-+ "movb %2,%%dh\n\t" \
-+ "andb $0xf0,%%dh\n\t" \
-+ "orb %%dh,%%dl\n\t" \
-+ "movb %%dl,%2" \
-+ :"=&d" (__lr) \
-+ :"m" (*(addr)), \
-+ "m" (*((addr)+6)), \
-+ "0" (limit) \
-+ ); } while(0)
-+
-+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
-+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
-+
-+/*
-+ * Load a segment. Fall back on loading the zero
-+ * segment if something goes wrong..
-+ */
-+#define loadsegment(seg,value) \
-+ asm volatile("\n" \
-+ "1:\t" \
-+ "mov %0,%%" #seg "\n" \
-+ "2:\n" \
-+ ".section .fixup,\"ax\"\n" \
-+ "3:\t" \
-+ "pushl $0\n\t" \
-+ "popl %%" #seg "\n\t" \
-+ "jmp 2b\n" \
-+ ".previous\n" \
-+ ".section __ex_table,\"a\"\n\t" \
-+ ".align 4\n\t" \
-+ ".long 1b,3b\n" \
-+ ".previous" \
-+ : :"rm" (value))
-+
-+/*
-+ * Save a segment register away
-+ */
-+#define savesegment(seg, value) \
-+ asm volatile("mov %%" #seg ",%0":"=rm" (value))
-+
-+static inline void xen_clts(void)
-+{
-+ HYPERVISOR_fpu_taskswitch(0);
-+}
-+
-+static inline unsigned long xen_read_cr0(void)
-+{
-+ unsigned long val;
-+ asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
-+ return val;
-+}
-+
-+static inline void xen_write_cr0(unsigned long val)
-+{
-+ asm volatile("movl %0,%%cr0": :"r" (val));
-+}
-+
-+#define xen_read_cr2() (current_vcpu_info()->arch.cr2)
-+
-+static inline void xen_write_cr2(unsigned long val)
-+{
-+ asm volatile("movl %0,%%cr2": :"r" (val));
-+}
-+
-+static inline unsigned long xen_read_cr3(void)
-+{
-+ unsigned long val;
-+ asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
-+ return mfn_to_pfn(xen_cr3_to_pfn(val)) << PAGE_SHIFT;
-+}
-+
-+static inline void xen_write_cr3(unsigned long val)
-+{
-+ val = xen_pfn_to_cr3(pfn_to_mfn(val >> PAGE_SHIFT));
-+ asm volatile("movl %0,%%cr3": :"r" (val));
-+}
-+
-+static inline unsigned long xen_read_cr4(void)
-+{
-+ unsigned long val;
-+ asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
-+ return val;
-+}
-+
-+static inline unsigned long xen_read_cr4_safe(void)
-+{
-+ unsigned long val;
-+ /* This could fault if %cr4 does not exist */
-+ asm("1: movl %%cr4, %0 \n"
-+ "2: \n"
-+ ".section __ex_table,\"a\" \n"
-+ ".long 1b,2b \n"
-+ ".previous \n"
-+ : "=r" (val): "0" (0));
-+ return val;
-+}
-+
-+static inline void xen_write_cr4(unsigned long val)
-+{
-+ asm volatile("movl %0,%%cr4": :"r" (val));
-+}
-+
-+static inline void xen_wbinvd(void)
-+{
-+ asm volatile("wbinvd": : :"memory");
-+}
-+
-+#define read_cr0() (xen_read_cr0())
-+#define write_cr0(x) (xen_write_cr0(x))
-+#define read_cr2() (xen_read_cr2())
-+#define write_cr2(x) (xen_write_cr2(x))
-+#define read_cr3() (xen_read_cr3())
-+#define write_cr3(x) (xen_write_cr3(x))
-+#define read_cr4() (xen_read_cr4())
-+#define read_cr4_safe() (xen_read_cr4_safe())
-+#define write_cr4(x) (xen_write_cr4(x))
-+#define wbinvd() (xen_wbinvd())
-+
-+/* Clear the 'TS' bit */
-+#define clts() (xen_clts())
-+
-+/* Set the 'TS' bit */
-+#define stts() (HYPERVISOR_fpu_taskswitch(1))
-+
-+#endif /* __KERNEL__ */
-+
-+static inline unsigned long get_limit(unsigned long segment)
-+{
-+ unsigned long __limit;
-+ __asm__("lsll %1,%0"
-+ :"=r" (__limit):"r" (segment));
-+ return __limit+1;
-+}
-+
-+#define nop() __asm__ __volatile__ ("nop")
-+
-+/*
-+ * Force strict CPU ordering.
-+ * And yes, this is required on UP too when we're talking
-+ * to devices.
-+ *
-+ * For now, "wmb()" doesn't actually do anything, as all
-+ * Intel CPUs follow what Intel calls a *Processor Order*,
-+ * in which all writes are seen in the program order even
-+ * outside the CPU.
-+ *
-+ * I expect future Intel CPUs to have a weaker ordering,
-+ * but I'd also expect them to finally get their act together
-+ * and add some real memory barriers if so.
-+ *
-+ * Some non-Intel clones support out-of-order stores; wmb() ceases to be a
-+ * no-op for these.
-+ */
-+
-+
-+/*
-+ * Actually only lfence would be needed for mb() because all stores done
-+ * by the kernel should be already ordered. But keep a full barrier for now.
-+ */
-+
-+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-+
-+/**
-+ * read_barrier_depends - Flush all pending reads that subsequent reads
-+ * depend on.
-+ *
-+ * No data-dependent reads from memory-like regions are ever reordered
-+ * over this barrier. All reads preceding this primitive are guaranteed
-+ * to access memory (but not necessarily other CPUs' caches) before any
-+ * reads following this primitive that depend on the data returned by
-+ * any of the preceding reads. This primitive is much lighter weight than
-+ * rmb() on most CPUs, and is never heavier weight than
-+ * rmb().
-+ *
-+ * These ordering constraints are respected by both the local CPU
-+ * and the compiler.
-+ *
-+ * Ordering is not guaranteed by anything other than these primitives,
-+ * not even by data dependencies. See the documentation for
-+ * memory_barrier() for examples and URLs to more information.
-+ *
-+ * For example, the following code would force ordering (the initial
-+ * value of "a" is zero, "b" is one, and "p" is "&a"):
-+ *
-+ * <programlisting>
-+ * CPU 0 CPU 1
-+ *
-+ * b = 2;
-+ * memory_barrier();
-+ * p = &b; q = p;
-+ * read_barrier_depends();
-+ * d = *q;
-+ * </programlisting>
-+ *
-+ * because the read of "*q" depends on the read of "p" and these
-+ * two reads are separated by a read_barrier_depends(). However,
-+ * the following code, with the same initial values for "a" and "b":
-+ *
-+ * <programlisting>
-+ * CPU 0 CPU 1
-+ *
-+ * a = 2;
-+ * memory_barrier();
-+ * b = 3; y = b;
-+ * read_barrier_depends();
-+ * x = a;
-+ * </programlisting>
-+ *
-+ * does not enforce ordering, since there is no data dependency between
-+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
-+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
-+ * in cases like this where there are no data dependencies.
-+ **/
-+
-+#define read_barrier_depends() do { } while(0)
-+
-+#ifdef CONFIG_X86_OOSTORE
-+/* Actually there are no OOO-store-capable CPUs for now that do SSE,
-+ but allow for the possibility already. */
-+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-+#else
-+#define wmb() __asm__ __volatile__ ("": : :"memory")
-+#endif
-+
-+#ifdef CONFIG_SMP
-+#define smp_mb() mb()
-+#define smp_rmb() rmb()
-+#define smp_wmb() wmb()
-+#define smp_read_barrier_depends() read_barrier_depends()
-+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-+#else
-+#define smp_mb() barrier()
-+#define smp_rmb() barrier()
-+#define smp_wmb() barrier()
-+#define smp_read_barrier_depends() do { } while(0)
-+#define set_mb(var, value) do { var = value; barrier(); } while (0)
-+#endif
-+
-+#include <linux/irqflags.h>
-+
-+/*
-+ * disable hlt during certain critical i/o operations
-+ */
-+#define HAVE_DISABLE_HLT
-+void disable_hlt(void);
-+void enable_hlt(void);
-+
-+extern int es7000_plat;
-+void cpu_idle_wait(void);
-+
-+/*
-+ * On SMP systems, when the scheduler does migration-cost autodetection,
-+ * it needs a way to flush as much of the CPU's caches as possible:
-+ */
-+static inline void sched_cacheflush(void)
-+{
-+ wbinvd();
-+}
-+
-+extern unsigned long arch_align_stack(unsigned long sp);
-+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-+
-+void default_idle(void);
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/tlbflush.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/tlbflush.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/tlbflush.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/tlbflush.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,114 @@
-+#ifndef _I386_TLBFLUSH_H
-+#define _I386_TLBFLUSH_H
-+
-+#include <linux/mm.h>
-+#include <asm/processor.h>
-+
-+#define __flush_tlb() xen_tlb_flush()
-+#define __flush_tlb_global() xen_tlb_flush()
-+#define __flush_tlb_all() xen_tlb_flush()
-+
-+#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
-+
-+#define __flush_tlb_single(addr) xen_invlpg(addr)
-+
-+#define __flush_tlb_one(addr) __flush_tlb_single(addr)
-+
-+/*
-+ * TLB flushing:
-+ *
-+ * - flush_tlb() flushes the current mm struct TLBs
-+ * - flush_tlb_all() flushes all processes' TLBs
-+ * - flush_tlb_mm(mm) flushes the specified mm context TLBs
-+ * - flush_tlb_page(vma, vmaddr) flushes one page
-+ * - flush_tlb_range(vma, start, end) flushes a range of pages
-+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
-+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
-+ * - flush_tlb_others(cpumask, mm, va) flushes the TLBs on other cpus
-+ *
-+ * ..but the i386 has somewhat limited tlb flushing capabilities,
-+ * and page-granular flushes are available only on i486 and up.
-+ */
-+
-+#define TLB_FLUSH_ALL 0xffffffff
-+
-+
-+#ifndef CONFIG_SMP
-+
-+#include <linux/sched.h>
-+
-+#define flush_tlb() __flush_tlb()
-+#define flush_tlb_all() __flush_tlb_all()
-+#define local_flush_tlb() __flush_tlb()
-+
-+static inline void flush_tlb_mm(struct mm_struct *mm)
-+{
-+ if (mm == current->active_mm)
-+ __flush_tlb();
-+}
-+
-+static inline void flush_tlb_page(struct vm_area_struct *vma,
-+ unsigned long addr)
-+{
-+ if (vma->vm_mm == current->active_mm)
-+ __flush_tlb_one(addr);
-+}
-+
-+static inline void flush_tlb_range(struct vm_area_struct *vma,
-+ unsigned long start, unsigned long end)
-+{
-+ if (vma->vm_mm == current->active_mm)
-+ __flush_tlb();
-+}
-+
-+static inline void xen_flush_tlb_others(const cpumask_t *cpumask,
-+ struct mm_struct *mm, unsigned long va)
-+{
-+}
-+
-+#else /* SMP */
-+
-+#include <asm/smp.h>
-+
-+#define local_flush_tlb() \
-+ __flush_tlb()
-+
-+extern void flush_tlb_all(void);
-+extern void flush_tlb_current_task(void);
-+extern void flush_tlb_mm(struct mm_struct *);
-+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-+
-+#define flush_tlb() flush_tlb_current_task()
-+
-+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
-+{
-+ flush_tlb_mm(vma->vm_mm);
-+}
-+
-+void xen_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
-+ unsigned long va);
-+
-+#define TLBSTATE_OK 1
-+#define TLBSTATE_LAZY 2
-+
-+struct tlb_state
-+{
-+ struct mm_struct *active_mm;
-+ int state;
-+ char __cacheline_padding[L1_CACHE_BYTES-8];
-+};
-+DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
-+#endif /* SMP */
-+
-+#define flush_tlb_others(mask, mm, va) \
-+ xen_flush_tlb_others(&mask, mm, va)
-+
-+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
-+
-+static inline void flush_tlb_pgtables(struct mm_struct *mm,
-+ unsigned long start, unsigned long end)
-+{
-+ /* i386 does not keep any page table caches in TLB */
-+}
-+
-+#endif /* _I386_TLBFLUSH_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/vga.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/vga.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/vga.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/vga.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,20 @@
-+/*
-+ * Access to VGA videoram
-+ *
-+ * (c) 1998 Martin Mares <mj@ucw.cz>
-+ */
-+
-+#ifndef _LINUX_ASM_VGA_H_
-+#define _LINUX_ASM_VGA_H_
-+
-+/*
-+ * On the PC, we can just recalculate addresses and then
-+ * access the videoram directly without any black magic.
-+ */
-+
-+#define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x)
-+
-+#define vga_readb(x) (*(x))
-+#define vga_writeb(x,y) (*(y) = (x))
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/asm/xenoprof.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/xenoprof.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/asm/xenoprof.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/asm/xenoprof.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,48 @@
-+/******************************************************************************
-+ * asm-i386/mach-xen/asm/xenoprof.h
-+ *
-+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
-+ * VA Linux Systems Japan K.K.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ *
-+ */
-+#ifndef __ASM_XENOPROF_H__
-+#define __ASM_XENOPROF_H__
-+#ifdef CONFIG_XEN
-+
-+struct super_block;
-+struct dentry;
-+int xenoprof_create_files(struct super_block * sb, struct dentry * root);
-+#define HAVE_XENOPROF_CREATE_FILES
-+
-+struct xenoprof_init;
-+void xenoprof_arch_init_counter(struct xenoprof_init *init);
-+void xenoprof_arch_counter(void);
-+void xenoprof_arch_start(void);
-+void xenoprof_arch_stop(void);
-+
-+struct xenoprof_arch_shared_buffer {
-+ /* nothing */
-+};
-+struct xenoprof_shared_buffer;
-+void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf);
-+struct xenoprof_get_buffer;
-+int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer, struct xenoprof_shared_buffer* sbuf);
-+struct xenoprof_passive;
-+int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain, struct xenoprof_shared_buffer* sbuf);
-+
-+#endif /* CONFIG_XEN */
-+#endif /* __ASM_XENOPROF_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/irq_vectors.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/irq_vectors.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/irq_vectors.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/irq_vectors.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,125 @@
-+/*
-+ * This file should contain #defines for all of the interrupt vector
-+ * numbers used by this architecture.
-+ *
-+ * In addition, there are some standard defines:
-+ *
-+ * FIRST_EXTERNAL_VECTOR:
-+ * The first free place for external interrupts
-+ *
-+ * SYSCALL_VECTOR:
-+ * The IRQ vector under which a syscall makes the user-to-kernel
-+ * transition.
-+ *
-+ * TIMER_IRQ:
-+ * The IRQ number the timer interrupt comes in at.
-+ *
-+ * NR_IRQS:
-+ * The total number of interrupt vectors (including all the
-+ * architecture specific interrupts) needed.
-+ *
-+ */
-+#ifndef _ASM_IRQ_VECTORS_H
-+#define _ASM_IRQ_VECTORS_H
-+
-+/*
-+ * IDT vectors usable for external interrupt sources start
-+ * at 0x20:
-+ */
-+#define FIRST_EXTERNAL_VECTOR 0x20
-+
-+#define SYSCALL_VECTOR 0x80
-+
-+/*
-+ * Vectors 0x20-0x2f are used for ISA interrupts.
-+ */
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
-+ *
-+ * some of the following vectors are 'rare', they are merged
-+ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
-+ * TLB, reschedule and local APIC vectors are performance-critical.
-+ *
-+ * Vectors 0xf0-0xfa are free (reserved for future Linux use).
-+ */
-+#define SPURIOUS_APIC_VECTOR 0xff
-+#define ERROR_APIC_VECTOR 0xfe
-+#define INVALIDATE_TLB_VECTOR 0xfd
-+#define RESCHEDULE_VECTOR 0xfc
-+#define CALL_FUNCTION_VECTOR 0xfb
-+
-+#define THERMAL_APIC_VECTOR 0xf0
-+/*
-+ * Local APIC timer IRQ vector is on a different priority level,
-+ * to work around the 'lost local interrupt if more than 2 IRQ
-+ * sources per level' errata.
-+ */
-+#define LOCAL_TIMER_VECTOR 0xef
-+
-+#define SPURIOUS_APIC_VECTOR 0xff
-+#define ERROR_APIC_VECTOR 0xfe
-+
-+/*
-+ * First APIC vector available to drivers: (vectors 0x30-0xee)
-+ * we start at 0x31 to spread out vectors evenly between priority
-+ * levels. (0x80 is the syscall vector)
-+ */
-+#define FIRST_DEVICE_VECTOR 0x31
-+#define FIRST_SYSTEM_VECTOR 0xef
-+
-+/*
-+ * 16 8259A IRQs, 208 potential APIC interrupt sources.
-+ * Right now the APIC is mostly only used for SMP.
-+ * 256 vectors is an architectural limit. (we can have
-+ * more than 256 devices theoretically, but they will
-+ * have to use shared interrupts)
-+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
-+ * the usable vector space is 0x20-0xff (224 vectors)
-+ */
-+#endif
-+
-+#define RESCHEDULE_VECTOR 0
-+#define CALL_FUNCTION_VECTOR 1
-+#define NR_IPIS 2
-+
-+/*
-+ * The maximum number of vectors supported by i386 processors
-+ * is limited to 256. For processors other than i386, NR_VECTORS
-+ * should be changed accordingly.
-+ */
-+#define NR_VECTORS 256
-+
-+#define FPU_IRQ 13
-+
-+#define FIRST_VM86_IRQ 3
-+#define LAST_VM86_IRQ 15
-+#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
-+
-+/*
-+ * The flat IRQ space is divided into two regions:
-+ * 1. A one-to-one mapping of real physical IRQs. This space is only used
-+ * if we have physical device-access privilege. This region is at the
-+ * start of the IRQ space so that existing device drivers do not need
-+ * to be modified to translate physical IRQ numbers into our IRQ space.
-+ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
-+ * are bound using the provided bind/unbind functions.
-+ */
-+
-+#define PIRQ_BASE 0
-+#define NR_PIRQS 256
-+
-+#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
-+#define NR_DYNIRQS 256
-+
-+#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
-+#define NR_IRQ_VECTORS NR_IRQS
-+
-+#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
-+#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
-+
-+#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
-+#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
-+
-+#endif /* _ASM_IRQ_VECTORS_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/mach_apic.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/mach_apic.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/mach_apic.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/mach_apic.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,44 @@
-+#ifndef __ASM_MACH_APIC_H
-+#define __ASM_MACH_APIC_H
-+
-+#include <mach_apicdef.h>
-+#include <asm/smp.h>
-+
-+static inline cpumask_t target_cpus(void)
-+{
-+#ifdef CONFIG_SMP
-+ return cpu_online_map;
-+#else
-+ return cpumask_of_cpu(0);
-+#endif
-+}
-+#define TARGET_CPUS (target_cpus())
-+
-+#define INT_DELIVERY_MODE dest_LowestPrio
-+#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
-+
-+static inline void setup_apic_routing(void)
-+{
-+}
-+
-+static inline int multi_timer_check(int apic, int irq)
-+{
-+ return 0;
-+}
-+
-+static inline int apicid_to_node(int logical_apicid)
-+{
-+ return 0;
-+}
-+
-+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
-+{
-+ return cpus_addr(cpumask)[0];
-+}
-+
-+static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-+{
-+ return cpuid_apic >> index_msb;
-+}
-+
-+#endif /* __ASM_MACH_APIC_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/mach_traps.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/mach_traps.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/mach_traps.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/mach_traps.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,33 @@
-+/*
-+ * include/asm-xen/asm-i386/mach-xen/mach_traps.h
-+ *
-+ * Machine specific NMI handling for Xen
-+ */
-+#ifndef _MACH_TRAPS_H
-+#define _MACH_TRAPS_H
-+
-+#include <linux/bitops.h>
-+#include <xen/interface/nmi.h>
-+
-+static inline void clear_mem_error(unsigned char reason) {}
-+static inline void clear_io_check_error(unsigned char reason) {}
-+
-+static inline unsigned char get_nmi_reason(void)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ unsigned char reason = 0;
-+
-+ /* construct a value which looks like it came from
-+ * port 0x61.
-+ */
-+ if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
-+ reason |= 0x40;
-+ if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
-+ reason |= 0x80;
-+
-+ return reason;
-+}
-+
-+static inline void reassert_nmi(void) {}
-+
-+#endif /* !_MACH_TRAPS_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/mach-xen/setup_arch.h ubuntu-gutsy-xen/include/asm-i386/mach-xen/setup_arch.h
---- ubuntu-gutsy/include/asm-i386/mach-xen/setup_arch.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-i386/mach-xen/setup_arch.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,5 @@
-+/* Hook to call BIOS initialisation function */
-+
-+#define ARCH_SETUP machine_specific_arch_setup();
-+
-+void __init machine_specific_arch_setup(void);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/nmi.h ubuntu-gutsy-xen/include/asm-i386/nmi.h
---- ubuntu-gutsy/include/asm-i386/nmi.h 2007-08-18 09:40:33.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-i386/nmi.h 2007-08-18 12:38:02.000000000 -0400
-@@ -7,8 +7,6 @@
- #include <linux/pm.h>
- #include <asm/irq.h>
-
--#ifdef ARCH_HAS_NMI_WATCHDOG
--
- /**
- * do_nmi_callback
- *
-@@ -17,6 +15,8 @@
- */
- int do_nmi_callback(struct pt_regs *regs, int cpu);
-
-+#ifdef ARCH_HAS_NMI_WATCHDOG
-+
- extern int nmi_watchdog_enabled;
- extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
- extern int avail_to_resrv_perfctr_nmi(unsigned int);
-@@ -43,13 +43,10 @@
- struct file;
- extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
- void __user *, size_t *, loff_t *);
--extern int unknown_nmi_panic;
-
- void __trigger_all_cpu_backtrace(void);
- #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
-
--#endif
--
- void lapic_watchdog_stop(void);
- int lapic_watchdog_init(unsigned nmi_hz);
- int lapic_wd_event(unsigned nmi_hz);
-@@ -58,4 +55,8 @@
- void disable_lapic_nmi_watchdog(void);
- void enable_lapic_nmi_watchdog(void);
-
-+#endif
-+
-+extern int unknown_nmi_panic;
-+
- #endif /* ASM_NMI_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-i386/thread_info.h ubuntu-gutsy-xen/include/asm-i386/thread_info.h
---- ubuntu-gutsy/include/asm-i386/thread_info.h 2007-08-18 09:40:33.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-i386/thread_info.h 2007-08-18 12:38:02.000000000 -0400
-@@ -160,7 +160,11 @@
- #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
-
- /* flags to check in __switch_to() */
-+#ifndef CONFIG_XEN
- #define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP)
-+#else
-+#define _TIF_WORK_CTXSW _TIF_DEBUG
-+#endif
-
- /*
- * Thread-synchronous status.
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/acpi.h ubuntu-gutsy-xen/include/asm-x86_64/acpi.h
---- ubuntu-gutsy/include/asm-x86_64/acpi.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-x86_64/acpi.h 2007-08-18 12:38:02.000000000 -0400
-@@ -136,7 +136,9 @@
- extern int acpi_disabled;
- extern int acpi_pci_disabled;
-
-+#ifndef CONFIG_XEN
- #define ARCH_HAS_POWER_INIT 1
-+#endif
-
- extern int acpi_skip_timer_override;
- extern int acpi_use_timer_override;
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/apicdef.h ubuntu-gutsy-xen/include/asm-x86_64/apicdef.h
---- ubuntu-gutsy/include/asm-x86_64/apicdef.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-x86_64/apicdef.h 2007-08-18 12:38:02.000000000 -0400
-@@ -1,6 +1,8 @@
- #ifndef __ASM_APICDEF_H
- #define __ASM_APICDEF_H
-
-+#ifndef CONFIG_XEN
-+
- /*
- * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
- *
-@@ -114,7 +116,22 @@
-
- #define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
-
-+#else /* CONFIG_XEN */
-+
-+#define APIC_ALL_CPUS 0xFFu
-+
-+enum {
-+ APIC_DEST_ALLBUT = 0x1,
-+ APIC_DEST_SELF,
-+ APIC_DEST_ALLINC
-+};
-+
-+#endif /* CONFIG_XEN */
-+
- #define MAX_IO_APICS 128
-+
-+#ifndef CONFIG_XEN
-+
- #define MAX_LOCAL_APIC 256
-
- /*
-@@ -387,6 +404,8 @@
-
- #undef u32
-
-+#endif /* CONFIG_XEN */
-+
- #define BAD_APICID 0xFFu
-
- #endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/apic.h ubuntu-gutsy-xen/include/asm-x86_64/apic.h
---- ubuntu-gutsy/include/asm-x86_64/apic.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-x86_64/apic.h 2007-08-18 12:38:02.000000000 -0400
-@@ -3,7 +3,9 @@
-
- #include <linux/pm.h>
- #include <linux/delay.h>
-+#ifndef CONFIG_XEN
- #include <asm/fixmap.h>
-+#endif
- #include <asm/apicdef.h>
- #include <asm/system.h>
-
-@@ -32,6 +34,8 @@
- printk(s, ##a); \
- } while (0)
-
-+#ifndef CONFIG_XEN
-+
- struct pt_regs;
-
- /*
-@@ -99,6 +103,13 @@
-
- #define ARCH_APICTIMER_STOPS_ON_C3 1
-
-+#elif defined(CONFIG_X86_LOCAL_APIC)
-+
-+extern int APIC_init_uniprocessor (void);
-+extern void setup_apic_routing(void);
-+
-+#endif /* CONFIG_XEN / CONFIG_X86_LOCAL_APIC */
-+
- extern unsigned boot_cpu_id;
- extern int local_apic_timer_c2_ok;
-
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/i387.h ubuntu-gutsy-xen/include/asm-x86_64/i387.h
---- ubuntu-gutsy/include/asm-x86_64/i387.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-x86_64/i387.h 2007-08-18 12:38:02.000000000 -0400
-@@ -191,10 +191,15 @@
- preempt_enable();
- }
-
--static inline void save_init_fpu(struct task_struct *tsk)
-+static inline void __save_init_fpu(struct task_struct *tsk)
- {
- __fxsave_clear(tsk);
- task_thread_info(tsk)->status &= ~TS_USEDFPU;
-+}
-+
-+static inline void save_init_fpu(struct task_struct *tsk)
-+{
-+ __save_init_fpu(tsk);
- stts();
- }
-
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/io_apic.h ubuntu-gutsy-xen/include/asm-x86_64/io_apic.h
---- ubuntu-gutsy/include/asm-x86_64/io_apic.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-x86_64/io_apic.h 2007-08-18 12:38:02.000000000 -0400
-@@ -125,6 +125,8 @@
-
- void enable_NMI_through_LVT0 (void * dummy);
-
-+#ifndef CONFIG_XEN
- extern spinlock_t i8259A_lock;
-+#endif
-
- #endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/ipi.h ubuntu-gutsy-xen/include/asm-x86_64/ipi.h
---- ubuntu-gutsy/include/asm-x86_64/ipi.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-x86_64/ipi.h 2007-08-18 12:38:02.000000000 -0400
-@@ -1,6 +1,8 @@
- #ifndef __ASM_IPI_H
- #define __ASM_IPI_H
-
-+#ifndef CONFIG_XEN
-+
- /*
- * Copyright 2004 James Cleverdon, IBM.
- * Subject to the GNU Public License, v.2
-@@ -125,4 +127,6 @@
- local_irq_restore(flags);
- }
-
-+#endif /* CONFIG_XEN */
-+
- #endif /* __ASM_IPI_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/kexec.h ubuntu-gutsy-xen/include/asm-x86_64/kexec.h
---- ubuntu-gutsy/include/asm-x86_64/kexec.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-x86_64/kexec.h 2007-08-18 12:38:02.000000000 -0400
-@@ -89,6 +89,19 @@
- unsigned long page_list,
- unsigned long start_address) ATTRIB_NORET;
-
-+/* Under Xen we need to work with machine addresses. These macros give the
-+ * machine address of a certain page to the generic kexec code instead of
-+ * the pseudo physical address which would be given by the default macros.
-+ */
-+
-+#ifdef CONFIG_XEN
-+#define KEXEC_ARCH_HAS_PAGE_MACROS
-+#define kexec_page_to_pfn(page) pfn_to_mfn(page_to_pfn(page))
-+#define kexec_pfn_to_page(pfn) pfn_to_page(mfn_to_pfn(pfn))
-+#define kexec_virt_to_phys(addr) virt_to_machine(addr)
-+#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr))
-+#endif
-+
- #endif /* __ASSEMBLY__ */
-
- #endif /* _X86_64_KEXEC_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/agp.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/agp.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/agp.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/agp.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,42 @@
-+#ifndef AGP_H
-+#define AGP_H 1
-+
-+#include <asm/cacheflush.h>
-+#include <asm/system.h>
-+
-+/*
-+ * Functions to keep the agpgart mappings coherent.
-+ * The GART gives the CPU a physical alias of memory. The alias is
-+ * mapped uncacheable. Make sure there are no conflicting mappings
-+ * with different cachability attributes for the same page.
-+ */
-+
-+/* Caller's responsibility to call global_flush_tlb() for
-+ * performance reasons */
-+#define map_page_into_agp(page) ( \
-+ xen_create_contiguous_region((unsigned long)page_address(page), 0, 32) \
-+ ?: change_page_attr(page, 1, PAGE_KERNEL_NOCACHE))
-+#define unmap_page_from_agp(page) ( \
-+ xen_destroy_contiguous_region((unsigned long)page_address(page), 0), \
-+ /* only a fallback: xen_destroy_contiguous_region uses PAGE_KERNEL */ \
-+ change_page_attr(page, 1, PAGE_KERNEL))
-+#define flush_agp_mappings() global_flush_tlb()
-+
-+/* Could use CLFLUSH here if the cpu supports it. But then it would
-+ need to be called for each cacheline of the whole page so it may not be
-+ worth it. Would need a page for it. */
-+#define flush_agp_cache() wbinvd()
-+
-+/* Convert a physical address to an address suitable for the GART. */
-+#define phys_to_gart(x) phys_to_machine(x)
-+#define gart_to_phys(x) machine_to_phys(x)
-+
-+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
-+#define alloc_gatt_pages(order) ({ \
-+ char *_t; dma_addr_t _d; \
-+ _t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL); \
-+ _t; })
-+#define free_gatt_pages(table, order) \
-+ dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/arch_hooks.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/arch_hooks.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/arch_hooks.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/arch_hooks.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,27 @@
-+#ifndef _ASM_ARCH_HOOKS_H
-+#define _ASM_ARCH_HOOKS_H
-+
-+#include <linux/interrupt.h>
-+
-+/*
-+ * linux/include/asm/arch_hooks.h
-+ *
-+ * define the architecture specific hooks
-+ */
-+
-+/* these aren't arch hooks, they are generic routines
-+ * that can be used by the hooks */
-+extern void init_ISA_irqs(void);
-+extern void apic_intr_init(void);
-+extern void smp_intr_init(void);
-+extern irqreturn_t timer_interrupt(int irq, void *dev_id);
-+
-+/* these are the defined hooks */
-+extern void intr_init_hook(void);
-+extern void pre_intr_init_hook(void);
-+extern void pre_setup_arch_hook(void);
-+extern void trap_init_hook(void);
-+extern void time_init_hook(void);
-+extern void mca_nmi_hook(void);
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/bootsetup.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/bootsetup.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/bootsetup.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/bootsetup.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,42 @@
-+
-+#ifndef _X86_64_BOOTSETUP_H
-+#define _X86_64_BOOTSETUP_H 1
-+
-+#define BOOT_PARAM_SIZE 4096
-+extern char x86_boot_params[BOOT_PARAM_SIZE];
-+
-+/*
-+ * This is set up by the setup-routine at boot-time
-+ */
-+#define PARAM ((unsigned char *)x86_boot_params)
-+#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
-+#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
-+#define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0))
-+#define E820_MAP_NR (*(char*) (PARAM+E820NR))
-+#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
-+#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
-+#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
-+#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
-+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
-+#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
-+#define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
-+#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
-+#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
-+#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
-+#define KERNEL_START (*(unsigned int *) (PARAM+0x214))
-+
-+#define INITRD_START (__pa(xen_start_info->mod_start))
-+#define INITRD_SIZE (xen_start_info->mod_len)
-+#define EDID_INFO (*(struct edid_info *) (PARAM+0x440))
-+
-+#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
-+#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
-+#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
-+#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
-+#define COMMAND_LINE boot_command_line
-+
-+#define RAMDISK_IMAGE_START_MASK 0x07FF
-+#define RAMDISK_PROMPT_FLAG 0x8000
-+#define RAMDISK_LOAD_FLAG 0x4000
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/desc.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/desc.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/desc.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/desc.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,192 @@
-+/* Written 2000 by Andi Kleen */
-+#ifndef __ARCH_DESC_H
-+#define __ARCH_DESC_H
-+
-+#include <linux/threads.h>
-+#include <asm/ldt.h>
-+
-+#ifndef __ASSEMBLY__
-+
-+#include <linux/string.h>
-+#include <linux/smp.h>
-+#include <asm/desc_defs.h>
-+
-+#include <asm/segment.h>
-+#include <asm/mmu.h>
-+
-+extern struct desc_ptr idt_descr, cpu_gdt_descr[NR_CPUS];
-+
-+extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
-+
-+#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
-+#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
-+
-+static inline void clear_LDT(void)
-+{
-+ int cpu = get_cpu();
-+
-+ /*
-+ * NB. We load the default_ldt for lcall7/27 handling on demand, as
-+ * it slows down context switching. No one uses it anyway.
-+ */
-+ cpu = cpu; /* XXX avoid compiler warning */
-+ xen_set_ldt(0UL, 0);
-+ put_cpu();
-+}
-+
-+/*
-+ * This is the ldt that every process will get unless we need
-+ * something other than this.
-+ */
-+extern struct desc_struct default_ldt[];
-+#ifndef CONFIG_X86_NO_IDT
-+extern struct gate_struct idt_table[];
-+#endif
-+extern struct desc_ptr cpu_gdt_descr[];
-+
-+/* the cpu gdt accessor */
-+#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
-+
-+static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
-+{
-+ struct gate_struct s;
-+ s.offset_low = PTR_LOW(func);
-+ s.segment = __KERNEL_CS;
-+ s.ist = ist;
-+ s.p = 1;
-+ s.dpl = dpl;
-+ s.zero0 = 0;
-+ s.zero1 = 0;
-+ s.type = type;
-+ s.offset_middle = PTR_MIDDLE(func);
-+ s.offset_high = PTR_HIGH(func);
-+ /* does not need to be atomic because it is only done once at setup time */
-+ memcpy(adr, &s, 16);
-+}
-+
-+#ifndef CONFIG_X86_NO_IDT
-+static inline void set_intr_gate(int nr, void *func)
-+{
-+ BUG_ON((unsigned)nr > 0xFF);
-+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
-+}
-+
-+static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
-+{
-+ BUG_ON((unsigned)nr > 0xFF);
-+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
-+}
-+
-+static inline void set_system_gate(int nr, void *func)
-+{
-+ BUG_ON((unsigned)nr > 0xFF);
-+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
-+}
-+
-+static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
-+{
-+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
-+}
-+#endif
-+
-+static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
-+ unsigned size)
-+{
-+ struct ldttss_desc d;
-+ memset(&d,0,sizeof(d));
-+ d.limit0 = size & 0xFFFF;
-+ d.base0 = PTR_LOW(tss);
-+ d.base1 = PTR_MIDDLE(tss) & 0xFF;
-+ d.type = type;
-+ d.p = 1;
-+ d.limit1 = (size >> 16) & 0xF;
-+ d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
-+ d.base3 = PTR_HIGH(tss);
-+ memcpy(ptr, &d, 16);
-+}
-+
-+#ifndef CONFIG_X86_NO_TSS
-+static inline void set_tss_desc(unsigned cpu, void *addr)
-+{
-+ /*
-+ * sizeof(unsigned long) coming from an extra "long" at the end
-+ * of the iobitmap. See tss_struct definition in processor.h
-+ *
-+ * -1? seg base+limit should be pointing to the address of the
-+ * last valid byte
-+ */
-+ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS],
-+ (unsigned long)addr, DESC_TSS,
-+ IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
-+}
-+#endif
-+
-+static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
-+{
-+ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
-+ DESC_LDT, size * 8 - 1);
-+}
-+
-+#define LDT_entry_a(info) \
-+ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
-+/* Don't allow setting of the lm bit. It is useless anyway because
-+ 64bit system calls require __USER_CS. */
-+#define LDT_entry_b(info) \
-+ (((info)->base_addr & 0xff000000) | \
-+ (((info)->base_addr & 0x00ff0000) >> 16) | \
-+ ((info)->limit & 0xf0000) | \
-+ (((info)->read_exec_only ^ 1) << 9) | \
-+ ((info)->contents << 10) | \
-+ (((info)->seg_not_present ^ 1) << 15) | \
-+ ((info)->seg_32bit << 22) | \
-+ ((info)->limit_in_pages << 23) | \
-+ ((info)->useable << 20) | \
-+ /* ((info)->lm << 21) | */ \
-+ 0x7000)
-+
-+#define LDT_empty(info) (\
-+ (info)->base_addr == 0 && \
-+ (info)->limit == 0 && \
-+ (info)->contents == 0 && \
-+ (info)->read_exec_only == 1 && \
-+ (info)->seg_32bit == 0 && \
-+ (info)->limit_in_pages == 0 && \
-+ (info)->seg_not_present == 1 && \
-+ (info)->useable == 0 && \
-+ (info)->lm == 0)
-+
-+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-+{
-+ unsigned int i;
-+ u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
-+
-+ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
-+ HYPERVISOR_update_descriptor(virt_to_machine(&gdt[i]), t->tls_array[i]);
-+}
-+
-+/*
-+ * load one particular LDT into the current CPU
-+ */
-+static inline void load_LDT_nolock (mm_context_t *pc, int cpu)
-+{
-+ void *segments = pc->ldt;
-+ int count = pc->size;
-+
-+ if (likely(!count))
-+ segments = NULL;
-+
-+ xen_set_ldt((unsigned long)segments, count);
-+}
-+
-+static inline void load_LDT(mm_context_t *pc)
-+{
-+ int cpu = get_cpu();
-+ load_LDT_nolock(pc, cpu);
-+ put_cpu();
-+}
-+
-+extern struct desc_ptr idt_descr;
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/dma-mapping.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/dma-mapping.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/dma-mapping.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/dma-mapping.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,206 @@
-+#ifndef _X8664_DMA_MAPPING_H
-+#define _X8664_DMA_MAPPING_H 1
-+
-+/*
-+ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
-+ * documentation.
-+ */
-+
-+
-+#include <asm/scatterlist.h>
-+#include <asm/io.h>
-+
-+struct dma_mapping_ops {
-+ int (*mapping_error)(dma_addr_t dma_addr);
-+ void* (*alloc_coherent)(struct device *dev, size_t size,
-+ dma_addr_t *dma_handle, gfp_t gfp);
-+ void (*free_coherent)(struct device *dev, size_t size,
-+ void *vaddr, dma_addr_t dma_handle);
-+ dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
-+ size_t size, int direction);
-+ /* like map_single, but doesn't check the device mask */
-+ dma_addr_t (*map_simple)(struct device *hwdev, char *ptr,
-+ size_t size, int direction);
-+ void (*unmap_single)(struct device *dev, dma_addr_t addr,
-+ size_t size, int direction);
-+ void (*sync_single_for_cpu)(struct device *hwdev,
-+ dma_addr_t dma_handle, size_t size,
-+ int direction);
-+ void (*sync_single_for_device)(struct device *hwdev,
-+ dma_addr_t dma_handle, size_t size,
-+ int direction);
-+ void (*sync_single_range_for_cpu)(struct device *hwdev,
-+ dma_addr_t dma_handle, unsigned long offset,
-+ size_t size, int direction);
-+ void (*sync_single_range_for_device)(struct device *hwdev,
-+ dma_addr_t dma_handle, unsigned long offset,
-+ size_t size, int direction);
-+ void (*sync_sg_for_cpu)(struct device *hwdev,
-+ struct scatterlist *sg, int nelems,
-+ int direction);
-+ void (*sync_sg_for_device)(struct device *hwdev,
-+ struct scatterlist *sg, int nelems,
-+ int direction);
-+ int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
-+ int nents, int direction);
-+ void (*unmap_sg)(struct device *hwdev,
-+ struct scatterlist *sg, int nents,
-+ int direction);
-+ int (*dma_supported)(struct device *hwdev, u64 mask);
-+ int is_phys;
-+};
-+
-+extern dma_addr_t bad_dma_address;
-+extern const struct dma_mapping_ops* dma_ops;
-+extern int iommu_merge;
-+
-+#if 0
-+static inline int dma_mapping_error(dma_addr_t dma_addr)
-+{
-+ if (dma_ops->mapping_error)
-+ return dma_ops->mapping_error(dma_addr);
-+
-+ return (dma_addr == bad_dma_address);
-+}
-+
-+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-+
-+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-+
-+extern void *dma_alloc_coherent(struct device *dev, size_t size,
-+ dma_addr_t *dma_handle, gfp_t gfp);
-+extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-+ dma_addr_t dma_handle);
-+
-+static inline dma_addr_t
-+dma_map_single(struct device *hwdev, void *ptr, size_t size,
-+ int direction)
-+{
-+ BUG_ON(!valid_dma_direction(direction));
-+ return dma_ops->map_single(hwdev, ptr, size, direction);
-+}
-+
-+static inline void
-+dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
-+ int direction)
-+{
-+ BUG_ON(!valid_dma_direction(direction));
-+ dma_ops->unmap_single(dev, addr, size, direction);
-+}
-+
-+#define dma_map_page(dev,page,offset,size,dir) \
-+ dma_map_single((dev), page_address(page)+(offset), (size), (dir))
-+
-+#define dma_unmap_page dma_unmap_single
-+
-+static inline void
-+dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-+ size_t size, int direction)
-+{
-+ BUG_ON(!valid_dma_direction(direction));
-+ if (dma_ops->sync_single_for_cpu)
-+ dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
-+ direction);
-+ flush_write_buffers();
-+}
-+
-+static inline void
-+dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
-+ size_t size, int direction)
-+{
-+ BUG_ON(!valid_dma_direction(direction));
-+ if (dma_ops->sync_single_for_device)
-+ dma_ops->sync_single_for_device(hwdev, dma_handle, size,
-+ direction);
-+ flush_write_buffers();
-+}
-+
-+static inline void
-+dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-+ unsigned long offset, size_t size, int direction)
-+{
-+ BUG_ON(!valid_dma_direction(direction));
-+ if (dma_ops->sync_single_range_for_cpu) {
-+ dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
-+ }
-+
-+ flush_write_buffers();
-+}
-+
-+static inline void
-+dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
-+ unsigned long offset, size_t size, int direction)
-+{
-+ BUG_ON(!valid_dma_direction(direction));
-+ if (dma_ops->sync_single_range_for_device)
-+ dma_ops->sync_single_range_for_device(hwdev, dma_handle,
-+ offset, size, direction);
-+
-+ flush_write_buffers();
-+}
-+
-+static inline void
-+dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-+ int nelems, int direction)
-+{
-+ BUG_ON(!valid_dma_direction(direction));
-+ if (dma_ops->sync_sg_for_cpu)
-+ dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
-+ flush_write_buffers();
-+}
-+
-+static inline void
-+dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-+ int nelems, int direction)
-+{
-+ BUG_ON(!valid_dma_direction(direction));
-+ if (dma_ops->sync_sg_for_device) {
-+ dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
-+ }
-+
-+ flush_write_buffers();
-+}
-+
-+static inline int
-+dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
-+{
-+ BUG_ON(!valid_dma_direction(direction));
-+ return dma_ops->map_sg(hwdev, sg, nents, direction);
-+}
-+
-+static inline void
-+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-+ int direction)
-+{
-+ BUG_ON(!valid_dma_direction(direction));
-+ dma_ops->unmap_sg(hwdev, sg, nents, direction);
-+}
-+
-+extern int dma_supported(struct device *hwdev, u64 mask);
-+
-+/* same for gart, swiotlb, and nommu */
-+static inline int dma_get_cache_alignment(void)
-+{
-+ return boot_cpu_data.x86_clflush_size;
-+}
-+
-+#define dma_is_consistent(d, h) 1
-+
-+extern int dma_set_mask(struct device *dev, u64 mask);
-+
-+static inline void
-+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-+ enum dma_data_direction dir)
-+{
-+ flush_write_buffers();
-+}
-+
-+extern struct device fallback_dev;
-+extern int panic_on_overflow;
-+#endif
-+
-+#endif /* _X8664_DMA_MAPPING_H */
-+
-+#include <asm-i386/mach-xen/asm/dma-mapping.h>
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/e820.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/e820.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/e820.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/e820.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,63 @@
-+/*
-+ * structures and definitions for the int 15, ax=e820 memory map
-+ * scheme.
-+ *
-+ * In a nutshell, setup.S populates a scratch table in the
-+ * empty_zero_block that contains a list of usable address/size
-+ * duples. In setup.c, this information is transferred into the e820map,
-+ * and in init.c/numa.c, that new information is used to mark pages
-+ * reserved or not.
-+ */
-+#ifndef __E820_HEADER
-+#define __E820_HEADER
-+
-+#include <linux/mmzone.h>
-+
-+#define E820MAP 0x2d0 /* our map */
-+#define E820MAX 128 /* number of entries in E820MAP */
-+#define E820NR 0x1e8 /* # entries in E820MAP */
-+
-+#define E820_RAM 1
-+#define E820_RESERVED 2
-+#define E820_ACPI 3
-+#define E820_NVS 4
-+
-+#ifndef __ASSEMBLY__
-+struct e820entry {
-+ u64 addr; /* start of memory segment */
-+ u64 size; /* size of memory segment */
-+ u32 type; /* type of memory segment */
-+} __attribute__((packed));
-+
-+struct e820map {
-+ int nr_map;
-+ struct e820entry map[E820MAX];
-+};
-+
-+extern unsigned long find_e820_area(unsigned long start, unsigned long end,
-+ unsigned size);
-+extern void add_memory_region(unsigned long start, unsigned long size,
-+ int type);
-+extern void setup_memory_region(void);
-+extern void contig_e820_setup(void);
-+extern unsigned long e820_end_of_ram(void);
-+extern void e820_reserve_resources(struct e820entry *e820, int nr_map);
-+extern void e820_mark_nosave_regions(void);
-+extern void e820_print_map(char *who);
-+extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
-+extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
-+extern unsigned long e820_hole_size(unsigned long start, unsigned long end);
-+
-+extern void e820_setup_gap(struct e820entry *e820, int nr_map);
-+extern void e820_register_active_regions(int nid,
-+ unsigned long start_pfn, unsigned long end_pfn);
-+
-+extern void finish_e820_parsing(void);
-+
-+extern struct e820map e820;
-+
-+extern unsigned ebda_addr, ebda_size;
-+extern unsigned long nodemap_addr, nodemap_size;
-+#endif/*!__ASSEMBLY__*/
-+
-+#endif/*__E820_HEADER*/
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/fixmap.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/fixmap.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/fixmap.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/fixmap.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,108 @@
-+/*
-+ * fixmap.h: compile-time virtual memory allocation
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License. See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ *
-+ * Copyright (C) 1998 Ingo Molnar
-+ */
-+
-+#ifndef _ASM_FIXMAP_H
-+#define _ASM_FIXMAP_H
-+
-+#include <linux/kernel.h>
-+#include <asm/page.h>
-+#include <asm/vsyscall.h>
-+#include <asm/acpi.h>
-+
-+/*
-+ * Here we define all the compile-time 'special' virtual
-+ * addresses. The point is to have a constant address at
-+ * compile time, but to set the physical address only
-+ * in the boot process.
-+ *
-+ * these 'compile-time allocated' memory buffers are
-+ * fixed-size 4k pages. (or larger if used with an increment
-+ * higher than 1) use fixmap_set(idx,phys) to associate
-+ * physical memory with fixmap indices.
-+ *
-+ * TLB entries of such buffers will not be flushed across
-+ * task switches.
-+ */
-+
-+enum fixed_addresses {
-+ VSYSCALL_LAST_PAGE,
-+ VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
-+ VSYSCALL_HPET,
-+ FIX_HPET_BASE,
-+#ifndef CONFIG_XEN
-+ FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
-+ FIX_IO_APIC_BASE_0,
-+ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
-+#endif
-+#ifdef CONFIG_ACPI
-+ FIX_ACPI_BEGIN,
-+ FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-+#endif
-+ FIX_SHARED_INFO,
-+#define NR_FIX_ISAMAPS 256
-+ FIX_ISAMAP_END,
-+ FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
-+ __end_of_permanent_fixed_addresses,
-+ /* temporary boot-time mappings, used before ioremap() is functional */
-+#define NR_FIX_BTMAPS 16
-+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
-+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
-+ __end_of_fixed_addresses
-+};
-+
-+extern void __set_fixmap (enum fixed_addresses idx,
-+ unsigned long phys, pgprot_t flags);
-+
-+#define set_fixmap(idx, phys) \
-+ __set_fixmap(idx, phys, PAGE_KERNEL)
-+/*
-+ * Some hardware wants to get fixmapped without caching.
-+ */
-+#define set_fixmap_nocache(idx, phys) \
-+ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-+
-+#define clear_fixmap(idx) \
-+ __set_fixmap(idx, 0, __pgprot(0))
-+
-+#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
-+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-+
-+/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
-+#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
-+#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
-+
-+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-+
-+extern void __this_fixmap_does_not_exist(void);
-+
-+/*
-+ * 'index to address' translation. If anyone tries to use the idx
-+ * directly without translation, we catch the bug with a NULL-deference
-+ * kernel oops. Illegal ranges of incoming indices are caught too.
-+ */
-+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
-+{
-+ /*
-+ * this branch gets completely eliminated after inlining,
-+ * except when someone tries to use fixaddr indices in an
-+ * illegal way. (such as mixing up address types or using
-+ * out-of-range indices).
-+ *
-+ * If it doesn't get removed, the linker will complain
-+ * loudly with a reasonably clear error message..
-+ */
-+ if (idx >= __end_of_fixed_addresses)
-+ __this_fixmap_does_not_exist();
-+
-+ return __fix_to_virt(idx);
-+}
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/floppy.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/floppy.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/floppy.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/floppy.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,206 @@
-+/*
-+ * Architecture specific parts of the Floppy driver
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License. See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ *
-+ * Copyright (C) 1995
-+ *
-+ * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
-+ */
-+#ifndef __ASM_XEN_X86_64_FLOPPY_H
-+#define __ASM_XEN_X86_64_FLOPPY_H
-+
-+#include <linux/vmalloc.h>
-+
-+/*
-+ * The DMA channel used by the floppy controller cannot access data at
-+ * addresses >= 16MB
-+ *
-+ * Went back to the 1MB limit, as some people had problems with the floppy
-+ * driver otherwise. It doesn't matter much for performance anyway, as most
-+ * floppy accesses go through the track buffer.
-+ */
-+#define _CROSS_64KB(a,s,vdma) \
-+(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
-+
-+/* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. */
-+#include <asm/dma.h>
-+#undef MAX_DMA_ADDRESS
-+#define MAX_DMA_ADDRESS 0
-+#define CROSS_64KB(a,s) (0)
-+
-+#define fd_inb(port) inb_p(port)
-+#define fd_outb(value,port) outb_p(value,port)
-+
-+#define fd_request_dma() (0)
-+#define fd_free_dma() ((void)0)
-+#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
-+#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
-+#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
-+#define fd_get_dma_residue() vdma_get_dma_residue(FLOPPY_DMA)
-+/*
-+ * Do not use vmalloc/vfree: floppy_release_irq_and_dma() gets called from
-+ * softirq context via motor_off_callback. A generic bug we happen to trigger.
-+ */
-+#define fd_dma_mem_alloc(size) __get_free_pages(GFP_KERNEL|__GFP_NORETRY, get_order(size))
-+#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
-+#define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io)
-+
-+static int virtual_dma_count;
-+static int virtual_dma_residue;
-+static char *virtual_dma_addr;
-+static int virtual_dma_mode;
-+static int doing_pdma;
-+
-+static irqreturn_t floppy_hardint(int irq, void *dev_id)
-+{
-+ register unsigned char st;
-+
-+#undef TRACE_FLPY_INT
-+
-+#ifdef TRACE_FLPY_INT
-+ static int calls=0;
-+ static int bytes=0;
-+ static int dma_wait=0;
-+#endif
-+ if (!doing_pdma)
-+ return floppy_interrupt(irq, dev_id);
-+
-+#ifdef TRACE_FLPY_INT
-+ if(!calls)
-+ bytes = virtual_dma_count;
-+#endif
-+
-+ {
-+ register int lcount;
-+ register char *lptr;
-+
-+ st = 1;
-+ for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
-+ lcount; lcount--, lptr++) {
-+ st=inb(virtual_dma_port+4) & 0xa0 ;
-+ if(st != 0xa0)
-+ break;
-+ if(virtual_dma_mode)
-+ outb_p(*lptr, virtual_dma_port+5);
-+ else
-+ *lptr = inb_p(virtual_dma_port+5);
-+ }
-+ virtual_dma_count = lcount;
-+ virtual_dma_addr = lptr;
-+ st = inb(virtual_dma_port+4);
-+ }
-+
-+#ifdef TRACE_FLPY_INT
-+ calls++;
-+#endif
-+ if(st == 0x20)
-+ return IRQ_HANDLED;
-+ if(!(st & 0x20)) {
-+ virtual_dma_residue += virtual_dma_count;
-+ virtual_dma_count=0;
-+#ifdef TRACE_FLPY_INT
-+ printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
-+ virtual_dma_count, virtual_dma_residue, calls, bytes,
-+ dma_wait);
-+ calls = 0;
-+ dma_wait=0;
-+#endif
-+ doing_pdma = 0;
-+ floppy_interrupt(irq, dev_id);
-+ return IRQ_HANDLED;
-+ }
-+#ifdef TRACE_FLPY_INT
-+ if(!virtual_dma_count)
-+ dma_wait++;
-+#endif
-+ return IRQ_HANDLED;
-+}
-+
-+static void fd_disable_dma(void)
-+{
-+ doing_pdma = 0;
-+ virtual_dma_residue += virtual_dma_count;
-+ virtual_dma_count=0;
-+}
-+
-+static int vdma_get_dma_residue(unsigned int dummy)
-+{
-+ return virtual_dma_count + virtual_dma_residue;
-+}
-+
-+
-+static int fd_request_irq(void)
-+{
-+ return request_irq(FLOPPY_IRQ, floppy_hardint,
-+ IRQF_DISABLED, "floppy", NULL);
-+}
-+
-+#if 0
-+static unsigned long vdma_mem_alloc(unsigned long size)
-+{
-+ return (unsigned long) vmalloc(size);
-+
-+}
-+
-+static void vdma_mem_free(unsigned long addr, unsigned long size)
-+{
-+ vfree((void *)addr);
-+}
-+#endif
-+
-+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
-+{
-+ doing_pdma = 1;
-+ virtual_dma_port = io;
-+ virtual_dma_mode = (mode == DMA_MODE_WRITE);
-+ virtual_dma_addr = addr;
-+ virtual_dma_count = size;
-+ virtual_dma_residue = 0;
-+ return 0;
-+}
-+
-+/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
-+#define FDC1 xen_floppy_init()
-+static int FDC2 = -1;
-+
-+static int xen_floppy_init(void)
-+{
-+ use_virtual_dma = 1;
-+ can_use_virtual_dma = 1;
-+ return 0x3f0;
-+}
-+
-+/*
-+ * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
-+ * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
-+ * coincides with another rtc CMOS user. Paul G.
-+ */
-+#define FLOPPY0_TYPE ({ \
-+ unsigned long flags; \
-+ unsigned char val; \
-+ spin_lock_irqsave(&rtc_lock, flags); \
-+ val = (CMOS_READ(0x10) >> 4) & 15; \
-+ spin_unlock_irqrestore(&rtc_lock, flags); \
-+ val; \
-+})
-+
-+#define FLOPPY1_TYPE ({ \
-+ unsigned long flags; \
-+ unsigned char val; \
-+ spin_lock_irqsave(&rtc_lock, flags); \
-+ val = CMOS_READ(0x10) & 15; \
-+ spin_unlock_irqrestore(&rtc_lock, flags); \
-+ val; \
-+})
-+
-+#define N_FDC 2
-+#define N_DRIVE 8
-+
-+#define FLOPPY_MOTOR_MASK 0xf0
-+
-+#define EXTRA_FLOPPY_PARAMS
-+
-+#endif /* __ASM_XEN_X86_64_FLOPPY_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/gnttab_dma.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/gnttab_dma.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/gnttab_dma.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/gnttab_dma.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1 @@
-+#include <asm-i386/mach-xen/asm/gnttab_dma.h>
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/hw_irq.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/hw_irq.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/hw_irq.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/hw_irq.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,156 @@
-+#ifndef _ASM_HW_IRQ_H
-+#define _ASM_HW_IRQ_H
-+
-+/*
-+ * linux/include/asm/hw_irq.h
-+ *
-+ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
-+ *
-+ * moved some of the old arch/i386/kernel/irq.h to here. VY
-+ *
-+ * IRQ/IPI changes taken from work by Thomas Radke
-+ * <tomsoft@informatik.tu-chemnitz.de>
-+ *
-+ * hacked by Andi Kleen for x86-64.
-+ */
-+
-+#ifndef __ASSEMBLY__
-+#include <asm/atomic.h>
-+#include <asm/irq.h>
-+#include <linux/profile.h>
-+#include <linux/smp.h>
-+#include <linux/percpu.h>
-+#endif
-+
-+/*
-+ * IDT vectors usable for external interrupt sources start
-+ * at 0x20:
-+ */
-+#define FIRST_EXTERNAL_VECTOR 0x20
-+
-+#define IA32_SYSCALL_VECTOR 0x80
-+
-+#ifndef CONFIG_XEN
-+
-+/* Reserve the lowest usable priority level 0x20 - 0x2f for triggering
-+ * cleanup after irq migration.
-+ */
-+#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
-+
-+/*
-+ * Vectors 0x30-0x3f are used for ISA interrupts.
-+ */
-+#define IRQ0_VECTOR FIRST_EXTERNAL_VECTOR + 0x10
-+#define IRQ1_VECTOR IRQ0_VECTOR + 1
-+#define IRQ2_VECTOR IRQ0_VECTOR + 2
-+#define IRQ3_VECTOR IRQ0_VECTOR + 3
-+#define IRQ4_VECTOR IRQ0_VECTOR + 4
-+#define IRQ5_VECTOR IRQ0_VECTOR + 5
-+#define IRQ6_VECTOR IRQ0_VECTOR + 6
-+#define IRQ7_VECTOR IRQ0_VECTOR + 7
-+#define IRQ8_VECTOR IRQ0_VECTOR + 8
-+#define IRQ9_VECTOR IRQ0_VECTOR + 9
-+#define IRQ10_VECTOR IRQ0_VECTOR + 10
-+#define IRQ11_VECTOR IRQ0_VECTOR + 11
-+#define IRQ12_VECTOR IRQ0_VECTOR + 12
-+#define IRQ13_VECTOR IRQ0_VECTOR + 13
-+#define IRQ14_VECTOR IRQ0_VECTOR + 14
-+#define IRQ15_VECTOR IRQ0_VECTOR + 15
-+
-+/*
-+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
-+ *
-+ * some of the following vectors are 'rare', they are merged
-+ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
-+ * TLB, reschedule and local APIC vectors are performance-critical.
-+ */
-+#define SPURIOUS_APIC_VECTOR 0xff
-+#define ERROR_APIC_VECTOR 0xfe
-+#define RESCHEDULE_VECTOR 0xfd
-+#define CALL_FUNCTION_VECTOR 0xfc
-+/* fb free - please don't readd KDB here because it's useless
-+ (hint - think what a NMI bit does to a vector) */
-+#define THERMAL_APIC_VECTOR 0xfa
-+#define THRESHOLD_APIC_VECTOR 0xf9
-+/* f8 free */
-+#define INVALIDATE_TLB_VECTOR_END 0xf7
-+#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
-+
-+#define NUM_INVALIDATE_TLB_VECTORS 8
-+
-+/*
-+ * Local APIC timer IRQ vector is on a different priority level,
-+ * to work around the 'lost local interrupt if more than 2 IRQ
-+ * sources per level' errata.
-+ */
-+#define LOCAL_TIMER_VECTOR 0xef
-+
-+/*
-+ * First APIC vector available to drivers: (vectors 0x30-0xee)
-+ * we start at 0x41 to spread out vectors evenly between priority
-+ * levels. (0x80 is the syscall vector)
-+ */
-+#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
-+#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in irq.h */
-+
-+#endif
-+
-+#ifndef __ASSEMBLY__
-+typedef int vector_irq_t[NR_VECTORS];
-+DECLARE_PER_CPU(vector_irq_t, vector_irq);
-+extern void __setup_vector_irq(int cpu);
-+extern spinlock_t vector_lock;
-+
-+/*
-+ * Various low-level irq details needed by irq.c, process.c,
-+ * time.c, io_apic.c and smp.c
-+ *
-+ * Interrupt entry/exit code at both C and assembly level
-+ */
-+
-+extern void disable_8259A_irq(unsigned int irq);
-+extern void enable_8259A_irq(unsigned int irq);
-+extern int i8259A_irq_pending(unsigned int irq);
-+extern void make_8259A_irq(unsigned int irq);
-+extern void init_8259A(int aeoi);
-+extern void send_IPI_self(int vector);
-+extern void init_VISWS_APIC_irqs(void);
-+extern void setup_IO_APIC(void);
-+extern void disable_IO_APIC(void);
-+extern void print_IO_APIC(void);
-+extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
-+extern void send_IPI(int dest, int vector);
-+extern void setup_ioapic_dest(void);
-+
-+extern unsigned long io_apic_irqs;
-+
-+extern atomic_t irq_err_count;
-+extern atomic_t irq_mis_count;
-+
-+#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
-+
-+#define __STR(x) #x
-+#define STR(x) __STR(x)
-+
-+#include <asm/ptrace.h>
-+
-+#define IRQ_NAME2(nr) nr##_interrupt(void)
-+#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
-+
-+/*
-+ * SMP has a few special interrupts for IPI messages
-+ */
-+
-+#define BUILD_IRQ(nr) \
-+asmlinkage void IRQ_NAME(nr); \
-+__asm__( \
-+"\n.p2align\n" \
-+"IRQ" #nr "_interrupt:\n\t" \
-+ "push $~(" #nr ") ; " \
-+ "jmp common_interrupt");
-+
-+#define platform_legacy_irq(irq) ((irq) < 16)
-+
-+#endif
-+
-+#endif /* _ASM_HW_IRQ_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/hypercall.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/hypercall.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/hypercall.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/hypercall.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,406 @@
-+/******************************************************************************
-+ * hypercall.h
-+ *
-+ * Linux-specific hypervisor handling.
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ * 64-bit updates:
-+ * Benjamin Liu <benjamin.liu@intel.com>
-+ * Jun Nakajima <jun.nakajima@intel.com>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __HYPERCALL_H__
-+#define __HYPERCALL_H__
-+
-+#include <linux/string.h> /* memcpy() */
-+
-+#ifndef __HYPERVISOR_H__
-+# error "please don't include this file directly"
-+#endif
-+
-+#define __STR(x) #x
-+#define STR(x) __STR(x)
-+
-+#ifdef CONFIG_XEN
-+#define HYPERCALL_STR(name) \
-+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"
-+#else
-+#define HYPERCALL_STR(name) \
-+ "mov hypercall_stubs,%%rax; " \
-+ "add $("STR(__HYPERVISOR_##name)" * 32),%%rax; " \
-+ "call *%%rax"
-+#endif
-+
-+#define _hypercall0(type, name) \
-+({ \
-+ long __res; \
-+ asm volatile ( \
-+ HYPERCALL_STR(name) \
-+ : "=a" (__res) \
-+ : \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall1(type, name, a1) \
-+({ \
-+ long __res, __ign1; \
-+ asm volatile ( \
-+ HYPERCALL_STR(name) \
-+ : "=a" (__res), "=D" (__ign1) \
-+ : "1" ((long)(a1)) \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall2(type, name, a1, a2) \
-+({ \
-+ long __res, __ign1, __ign2; \
-+ asm volatile ( \
-+ HYPERCALL_STR(name) \
-+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \
-+ : "1" ((long)(a1)), "2" ((long)(a2)) \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall3(type, name, a1, a2, a3) \
-+({ \
-+ long __res, __ign1, __ign2, __ign3; \
-+ asm volatile ( \
-+ HYPERCALL_STR(name) \
-+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
-+ "=d" (__ign3) \
-+ : "1" ((long)(a1)), "2" ((long)(a2)), \
-+ "3" ((long)(a3)) \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall4(type, name, a1, a2, a3, a4) \
-+({ \
-+ long __res, __ign1, __ign2, __ign3; \
-+ asm volatile ( \
-+ "movq %7,%%r10; " \
-+ HYPERCALL_STR(name) \
-+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
-+ "=d" (__ign3) \
-+ : "1" ((long)(a1)), "2" ((long)(a2)), \
-+ "3" ((long)(a3)), "g" ((long)(a4)) \
-+ : "memory", "r10" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
-+({ \
-+ long __res, __ign1, __ign2, __ign3; \
-+ asm volatile ( \
-+ "movq %7,%%r10; movq %8,%%r8; " \
-+ HYPERCALL_STR(name) \
-+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
-+ "=d" (__ign3) \
-+ : "1" ((long)(a1)), "2" ((long)(a2)), \
-+ "3" ((long)(a3)), "g" ((long)(a4)), \
-+ "g" ((long)(a5)) \
-+ : "memory", "r10", "r8" ); \
-+ (type)__res; \
-+})
-+
-+static inline int
-+HYPERVISOR_set_trap_table(
-+ trap_info_t *table)
-+{
-+ return _hypercall1(int, set_trap_table, table);
-+}
-+
-+static inline int
-+HYPERVISOR_mmu_update(
-+ mmu_update_t *req, int count, int *success_count, domid_t domid)
-+{
-+ return _hypercall4(int, mmu_update, req, count, success_count, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_mmuext_op(
-+ struct mmuext_op *op, int count, int *success_count, domid_t domid)
-+{
-+ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_set_gdt(
-+ unsigned long *frame_list, int entries)
-+{
-+ return _hypercall2(int, set_gdt, frame_list, entries);
-+}
-+
-+static inline int
-+HYPERVISOR_stack_switch(
-+ unsigned long ss, unsigned long esp)
-+{
-+ return _hypercall2(int, stack_switch, ss, esp);
-+}
-+
-+static inline int
-+HYPERVISOR_set_callbacks(
-+ unsigned long event_address, unsigned long failsafe_address,
-+ unsigned long syscall_address)
-+{
-+ return _hypercall3(int, set_callbacks,
-+ event_address, failsafe_address, syscall_address);
-+}
-+
-+static inline int
-+HYPERVISOR_fpu_taskswitch(
-+ int set)
-+{
-+ return _hypercall1(int, fpu_taskswitch, set);
-+}
-+
-+static inline int
-+HYPERVISOR_sched_op_compat(
-+ int cmd, unsigned long arg)
-+{
-+ return _hypercall2(int, sched_op_compat, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_sched_op(
-+ int cmd, void *arg)
-+{
-+ return _hypercall2(int, sched_op, cmd, arg);
-+}
-+
-+static inline long
-+HYPERVISOR_set_timer_op(
-+ u64 timeout)
-+{
-+ return _hypercall1(long, set_timer_op, timeout);
-+}
-+
-+static inline int
-+HYPERVISOR_platform_op(
-+ struct xen_platform_op *platform_op)
-+{
-+ platform_op->interface_version = XENPF_INTERFACE_VERSION;
-+ return _hypercall1(int, platform_op, platform_op);
-+}
-+
-+static inline int
-+HYPERVISOR_set_debugreg(
-+ int reg, unsigned long value)
-+{
-+ return _hypercall2(int, set_debugreg, reg, value);
-+}
-+
-+static inline unsigned long
-+HYPERVISOR_get_debugreg(
-+ int reg)
-+{
-+ return _hypercall1(unsigned long, get_debugreg, reg);
-+}
-+
-+static inline int
-+HYPERVISOR_update_descriptor(
-+ unsigned long ma, unsigned long word)
-+{
-+ return _hypercall2(int, update_descriptor, ma, word);
-+}
-+
-+static inline int
-+HYPERVISOR_memory_op(
-+ unsigned int cmd, void *arg)
-+{
-+ return _hypercall2(int, memory_op, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_multicall(
-+ multicall_entry_t *call_list, int nr_calls)
-+{
-+ return _hypercall2(int, multicall, call_list, nr_calls);
-+}
-+
-+static inline int
-+HYPERVISOR_update_va_mapping(
-+ unsigned long va, pte_t new_val, unsigned long flags)
-+{
-+ return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
-+}
-+
-+static inline int
-+HYPERVISOR_event_channel_op(
-+ int cmd, void *arg)
-+{
-+ int rc = _hypercall2(int, event_channel_op, cmd, arg);
-+
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (unlikely(rc == -ENOSYS)) {
-+ struct evtchn_op op;
-+ op.cmd = cmd;
-+ memcpy(&op.u, arg, sizeof(op.u));
-+ rc = _hypercall1(int, event_channel_op_compat, &op);
-+ memcpy(arg, &op.u, sizeof(op.u));
-+ }
-+#endif
-+
-+ return rc;
-+}
-+
-+static inline int
-+HYPERVISOR_acm_op(
-+ int cmd, void *arg)
-+{
-+ return _hypercall2(int, acm_op, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_xen_version(
-+ int cmd, void *arg)
-+{
-+ return _hypercall2(int, xen_version, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_console_io(
-+ int cmd, int count, char *str)
-+{
-+ return _hypercall3(int, console_io, cmd, count, str);
-+}
-+
-+static inline int
-+HYPERVISOR_physdev_op(
-+ int cmd, void *arg)
-+{
-+ int rc = _hypercall2(int, physdev_op, cmd, arg);
-+
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (unlikely(rc == -ENOSYS)) {
-+ struct physdev_op op;
-+ op.cmd = cmd;
-+ memcpy(&op.u, arg, sizeof(op.u));
-+ rc = _hypercall1(int, physdev_op_compat, &op);
-+ memcpy(arg, &op.u, sizeof(op.u));
-+ }
-+#endif
-+
-+ return rc;
-+}
-+
-+static inline int
-+HYPERVISOR_grant_table_op(
-+ unsigned int cmd, void *uop, unsigned int count)
-+{
-+ return _hypercall3(int, grant_table_op, cmd, uop, count);
-+}
-+
-+static inline int
-+HYPERVISOR_update_va_mapping_otherdomain(
-+ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
-+{
-+ return _hypercall4(int, update_va_mapping_otherdomain, va,
-+ new_val.pte, flags, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_vm_assist(
-+ unsigned int cmd, unsigned int type)
-+{
-+ return _hypercall2(int, vm_assist, cmd, type);
-+}
-+
-+static inline int
-+HYPERVISOR_vcpu_op(
-+ int cmd, int vcpuid, void *extra_args)
-+{
-+ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
-+}
-+
-+static inline int
-+HYPERVISOR_set_segment_base(
-+ int reg, unsigned long value)
-+{
-+ return _hypercall2(int, set_segment_base, reg, value);
-+}
-+
-+static inline int
-+HYPERVISOR_suspend(
-+ unsigned long srec)
-+{
-+ struct sched_shutdown sched_shutdown = {
-+ .reason = SHUTDOWN_suspend
-+ };
-+
-+ int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
-+ &sched_shutdown, srec);
-+
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (rc == -ENOSYS)
-+ rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
-+ SHUTDOWN_suspend, srec);
-+#endif
-+
-+ return rc;
-+}
-+
-+static inline int
-+HYPERVISOR_nmi_op(
-+ unsigned long op, void *arg)
-+{
-+ return _hypercall2(int, nmi_op, op, arg);
-+}
-+
-+static inline unsigned long
-+HYPERVISOR_hvm_op(
-+ int op, void *arg)
-+{
-+ return _hypercall2(unsigned long, hvm_op, op, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_callback_op(
-+ int cmd, void *arg)
-+{
-+ return _hypercall2(int, callback_op, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_xenoprof_op(
-+ int op, void *arg)
-+{
-+ return _hypercall2(int, xenoprof_op, op, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_kexec_op(
-+ unsigned long op, void *args)
-+{
-+ return _hypercall2(int, kexec_op, op, args);
-+}
-+
-+#endif /* __HYPERCALL_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/hypervisor.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/hypervisor.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/hypervisor.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/hypervisor.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,2 @@
-+
-+#include <asm-i386/mach-xen/asm/hypervisor.h>
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/io.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/io.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/io.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/io.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,297 @@
-+#ifndef _ASM_IO_H
-+#define _ASM_IO_H
-+
-+#include <asm/fixmap.h>
-+
-+/*
-+ * This file contains the definitions for the x86 IO instructions
-+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
-+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
-+ * versions of the single-IO instructions (inb_p/inw_p/..).
-+ *
-+ * This file is not meant to be obfuscating: it's just complicated
-+ * to (a) handle it all in a way that makes gcc able to optimize it
-+ * as well as possible and (b) trying to avoid writing the same thing
-+ * over and over again with slight variations and possibly making a
-+ * mistake somewhere.
-+ */
-+
-+/*
-+ * Thanks to James van Artsdalen for a better timing-fix than
-+ * the two short jumps: using outb's to a nonexistent port seems
-+ * to guarantee better timings even on fast machines.
-+ *
-+ * On the other hand, I'd like to be sure of a non-existent port:
-+ * I feel a bit unsafe about using 0x80 (should be safe, though)
-+ *
-+ * Linus
-+ */
-+
-+ /*
-+ * Bit simplified and optimized by Jan Hubicka
-+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
-+ *
-+ * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
-+ * isa_read[wl] and isa_write[wl] fixed
-+ * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
-+ */
-+
-+#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
-+
-+#ifdef REALLY_SLOW_IO
-+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
-+#else
-+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
-+#endif
-+
-+/*
-+ * Talk about misusing macros..
-+ */
-+#define __OUT1(s,x) \
-+static inline void out##s(unsigned x value, unsigned short port) {
-+
-+#define __OUT2(s,s1,s2) \
-+__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
-+
-+#define __OUT(s,s1,x) \
-+__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-+__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
-+
-+#define __IN1(s) \
-+static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
-+
-+#define __IN2(s,s1,s2) \
-+__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
-+
-+#define __IN(s,s1,i...) \
-+__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-+__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-+
-+#define __INS(s) \
-+static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
-+{ __asm__ __volatile__ ("rep ; ins" #s \
-+: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-+
-+#define __OUTS(s) \
-+static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
-+{ __asm__ __volatile__ ("rep ; outs" #s \
-+: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-+
-+#define RETURN_TYPE unsigned char
-+__IN(b,"")
-+#undef RETURN_TYPE
-+#define RETURN_TYPE unsigned short
-+__IN(w,"")
-+#undef RETURN_TYPE
-+#define RETURN_TYPE unsigned int
-+__IN(l,"")
-+#undef RETURN_TYPE
-+
-+__OUT(b,"b",char)
-+__OUT(w,"w",short)
-+__OUT(l,,int)
-+
-+__INS(b)
-+__INS(w)
-+__INS(l)
-+
-+__OUTS(b)
-+__OUTS(w)
-+__OUTS(l)
-+
-+#define IO_SPACE_LIMIT 0xffff
-+
-+#if defined(__KERNEL__) && defined(__x86_64__)
-+
-+#include <linux/vmalloc.h>
-+
-+#ifndef __i386__
-+/*
-+ * Change virtual addresses to physical addresses and vv.
-+ * These are pretty trivial
-+ */
-+static inline unsigned long virt_to_phys(volatile void * address)
-+{
-+ return __pa(address);
-+}
-+
-+static inline void * phys_to_virt(unsigned long address)
-+{
-+ return __va(address);
-+}
-+
-+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
-+#define bus_to_virt(_x) __va(machine_to_phys(_x))
-+#endif
-+
-+/*
-+ * Change "struct page" to physical address.
-+ */
-+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-+#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
-+#define page_to_bus(page) (phys_to_machine(page_to_pseudophys(page)))
-+
-+#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
-+ (unsigned long) bio_offset((bio)))
-+#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
-+ (unsigned long) (bv)->bv_offset)
-+
-+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
-+ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
-+ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
-+ bvec_to_pseudophys((vec2))))
-+
-+#include <asm-generic/iomap.h>
-+
-+extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-+
-+static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
-+{
-+ return __ioremap(offset, size, 0);
-+}
-+
-+extern void *bt_ioremap(unsigned long addr, unsigned long size);
-+extern void bt_iounmap(void *addr, unsigned long size);
-+#define early_ioremap bt_ioremap
-+#define early_iounmap bt_iounmap
-+
-+/*
-+ * This one maps high address device memory and turns off caching for that area.
-+ * it's useful if some control registers are in such an area and write combining
-+ * or read caching is not desirable:
-+ */
-+extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
-+extern void iounmap(volatile void __iomem *addr);
-+
-+/*
-+ * ISA I/O bus memory addresses are 1:1 with the physical address.
-+ */
-+
-+#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
-+#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
-+#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
-+
-+/*
-+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
-+ * are forbidden in portable PCI drivers.
-+ *
-+ * Allow them on x86 for legacy drivers, though.
-+ */
-+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
-+#define bus_to_virt(_x) __va(machine_to_phys(_x))
-+
-+/*
-+ * readX/writeX() are used to access memory mapped devices. On some
-+ * architectures the memory mapped IO stuff needs to be accessed
-+ * differently. On the x86 architecture, we just read/write the
-+ * memory location directly.
-+ */
-+
-+static inline __u8 __readb(const volatile void __iomem *addr)
-+{
-+ return *(__force volatile __u8 *)addr;
-+}
-+static inline __u16 __readw(const volatile void __iomem *addr)
-+{
-+ return *(__force volatile __u16 *)addr;
-+}
-+static __always_inline __u32 __readl(const volatile void __iomem *addr)
-+{
-+ return *(__force volatile __u32 *)addr;
-+}
-+static inline __u64 __readq(const volatile void __iomem *addr)
-+{
-+ return *(__force volatile __u64 *)addr;
-+}
-+#define readb(x) __readb(x)
-+#define readw(x) __readw(x)
-+#define readl(x) __readl(x)
-+#define readq(x) __readq(x)
-+#define readb_relaxed(a) readb(a)
-+#define readw_relaxed(a) readw(a)
-+#define readl_relaxed(a) readl(a)
-+#define readq_relaxed(a) readq(a)
-+#define __raw_readb readb
-+#define __raw_readw readw
-+#define __raw_readl readl
-+#define __raw_readq readq
-+
-+#define mmiowb()
-+
-+static inline void __writel(__u32 b, volatile void __iomem *addr)
-+{
-+ *(__force volatile __u32 *)addr = b;
-+}
-+static inline void __writeq(__u64 b, volatile void __iomem *addr)
-+{
-+ *(__force volatile __u64 *)addr = b;
-+}
-+static inline void __writeb(__u8 b, volatile void __iomem *addr)
-+{
-+ *(__force volatile __u8 *)addr = b;
-+}
-+static inline void __writew(__u16 b, volatile void __iomem *addr)
-+{
-+ *(__force volatile __u16 *)addr = b;
-+}
-+#define writeq(val,addr) __writeq((val),(addr))
-+#define writel(val,addr) __writel((val),(addr))
-+#define writew(val,addr) __writew((val),(addr))
-+#define writeb(val,addr) __writeb((val),(addr))
-+#define __raw_writeb writeb
-+#define __raw_writew writew
-+#define __raw_writel writel
-+#define __raw_writeq writeq
-+
-+void __memcpy_fromio(void*,unsigned long,unsigned);
-+void __memcpy_toio(unsigned long,const void*,unsigned);
-+
-+static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
-+{
-+ __memcpy_fromio(to,(unsigned long)from,len);
-+}
-+static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
-+{
-+ __memcpy_toio((unsigned long)to,from,len);
-+}
-+
-+void memset_io(volatile void __iomem *a, int b, size_t c);
-+
-+/*
-+ * ISA space is 'always mapped' on a typical x86 system, no need to
-+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
-+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
-+ * are physical addresses. The following constant pointer can be
-+ * used as the IO-area pointer (it can be iounmapped as well, so the
-+ * analogy with PCI is quite large):
-+ */
-+#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
-+
-+/* Nothing to do */
-+
-+#define dma_cache_inv(_start,_size) do { } while (0)
-+#define dma_cache_wback(_start,_size) do { } while (0)
-+#define dma_cache_wback_inv(_start,_size) do { } while (0)
-+
-+#define flush_write_buffers()
-+
-+extern int iommu_bio_merge;
-+#define BIO_VMERGE_BOUNDARY iommu_bio_merge
-+
-+/*
-+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
-+ * access
-+ */
-+#define xlate_dev_mem_ptr(p, sz) ioremap(p, sz)
-+#define xlate_dev_mem_ptr_unmap(p) iounmap(p)
-+
-+/*
-+ * Convert a virtual cached pointer to an uncached pointer
-+ */
-+#define xlate_dev_kmem_ptr(p) p
-+
-+#endif /* __KERNEL__ */
-+
-+#define ARCH_HAS_DEV_MEM
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/irqflags.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/irqflags.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/irqflags.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/irqflags.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,148 @@
-+/*
-+ * include/asm-x86_64/irqflags.h
-+ *
-+ * IRQ flags handling
-+ *
-+ * This file gets included from lowlevel asm headers too, to provide
-+ * wrapped versions of the local_irq_*() APIs, based on the
-+ * raw_local_irq_*() functions from the lowlevel headers.
-+ */
-+#ifndef _ASM_IRQFLAGS_H
-+#define _ASM_IRQFLAGS_H
-+#include <asm/processor-flags.h>
-+
-+#ifndef __ASSEMBLY__
-+/*
-+ * Interrupt control:
-+ */
-+
-+/*
-+ * The use of 'barrier' in the following reflects their use as local-lock
-+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
-+ * critical operations are executed. All critical operations must complete
-+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
-+ * includes these barriers, for example.
-+ */
-+
-+#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
-+
-+#define raw_local_save_flags(flags) \
-+ do { (flags) = __raw_local_save_flags(); } while (0)
-+
-+#define raw_local_irq_restore(x) \
-+do { \
-+ vcpu_info_t *_vcpu; \
-+ barrier(); \
-+ _vcpu = current_vcpu_info(); \
-+ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
-+ barrier(); /* unmask then check (avoid races) */ \
-+ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
-+ force_evtchn_callback(); \
-+ } \
-+} while (0)
-+
-+#ifdef CONFIG_X86_VSMP
-+
-+/*
-+ * Interrupt control for the VSMP architecture:
-+ */
-+
-+static inline void raw_local_irq_disable(void)
-+{
-+ unsigned long flags = __raw_local_save_flags();
-+
-+ raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
-+}
-+
-+static inline void raw_local_irq_enable(void)
-+{
-+ unsigned long flags = __raw_local_save_flags();
-+
-+ raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
-+}
-+
-+static inline int raw_irqs_disabled_flags(unsigned long flags)
-+{
-+ return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
-+}
-+
-+#else /* CONFIG_X86_VSMP */
-+
-+#define raw_local_irq_disable() \
-+do { \
-+ current_vcpu_info()->evtchn_upcall_mask = 1; \
-+ barrier(); \
-+} while (0)
-+
-+#define raw_local_irq_enable() \
-+do { \
-+ vcpu_info_t *_vcpu; \
-+ barrier(); \
-+ _vcpu = current_vcpu_info(); \
-+ _vcpu->evtchn_upcall_mask = 0; \
-+ barrier(); /* unmask then check (avoid races) */ \
-+ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
-+ force_evtchn_callback(); \
-+} while (0)
-+
-+static inline int raw_irqs_disabled_flags(unsigned long flags)
-+{
-+ return (flags != 0);
-+}
-+
-+#endif
-+
-+/*
-+ * For spinlocks, etc.:
-+ */
-+
-+#define __raw_local_irq_save() \
-+({ \
-+ unsigned long flags = __raw_local_save_flags(); \
-+ \
-+ raw_local_irq_disable(); \
-+ \
-+ flags; \
-+})
-+
-+#define raw_local_irq_save(flags) \
-+ do { (flags) = __raw_local_irq_save(); } while (0)
-+
-+#define raw_irqs_disabled() \
-+({ \
-+ unsigned long flags = __raw_local_save_flags(); \
-+ \
-+ raw_irqs_disabled_flags(flags); \
-+})
-+
-+/*
-+ * Used in the idle loop; sti takes one instruction cycle
-+ * to complete:
-+ */
-+void xen_safe_halt(void);
-+static inline void raw_safe_halt(void)
-+{
-+ xen_safe_halt();
-+}
-+
-+/*
-+ * Used when interrupts are already enabled or to
-+ * shutdown the processor:
-+ */
-+void xen_halt(void);
-+static inline void halt(void)
-+{
-+ xen_halt();
-+}
-+
-+#else /* __ASSEMBLY__: */
-+# ifdef CONFIG_TRACE_IRQFLAGS
-+# define TRACE_IRQS_ON call trace_hardirqs_on_thunk
-+# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk
-+# else
-+# define TRACE_IRQS_ON
-+# define TRACE_IRQS_OFF
-+# endif
-+#endif
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/irq.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/irq.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/irq.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/irq.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,38 @@
-+#ifndef _ASM_IRQ_H
-+#define _ASM_IRQ_H
-+
-+/*
-+ * linux/include/asm/irq.h
-+ *
-+ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
-+ *
-+ * IRQ/IPI changes taken from work by Thomas Radke
-+ * <tomsoft@informatik.tu-chemnitz.de>
-+ */
-+
-+#include <linux/sched.h>
-+/* include comes from machine specific directory */
-+#include "irq_vectors.h"
-+#include <asm/thread_info.h>
-+
-+static __inline__ int irq_canonicalize(int irq)
-+{
-+ return ((irq == 2) ? 9 : irq);
-+}
-+
-+#ifndef CONFIG_XEN
-+#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
-+#endif
-+
-+#define KDB_VECTOR 0xf9
-+
-+# define irq_ctx_init(cpu) do { } while (0)
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+#include <linux/cpumask.h>
-+extern void fixup_irqs(cpumask_t map);
-+#endif
-+
-+#define __ARCH_HAS_DO_SOFTIRQ 1
-+
-+#endif /* _ASM_IRQ_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/maddr.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/maddr.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/maddr.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/maddr.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,161 @@
-+#ifndef _X86_64_MADDR_H
-+#define _X86_64_MADDR_H
-+
-+#include <xen/features.h>
-+#include <xen/interface/xen.h>
-+
-+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-+#define INVALID_P2M_ENTRY (~0UL)
-+#define FOREIGN_FRAME_BIT (1UL<<63)
-+#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
-+
-+/* Definitions for machine and pseudophysical addresses. */
-+typedef unsigned long paddr_t;
-+typedef unsigned long maddr_t;
-+
-+#ifdef CONFIG_XEN
-+
-+extern unsigned long *phys_to_machine_mapping;
-+
-+#undef machine_to_phys_mapping
-+extern unsigned long *machine_to_phys_mapping;
-+extern unsigned int machine_to_phys_order;
-+
-+static inline unsigned long pfn_to_mfn(unsigned long pfn)
-+{
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return pfn;
-+ BUG_ON(end_pfn && pfn >= end_pfn);
-+ return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT;
-+}
-+
-+static inline int phys_to_machine_mapping_valid(unsigned long pfn)
-+{
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return 1;
-+ BUG_ON(end_pfn && pfn >= end_pfn);
-+ return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
-+}
-+
-+static inline unsigned long mfn_to_pfn(unsigned long mfn)
-+{
-+ unsigned long pfn;
-+
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return mfn;
-+
-+ if (unlikely((mfn >> machine_to_phys_order) != 0))
-+ return end_pfn;
-+
-+ /* The array access can fail (e.g., device space beyond end of RAM). */
-+ asm (
-+ "1: movq %1,%0\n"
-+ "2:\n"
-+ ".section .fixup,\"ax\"\n"
-+ "3: movq %2,%0\n"
-+ " jmp 2b\n"
-+ ".previous\n"
-+ ".section __ex_table,\"a\"\n"
-+ " .align 8\n"
-+ " .quad 1b,3b\n"
-+ ".previous"
-+ : "=r" (pfn)
-+ : "m" (machine_to_phys_mapping[mfn]), "m" (end_pfn) );
-+
-+ return pfn;
-+}
-+
-+/*
-+ * We detect special mappings in one of two ways:
-+ * 1. If the MFN is an I/O page then Xen will set the m2p entry
-+ * to be outside our maximum possible pseudophys range.
-+ * 2. If the MFN belongs to a different domain then we will certainly
-+ * not have MFN in our p2m table. Conversely, if the page is ours,
-+ * then we'll have p2m(m2p(MFN))==MFN.
-+ * If we detect a special mapping then it doesn't have a 'struct page'.
-+ * We force !pfn_valid() by returning an out-of-range pointer.
-+ *
-+ * NB. These checks require that, for any MFN that is not in our reservation,
-+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
-+ * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN.
-+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
-+ *
-+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
-+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
-+ * require. In all the cases we care about, the FOREIGN_FRAME bit is
-+ * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
-+ */
-+static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
-+{
-+ unsigned long pfn = mfn_to_pfn(mfn);
-+ if ((pfn < end_pfn)
-+ && !xen_feature(XENFEAT_auto_translated_physmap)
-+ && (phys_to_machine_mapping[pfn] != mfn))
-+ return end_pfn; /* force !pfn_valid() */
-+ return pfn;
-+}
-+
-+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-+{
-+ BUG_ON(end_pfn && pfn >= end_pfn);
-+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
-+ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-+ return;
-+ }
-+ phys_to_machine_mapping[pfn] = mfn;
-+}
-+
-+static inline maddr_t phys_to_machine(paddr_t phys)
-+{
-+ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
-+ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
-+ return machine;
-+}
-+
-+static inline paddr_t machine_to_phys(maddr_t machine)
-+{
-+ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
-+ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
-+ return phys;
-+}
-+
-+static inline paddr_t pte_phys_to_machine(paddr_t phys)
-+{
-+ maddr_t machine;
-+ machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT);
-+ machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK);
-+ return machine;
-+}
-+
-+static inline paddr_t pte_machine_to_phys(maddr_t machine)
-+{
-+ paddr_t phys;
-+ phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT);
-+ phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK);
-+ return phys;
-+}
-+
-+#define __pte_ma(x) ((pte_t) { (x) } )
-+#define pfn_pte_ma(pfn, prot) __pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask)
-+
-+#else /* !CONFIG_XEN */
-+
-+#define pfn_to_mfn(pfn) (pfn)
-+#define mfn_to_pfn(mfn) (mfn)
-+#define mfn_to_local_pfn(mfn) (mfn)
-+#define set_phys_to_machine(pfn, mfn) ((void)0)
-+#define phys_to_machine_mapping_valid(pfn) (1)
-+#define phys_to_machine(phys) ((maddr_t)(phys))
-+#define machine_to_phys(mach) ((paddr_t)(mach))
-+#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot)
-+#define __pte_ma(x) __pte(x)
-+
-+#endif /* !CONFIG_XEN */
-+
-+/* VIRT <-> MACHINE conversion */
-+#define virt_to_machine(v) (phys_to_machine(__pa(v)))
-+#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
-+#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
-+
-+#endif /* _X86_64_MADDR_H */
-+
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/mmu_context.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/mmu_context.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/mmu_context.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/mmu_context.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,139 @@
-+#ifndef __X86_64_MMU_CONTEXT_H
-+#define __X86_64_MMU_CONTEXT_H
-+
-+#include <asm/desc.h>
-+#include <asm/atomic.h>
-+#include <asm/pgalloc.h>
-+#include <asm/page.h>
-+#include <asm/pda.h>
-+#include <asm/pgtable.h>
-+#include <asm/tlbflush.h>
-+
-+void arch_exit_mmap(struct mm_struct *mm);
-+void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
-+
-+/*
-+ * possibly do the LDT unload here?
-+ */
-+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-+void destroy_context(struct mm_struct *mm);
-+
-+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-+{
-+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
-+ if (read_pda(mmu_state) == TLBSTATE_OK)
-+ write_pda(mmu_state, TLBSTATE_LAZY);
-+#endif
-+}
-+
-+#define prepare_arch_switch(next) __prepare_arch_switch()
-+
-+static inline void __prepare_arch_switch(void)
-+{
-+ /*
-+ * Save away %es, %ds, %fs and %gs. Must happen before reload
-+ * of cr3/ldt (i.e., not in __switch_to).
-+ */
-+ __asm__ __volatile__ (
-+ "mov %%es,%0 ; mov %%ds,%1 ; mov %%fs,%2 ; mov %%gs,%3"
-+ : "=m" (current->thread.es),
-+ "=m" (current->thread.ds),
-+ "=m" (current->thread.fsindex),
-+ "=m" (current->thread.gsindex) );
-+
-+ if (current->thread.ds)
-+ __asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );
-+
-+ if (current->thread.es)
-+ __asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );
-+
-+ if (current->thread.fsindex) {
-+ __asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
-+ current->thread.fs = 0;
-+ }
-+
-+ if (current->thread.gsindex) {
-+ load_gs_index(0);
-+ current->thread.gs = 0;
-+ }
-+}
-+
-+extern void mm_pin(struct mm_struct *mm);
-+extern void mm_unpin(struct mm_struct *mm);
-+void mm_pin_all(void);
-+
-+static inline void load_cr3(pgd_t *pgd)
-+{
-+ asm volatile("movq %0,%%cr3" :: "r" (phys_to_machine(__pa(pgd))) :
-+ "memory");
-+}
-+
-+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-+ struct task_struct *tsk)
-+{
-+ unsigned cpu = smp_processor_id();
-+ struct mmuext_op _op[3], *op = _op;
-+
-+ if (likely(prev != next)) {
-+ BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
-+ !next->context.pinned);
-+
-+ /* stop flush ipis for the previous mm */
-+ cpu_clear(cpu, prev->cpu_vm_mask);
-+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
-+ write_pda(mmu_state, TLBSTATE_OK);
-+ write_pda(active_mm, next);
-+#endif
-+ cpu_set(cpu, next->cpu_vm_mask);
-+
-+ /* load_cr3(next->pgd) */
-+ op->cmd = MMUEXT_NEW_BASEPTR;
-+ op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
-+ op++;
-+
-+ /* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */
-+ op->cmd = MMUEXT_NEW_USER_BASEPTR;
-+ op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
-+ op++;
-+
-+ if (unlikely(next->context.ldt != prev->context.ldt)) {
-+ /* load_LDT_nolock(&next->context, cpu) */
-+ op->cmd = MMUEXT_SET_LDT;
-+ op->arg1.linear_addr = (unsigned long)next->context.ldt;
-+ op->arg2.nr_ents = next->context.size;
-+ op++;
-+ }
-+
-+ BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
-+ }
-+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
-+ else {
-+ write_pda(mmu_state, TLBSTATE_OK);
-+ if (read_pda(active_mm) != next)
-+ out_of_line_bug();
-+ if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-+ /* We were in lazy tlb mode and leave_mm disabled
-+ * tlb flush IPI delivery. We must reload CR3
-+ * to make sure to use no freed page tables.
-+ */
-+ load_cr3(next->pgd);
-+ xen_new_user_pt(__pa(__user_pgd(next->pgd)));
-+ load_LDT_nolock(&next->context, cpu);
-+ }
-+ }
-+#endif
-+}
-+
-+#define deactivate_mm(tsk,mm) do { \
-+ load_gs_index(0); \
-+ asm volatile("movl %0,%%fs"::"r"(0)); \
-+} while(0)
-+
-+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
-+{
-+ if (!next->context.pinned)
-+ mm_pin(next);
-+ switch_mm(prev, next, NULL);
-+}
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/mmu.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/mmu.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/mmu.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/mmu.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,30 @@
-+#ifndef __x86_64_MMU_H
-+#define __x86_64_MMU_H
-+
-+#include <linux/spinlock.h>
-+#include <asm/semaphore.h>
-+
-+/*
-+ * The x86_64 doesn't have a mmu context, but
-+ * we put the segment information here.
-+ *
-+ * cpu_vm_mask is used to optimize ldt flushing.
-+ */
-+typedef struct {
-+ void *ldt;
-+ rwlock_t ldtlock;
-+ int size;
-+ struct semaphore sem;
-+#ifdef CONFIG_XEN
-+ unsigned pinned:1;
-+ unsigned has_foreign_mappings:1;
-+ struct list_head unpinned;
-+#endif
-+} mm_context_t;
-+
-+#ifdef CONFIG_XEN
-+extern struct list_head mm_unpinned;
-+extern spinlock_t mm_unpinned_lock;
-+#endif
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/msr.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/msr.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/msr.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/msr.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,187 @@
-+#ifndef X86_64_MSR_H
-+#define X86_64_MSR_H 1
-+
-+#include <asm/msr-index.h>
-+
-+#ifndef __ASSEMBLY__
-+#include <linux/errno.h>
-+/*
-+ * Access to machine-specific registers (available on 586 and better only)
-+ * Note: the rd* operations modify the parameters directly (without using
-+ * pointer indirection), this allows gcc to optimize better
-+ */
-+
-+#define rdmsr(msr,val1,val2) \
-+ __asm__ __volatile__("rdmsr" \
-+ : "=a" (val1), "=d" (val2) \
-+ : "c" (msr))
-+
-+
-+#define rdmsrl(msr,val) do { unsigned long a__,b__; \
-+ __asm__ __volatile__("rdmsr" \
-+ : "=a" (a__), "=d" (b__) \
-+ : "c" (msr)); \
-+ val = a__ | (b__<<32); \
-+} while(0)
-+
-+#define wrmsr(msr,val1,val2) \
-+ __asm__ __volatile__("wrmsr" \
-+ : /* no outputs */ \
-+ : "c" (msr), "a" (val1), "d" (val2))
-+
-+#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
-+
-+/* wrmsr with exception handling */
-+#define wrmsr_safe(msr,a,b) ({ int ret__; \
-+ asm volatile("2: wrmsr ; xorl %0,%0\n" \
-+ "1:\n\t" \
-+ ".section .fixup,\"ax\"\n\t" \
-+ "3: movl %4,%0 ; jmp 1b\n\t" \
-+ ".previous\n\t" \
-+ ".section __ex_table,\"a\"\n" \
-+ " .align 8\n\t" \
-+ " .quad 2b,3b\n\t" \
-+ ".previous" \
-+ : "=a" (ret__) \
-+ : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
-+ ret__; })
-+
-+#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
-+
-+#define rdmsr_safe(msr,a,b) \
-+ ({ int ret__; \
-+ asm volatile ("1: rdmsr\n" \
-+ "2:\n" \
-+ ".section .fixup,\"ax\"\n" \
-+ "3: movl %4,%0\n" \
-+ " jmp 2b\n" \
-+ ".previous\n" \
-+ ".section __ex_table,\"a\"\n" \
-+ " .align 8\n" \
-+ " .quad 1b,3b\n" \
-+ ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
-+ :"c"(msr), "i"(-EIO), "0"(0)); \
-+ ret__; })
-+
-+#define rdtsc(low,high) \
-+ __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
-+
-+#define rdtscl(low) \
-+ __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
-+
-+#define rdtscp(low,high,aux) \
-+ asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
-+
-+#define rdtscll(val) do { \
-+ unsigned int __a,__d; \
-+ asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
-+ (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
-+} while(0)
-+
-+#define rdtscpll(val, aux) do { \
-+ unsigned long __a, __d; \
-+ asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
-+ (val) = (__d << 32) | __a; \
-+} while (0)
-+
-+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-+
-+#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
-+
-+#define rdpmc(counter,low,high) \
-+ __asm__ __volatile__("rdpmc" \
-+ : "=a" (low), "=d" (high) \
-+ : "c" (counter))
-+
-+static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
-+ unsigned int *ecx, unsigned int *edx)
-+{
-+ __asm__(XEN_CPUID
-+ : "=a" (*eax),
-+ "=b" (*ebx),
-+ "=c" (*ecx),
-+ "=d" (*edx)
-+ : "0" (op));
-+}
-+
-+/* Some CPUID calls want 'count' to be placed in ecx */
-+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
-+ int *edx)
-+{
-+ __asm__(XEN_CPUID
-+ : "=a" (*eax),
-+ "=b" (*ebx),
-+ "=c" (*ecx),
-+ "=d" (*edx)
-+ : "0" (op), "c" (count));
-+}
-+
-+/*
-+ * CPUID functions returning a single datum
-+ */
-+static inline unsigned int cpuid_eax(unsigned int op)
-+{
-+ unsigned int eax;
-+
-+ __asm__(XEN_CPUID
-+ : "=a" (eax)
-+ : "0" (op)
-+ : "bx", "cx", "dx");
-+ return eax;
-+}
-+static inline unsigned int cpuid_ebx(unsigned int op)
-+{
-+ unsigned int eax, ebx;
-+
-+ __asm__(XEN_CPUID
-+ : "=a" (eax), "=b" (ebx)
-+ : "0" (op)
-+ : "cx", "dx" );
-+ return ebx;
-+}
-+static inline unsigned int cpuid_ecx(unsigned int op)
-+{
-+ unsigned int eax, ecx;
-+
-+ __asm__(XEN_CPUID
-+ : "=a" (eax), "=c" (ecx)
-+ : "0" (op)
-+ : "bx", "dx" );
-+ return ecx;
-+}
-+static inline unsigned int cpuid_edx(unsigned int op)
-+{
-+ unsigned int eax, edx;
-+
-+ __asm__(XEN_CPUID
-+ : "=a" (eax), "=d" (edx)
-+ : "0" (op)
-+ : "bx", "cx");
-+ return edx;
-+}
-+
-+#ifdef CONFIG_SMP
-+void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-+void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-+#else /* CONFIG_SMP */
-+static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-+{
-+ rdmsr(msr_no, *l, *h);
-+}
-+static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-+{
-+ wrmsr(msr_no, l, h);
-+}
-+static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-+{
-+ return rdmsr_safe(msr_no, l, h);
-+}
-+static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-+{
-+ return wrmsr_safe(msr_no, l, h);
-+}
-+#endif /* CONFIG_SMP */
-+#endif /* __ASSEMBLY__ */
-+#endif /* X86_64_MSR_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/nmi.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/nmi.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/nmi.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/nmi.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,113 @@
-+/*
-+ * linux/include/asm-i386/nmi.h
-+ */
-+#ifndef ASM_NMI_H
-+#define ASM_NMI_H
-+
-+#include <linux/pm.h>
-+#include <asm/io.h>
-+
-+#include <xen/interface/nmi.h>
-+
-+/**
-+ * do_nmi_callback
-+ *
-+ * Check to see if a callback exists and execute it. Return 1
-+ * if the handler exists and was handled successfully.
-+ */
-+int do_nmi_callback(struct pt_regs *regs, int cpu);
-+
-+#ifdef CONFIG_PM
-+
-+/** Replace the PM callback routine for NMI. */
-+struct pm_dev * set_nmi_pm_callback(pm_callback callback);
-+
-+/** Unset the PM callback routine back to the default. */
-+void unset_nmi_pm_callback(struct pm_dev * dev);
-+
-+#else
-+
-+static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback)
-+{
-+ return 0;
-+}
-+
-+static inline void unset_nmi_pm_callback(struct pm_dev * dev)
-+{
-+}
-+
-+#endif /* CONFIG_PM */
-+
-+extern void default_do_nmi(struct pt_regs *);
-+extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
-+
-+static inline unsigned char get_nmi_reason(void)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ unsigned char reason = 0;
-+
-+ /* construct a value which looks like it came from
-+ * port 0x61.
-+ */
-+ if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
-+ reason |= 0x40;
-+ if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
-+ reason |= 0x80;
-+
-+ return reason;
-+}
-+
-+extern int panic_on_timeout;
-+extern int unknown_nmi_panic;
-+
-+#ifndef CONFIG_XEN
-+
-+extern int nmi_watchdog_enabled;
-+
-+extern int check_nmi_watchdog(void);
-+extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
-+extern int avail_to_resrv_perfctr_nmi(unsigned int);
-+extern int reserve_perfctr_nmi(unsigned int);
-+extern void release_perfctr_nmi(unsigned int);
-+extern int reserve_evntsel_nmi(unsigned int);
-+extern void release_evntsel_nmi(unsigned int);
-+
-+extern void setup_apic_nmi_watchdog (void *);
-+extern void stop_apic_nmi_watchdog (void *);
-+extern void disable_timer_nmi_watchdog(void);
-+extern void enable_timer_nmi_watchdog(void);
-+extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
-+
-+extern void nmi_watchdog_default(void);
-+extern int setup_nmi_watchdog(char *);
-+
-+extern atomic_t nmi_active;
-+extern unsigned int nmi_watchdog;
-+#define NMI_DEFAULT -1
-+#define NMI_NONE 0
-+#define NMI_IO_APIC 1
-+#define NMI_LOCAL_APIC 2
-+#define NMI_INVALID 3
-+
-+struct ctl_table;
-+struct file;
-+extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
-+ void __user *, size_t *, loff_t *);
-+
-+extern int unknown_nmi_panic;
-+
-+void __trigger_all_cpu_backtrace(void);
-+#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
-+
-+
-+void lapic_watchdog_stop(void);
-+int lapic_watchdog_init(unsigned nmi_hz);
-+int lapic_wd_event(unsigned nmi_hz);
-+unsigned lapic_adjust_nmi_hz(unsigned hz);
-+int lapic_watchdog_ok(void);
-+void disable_lapic_nmi_watchdog(void);
-+void enable_lapic_nmi_watchdog(void);
-+
-+#endif
-+
-+#endif /* ASM_NMI_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/page.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/page.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/page.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/page.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,212 @@
-+#ifndef _X86_64_PAGE_H
-+#define _X86_64_PAGE_H
-+
-+/* #include <linux/string.h> */
-+#ifndef __ASSEMBLY__
-+#include <linux/kernel.h>
-+#include <linux/types.h>
-+#include <asm/bug.h>
-+#endif
-+#include <linux/const.h>
-+#include <xen/interface/xen.h>
-+
-+/*
-+ * Need to repeat this here in order to not include pgtable.h (which in turn
-+ * depends on definitions made here), but to be able to use the symbolic
-+ * below. The preprocessor will warn if the two definitions aren't identical.
-+ */
-+#define _PAGE_PRESENT 0x001
-+
-+/* PAGE_SHIFT determines the page size */
-+#define PAGE_SHIFT 12
-+#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-+#define PAGE_MASK (~(PAGE_SIZE-1))
-+
-+/* See Documentation/x86_64/mm.txt for a description of the memory map. */
-+#define __PHYSICAL_MASK_SHIFT 46
-+#define __PHYSICAL_MASK ((_AC(1,UL) << __PHYSICAL_MASK_SHIFT) - 1)
-+#define __VIRTUAL_MASK_SHIFT 48
-+#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)
-+
-+#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
-+
-+#define THREAD_ORDER 1
-+#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
-+#define CURRENT_MASK (~(THREAD_SIZE-1))
-+
-+#define EXCEPTION_STACK_ORDER 0
-+#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
-+
-+#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
-+#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
-+
-+#define IRQSTACK_ORDER 2
-+#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
-+
-+#define STACKFAULT_STACK 1
-+#define DOUBLEFAULT_STACK 2
-+#define NMI_STACK 3
-+#define DEBUG_STACK 4
-+#define MCE_STACK 5
-+#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
-+
-+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
-+#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
-+
-+#define HPAGE_SHIFT PMD_SHIFT
-+#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
-+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
-+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-+
-+#ifdef __KERNEL__
-+#ifndef __ASSEMBLY__
-+
-+extern unsigned long end_pfn;
-+
-+#include <asm/maddr.h>
-+
-+void clear_page(void *);
-+void copy_page(void *, void *);
-+
-+#define clear_user_page(page, vaddr, pg) clear_page(page)
-+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-+
-+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
-+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-+
-+/*
-+ * These are used to make use of C type-checking..
-+ */
-+typedef struct { unsigned long pte; } pte_t;
-+typedef struct { unsigned long pmd; } pmd_t;
-+typedef struct { unsigned long pud; } pud_t;
-+typedef struct { unsigned long pgd; } pgd_t;
-+#define PTE_MASK PHYSICAL_PAGE_MASK
-+
-+typedef struct { unsigned long pgprot; } pgprot_t;
-+
-+#define __pte_val(x) ((x).pte)
-+#define pte_val(x) ((__pte_val(x) & _PAGE_PRESENT) ? \
-+ pte_machine_to_phys(__pte_val(x)) : \
-+ __pte_val(x))
-+
-+#define __pmd_val(x) ((x).pmd)
-+static inline unsigned long pmd_val(pmd_t x)
-+{
-+ unsigned long ret = __pmd_val(x);
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
-+#else
-+ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
-+#endif
-+ return ret;
-+}
-+
-+#define __pud_val(x) ((x).pud)
-+static inline unsigned long pud_val(pud_t x)
-+{
-+ unsigned long ret = __pud_val(x);
-+ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
-+ return ret;
-+}
-+
-+#define __pgd_val(x) ((x).pgd)
-+static inline unsigned long pgd_val(pgd_t x)
-+{
-+ unsigned long ret = __pgd_val(x);
-+ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
-+ return ret;
-+}
-+
-+#define pgprot_val(x) ((x).pgprot)
-+
-+static inline pte_t __pte(unsigned long x)
-+{
-+ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
-+ return ((pte_t) { (x) });
-+}
-+
-+static inline pmd_t __pmd(unsigned long x)
-+{
-+ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
-+ return ((pmd_t) { (x) });
-+}
-+
-+static inline pud_t __pud(unsigned long x)
-+{
-+ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
-+ return ((pud_t) { (x) });
-+}
-+
-+static inline pgd_t __pgd(unsigned long x)
-+{
-+ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
-+ return ((pgd_t) { (x) });
-+}
-+
-+#define __pgprot(x) ((pgprot_t) { (x) } )
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#define __PHYSICAL_START CONFIG_PHYSICAL_START
-+#define __KERNEL_ALIGN 0x200000
-+
-+/*
-+ * Make sure kernel is aligned to 2MB address. Catching it at compile
-+ * time is better. Change your config file and compile the kernel
-+ * for a 2MB aligned address (CONFIG_PHYSICAL_START)
-+ */
-+#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
-+#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
-+#endif
-+
-+#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
-+#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
-+#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
-+
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+#undef LOAD_OFFSET
-+#define LOAD_OFFSET 0
-+#endif
-+
-+/* to align the pointer to the (next) page boundary */
-+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-+
-+#define KERNEL_TEXT_SIZE (40*1024*1024)
-+#define KERNEL_TEXT_START _AC(0xffffffff80000000, UL)
-+
-+#define PAGE_OFFSET __PAGE_OFFSET
-+
-+#ifndef __ASSEMBLY__
-+static inline unsigned long __phys_addr(unsigned long x)
-+{
-+ return x - (x >= __START_KERNEL_map ? __START_KERNEL_map : PAGE_OFFSET);
-+}
-+#endif
-+
-+#define __pa(x) __phys_addr((unsigned long)(x))
-+#define __pa_symbol(x) __phys_addr((unsigned long)(x))
-+
-+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-+#define __boot_va(x) __va(x)
-+#define __boot_pa(x) __pa(x)
-+#ifdef CONFIG_FLATMEM
-+#define pfn_valid(pfn) ((pfn) < end_pfn)
-+#endif
-+
-+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-+
-+#define VM_DATA_DEFAULT_FLAGS \
-+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
-+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-+
-+#define __HAVE_ARCH_GATE_AREA 1
-+
-+#include <asm-generic/memory_model.h>
-+#include <asm-generic/page.h>
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /* _X86_64_PAGE_H */
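The core trick in this page.h is that __pte()/pte_val() translate between pseudo-physical and machine addresses only when _PAGE_PRESENT is set, so non-present entries (swap or file PTEs) pass through unchanged; pmd_val(), pud_val() and pgd_val() above apply the same test. A small user-space sketch of the pattern, where the two translation helpers are hypothetical stand-ins (a fixed offset) for the real m2p/p2m lookups:

#include <stdio.h>

#define _PAGE_PRESENT 0x001UL

typedef struct { unsigned long pte; } pte_t;

/* Hypothetical stand-ins for the real m2p/p2m lookups; a fixed offset
 * is used purely for illustration. */
static unsigned long pte_machine_to_phys(unsigned long m) { return m - 0x100000UL; }
static unsigned long pte_phys_to_machine(unsigned long p) { return p + 0x100000UL; }

/* Mirrors __pte(): translate to a machine address only when present. */
static pte_t mk_pte_val(unsigned long x)
{
	if (x & _PAGE_PRESENT)
		x = pte_phys_to_machine(x);
	return (pte_t){ x };
}

/* Mirrors pte_val(): undo the translation only for present entries. */
static unsigned long get_pte_val(pte_t x)
{
	return (x.pte & _PAGE_PRESENT) ? pte_machine_to_phys(x.pte) : x.pte;
}

int main(void)
{
	pte_t present = mk_pte_val(0x2000UL | _PAGE_PRESENT);
	pte_t absent  = mk_pte_val(0x2000UL);	/* e.g. a swap entry: untouched */

	printf("present: raw %#lx, logical %#lx\n", present.pte, get_pte_val(present));
	printf("absent:  raw %#lx, logical %#lx\n", absent.pte, get_pte_val(absent));
	return 0;
}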
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/pci.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/pci.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/pci.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/pci.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,166 @@
-+#ifndef __x8664_PCI_H
-+#define __x8664_PCI_H
-+
-+#include <asm/io.h>
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/mm.h> /* for struct page */
-+
-+/* Can be used to override the logic in pci_scan_bus for skipping
-+ already-configured bus numbers - to be used for buggy BIOSes
-+ or architectures with incomplete PCI setup by the loader */
-+
-+#ifdef CONFIG_PCI
-+extern unsigned int pcibios_assign_all_busses(void);
-+#else
-+#define pcibios_assign_all_busses() 0
-+#endif
-+#define pcibios_scan_all_fns(a, b) 0
-+
-+extern unsigned long pci_mem_start;
-+#define PCIBIOS_MIN_IO 0x1000
-+#define PCIBIOS_MIN_MEM (pci_mem_start)
-+
-+#define PCIBIOS_MIN_CARDBUS_IO 0x4000
-+
-+void pcibios_config_init(void);
-+struct pci_bus * pcibios_scan_root(int bus);
-+extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
-+extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
-+
-+void pcibios_set_master(struct pci_dev *dev);
-+void pcibios_penalize_isa_irq(int irq, int active);
-+struct irq_routing_table *pcibios_get_irq_routing_table(void);
-+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
-+
-+#include <linux/types.h>
-+#include <linux/slab.h>
-+#include <asm/scatterlist.h>
-+#include <linux/string.h>
-+#include <asm/page.h>
-+
-+extern void pci_iommu_alloc(void);
-+extern int iommu_setup(char *opt);
-+
-+/* The PCI address space does equal the physical memory
-+ * address space. The networking and block device layers use
-+ * this boolean for bounce buffer decisions
-+ *
-+ * On AMD64 it mostly equals, but we set it to zero if a hardware
-+ * IOMMU (gart) or software IOMMU (swiotlb) is available.
-+ */
-+#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
-+
-+#if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
-+
-+/*
-+ * x86-64 always supports DAC, but sometimes it is useful to force
-+ * devices through the IOMMU to get automatic sg list merging.
-+ * Optional right now.
-+ */
-+extern int iommu_sac_force;
-+#define pci_dac_dma_supported(pci_dev, mask) (!iommu_sac_force)
-+
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
-+ dma_addr_t ADDR_NAME;
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
-+ __u32 LEN_NAME;
-+#define pci_unmap_addr(PTR, ADDR_NAME) \
-+ ((PTR)->ADDR_NAME)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
-+ (((PTR)->ADDR_NAME) = (VAL))
-+#define pci_unmap_len(PTR, LEN_NAME) \
-+ ((PTR)->LEN_NAME)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
-+ (((PTR)->LEN_NAME) = (VAL))
-+
-+#elif defined(CONFIG_SWIOTLB)
-+
-+#define pci_dac_dma_supported(pci_dev, mask) 1
-+
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
-+ dma_addr_t ADDR_NAME;
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
-+ __u32 LEN_NAME;
-+#define pci_unmap_addr(PTR, ADDR_NAME) \
-+ ((PTR)->ADDR_NAME)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
-+ (((PTR)->ADDR_NAME) = (VAL))
-+#define pci_unmap_len(PTR, LEN_NAME) \
-+ ((PTR)->LEN_NAME)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
-+ (((PTR)->LEN_NAME) = (VAL))
-+
-+#else
-+/* No IOMMU */
-+
-+#define pci_dac_dma_supported(pci_dev, mask) 1
-+
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-+#define pci_unmap_addr(PTR, ADDR_NAME) (0)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
-+#define pci_unmap_len(PTR, LEN_NAME) (0)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
-+
-+#endif
-+
-+#include <asm-generic/pci-dma-compat.h>
-+
-+static inline dma64_addr_t
-+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
-+{
-+ return ((dma64_addr_t) page_to_phys(page) +
-+ (dma64_addr_t) offset);
-+}
-+
-+static inline struct page *
-+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
-+{
-+ return virt_to_page(__va(dma_addr));
-+}
-+
-+static inline unsigned long
-+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
-+{
-+ return (dma_addr & ~PAGE_MASK);
-+}
-+
-+static inline void
-+pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-+{
-+}
-+
-+static inline void
-+pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-+{
-+ flush_write_buffers();
-+}
-+
-+#ifdef CONFIG_PCI
-+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-+ enum pci_dma_burst_strategy *strat,
-+ unsigned long *strategy_parameter)
-+{
-+ *strat = PCI_DMA_BURST_INFINITY;
-+ *strategy_parameter = ~0UL;
-+}
-+#endif
-+
-+#define HAVE_PCI_MMAP
-+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-+ enum pci_mmap_state mmap_state, int write_combine);
-+
-+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
-+{
-+}
-+
-+#endif /* __KERNEL__ */
-+
-+/* generic pci stuff */
-+#ifdef CONFIG_PCI
-+#include <asm-generic/pci.h>
-+#endif
-+
-+#endif /* __x8664_PCI_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/pgalloc.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/pgalloc.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/pgalloc.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/pgalloc.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,195 @@
-+#ifndef _X86_64_PGALLOC_H
-+#define _X86_64_PGALLOC_H
-+
-+#include <asm/pda.h>
-+#include <linux/threads.h>
-+#include <linux/mm.h>
-+#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
-+
-+#include <xen/features.h>
-+void make_page_readonly(void *va, unsigned int feature);
-+void make_page_writable(void *va, unsigned int feature);
-+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
-+void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
-+
-+#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
-+
-+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
-+{
-+ set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
-+}
-+
-+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
-+{
-+ if (unlikely((mm)->context.pinned)) {
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
-+ pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
-+ set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
-+ } else {
-+ *(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT));
-+ }
-+}
-+
-+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-+{
-+ if (unlikely((mm)->context.pinned)) {
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)pmd,
-+ pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT,
-+ PAGE_KERNEL_RO), 0));
-+ set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
-+ } else {
-+ *(pud) = __pud(_PAGE_TABLE | __pa(pmd));
-+ }
-+}
-+
-+/*
-+ * We need to use the batch mode here, but pgd_populate() won't
-+ * be called frequently.
-+ */
-+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
-+{
-+ if (unlikely((mm)->context.pinned)) {
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)pud,
-+ pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT,
-+ PAGE_KERNEL_RO), 0));
-+ set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
-+ set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
-+ } else {
-+ *(pgd) = __pgd(_PAGE_TABLE | __pa(pud));
-+ *(__user_pgd(pgd)) = *(pgd);
-+ }
-+}
-+
-+extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
-+extern void pte_free(struct page *pte);
-+
-+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-+{
-+ struct page *pg;
-+
-+ pg = pte_alloc_one(mm, addr);
-+ return pg ? page_address(pg) : NULL;
-+}
-+
-+static inline void pmd_free(pmd_t *pmd)
-+{
-+ BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-+ pte_free(virt_to_page(pmd));
-+}
-+
-+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-+{
-+ struct page *pg;
-+
-+ pg = pte_alloc_one(mm, addr);
-+ return pg ? page_address(pg) : NULL;
-+}
-+
-+static inline void pud_free(pud_t *pud)
-+{
-+ BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-+ pte_free(virt_to_page(pud));
-+}
-+
-+static inline void pgd_list_add(pgd_t *pgd)
-+{
-+ struct page *page = virt_to_page(pgd);
-+
-+ spin_lock(&pgd_lock);
-+ list_add(&page->lru, &pgd_list);
-+ spin_unlock(&pgd_lock);
-+}
-+
-+static inline void pgd_list_del(pgd_t *pgd)
-+{
-+ struct page *page = virt_to_page(pgd);
-+
-+ spin_lock(&pgd_lock);
-+ list_del(&page->lru);
-+ spin_unlock(&pgd_lock);
-+}
-+
-+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-+{
-+ /*
-+ * We allocate two contiguous pages for kernel and user.
-+ */
-+ unsigned boundary;
-+ pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
-+ if (!pgd)
-+ return NULL;
-+ pgd_list_add(pgd);
-+ /*
-+ * Copy kernel pointers in from init.
-+ * Could keep a freelist or slab cache of those because the kernel
-+ * part never changes.
-+ */
-+ boundary = pgd_index(__PAGE_OFFSET);
-+ memset(pgd, 0, boundary * sizeof(pgd_t));
-+ memcpy(pgd + boundary,
-+ init_level4_pgt + boundary,
-+ (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
-+
-+ memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
-+ /*
-+ * Set level3_user_pgt for vsyscall area
-+ */
-+ __user_pgd(pgd)[pgd_index(VSYSCALL_START)] =
-+ __pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE);
-+ return pgd;
-+}
-+
-+static inline void pgd_free(pgd_t *pgd)
-+{
-+ pte_t *ptep = virt_to_ptep(pgd);
-+
-+ if (!pte_write(*ptep)) {
-+ xen_pgd_unpin(__pa(pgd));
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)pgd,
-+ pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL),
-+ 0));
-+ }
-+
-+ ptep = virt_to_ptep(__user_pgd(pgd));
-+
-+ if (!pte_write(*ptep)) {
-+ xen_pgd_unpin(__pa(__user_pgd(pgd)));
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)__user_pgd(pgd),
-+ pfn_pte(virt_to_phys(__user_pgd(pgd))>>PAGE_SHIFT,
-+ PAGE_KERNEL),
-+ 0));
-+ }
-+
-+ pgd_list_del(pgd);
-+ free_pages((unsigned long)pgd, 1);
-+}
-+
-+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-+{
-+ pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-+ if (pte)
-+ make_page_readonly(pte, XENFEAT_writable_page_tables);
-+
-+ return pte;
-+}
-+
-+/* Should really implement gc for free page table pages. This could be
-+ done with a reference count in struct page. */
-+
-+static inline void pte_free_kernel(pte_t *pte)
-+{
-+ BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
-+ make_page_writable(pte, XENFEAT_writable_page_tables);
-+ free_page((unsigned long)pte);
-+}
-+
-+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
-+#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
-+#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
-+
-+#endif /* _X86_64_PGALLOC_H */
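The populate helpers above all follow one pattern: once an mm's page tables are pinned (registered with the hypervisor), a page about to become part of the tree is first remapped read-only, and only then hooked in; unpinned tables take a plain store. A compressed, user-space sketch of that control flow, where remap_readonly() is a hypothetical stand-in for the HYPERVISOR_update_va_mapping() call and the final store collapses what the real code routes through set_pmd():

#include <stdio.h>
#include <stdbool.h>

struct mm_context { bool pinned; };

/* Hypothetical stand-in for remapping a page read-only before it
 * becomes part of a pinned page-table tree. */
static int remap_readonly(void *page)
{
	printf("remapping %p read-only\n", page);
	return 0;	/* 0 == success, as with the real hypercall */
}

/* Models the branch in pmd_populate() above: pinned mms remap the new
 * page read-only first, unpinned mms just write the entry. */
static void populate(struct mm_context *ctx, unsigned long *entry, void *pte_page)
{
	const unsigned long flags = 0x067;	/* illustrative _PAGE_TABLE bits */

	if (ctx->pinned && remap_readonly(pte_page) != 0)
		return;
	*entry = (unsigned long)pte_page | flags;
}

int main(void)
{
	static unsigned long pte_page[512];
	unsigned long pmd_entry = 0;
	struct mm_context pinned = { .pinned = true };
	struct mm_context fresh  = { .pinned = false };

	populate(&fresh, &pmd_entry, pte_page);		/* plain write */
	populate(&pinned, &pmd_entry, pte_page);	/* remap first */
	return 0;
}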
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/pgtable.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/pgtable.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/pgtable.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/pgtable.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,586 @@
-+#ifndef _X86_64_PGTABLE_H
-+#define _X86_64_PGTABLE_H
-+
-+#include <linux/const.h>
-+#ifndef __ASSEMBLY__
-+
-+/*
-+ * This file contains the functions and defines necessary to modify and use
-+ * the x86-64 page table tree.
-+ */
-+#include <asm/processor.h>
-+#include <asm/bitops.h>
-+#include <linux/threads.h>
-+#include <linux/sched.h>
-+#include <asm/pda.h>
-+#ifdef CONFIG_XEN
-+#include <asm/hypervisor.h>
-+
-+extern pud_t level3_user_pgt[512];
-+
-+extern void xen_init_pt(void);
-+
-+#define virt_to_ptep(__va) \
-+({ \
-+ pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
-+ pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va)); \
-+ pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va)); \
-+ pte_offset_kernel(__pmd, (unsigned long)(__va)); \
-+})
-+
-+#define arbitrary_virt_to_machine(__va) \
-+({ \
-+ maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
-+ m | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
-+})
-+#endif
-+
-+extern pud_t level3_kernel_pgt[512];
-+extern pud_t level3_ident_pgt[512];
-+extern pmd_t level2_kernel_pgt[512];
-+extern pgd_t init_level4_pgt[];
-+extern unsigned long __supported_pte_mask;
-+
-+#define swapper_pg_dir init_level4_pgt
-+
-+extern void paging_init(void);
-+extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
-+
-+/*
-+ * ZERO_PAGE is a global shared page that is always zero: used
-+ * for zero-mapped memory areas etc..
-+ */
-+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
-+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+/*
-+ * PGDIR_SHIFT determines what a top-level page table entry can map
-+ */
-+#define PGDIR_SHIFT 39
-+#define PTRS_PER_PGD 512
-+
-+/*
-+ * 3rd level page
-+ */
-+#define PUD_SHIFT 30
-+#define PTRS_PER_PUD 512
-+
-+/*
-+ * PMD_SHIFT determines the size of the area a middle-level
-+ * page table can map
-+ */
-+#define PMD_SHIFT 21
-+#define PTRS_PER_PMD 512
-+
-+/*
-+ * entries per page directory level
-+ */
-+#define PTRS_PER_PTE 512
-+
-+#ifndef __ASSEMBLY__
-+
-+#define pte_ERROR(e) \
-+ printk("%s:%d: bad pte %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
-+ &(e), __pte_val(e), pte_pfn(e))
-+#define pmd_ERROR(e) \
-+ printk("%s:%d: bad pmd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
-+ &(e), __pmd_val(e), pmd_pfn(e))
-+#define pud_ERROR(e) \
-+ printk("%s:%d: bad pud %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
-+ &(e), __pud_val(e), (pud_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-+#define pgd_ERROR(e) \
-+ printk("%s:%d: bad pgd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
-+ &(e), __pgd_val(e), (pgd_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-+
-+#define pgd_none(x) (!__pgd_val(x))
-+#define pud_none(x) (!__pud_val(x))
-+
-+static inline void set_pte(pte_t *dst, pte_t val)
-+{
-+ *dst = val;
-+}
-+
-+#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval))
-+#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval))
-+#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))
-+
-+static inline void pud_clear (pud_t * pud)
-+{
-+ set_pud(pud, __pud(0));
-+}
-+
-+#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
-+
-+static inline void pgd_clear (pgd_t * pgd)
-+{
-+ set_pgd(pgd, __pgd(0));
-+ set_pgd(__user_pgd(pgd), __pgd(0));
-+}
-+
-+#define pte_same(a, b) ((a).pte == (b).pte)
-+
-+#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
-+#define PMD_MASK (~(PMD_SIZE-1))
-+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
-+#define PUD_MASK (~(PUD_SIZE-1))
-+#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
-+#define PGDIR_MASK (~(PGDIR_SIZE-1))
-+
-+#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
-+#define FIRST_USER_ADDRESS 0
-+
-+#define MAXMEM _AC(0x3fffffffffff, UL)
-+#define VMALLOC_START _AC(0xffffc20000000000, UL)
-+#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
-+#define MODULES_VADDR _AC(0xffffffff88000000, UL)
-+#define MODULES_END _AC(0xfffffffffff00000, UL)
-+#define MODULES_LEN (MODULES_END - MODULES_VADDR)
-+
-+#define _PAGE_BIT_PRESENT 0
-+#define _PAGE_BIT_RW 1
-+#define _PAGE_BIT_USER 2
-+#define _PAGE_BIT_PWT 3
-+#define _PAGE_BIT_PCD 4
-+#define _PAGE_BIT_ACCESSED 5
-+#define _PAGE_BIT_DIRTY 6
-+#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
-+#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
-+#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
-+
-+#define _PAGE_PRESENT 0x001
-+#define _PAGE_RW 0x002
-+#define _PAGE_USER 0x004
-+#define _PAGE_PWT 0x008
-+#define _PAGE_PCD 0x010
-+#define _PAGE_ACCESSED 0x020
-+#define _PAGE_DIRTY 0x040
-+#define _PAGE_PSE 0x080 /* 2MB page */
-+#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
-+#define _PAGE_GLOBAL 0x100 /* Global TLB entry */
-+
-+#define _PAGE_PROTNONE 0x080 /* If not present */
-+#define _PAGE_NX (_AC(1,UL)<<_PAGE_BIT_NX)
-+
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+extern unsigned int __kernel_page_user;
-+#else
-+#define __kernel_page_user 0
-+#endif
-+
-+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | __kernel_page_user)
-+
-+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-+
-+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-+#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-+#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-+#define PAGE_COPY PAGE_COPY_NOEXEC
-+#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-+#define __PAGE_KERNEL \
-+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
-+#define __PAGE_KERNEL_EXEC \
-+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | __kernel_page_user)
-+#define __PAGE_KERNEL_NOCACHE \
-+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
-+#define __PAGE_KERNEL_RO \
-+ (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
-+#define __PAGE_KERNEL_VSYSCALL \
-+ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-+#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
-+ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
-+#define __PAGE_KERNEL_LARGE \
-+ (__PAGE_KERNEL | _PAGE_PSE)
-+#define __PAGE_KERNEL_LARGE_EXEC \
-+ (__PAGE_KERNEL_EXEC | _PAGE_PSE)
-+
-+/*
-+ * We don't support GLOBAL page in xenolinux64
-+ */
-+#define MAKE_GLOBAL(x) __pgprot((x))
-+
-+#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
-+#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
-+#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
-+#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
-+#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
-+#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
-+#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
-+#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
-+
-+/* xwr */
-+#define __P000 PAGE_NONE
-+#define __P001 PAGE_READONLY
-+#define __P010 PAGE_COPY
-+#define __P011 PAGE_COPY
-+#define __P100 PAGE_READONLY_EXEC
-+#define __P101 PAGE_READONLY_EXEC
-+#define __P110 PAGE_COPY_EXEC
-+#define __P111 PAGE_COPY_EXEC
-+
-+#define __S000 PAGE_NONE
-+#define __S001 PAGE_READONLY
-+#define __S010 PAGE_SHARED
-+#define __S011 PAGE_SHARED
-+#define __S100 PAGE_READONLY_EXEC
-+#define __S101 PAGE_READONLY_EXEC
-+#define __S110 PAGE_SHARED_EXEC
-+#define __S111 PAGE_SHARED_EXEC
-+
-+#ifndef __ASSEMBLY__
-+
-+static inline unsigned long pgd_bad(pgd_t pgd)
-+{
-+ return __pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
-+}
-+
-+static inline unsigned long pud_bad(pud_t pud)
-+{
-+ return __pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
-+}
-+
-+static inline unsigned long pmd_bad(pmd_t pmd)
-+{
-+ return __pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
-+}
-+
-+#define set_pte_at(_mm,addr,ptep,pteval) do { \
-+ if (((_mm) != current->mm && (_mm) != &init_mm) || \
-+ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \
-+ set_pte((ptep), (pteval)); \
-+} while (0)
-+
-+#define pte_none(x) (!(x).pte)
-+#define pte_present(x) ((x).pte & (_PAGE_PRESENT | _PAGE_PROTNONE))
-+#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
-+
-+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-+
-+#define __pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
-+#define pte_mfn(_pte) ((_pte).pte & _PAGE_PRESENT ? \
-+ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
-+#define pte_pfn(_pte) ((_pte).pte & _PAGE_PRESENT ? \
-+ mfn_to_local_pfn(__pte_mfn(_pte)) : __pte_mfn(_pte))
-+
-+#define pte_page(x) pfn_to_page(pte_pfn(x))
-+
-+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-+{
-+ unsigned long pte = page_nr << PAGE_SHIFT;
-+ pte |= pgprot_val(pgprot);
-+ pte &= __supported_pte_mask;
-+ return __pte(pte);
-+}
-+
-+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-+{
-+ pte_t pte = *ptep;
-+ if (!pte_none(pte)) {
-+ if (mm != &init_mm)
-+ pte = __pte_ma(xchg(&ptep->pte, 0));
-+ else
-+ HYPERVISOR_update_va_mapping(addr, __pte(0), 0);
-+ }
-+ return pte;
-+}
-+
-+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
-+{
-+ if (full) {
-+ pte_t pte = *ptep;
-+ if (mm->context.pinned)
-+ xen_l1_entry_update(ptep, __pte(0));
-+ else
-+ *ptep = __pte(0);
-+ return pte;
-+ }
-+ return ptep_get_and_clear(mm, addr, ptep);
-+}
-+
-+#define ptep_clear_flush(vma, addr, ptep) \
-+({ \
-+ pte_t *__ptep = (ptep); \
-+ pte_t __res = *__ptep; \
-+ if (!pte_none(__res) && \
-+ ((vma)->vm_mm != current->mm || \
-+ HYPERVISOR_update_va_mapping(addr, __pte(0), \
-+ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
-+ UVMF_INVLPG|UVMF_MULTI))) { \
-+ __ptep->pte = 0; \
-+ flush_tlb_page(vma, addr); \
-+ } \
-+ __res; \
-+})
-+
-+/*
-+ * The following only work if pte_present() is true.
-+ * Undefined behaviour if not..
-+ */
-+#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
-+static inline int pte_user(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
-+static inline int pte_read(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
-+static inline int pte_exec(pte_t pte) { return !(__pte_val(pte) & _PAGE_NX); }
-+static inline int pte_dirty(pte_t pte) { return __pte_val(pte) & _PAGE_DIRTY; }
-+static inline int pte_young(pte_t pte) { return __pte_val(pte) & _PAGE_ACCESSED; }
-+static inline int pte_write(pte_t pte) { return __pte_val(pte) & _PAGE_RW; }
-+static inline int pte_file(pte_t pte) { return __pte_val(pte) & _PAGE_FILE; }
-+static inline int pte_huge(pte_t pte) { return __pte_val(pte) & _PAGE_PSE; }
-+
-+static inline pte_t pte_rdprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
-+static inline pte_t pte_exprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
-+static inline pte_t pte_mkclean(pte_t pte) { __pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
-+static inline pte_t pte_mkold(pte_t pte) { __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-+static inline pte_t pte_wrprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_RW; return pte; }
-+static inline pte_t pte_mkread(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
-+static inline pte_t pte_mkexec(pte_t pte) { __pte_val(pte) &= ~_PAGE_NX; return pte; }
-+static inline pte_t pte_mkdirty(pte_t pte) { __pte_val(pte) |= _PAGE_DIRTY; return pte; }
-+static inline pte_t pte_mkyoung(pte_t pte) { __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-+static inline pte_t pte_mkwrite(pte_t pte) { __pte_val(pte) |= _PAGE_RW; return pte; }
-+static inline pte_t pte_mkhuge(pte_t pte) { __pte_val(pte) |= _PAGE_PSE; return pte; }
-+static inline pte_t pte_clrhuge(pte_t pte) { __pte_val(pte) &= ~_PAGE_PSE; return pte; }
-+
-+static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-+{
-+ if (!pte_dirty(*ptep))
-+ return 0;
-+ return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
-+}
-+
-+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-+{
-+ if (!pte_young(*ptep))
-+ return 0;
-+ return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
-+}
-+
-+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-+{
-+ pte_t pte = *ptep;
-+ if (pte_write(pte))
-+ set_pte_at(mm, addr, ptep, pte_wrprotect(pte));
-+}
-+
-+/*
-+ * Macro to mark a page protection value as "uncacheable".
-+ */
-+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
-+
-+static inline int pmd_large(pmd_t pte) {
-+ return (__pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
-+}
-+
-+
-+/*
-+ * Conversion functions: convert a page and protection to a page entry,
-+ * and a page entry and page directory to the page they refer to.
-+ */
-+
-+/*
-+ * Level 4 access.
-+ * Never use these in the common code.
-+ */
-+#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
-+#define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT))
-+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-+#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-+#define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
-+#define pgd_present(pgd) (__pgd_val(pgd) & _PAGE_PRESENT)
-+#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
-+
-+/* PUD - Level3 access */
-+/* to find an entry in a page-table-directory. */
-+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
-+#define pud_page(pud) (pfn_to_page(pud_val(pud) >> PAGE_SHIFT))
-+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-+#define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
-+#define pud_present(pud) (__pud_val(pud) & _PAGE_PRESENT)
-+
-+/* PMD - Level 2 access */
-+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
-+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-+
-+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-+#define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \
-+ pmd_index(address))
-+#define pmd_none(x) (!__pmd_val(x))
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
-+ can temporarily clear it. */
-+#define pmd_present(x) (__pmd_val(x))
-+#else
-+#define pmd_present(x) (__pmd_val(x) & _PAGE_PRESENT)
-+#endif
-+#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
-+#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
-+#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-+
-+#define pte_to_pgoff(pte) ((__pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
-+#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
-+#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
-+
-+/* PTE - Level 1 access. */
-+
-+/* page, protection -> pte */
-+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-+#define mk_pte_huge(entry) (__pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
-+
-+/* Change flags of a PTE */
-+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-+{
-+ /*
-+ * Since this might change the present bit (which controls whether
-+ * a pte_t object has undergone p2m translation), we must use
-+ * pte_val() on the input pte and __pte() for the return value.
-+ */
-+ unsigned long pteval = pte_val(pte);
-+
-+ pteval &= _PAGE_CHG_MASK;
-+ pteval |= pgprot_val(newprot);
-+ pteval &= __supported_pte_mask;
-+ return __pte(pteval);
-+}
-+
-+#define pte_index(address) \
-+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-+#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
-+ pte_index(address))
-+
-+/* x86-64 always has all page tables mapped. */
-+#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
-+#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
-+#define pte_unmap(pte) /* NOP */
-+#define pte_unmap_nested(pte) /* NOP */
-+
-+#define update_mmu_cache(vma,address,pte) do { } while (0)
-+
-+/*
-+ * Rules for using ptep_establish: the pte MUST be a user pte, and
-+ * must be a present->present transition.
-+ */
-+#define __HAVE_ARCH_PTEP_ESTABLISH
-+#define ptep_establish(vma, address, ptep, pteval) \
-+ do { \
-+ if ( likely((vma)->vm_mm == current->mm) ) { \
-+ BUG_ON(HYPERVISOR_update_va_mapping(address, \
-+ pteval, \
-+ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
-+ UVMF_INVLPG|UVMF_MULTI)); \
-+ } else { \
-+ xen_l1_entry_update(ptep, pteval); \
-+ flush_tlb_page(vma, address); \
-+ } \
-+ } while (0)
-+
-+/* We only update the dirty/accessed state if we set
-+ * the dirty bit by hand in the kernel, since the hardware
-+ * will do the accessed bit for us, and we don't want to
-+ * race with other CPU's that might be updating the dirty
-+ * bit at the same time. */
-+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-+#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
-+({ \
-+ int __changed = !pte_same(*(ptep), entry); \
-+ if (__changed && (dirty)) \
-+ ptep_establish(vma, address, ptep, entry); \
-+ __changed; \
-+})
-+
-+#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-+#define ptep_clear_flush_dirty(vma, address, ptep) \
-+({ \
-+ pte_t __pte = *(ptep); \
-+ int __dirty = pte_dirty(__pte); \
-+ __pte = pte_mkclean(__pte); \
-+ if ((vma)->vm_mm->context.pinned) \
-+ (void)ptep_set_access_flags(vma, address, ptep, __pte, __dirty); \
-+ else if (__dirty) \
-+ set_pte(ptep, __pte); \
-+ __dirty; \
-+})
-+
-+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-+#define ptep_clear_flush_young(vma, address, ptep) \
-+({ \
-+ pte_t __pte = *(ptep); \
-+ int __young = pte_young(__pte); \
-+ __pte = pte_mkold(__pte); \
-+ if ((vma)->vm_mm->context.pinned) \
-+ (void)ptep_set_access_flags(vma, address, ptep, __pte, __young); \
-+ else if (__young) \
-+ set_pte(ptep, __pte); \
-+ __young; \
-+})
-+
-+/* Encode and de-code a swap entry */
-+#define __swp_type(x) (((x).val >> 1) & 0x3f)
-+#define __swp_offset(x) ((x).val >> 8)
-+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-+
-+extern spinlock_t pgd_lock;
-+extern struct list_head pgd_list;
-+
-+extern int kern_addr_valid(unsigned long addr);
-+
-+#define DOMID_LOCAL (0xFFFFU)
-+
-+struct vm_area_struct;
-+
-+int direct_remap_pfn_range(struct vm_area_struct *vma,
-+ unsigned long address,
-+ unsigned long mfn,
-+ unsigned long size,
-+ pgprot_t prot,
-+ domid_t domid);
-+
-+int direct_kernel_remap_pfn_range(unsigned long address,
-+ unsigned long mfn,
-+ unsigned long size,
-+ pgprot_t prot,
-+ domid_t domid);
-+
-+int create_lookup_pte_addr(struct mm_struct *mm,
-+ unsigned long address,
-+ uint64_t *ptep);
-+
-+int touch_pte_range(struct mm_struct *mm,
-+ unsigned long address,
-+ unsigned long size);
-+
-+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-+ direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)
-+
-+#define HAVE_ARCH_UNMAPPED_AREA
-+
-+#define pgtable_cache_init() do { } while (0)
-+#define check_pgt_cache() do { } while (0)
-+
-+#define PAGE_AGP PAGE_KERNEL_NOCACHE
-+#define HAVE_PAGE_AGP 1
-+
-+/* fs/proc/kcore.c */
-+#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
-+#define kc_offset_to_vaddr(o) \
-+ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
-+
-+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
-+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-+#define __HAVE_ARCH_PTE_SAME
-+#include <asm-generic/pgtable.h>
-+#endif /* !__ASSEMBLY__ */
-+
-+#endif /* _X86_64_PGTABLE_H */
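For reference, the index macros above (pgd_index/pud_index/pmd_index/pte_index) slice a 48-bit virtual address into four 9-bit table indices and a 12-bit page offset, using PGDIR_SHIFT 39, PUD_SHIFT 30, PMD_SHIFT 21 and PAGE_SHIFT 12 with 512 entries per level. A standalone check of that arithmetic (the sample address is arbitrary):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PGDIR_SHIFT	39
#define PTRS_PER_LEVEL	512UL	/* 9 index bits at each level */

int main(void)
{
	unsigned long addr = 0xffff880012345678UL;	/* arbitrary example */

	printf("pgd=%lu pud=%lu pmd=%lu pte=%lu offset=%#lx\n",
	       (addr >> PGDIR_SHIFT) & (PTRS_PER_LEVEL - 1),
	       (addr >> PUD_SHIFT)   & (PTRS_PER_LEVEL - 1),
	       (addr >> PMD_SHIFT)   & (PTRS_PER_LEVEL - 1),
	       (addr >> PAGE_SHIFT)  & (PTRS_PER_LEVEL - 1),
	       addr & ((1UL << PAGE_SHIFT) - 1));
	return 0;
}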
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/processor.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/processor.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/processor.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/processor.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,465 @@
-+/*
-+ * include/asm-x86_64/processor.h
-+ *
-+ * Copyright (C) 1994 Linus Torvalds
-+ */
-+
-+#ifndef __ASM_X86_64_PROCESSOR_H
-+#define __ASM_X86_64_PROCESSOR_H
-+
-+#include <asm/segment.h>
-+#include <asm/page.h>
-+#include <asm/types.h>
-+#include <asm/sigcontext.h>
-+#include <asm/cpufeature.h>
-+#include <linux/threads.h>
-+#include <asm/msr.h>
-+#include <asm/current.h>
-+#include <asm/system.h>
-+#include <asm/mmsegment.h>
-+#include <asm/percpu.h>
-+#include <linux/personality.h>
-+#include <linux/cpumask.h>
-+#include <asm/processor-flags.h>
-+
-+#define TF_MASK 0x00000100
-+#define IF_MASK 0x00000200
-+#define IOPL_MASK 0x00003000
-+#define NT_MASK 0x00004000
-+#define VM_MASK 0x00020000
-+#define AC_MASK 0x00040000
-+#define VIF_MASK 0x00080000 /* virtual interrupt flag */
-+#define VIP_MASK 0x00100000 /* virtual interrupt pending */
-+#define ID_MASK 0x00200000
-+
-+#define desc_empty(desc) \
-+ (!((desc)->a | (desc)->b))
-+
-+#define desc_equal(desc1, desc2) \
-+ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
-+
-+/*
-+ * Default implementation of macro that returns current
-+ * instruction pointer ("program counter").
-+ */
-+#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
-+
-+/*
-+ * CPU type and hardware bug flags. Kept separately for each CPU.
-+ */
-+
-+struct cpuinfo_x86 {
-+ __u8 x86; /* CPU family */
-+ __u8 x86_vendor; /* CPU vendor */
-+ __u8 x86_model;
-+ __u8 x86_mask;
-+ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
-+ __u32 x86_capability[NCAPINTS];
-+ char x86_vendor_id[16];
-+ char x86_model_id[64];
-+ int x86_cache_size; /* in KB */
-+ int x86_clflush_size;
-+ int x86_cache_alignment;
-+ int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined(in pages)*/
-+ __u8 x86_virt_bits, x86_phys_bits;
-+ __u8 x86_max_cores; /* cpuid returned max cores value */
-+ __u32 x86_power;
-+ __u32 extended_cpuid_level; /* Max extended CPUID function supported */
-+ unsigned long loops_per_jiffy;
-+#ifdef CONFIG_SMP
-+ cpumask_t llc_shared_map; /* cpus sharing the last level cache */
-+#endif
-+ __u8 apicid;
-+#ifdef CONFIG_SMP
-+ __u8 booted_cores; /* number of cores as seen by OS */
-+ __u8 phys_proc_id; /* Physical Processor id. */
-+ __u8 cpu_core_id; /* Core id. */
-+#endif
-+} ____cacheline_aligned;
-+
-+#define X86_VENDOR_INTEL 0
-+#define X86_VENDOR_CYRIX 1
-+#define X86_VENDOR_AMD 2
-+#define X86_VENDOR_UMC 3
-+#define X86_VENDOR_NEXGEN 4
-+#define X86_VENDOR_CENTAUR 5
-+#define X86_VENDOR_RISE 6
-+#define X86_VENDOR_TRANSMETA 7
-+#define X86_VENDOR_NUM 8
-+#define X86_VENDOR_UNKNOWN 0xff
-+
-+#ifdef CONFIG_SMP
-+extern struct cpuinfo_x86 cpu_data[];
-+#define current_cpu_data cpu_data[smp_processor_id()]
-+#else
-+#define cpu_data (&boot_cpu_data)
-+#define current_cpu_data boot_cpu_data
-+#endif
-+
-+extern char ignore_irq13;
-+
-+extern void identify_cpu(struct cpuinfo_x86 *);
-+extern void print_cpu_info(struct cpuinfo_x86 *);
-+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
-+extern unsigned short num_cache_leaves;
-+
-+/*
-+ * Save the cr4 feature set we're using (ie
-+ * Pentium 4MB enable and PPro Global page
-+ * enable), so that any CPU's that boot up
-+ * after us can get the correct flags.
-+ */
-+extern unsigned long mmu_cr4_features;
-+
-+static inline void set_in_cr4 (unsigned long mask)
-+{
-+ mmu_cr4_features |= mask;
-+ __asm__("movq %%cr4,%%rax\n\t"
-+ "orq %0,%%rax\n\t"
-+ "movq %%rax,%%cr4\n"
-+ : : "irg" (mask)
-+ :"ax");
-+}
-+
-+static inline void clear_in_cr4 (unsigned long mask)
-+{
-+ mmu_cr4_features &= ~mask;
-+ __asm__("movq %%cr4,%%rax\n\t"
-+ "andq %0,%%rax\n\t"
-+ "movq %%rax,%%cr4\n"
-+ : : "irg" (~mask)
-+ :"ax");
-+}
-+
-+
-+/*
-+ * Bus types
-+ */
-+#define MCA_bus 0
-+#define MCA_bus__is_a_macro
-+
-+/*
-+ * User space process size. 47 bits minus one guard page.
-+ */
-+#define TASK_SIZE64 (0x800000000000UL - 4096)
-+
-+/* This decides where the kernel will search for a free chunk of vm
-+ * space during mmap's.
-+ */
-+#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
-+
-+#define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
-+#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
-+
-+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3)
-+
-+/*
-+ * Size of io_bitmap.
-+ */
-+#define IO_BITMAP_BITS 65536
-+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
-+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-+#ifndef CONFIG_X86_NO_TSS
-+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-+#endif
-+#define INVALID_IO_BITMAP_OFFSET 0x8000
-+
-+struct i387_fxsave_struct {
-+ u16 cwd;
-+ u16 swd;
-+ u16 twd;
-+ u16 fop;
-+ u64 rip;
-+ u64 rdp;
-+ u32 mxcsr;
-+ u32 mxcsr_mask;
-+ u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
-+ u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
-+ u32 padding[24];
-+} __attribute__ ((aligned (16)));
-+
-+union i387_union {
-+ struct i387_fxsave_struct fxsave;
-+};
-+
-+#ifndef CONFIG_X86_NO_TSS
-+struct tss_struct {
-+ u32 reserved1;
-+ u64 rsp0;
-+ u64 rsp1;
-+ u64 rsp2;
-+ u64 reserved2;
-+ u64 ist[7];
-+ u32 reserved3;
-+ u32 reserved4;
-+ u16 reserved5;
-+ u16 io_bitmap_base;
-+ /*
-+ * The extra 1 is there because the CPU will access an
-+ * additional byte beyond the end of the IO permission
-+ * bitmap. The extra byte must be all 1 bits, and must
-+ * be within the limit. Thus we have:
-+ *
-+ * 128 bytes, the bitmap itself, for ports 0..0x3ff
-+ * 8 bytes, for an extra "long" of ~0UL
-+ */
-+ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
-+} __attribute__((packed)) ____cacheline_aligned;
-+
-+DECLARE_PER_CPU(struct tss_struct,init_tss);
-+#endif
-+
-+
-+extern struct cpuinfo_x86 boot_cpu_data;
-+#ifndef CONFIG_X86_NO_TSS
-+/* Save the original ist values for checking stack pointers during debugging */
-+struct orig_ist {
-+ unsigned long ist[7];
-+};
-+DECLARE_PER_CPU(struct orig_ist, orig_ist);
-+#endif
-+
-+#ifdef CONFIG_X86_VSMP
-+#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
-+#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
-+#else
-+#define ARCH_MIN_TASKALIGN 16
-+#define ARCH_MIN_MMSTRUCT_ALIGN 0
-+#endif
-+
-+struct thread_struct {
-+ unsigned long rsp0;
-+ unsigned long rsp;
-+ unsigned long userrsp; /* Copy from PDA */
-+ unsigned long fs;
-+ unsigned long gs;
-+ unsigned short es, ds, fsindex, gsindex;
-+/* Hardware debugging registers */
-+ unsigned long debugreg0;
-+ unsigned long debugreg1;
-+ unsigned long debugreg2;
-+ unsigned long debugreg3;
-+ unsigned long debugreg6;
-+ unsigned long debugreg7;
-+/* fault info */
-+ unsigned long cr2, trap_no, error_code;
-+/* floating point info */
-+ union i387_union i387 __attribute__((aligned(16)));
-+/* IO permissions. The bitmap could be moved into the GDT; that would make
-+ the switch faster for a limited number of ioperm-using tasks. -AK */
-+ int ioperm;
-+ unsigned long *io_bitmap_ptr;
-+ unsigned io_bitmap_max;
-+/* cached TLS descriptors. */
-+ u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
-+ unsigned int iopl;
-+} __attribute__((aligned(16)));
-+
-+#define INIT_THREAD { \
-+ .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
-+}
-+
-+#ifndef CONFIG_X86_NO_TSS
-+#define INIT_TSS { \
-+ .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
-+}
-+#endif
-+
-+#define INIT_MMAP \
-+{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
-+
-+#define start_thread(regs,new_rip,new_rsp) do { \
-+ asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
-+ load_gs_index(0); \
-+ (regs)->rip = (new_rip); \
-+ (regs)->rsp = (new_rsp); \
-+ write_pda(oldrsp, (new_rsp)); \
-+ (regs)->cs = __USER_CS; \
-+ (regs)->ss = __USER_DS; \
-+ (regs)->eflags = 0x200; \
-+ set_fs(USER_DS); \
-+} while(0)
-+
-+#define get_debugreg(var, register) \
-+ var = HYPERVISOR_get_debugreg(register)
-+#define set_debugreg(value, register) \
-+ HYPERVISOR_set_debugreg(register, value)
-+
-+struct task_struct;
-+struct mm_struct;
-+
-+/* Free all resources held by a thread. */
-+extern void release_thread(struct task_struct *);
-+
-+/* Prepare to copy thread state - unlazy all lazy status */
-+extern void prepare_to_copy(struct task_struct *tsk);
-+
-+/*
-+ * create a kernel thread without removing it from tasklists
-+ */
-+extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-+
-+/*
-+ * Return saved PC of a blocked thread.
-+ * What is this good for? It will always be the scheduler or ret_from_fork.
-+ */
-+#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
-+
-+extern unsigned long get_wchan(struct task_struct *p);
-+#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
-+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
-+#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
-+
-+
-+struct microcode_header {
-+ unsigned int hdrver;
-+ unsigned int rev;
-+ unsigned int date;
-+ unsigned int sig;
-+ unsigned int cksum;
-+ unsigned int ldrver;
-+ unsigned int pf;
-+ unsigned int datasize;
-+ unsigned int totalsize;
-+ unsigned int reserved[3];
-+};
-+
-+struct microcode {
-+ struct microcode_header hdr;
-+ unsigned int bits[0];
-+};
-+
-+typedef struct microcode microcode_t;
-+typedef struct microcode_header microcode_header_t;
-+
-+/* microcode format is extended from prescott processors */
-+struct extended_signature {
-+ unsigned int sig;
-+ unsigned int pf;
-+ unsigned int cksum;
-+};
-+
-+struct extended_sigtable {
-+ unsigned int count;
-+ unsigned int cksum;
-+ unsigned int reserved[3];
-+ struct extended_signature sigs[0];
-+};
-+
-+
-+#define ASM_NOP1 K8_NOP1
-+#define ASM_NOP2 K8_NOP2
-+#define ASM_NOP3 K8_NOP3
-+#define ASM_NOP4 K8_NOP4
-+#define ASM_NOP5 K8_NOP5
-+#define ASM_NOP6 K8_NOP6
-+#define ASM_NOP7 K8_NOP7
-+#define ASM_NOP8 K8_NOP8
-+
-+/* Opteron nops */
-+#define K8_NOP1 ".byte 0x90\n"
-+#define K8_NOP2 ".byte 0x66,0x90\n"
-+#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
-+#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
-+#define K8_NOP5 K8_NOP3 K8_NOP2
-+#define K8_NOP6 K8_NOP3 K8_NOP3
-+#define K8_NOP7 K8_NOP4 K8_NOP3
-+#define K8_NOP8 K8_NOP4 K8_NOP4
-+
-+#define ASM_NOP_MAX 8
-+
-+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-+static inline void rep_nop(void)
-+{
-+ __asm__ __volatile__("rep;nop": : :"memory");
-+}
-+
-+/* Stop speculative execution */
-+static inline void sync_core(void)
-+{
-+ int tmp;
-+ asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
-+}
-+
-+#define cpu_has_fpu 1
-+
-+#define ARCH_HAS_PREFETCH
-+static inline void prefetch(void *x)
-+{
-+ asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
-+}
-+
-+#define ARCH_HAS_PREFETCHW 1
-+static inline void prefetchw(void *x)
-+{
-+ alternative_input("prefetcht0 (%1)",
-+ "prefetchw (%1)",
-+ X86_FEATURE_3DNOW,
-+ "r" (x));
-+}
-+
-+#define ARCH_HAS_SPINLOCK_PREFETCH 1
-+
-+#define spin_lock_prefetch(x) prefetchw(x)
-+
-+#define cpu_relax() rep_nop()
-+
-+/*
-+ * NSC/Cyrix CPU indexed register access macros
-+ */
-+
-+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
-+
-+#define setCx86(reg, data) do { \
-+ outb((reg), 0x22); \
-+ outb((data), 0x23); \
-+} while (0)
-+
-+static inline void serialize_cpu(void)
-+{
-+ __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
-+}
-+
-+static inline void __monitor(const void *eax, unsigned long ecx,
-+ unsigned long edx)
-+{
-+ /* "monitor %eax,%ecx,%edx;" */
-+ asm volatile(
-+ ".byte 0x0f,0x01,0xc8;"
-+ : :"a" (eax), "c" (ecx), "d"(edx));
-+}
-+
-+static inline void __mwait(unsigned long eax, unsigned long ecx)
-+{
-+ /* "mwait %eax,%ecx;" */
-+ asm volatile(
-+ ".byte 0x0f,0x01,0xc9;"
-+ : :"a" (eax), "c" (ecx));
-+}
-+
-+static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
-+{
-+ /* "mwait %eax,%ecx;" */
-+ asm volatile(
-+ "sti; .byte 0x0f,0x01,0xc9;"
-+ : :"a" (eax), "c" (ecx));
-+}
-+
-+extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
-+
-+#define stack_current() \
-+({ \
-+ struct thread_info *ti; \
-+ asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
-+ ti->task; \
-+})
-+
-+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
-+
-+extern unsigned long boot_option_idle_override;
-+/* Boot loader type from the setup header */
-+extern int bootloader_type;
-+
-+#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
-+
-+#endif /* __ASM_X86_64_PROCESSOR_H */
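The io_bitmap sizing above works out to 65536 ports at one bit each, i.e. 8192 bytes or 1024 longs on x86-64, plus the trailing all-ones long that backs the extra byte the CPU reads past the end of the bitmap. A quick standalone check of the arithmetic:

#include <stdio.h>

#define IO_BITMAP_BITS	65536
#define IO_BITMAP_BYTES	(IO_BITMAP_BITS / 8)
#define IO_BITMAP_LONGS	(IO_BITMAP_BYTES / sizeof(long))

int main(void)
{
	/* One bit per I/O port, plus one trailing all-ones long for the
	 * extra byte the CPU reads beyond the bitmap's end. */
	printf("%d bits -> %d bytes -> %zu longs (+1 terminator long)\n",
	       IO_BITMAP_BITS, IO_BITMAP_BYTES, IO_BITMAP_LONGS);
	return 0;
}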
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/ptrace.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/ptrace.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/ptrace.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/ptrace.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,80 @@
-+#ifndef _X86_64_PTRACE_H
-+#define _X86_64_PTRACE_H
-+
-+#include <asm/ptrace-abi.h>
-+
-+#ifndef __ASSEMBLY__
-+
-+struct pt_regs {
-+ unsigned long r15;
-+ unsigned long r14;
-+ unsigned long r13;
-+ unsigned long r12;
-+ unsigned long rbp;
-+ unsigned long rbx;
-+/* arguments: non-interrupt/non-tracing syscalls only save up to here */
-+ unsigned long r11;
-+ unsigned long r10;
-+ unsigned long r9;
-+ unsigned long r8;
-+ unsigned long rax;
-+ unsigned long rcx;
-+ unsigned long rdx;
-+ unsigned long rsi;
-+ unsigned long rdi;
-+ unsigned long orig_rax;
-+/* end of arguments */
-+/* cpu exception frame or undefined */
-+ unsigned long rip;
-+ unsigned long cs;
-+ unsigned long eflags;
-+ unsigned long rsp;
-+ unsigned long ss;
-+/* top of stack page */
-+};
-+
-+#endif
-+
-+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
-+#define user_mode(regs) (!!((regs)->cs & 3))
-+#define user_mode_vm(regs) user_mode(regs)
-+#define instruction_pointer(regs) ((regs)->rip)
-+#define regs_return_value(regs) ((regs)->rax)
-+
-+extern unsigned long profile_pc(struct pt_regs *regs);
-+
-+#include <linux/compiler.h>
-+
-+void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
-+
-+struct task_struct;
-+
-+extern unsigned long
-+convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);
-+
-+enum {
-+ EF_CF = 0x00000001,
-+ EF_PF = 0x00000004,
-+ EF_AF = 0x00000010,
-+ EF_ZF = 0x00000040,
-+ EF_SF = 0x00000080,
-+ EF_TF = 0x00000100,
-+ EF_IE = 0x00000200,
-+ EF_DF = 0x00000400,
-+ EF_OF = 0x00000800,
-+ EF_IOPL = 0x00003000,
-+ EF_IOPL_RING0 = 0x00000000,
-+ EF_IOPL_RING1 = 0x00001000,
-+ EF_IOPL_RING2 = 0x00002000,
-+ EF_NT = 0x00004000, /* nested task */
-+ EF_RF = 0x00010000, /* resume */
-+ EF_VM = 0x00020000, /* virtual mode */
-+ EF_AC = 0x00040000, /* alignment */
-+ EF_VIF = 0x00080000, /* virtual interrupt */
-+ EF_VIP = 0x00100000, /* virtual interrupt pending */
-+ EF_ID = 0x00200000, /* id */
-+};
-+
-+#endif
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/smp.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/smp.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/smp.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/smp.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,126 @@
-+#ifndef __ASM_SMP_H
-+#define __ASM_SMP_H
-+
-+/*
-+ * We need the APIC definitions automatically as part of 'smp.h'
-+ */
-+#include <linux/threads.h>
-+#include <linux/cpumask.h>
-+#include <linux/bitops.h>
-+#include <linux/init.h>
-+#include <linux/thread_info.h>
-+extern int disable_apic;
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+#include <asm/mpspec.h>
-+#include <asm/apic.h>
-+#ifdef CONFIG_X86_IO_APIC
-+#include <asm/io_apic.h>
-+#endif
-+#endif
-+
-+#ifdef CONFIG_SMP
-+
-+#include <asm/pda.h>
-+
-+struct pt_regs;
-+
-+extern cpumask_t cpu_present_mask;
-+extern cpumask_t cpu_possible_map;
-+extern cpumask_t cpu_online_map;
-+extern cpumask_t cpu_initialized;
-+
-+/*
-+ * Private routines/data
-+ */
-+
-+extern void smp_alloc_memory(void);
-+extern volatile unsigned long smp_invalidate_needed;
-+extern void lock_ipi_call_lock(void);
-+extern void unlock_ipi_call_lock(void);
-+extern int smp_num_siblings;
-+extern void smp_send_reschedule(int cpu);
-+
-+extern cpumask_t cpu_sibling_map[NR_CPUS];
-+extern cpumask_t cpu_core_map[NR_CPUS];
-+extern u8 cpu_llc_id[NR_CPUS];
-+
-+#define SMP_TRAMPOLINE_BASE 0x6000
-+
-+/*
-+ * On x86 all CPUs are mapped 1:1 to the APIC space.
-+ * This simplifies scheduling and IPI sending and
-+ * compresses data structures.
-+ */
-+
-+static inline int num_booting_cpus(void)
-+{
-+ return cpus_weight(cpu_possible_map);
-+}
-+
-+#define raw_smp_processor_id() read_pda(cpunumber)
-+
-+extern int __cpu_disable(void);
-+extern void __cpu_die(unsigned int cpu);
-+extern void prefill_possible_map(void);
-+extern unsigned num_processors;
-+extern unsigned __cpuinitdata disabled_cpus;
-+
-+#define NO_PROC_ID 0xFF /* No processor magic marker */
-+
-+#endif /* CONFIG_SMP */
-+
-+#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
-+static inline int hard_smp_processor_id(void)
-+{
-+ /* we don't want to mark this access volatile - bad code generation */
-+ return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
-+}
-+#endif
-+
-+/*
-+ * Some lowlevel functions might want to know about
-+ * the real APIC ID <-> CPU # mapping.
-+ */
-+extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
-+extern u8 x86_cpu_to_log_apicid[NR_CPUS];
-+extern u8 bios_cpu_apicid[];
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+static inline int cpu_present_to_apicid(int mps_cpu)
-+{
-+ if (mps_cpu < NR_CPUS)
-+ return (int)bios_cpu_apicid[mps_cpu];
-+ else
-+ return BAD_APICID;
-+}
-+#endif
-+
-+#ifndef CONFIG_SMP
-+#define stack_smp_processor_id() 0
-+#define cpu_logical_map(x) (x)
-+#else
-+#include <asm/thread_info.h>
-+#define stack_smp_processor_id() \
-+({ \
-+ struct thread_info *ti; \
-+ __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
-+ ti->cpu; \
-+})
-+#endif
-+
-+#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
-+static __inline int logical_smp_processor_id(void)
-+{
-+ /* we don't want to mark this access volatile - bad code generation */
-+ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
-+}
-+#endif
-+
-+#ifdef CONFIG_SMP
-+#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
-+#else
-+#define cpu_physical_id(cpu) boot_cpu_id
-+#endif /* !CONFIG_SMP */
-+#endif
-+
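The stack_smp_processor_id() macro above recovers the current CPU by masking %rsp with CURRENT_MASK (defined elsewhere in the tree, roughly ~(THREAD_SIZE - 1)) so that the thread_info sitting at the base of the kernel stack can be read. A minimal user-space sketch of the same masking trick, not part of the patch, with invented names (toy_thread_info, the THREAD_SIZE value) and assuming a power-of-two, size-aligned stack:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define THREAD_SIZE (8 * 1024)              /* toy stack size, power of two */
#define STACK_MASK  (~(uintptr_t)(THREAD_SIZE - 1))

struct toy_thread_info { int cpu; };        /* lives at the stack base */

int main(void)
{
    /* allocate a THREAD_SIZE-aligned block standing in for a kernel stack */
    void *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
    if (!stack)
        return 1;
    struct toy_thread_info *ti = stack;
    ti->cpu = 3;

    /* pretend this is %rsp somewhere in the middle of that stack */
    uintptr_t fake_rsp = (uintptr_t)stack + 5000;

    /* masking the stack pointer recovers the thread_info at the base */
    struct toy_thread_info *found =
        (struct toy_thread_info *)(fake_rsp & STACK_MASK);
    printf("cpu = %d\n", found->cpu);        /* prints 3 */

    free(stack);
    return 0;
}
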
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/synch_bitops.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/synch_bitops.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/synch_bitops.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/synch_bitops.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,2 @@
-+
-+#include <asm-i386/mach-xen/asm/synch_bitops.h>
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/system.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/system.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/system.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/system.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,175 @@
-+#ifndef __ASM_SYSTEM_H
-+#define __ASM_SYSTEM_H
-+
-+#include <linux/kernel.h>
-+#include <asm/segment.h>
-+#include <asm/cmpxchg.h>
-+
-+#include <asm/synch_bitops.h>
-+#include <asm/hypervisor.h>
-+#include <xen/interface/arch-x86_64.h>
-+
-+#ifdef __KERNEL__
-+
-+#ifdef CONFIG_SMP
-+#define __vcpu_id smp_processor_id()
-+#else
-+#define __vcpu_id 0
-+#endif
-+
-+#define __STR(x) #x
-+#define STR(x) __STR(x)
-+
-+#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
-+#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
-+
-+/* frame pointer must be last for get_wchan */
-+#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
-+
-+#define __EXTRA_CLOBBER \
-+ ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
-+
-+/* Save and restore flags so that a leaking NT bit is cleared across the switch */
-+#define switch_to(prev,next,last) \
-+ asm volatile(SAVE_CONTEXT \
-+ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
-+ "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
-+ "call __switch_to\n\t" \
-+ ".globl thread_return\n" \
-+ "thread_return:\n\t" \
-+ "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
-+ "movq %P[thread_info](%%rsi),%%r8\n\t" \
-+ LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
-+ "movq %%rax,%%rdi\n\t" \
-+ "jc ret_from_fork\n\t" \
-+ RESTORE_CONTEXT \
-+ : "=a" (last) \
-+ : [next] "S" (next), [prev] "D" (prev), \
-+ [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
-+ [ti_flags] "i" (offsetof(struct thread_info, flags)),\
-+ [tif_fork] "i" (TIF_FORK), \
-+ [thread_info] "i" (offsetof(struct task_struct, stack)), \
-+ [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
-+ : "memory", "cc" __EXTRA_CLOBBER)
-+
-+extern void load_gs_index(unsigned);
-+
-+/*
-+ * Load a segment. Fall back on loading the zero
-+ * segment if something goes wrong..
-+ */
-+#define loadsegment(seg,value) \
-+ asm volatile("\n" \
-+ "1:\t" \
-+ "movl %k0,%%" #seg "\n" \
-+ "2:\n" \
-+ ".section .fixup,\"ax\"\n" \
-+ "3:\t" \
-+ "movl %1,%%" #seg "\n\t" \
-+ "jmp 2b\n" \
-+ ".previous\n" \
-+ ".section __ex_table,\"a\"\n\t" \
-+ ".align 8\n\t" \
-+ ".quad 1b,3b\n" \
-+ ".previous" \
-+ : :"r" (value), "r" (0))
-+
-+/*
-+ * Clear and set 'TS' bit respectively
-+ */
-+#define clts() (HYPERVISOR_fpu_taskswitch(0))
-+
-+static inline unsigned long read_cr0(void)
-+{
-+ unsigned long cr0;
-+ asm volatile("movq %%cr0,%0" : "=r" (cr0));
-+ return cr0;
-+}
-+
-+static inline void write_cr0(unsigned long val)
-+{
-+ asm volatile("movq %0,%%cr0" :: "r" (val));
-+}
-+
-+#define read_cr3() ({ \
-+ unsigned long __dummy; \
-+ asm("movq %%cr3,%0" : "=r" (__dummy)); \
-+ machine_to_phys(__dummy); \
-+})
-+
-+static inline void write_cr3(unsigned long val)
-+{
-+ val = phys_to_machine(val);
-+ asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
-+}
-+
-+static inline unsigned long read_cr4(void)
-+{
-+ unsigned long cr4;
-+ asm("movq %%cr4,%0" : "=r" (cr4));
-+ return cr4;
-+}
-+
-+static inline void write_cr4(unsigned long val)
-+{
-+ asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
-+}
-+
-+#define stts() (HYPERVISOR_fpu_taskswitch(1))
-+
-+#define wbinvd() \
-+ __asm__ __volatile__ ("wbinvd": : :"memory");
-+
-+/*
-+ * On SMP systems, when the scheduler does migration-cost autodetection,
-+ * it needs a way to flush as much of the CPU's caches as possible.
-+ */
-+static inline void sched_cacheflush(void)
-+{
-+ wbinvd();
-+}
-+
-+#endif /* __KERNEL__ */
-+
-+#define nop() __asm__ __volatile__ ("nop")
-+
-+#ifdef CONFIG_SMP
-+#define smp_mb() mb()
-+#define smp_rmb() rmb()
-+#define smp_wmb() wmb()
-+#define smp_read_barrier_depends() do {} while(0)
-+#else
-+#define smp_mb() barrier()
-+#define smp_rmb() barrier()
-+#define smp_wmb() barrier()
-+#define smp_read_barrier_depends() do {} while(0)
-+#endif
-+
-+
-+/*
-+ * Force strict CPU ordering.
-+ * And yes, this is required on UP too when we're talking
-+ * to devices.
-+ */
-+#define mb() asm volatile("mfence":::"memory")
-+#define rmb() asm volatile("lfence":::"memory")
-+
-+#ifdef CONFIG_UNORDERED_IO
-+#define wmb() asm volatile("sfence" ::: "memory")
-+#else
-+#define wmb() asm volatile("" ::: "memory")
-+#endif
-+#define read_barrier_depends() do {} while(0)
-+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-+
-+#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
-+
-+#include <linux/irqflags.h>
-+
-+void cpu_idle_wait(void);
-+
-+extern unsigned long arch_align_stack(unsigned long sp);
-+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-+
-+#endif
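In this Xen variant of system.h, %cr3 holds a machine (host) frame address while the rest of the kernel works with pseudo-physical addresses, so read_cr3() runs the register value through machine_to_phys() and write_cr3() runs its argument through phys_to_machine(). A standalone toy model of that round trip, not part of the patch; the tables and frame numbers below are invented purely for illustration:

#include <stdio.h>

/* Toy 4-entry phys<->machine frame tables; values are invented. */
static unsigned long p2m[4] = { 7, 2, 9, 4 };  /* pseudo-phys frame -> machine frame */
static unsigned long m2p[16];                  /* machine frame -> pseudo-phys frame */

static unsigned long phys_to_machine_frame(unsigned long pfn) { return p2m[pfn]; }
static unsigned long machine_to_phys_frame(unsigned long mfn) { return m2p[mfn]; }

int main(void)
{
    for (unsigned long pfn = 0; pfn < 4; pfn++)
        m2p[p2m[pfn]] = pfn;                   /* build the inverse mapping */

    unsigned long pfn = 2;                     /* what the kernel asks for */
    unsigned long mfn = phys_to_machine_frame(pfn);  /* what goes into %cr3 */
    printf("write_cr3: pfn %lu -> mfn %lu\n", pfn, mfn);
    printf("read_cr3:  mfn %lu -> pfn %lu\n", mfn, machine_to_phys_frame(mfn));
    return 0;
}
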
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/time.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/time.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/time.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/time.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1 @@
-+#include <asm-i386/time.h>
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/timer.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/timer.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/timer.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/timer.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,7 @@
-+#ifndef _ASMi386_TIMER_H
-+#define _ASMi386_TIMER_H
-+#include <linux/init.h>
-+
-+#define TICK_SIZE (tick_nsec / 1000)
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/tlbflush.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/tlbflush.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/tlbflush.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/tlbflush.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,105 @@
-+#ifndef _X8664_TLBFLUSH_H
-+#define _X8664_TLBFLUSH_H
-+
-+#include <linux/mm.h>
-+#include <linux/sched.h>
-+#include <asm/processor.h>
-+#include <asm/system.h>
-+
-+#define __flush_tlb() xen_tlb_flush()
-+
-+/*
-+ * Global pages have to be flushed a bit differently. Not a real
-+ * performance problem because this does not happen often.
-+ */
-+#define __flush_tlb_global() xen_tlb_flush()
-+
-+
-+extern unsigned long pgkern_mask;
-+
-+#define __flush_tlb_all() __flush_tlb_global()
-+
-+#define __flush_tlb_one(addr) xen_invlpg((unsigned long)addr)
-+
-+
-+/*
-+ * TLB flushing:
-+ *
-+ * - flush_tlb() flushes the current mm struct TLBs
-+ * - flush_tlb_all() flushes all processes TLBs
-+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
-+ * - flush_tlb_page(vma, vmaddr) flushes one page
-+ * - flush_tlb_range(vma, start, end) flushes a range of pages
-+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
-+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
-+ *
-+ * x86-64 can only flush individual pages or full VMs. For a range flush
-+ * we always do the full VM. Might be worth trying if for a small
-+ * range a few INVLPGs in a row are a win.
-+ */
-+
-+#ifndef CONFIG_SMP
-+
-+#define flush_tlb() __flush_tlb()
-+#define flush_tlb_all() __flush_tlb_all()
-+#define local_flush_tlb() __flush_tlb()
-+
-+static inline void flush_tlb_mm(struct mm_struct *mm)
-+{
-+ if (mm == current->active_mm)
-+ __flush_tlb();
-+}
-+
-+static inline void flush_tlb_page(struct vm_area_struct *vma,
-+ unsigned long addr)
-+{
-+ if (vma->vm_mm == current->active_mm)
-+ __flush_tlb_one(addr);
-+}
-+
-+static inline void flush_tlb_range(struct vm_area_struct *vma,
-+ unsigned long start, unsigned long end)
-+{
-+ if (vma->vm_mm == current->active_mm)
-+ __flush_tlb();
-+}
-+
-+#else
-+
-+#include <asm/smp.h>
-+
-+#define local_flush_tlb() \
-+ __flush_tlb()
-+
-+extern void flush_tlb_all(void);
-+extern void flush_tlb_current_task(void);
-+extern void flush_tlb_mm(struct mm_struct *);
-+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-+
-+#define flush_tlb() flush_tlb_current_task()
-+
-+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
-+{
-+ flush_tlb_mm(vma->vm_mm);
-+}
-+
-+#define TLBSTATE_OK 1
-+#define TLBSTATE_LAZY 2
-+
-+/* Roughly an IPI every 20MB with 4k pages for freeing page table
-+ ranges. Cost is about 42k of memory for each CPU. */
-+#define ARCH_FREE_PTE_NR 5350
-+
-+#endif
-+
-+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
-+
-+static inline void flush_tlb_pgtables(struct mm_struct *mm,
-+ unsigned long start, unsigned long end)
-+{
-+ /* x86_64 does not keep any page table caches in a software TLB.
-+ The CPUs do in their hardware TLBs, but they are handled
-+ by the normal TLB flushing algorithms. */
-+}
-+
-+#endif /* _X8664_TLBFLUSH_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/vga.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/vga.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/vga.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/vga.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,20 @@
-+/*
-+ * Access to VGA videoram
-+ *
-+ * (c) 1998 Martin Mares <mj@ucw.cz>
-+ */
-+
-+#ifndef _LINUX_ASM_VGA_H_
-+#define _LINUX_ASM_VGA_H_
-+
-+/*
-+ * On the PC, we can just recalculate addresses and then
-+ * access the videoram directly without any black magic.
-+ */
-+
-+#define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x)
-+
-+#define vga_readb(x) (*(x))
-+#define vga_writeb(x,y) (*(y) = (x))
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/xenoprof.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/xenoprof.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/xenoprof.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/xenoprof.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1 @@
-+#include <asm-i386/mach-xen/asm/xenoprof.h>
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/xor.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/xor.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/asm/xor.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/asm/xor.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,328 @@
-+/*
-+ * x86-64 changes / gcc fixes from Andi Kleen.
-+ * Copyright 2002 Andi Kleen, SuSE Labs.
-+ *
-+ * This hasn't been optimized for the hammer yet, but there are likely
-+ * no advantages to be gotten from x86-64 here anyways.
-+ */
-+
-+typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
-+
-+/* Doesn't use gcc to save the XMM registers, because there is no easy way to
-+ tell it to do a clts before the register saving. */
-+#define XMMS_SAVE do { \
-+ preempt_disable(); \
-+ if (!(current_thread_info()->status & TS_USEDFPU)) \
-+ clts(); \
-+ __asm__ __volatile__ ( \
-+ "movups %%xmm0,(%1) ;\n\t" \
-+ "movups %%xmm1,0x10(%1) ;\n\t" \
-+ "movups %%xmm2,0x20(%1) ;\n\t" \
-+ "movups %%xmm3,0x30(%1) ;\n\t" \
-+ : "=&r" (cr0) \
-+ : "r" (xmm_save) \
-+ : "memory"); \
-+} while(0)
-+
-+#define XMMS_RESTORE do { \
-+ asm volatile ( \
-+ "sfence ;\n\t" \
-+ "movups (%1),%%xmm0 ;\n\t" \
-+ "movups 0x10(%1),%%xmm1 ;\n\t" \
-+ "movups 0x20(%1),%%xmm2 ;\n\t" \
-+ "movups 0x30(%1),%%xmm3 ;\n\t" \
-+ : \
-+ : "r" (cr0), "r" (xmm_save) \
-+ : "memory"); \
-+ if (!(current_thread_info()->status & TS_USEDFPU)) \
-+ stts(); \
-+ preempt_enable(); \
-+} while(0)
-+
-+#define OFFS(x) "16*("#x")"
-+#define PF_OFFS(x) "256+16*("#x")"
-+#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
-+#define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
-+#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
-+#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
-+#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
-+#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
-+#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
-+#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
-+#define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
-+#define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
-+#define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
-+#define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
-+#define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
-+
-+
-+static void
-+xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-+{
-+ unsigned int lines = bytes >> 8;
-+ unsigned long cr0;
-+ xmm_store_t xmm_save[4];
-+
-+ XMMS_SAVE;
-+
-+ asm volatile (
-+#undef BLOCK
-+#define BLOCK(i) \
-+ LD(i,0) \
-+ LD(i+1,1) \
-+ PF1(i) \
-+ PF1(i+2) \
-+ LD(i+2,2) \
-+ LD(i+3,3) \
-+ PF0(i+4) \
-+ PF0(i+6) \
-+ XO1(i,0) \
-+ XO1(i+1,1) \
-+ XO1(i+2,2) \
-+ XO1(i+3,3) \
-+ ST(i,0) \
-+ ST(i+1,1) \
-+ ST(i+2,2) \
-+ ST(i+3,3) \
-+
-+
-+ PF0(0)
-+ PF0(2)
-+
-+ " .align 32 ;\n"
-+ " 1: ;\n"
-+
-+ BLOCK(0)
-+ BLOCK(4)
-+ BLOCK(8)
-+ BLOCK(12)
-+
-+ " addq %[inc], %[p1] ;\n"
-+ " addq %[inc], %[p2] ;\n"
-+ " decl %[cnt] ; jnz 1b"
-+ : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
-+ : [inc] "r" (256UL)
-+ : "memory");
-+
-+ XMMS_RESTORE;
-+}
-+
-+static void
-+xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-+ unsigned long *p3)
-+{
-+ unsigned int lines = bytes >> 8;
-+ xmm_store_t xmm_save[4];
-+ unsigned long cr0;
-+
-+ XMMS_SAVE;
-+
-+ __asm__ __volatile__ (
-+#undef BLOCK
-+#define BLOCK(i) \
-+ PF1(i) \
-+ PF1(i+2) \
-+ LD(i,0) \
-+ LD(i+1,1) \
-+ LD(i+2,2) \
-+ LD(i+3,3) \
-+ PF2(i) \
-+ PF2(i+2) \
-+ PF0(i+4) \
-+ PF0(i+6) \
-+ XO1(i,0) \
-+ XO1(i+1,1) \
-+ XO1(i+2,2) \
-+ XO1(i+3,3) \
-+ XO2(i,0) \
-+ XO2(i+1,1) \
-+ XO2(i+2,2) \
-+ XO2(i+3,3) \
-+ ST(i,0) \
-+ ST(i+1,1) \
-+ ST(i+2,2) \
-+ ST(i+3,3) \
-+
-+
-+ PF0(0)
-+ PF0(2)
-+
-+ " .align 32 ;\n"
-+ " 1: ;\n"
-+
-+ BLOCK(0)
-+ BLOCK(4)
-+ BLOCK(8)
-+ BLOCK(12)
-+
-+ " addq %[inc], %[p1] ;\n"
-+ " addq %[inc], %[p2] ;\n"
-+ " addq %[inc], %[p3] ;\n"
-+ " decl %[cnt] ; jnz 1b"
-+ : [cnt] "+r" (lines),
-+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
-+ : [inc] "r" (256UL)
-+ : "memory");
-+ XMMS_RESTORE;
-+}
-+
-+static void
-+xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-+ unsigned long *p3, unsigned long *p4)
-+{
-+ unsigned int lines = bytes >> 8;
-+ xmm_store_t xmm_save[4];
-+ unsigned long cr0;
-+
-+ XMMS_SAVE;
-+
-+ __asm__ __volatile__ (
-+#undef BLOCK
-+#define BLOCK(i) \
-+ PF1(i) \
-+ PF1(i+2) \
-+ LD(i,0) \
-+ LD(i+1,1) \
-+ LD(i+2,2) \
-+ LD(i+3,3) \
-+ PF2(i) \
-+ PF2(i+2) \
-+ XO1(i,0) \
-+ XO1(i+1,1) \
-+ XO1(i+2,2) \
-+ XO1(i+3,3) \
-+ PF3(i) \
-+ PF3(i+2) \
-+ PF0(i+4) \
-+ PF0(i+6) \
-+ XO2(i,0) \
-+ XO2(i+1,1) \
-+ XO2(i+2,2) \
-+ XO2(i+3,3) \
-+ XO3(i,0) \
-+ XO3(i+1,1) \
-+ XO3(i+2,2) \
-+ XO3(i+3,3) \
-+ ST(i,0) \
-+ ST(i+1,1) \
-+ ST(i+2,2) \
-+ ST(i+3,3) \
-+
-+
-+ PF0(0)
-+ PF0(2)
-+
-+ " .align 32 ;\n"
-+ " 1: ;\n"
-+
-+ BLOCK(0)
-+ BLOCK(4)
-+ BLOCK(8)
-+ BLOCK(12)
-+
-+ " addq %[inc], %[p1] ;\n"
-+ " addq %[inc], %[p2] ;\n"
-+ " addq %[inc], %[p3] ;\n"
-+ " addq %[inc], %[p4] ;\n"
-+ " decl %[cnt] ; jnz 1b"
-+ : [cnt] "+c" (lines),
-+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
-+ : [inc] "r" (256UL)
-+ : "memory" );
-+
-+ XMMS_RESTORE;
-+}
-+
-+static void
-+xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
-+{
-+ unsigned int lines = bytes >> 8;
-+ xmm_store_t xmm_save[4];
-+ unsigned long cr0;
-+
-+ XMMS_SAVE;
-+
-+ __asm__ __volatile__ (
-+#undef BLOCK
-+#define BLOCK(i) \
-+ PF1(i) \
-+ PF1(i+2) \
-+ LD(i,0) \
-+ LD(i+1,1) \
-+ LD(i+2,2) \
-+ LD(i+3,3) \
-+ PF2(i) \
-+ PF2(i+2) \
-+ XO1(i,0) \
-+ XO1(i+1,1) \
-+ XO1(i+2,2) \
-+ XO1(i+3,3) \
-+ PF3(i) \
-+ PF3(i+2) \
-+ XO2(i,0) \
-+ XO2(i+1,1) \
-+ XO2(i+2,2) \
-+ XO2(i+3,3) \
-+ PF4(i) \
-+ PF4(i+2) \
-+ PF0(i+4) \
-+ PF0(i+6) \
-+ XO3(i,0) \
-+ XO3(i+1,1) \
-+ XO3(i+2,2) \
-+ XO3(i+3,3) \
-+ XO4(i,0) \
-+ XO4(i+1,1) \
-+ XO4(i+2,2) \
-+ XO4(i+3,3) \
-+ ST(i,0) \
-+ ST(i+1,1) \
-+ ST(i+2,2) \
-+ ST(i+3,3) \
-+
-+
-+ PF0(0)
-+ PF0(2)
-+
-+ " .align 32 ;\n"
-+ " 1: ;\n"
-+
-+ BLOCK(0)
-+ BLOCK(4)
-+ BLOCK(8)
-+ BLOCK(12)
-+
-+ " addq %[inc], %[p1] ;\n"
-+ " addq %[inc], %[p2] ;\n"
-+ " addq %[inc], %[p3] ;\n"
-+ " addq %[inc], %[p4] ;\n"
-+ " addq %[inc], %[p5] ;\n"
-+ " decl %[cnt] ; jnz 1b"
-+ : [cnt] "+c" (lines),
-+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
-+ [p5] "+r" (p5)
-+ : [inc] "r" (256UL)
-+ : "memory");
-+
-+ XMMS_RESTORE;
-+}
-+
-+static struct xor_block_template xor_block_sse = {
-+ .name = "generic_sse",
-+ .do_2 = xor_sse_2,
-+ .do_3 = xor_sse_3,
-+ .do_4 = xor_sse_4,
-+ .do_5 = xor_sse_5,
-+};
-+
-+#undef XOR_TRY_TEMPLATES
-+#define XOR_TRY_TEMPLATES \
-+ do { \
-+ xor_speed(&xor_block_sse); \
-+ } while (0)
-+
-+/* We force the use of the SSE xor block because it can write around L2.
-+ We may also be able to load into the L1 only depending on how the cpu
-+ deals with a load to a line that is being prefetched. */
-+#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
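xor_sse_2() above XORs the second buffer into the first, one 256-byte "line" per loop iteration (bytes >> 8 iterations), using non-temporal prefetches and the four saved XMM registers. A plain-C reference for the same data transformation, not part of the patch and with no attempt to match the SSE version's cache behaviour:

#include <stdio.h>

/* Plain-C equivalent of what xor_sse_2() computes: p1[i] ^= p2[i] over
 * `bytes` bytes, processed one word at a time instead of 256-byte blocks. */
static void xor_2_ref(unsigned long bytes, unsigned long *p1,
                      const unsigned long *p2)
{
    unsigned long words = bytes / sizeof(unsigned long);
    for (unsigned long i = 0; i < words; i++)
        p1[i] ^= p2[i];
}

int main(void)
{
    unsigned long a[4] = { 0xff00, 0x1234, 0, 42 };
    unsigned long b[4] = { 0x00ff, 0x1234, 7, 0 };
    xor_2_ref(sizeof(a), a, b);
    printf("%lx %lx %lx %lx\n", a[0], a[1], a[2], a[3]);  /* ffff 0 7 2a */
    return 0;
}
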
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/irq_vectors.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/irq_vectors.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/irq_vectors.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/irq_vectors.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,63 @@
-+/*
-+ * This file should contain #defines for all of the interrupt vector
-+ * numbers used by this architecture.
-+ *
-+ * In addition, there are some standard defines:
-+ *
-+ * SYSCALL_VECTOR:
-+ *	The IRQ vector under which a syscall makes the
-+ *	user-to-kernel transition.
-+ *
-+ * NR_IRQS:
-+ * The total number of interrupt vectors (including all the
-+ * architecture specific interrupts) needed.
-+ *
-+ */
-+#ifndef _ASM_IRQ_VECTORS_H
-+#define _ASM_IRQ_VECTORS_H
-+
-+#define SYSCALL_VECTOR 0x80
-+
-+#define RESCHEDULE_VECTOR 0
-+#define CALL_FUNCTION_VECTOR 1
-+#define NR_IPIS 2
-+
-+/*
-+ * The maximum number of vectors supported by i386 processors
-+ * is limited to 256. For processors other than i386, NR_VECTORS
-+ * should be changed accordingly.
-+ */
-+#define NR_VECTORS 256
-+
-+#define FPU_IRQ 13
-+
-+#define FIRST_VM86_IRQ 3
-+#define LAST_VM86_IRQ 15
-+#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
-+
-+/*
-+ * The flat IRQ space is divided into two regions:
-+ * 1. A one-to-one mapping of real physical IRQs. This space is only used
-+ * if we have physical device-access privilege. This region is at the
-+ * start of the IRQ space so that existing device drivers do not need
-+ * to be modified to translate physical IRQ numbers into our IRQ space.
-+ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
-+ * are bound using the provided bind/unbind functions.
-+ */
-+
-+#define PIRQ_BASE 0
-+#define NR_PIRQS 256
-+
-+#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
-+#define NR_DYNIRQS 256
-+
-+#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
-+#define NR_IRQ_VECTORS NR_IRQS
-+
-+#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
-+#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
-+
-+#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
-+#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
-+
-+#endif /* _ASM_IRQ_VECTORS_H */
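The flat IRQ space defined above places the 256 physical IRQs first and the 256 dynamic (event-channel-backed) IRQs directly after them, so the conversion macros are plain offsets. A standalone check of the layout, not part of the patch:

#include <stdio.h>

#define PIRQ_BASE    0
#define NR_PIRQS     256
#define DYNIRQ_BASE  (PIRQ_BASE + NR_PIRQS)
#define NR_DYNIRQS   256
#define NR_IRQS      (NR_PIRQS + NR_DYNIRQS)

#define pirq_to_irq(x)    ((x) + PIRQ_BASE)
#define dynirq_to_irq(x)  ((x) + DYNIRQ_BASE)

int main(void)
{
    /* A physical IRQ keeps its number, so existing drivers need no change... */
    printf("pirq 14  -> irq %d\n", pirq_to_irq(14));    /* 14 */
    /* ...while a dynamic IRQ lands after the whole physical range. */
    printf("dynirq 3 -> irq %d\n", dynirq_to_irq(3));   /* 259 */
    printf("NR_IRQS = %d\n", NR_IRQS);                  /* 512 */
    return 0;
}
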
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/mach_time.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/mach_time.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/mach_time.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/mach_time.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,111 @@
-+/*
-+ * include/asm-i386/mach-default/mach_time.h
-+ *
-+ * Machine specific set RTC function for generic.
-+ * Split out from time.c by Osamu Tomita <tomita@cinet.co.jp>
-+ */
-+#ifndef _MACH_TIME_H
-+#define _MACH_TIME_H
-+
-+#include <asm-i386/mc146818rtc.h>
-+
-+/* for check timing call set_rtc_mmss() 500ms */
-+/* used in arch/i386/time.c::do_timer_interrupt() */
-+#define USEC_AFTER 500000
-+#define USEC_BEFORE 500000
-+
-+/*
-+ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
-+ * called 500 ms after the second nowtime has started, because when
-+ * nowtime is written into the registers of the CMOS clock, it will
-+ * jump to the next second precisely 500 ms later. Check the Motorola
-+ * MC146818A or Dallas DS12887 data sheet for details.
-+ *
-+ * BUG: This routine does not handle hour overflow properly; it just
-+ * sets the minutes. Usually you'll only notice that after reboot!
-+ */
-+static inline int mach_set_rtc_mmss(unsigned long nowtime)
-+{
-+ int retval = 0;
-+ int real_seconds, real_minutes, cmos_minutes;
-+ unsigned char save_control, save_freq_select;
-+
-+ save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
-+ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
-+
-+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
-+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
-+
-+ cmos_minutes = CMOS_READ(RTC_MINUTES);
-+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-+ BCD_TO_BIN(cmos_minutes);
-+
-+ /*
-+ * since we're only adjusting minutes and seconds,
-+ * don't interfere with hour overflow. This avoids
-+ * messing with unknown time zones but requires your
-+ * RTC not to be off by more than 15 minutes
-+ */
-+ real_seconds = nowtime % 60;
-+ real_minutes = nowtime / 60;
-+ if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
-+ real_minutes += 30; /* correct for half hour time zone */
-+ real_minutes %= 60;
-+
-+ if (abs(real_minutes - cmos_minutes) < 30) {
-+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-+ BIN_TO_BCD(real_seconds);
-+ BIN_TO_BCD(real_minutes);
-+ }
-+ CMOS_WRITE(real_seconds,RTC_SECONDS);
-+ CMOS_WRITE(real_minutes,RTC_MINUTES);
-+ } else {
-+ printk(KERN_WARNING
-+ "set_rtc_mmss: can't update from %d to %d\n",
-+ cmos_minutes, real_minutes);
-+ retval = -1;
-+ }
-+
-+ /* The following flags have to be released exactly in this order,
-+ * otherwise the DS12887 (popular MC146818A clone with integrated
-+ * battery and quartz) will not reset the oscillator and will not
-+ * update precisely 500 ms later. You won't find this mentioned in
-+ * the Dallas Semiconductor data sheets, but who believes data
-+ * sheets anyway ... -- Markus Kuhn
-+ */
-+ CMOS_WRITE(save_control, RTC_CONTROL);
-+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-+
-+ return retval;
-+}
-+
-+static inline unsigned long mach_get_cmos_time(void)
-+{
-+ unsigned int year, mon, day, hour, min, sec;
-+
-+ do {
-+ sec = CMOS_READ(RTC_SECONDS);
-+ min = CMOS_READ(RTC_MINUTES);
-+ hour = CMOS_READ(RTC_HOURS);
-+ day = CMOS_READ(RTC_DAY_OF_MONTH);
-+ mon = CMOS_READ(RTC_MONTH);
-+ year = CMOS_READ(RTC_YEAR);
-+ } while (sec != CMOS_READ(RTC_SECONDS));
-+
-+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-+ BCD_TO_BIN(sec);
-+ BCD_TO_BIN(min);
-+ BCD_TO_BIN(hour);
-+ BCD_TO_BIN(day);
-+ BCD_TO_BIN(mon);
-+ BCD_TO_BIN(year);
-+ }
-+
-+ year += 1900;
-+ if (year < 1970)
-+ year += 100;
-+
-+ return mktime(year, mon, day, hour, min, sec);
-+}
-+
-+#endif /* !_MACH_TIME_H */
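Both helpers above convert between binary values and the BCD encoding that MC146818-style RTCs use when RTC_DM_BINARY is clear. The BCD_TO_BIN/BIN_TO_BCD macros live in the RTC headers elsewhere in the tree; the equivalent nibble arithmetic is shown here as a standalone sketch that is not part of the patch:

#include <stdio.h>

/* The RTC stores e.g. decimal 59 as the byte 0x59 when in BCD mode. */
static unsigned int bcd_to_bin(unsigned int v) { return (v & 0x0f) + (v >> 4) * 10; }
static unsigned int bin_to_bcd(unsigned int v) { return ((v / 10) << 4) | (v % 10); }

int main(void)
{
    printf("0x%02x -> %u\n", 0x59, bcd_to_bin(0x59));   /* 0x59 -> 59 */
    printf("%u -> 0x%02x\n", 42, bin_to_bcd(42));       /* 42 -> 0x42 */
    return 0;
}
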
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/mach_timer.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/mach_timer.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/mach_timer.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/mach_timer.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,50 @@
-+/*
-+ * include/asm-i386/mach-default/mach_timer.h
-+ *
-+ * Machine specific calibrate_tsc() for generic.
-+ * Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp>
-+ */
-+/* ------ Calibrate the TSC -------
-+ * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
-+ * Too much 64-bit arithmetic here to do this cleanly in C, and for
-+ * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
-+ * output busy loop as low as possible. We avoid reading the CTC registers
-+ * directly because of the awkward 8-bit access mechanism of the 82C54
-+ * device.
-+ */
-+#ifndef _MACH_TIMER_H
-+#define _MACH_TIMER_H
-+
-+#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
-+#define CALIBRATE_LATCH \
-+ ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
-+
-+static inline void mach_prepare_counter(void)
-+{
-+ /* Set the Gate high, disable speaker */
-+ outb((inb(0x61) & ~0x02) | 0x01, 0x61);
-+
-+ /*
-+ * Now let's take care of CTC channel 2
-+ *
-+ * Set the Gate high, program CTC channel 2 for mode 0,
-+ * (interrupt on terminal count mode), binary count,
-+ * load 5 * LATCH count, (LSB and MSB) to begin countdown.
-+ *
-+ * Some devices need a delay here.
-+ */
-+ outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */
-+ outb_p(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */
-+ outb_p(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */
-+}
-+
-+static inline void mach_countup(unsigned long *count_p)
-+{
-+ unsigned long count = 0;
-+ do {
-+ count++;
-+ } while ((inb_p(0x61) & 0x20) == 0);
-+ *count_p = count;
-+}
-+
-+#endif /* !_MACH_TIMER_H */
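CALIBRATE_LATCH programs PIT channel 2 to count down for CALIBRATE_TIME_MSEC. CLOCK_TICK_RATE is defined elsewhere in the tree; assuming the standard i8254 input clock of 1193182 Hz, the 30 ms window works out to 35795 ticks. A standalone check of the arithmetic, not part of the patch:

#include <stdio.h>

#define CLOCK_TICK_RATE       1193182   /* standard i8254 PIT input clock in Hz (assumed) */
#define CALIBRATE_TIME_MSEC   30
#define CALIBRATE_LATCH \
        ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2) / 1000)

int main(void)
{
    /* 1193182 * 30 / 1000, rounded to nearest: 35795 PIT ticks per 30 ms window */
    printf("CALIBRATE_LATCH = %ld\n", (long)CALIBRATE_LATCH);
    return 0;
}
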
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/setup_arch_post.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/setup_arch_post.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/setup_arch_post.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/setup_arch_post.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,59 @@
-+/**
-+ * machine_specific_* - Hooks for machine specific setup.
-+ *
-+ * Description:
-+ * This is included late in kernel/setup.c so that it can make
-+ * use of all of the static functions.
-+ **/
-+
-+#include <xen/interface/callback.h>
-+
-+extern void hypervisor_callback(void);
-+extern void failsafe_callback(void);
-+extern void nmi(void);
-+
-+static void __init machine_specific_arch_setup(void)
-+{
-+ int ret;
-+ static struct callback_register __initdata event = {
-+ .type = CALLBACKTYPE_event,
-+ .address = (unsigned long) hypervisor_callback,
-+ };
-+ static struct callback_register __initdata failsafe = {
-+ .type = CALLBACKTYPE_failsafe,
-+ .address = (unsigned long)failsafe_callback,
-+ };
-+ static struct callback_register __initdata syscall = {
-+ .type = CALLBACKTYPE_syscall,
-+ .address = (unsigned long)system_call,
-+ };
-+ static struct callback_register __initdata nmi_cb = {
-+ .type = CALLBACKTYPE_nmi,
-+ .address = (unsigned long)nmi,
-+ };
-+
-+ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
-+ if (ret == 0)
-+ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
-+ if (ret == 0)
-+ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &syscall);
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (ret == -ENOSYS)
-+ ret = HYPERVISOR_set_callbacks(
-+ event.address,
-+ failsafe.address,
-+ syscall.address);
-+#endif
-+ BUG_ON(ret);
-+
-+ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
-+#if CONFIG_XEN_COMPAT <= 0x030002
-+ if (ret == -ENOSYS) {
-+ static struct xennmi_callback __initdata cb = {
-+ .handler_address = (unsigned long)nmi
-+ };
-+
-+ HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
-+ }
-+#endif
-+}
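machine_specific_arch_setup() above registers the event, failsafe, syscall and NMI callbacks via CALLBACKOP_register and, when built with CONFIG_XEN_COMPAT <= 0x030002, falls back to the older HYPERVISOR_set_callbacks/HYPERVISOR_nmi_op interfaces if the newer op returns -ENOSYS. A standalone sketch of that try-new-then-fall-back pattern, not part of the patch; both "hypercalls" below are fakes used only to show the control flow:

#include <stdio.h>
#include <errno.h>

/* Stand-ins for the two hypercall generations; neither is a real Xen API. */
static int new_style_register(void) { return -ENOSYS; }  /* hypervisor too old */
static int old_style_register(void) { return 0; }

int main(void)
{
    int ret = new_style_register();
    if (ret == -ENOSYS) {
        /* Hypervisor predates the newer op: use the legacy call instead. */
        ret = old_style_register();
        printf("fell back to legacy interface, ret=%d\n", ret);
    }
    return ret ? 1 : 0;
}
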
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/mach-xen/setup_arch_pre.h ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/setup_arch_pre.h
---- ubuntu-gutsy/include/asm-x86_64/mach-xen/setup_arch_pre.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/asm-x86_64/mach-xen/setup_arch_pre.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,5 @@
-+/* Hook to call BIOS initialisation function */
-+
-+#define ARCH_SETUP machine_specific_arch_setup();
-+
-+static void __init machine_specific_arch_setup(void);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/asm-x86_64/thread_info.h ubuntu-gutsy-xen/include/asm-x86_64/thread_info.h
---- ubuntu-gutsy/include/asm-x86_64/thread_info.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/asm-x86_64/thread_info.h 2007-08-18 12:38:02.000000000 -0400
-@@ -147,7 +147,11 @@
- #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
-
- /* flags to check in __switch_to() */
-+#ifndef CONFIG_XEN
- #define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP)
-+#else
-+#define _TIF_WORK_CTXSW _TIF_DEBUG
-+#endif
-
- #define PREEMPT_ACTIVE 0x10000000
-
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/linux/aio.h ubuntu-gutsy-xen/include/linux/aio.h
---- ubuntu-gutsy/include/linux/aio.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/linux/aio.h 2007-08-18 12:38:02.000000000 -0400
-@@ -201,6 +201,11 @@
- struct aio_ring_info ring_info;
-
- struct delayed_work wq;
-+#ifdef CONFIG_EPOLL
-+ // poll integration
-+ wait_queue_head_t poll_wait;
-+ struct file *file;
-+#endif
- };
-
- /* prototypes */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/linux/console.h ubuntu-gutsy-xen/include/linux/console.h
---- ubuntu-gutsy/include/linux/console.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/linux/console.h 2007-08-18 12:38:02.000000000 -0400
-@@ -63,6 +63,7 @@
- extern const struct consw vga_con; /* VGA text console */
- extern const struct consw newport_con; /* SGI Newport console */
- extern const struct consw prom_con; /* SPARC PROM console */
-+extern int console_use_vt;
-
- int con_is_bound(const struct consw *csw);
- int register_con_driver(const struct consw *csw, int first, int last);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/linux/elfnote.h ubuntu-gutsy-xen/include/linux/elfnote.h
---- ubuntu-gutsy/include/linux/elfnote.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/linux/elfnote.h 2007-08-18 12:38:02.000000000 -0400
-@@ -38,7 +38,7 @@
- * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two")
- * ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
- */
--#define ELFNOTE(name, type, desctype, descdata) \
-+#define ELFNOTE(name, type, desctype, descdata...) \
- .pushsection .note.name, "",@note ; \
- .align 4 ; \
- .long 2f - 1f /* namesz */ ; \
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/linux/interrupt.h ubuntu-gutsy-xen/include/linux/interrupt.h
---- ubuntu-gutsy/include/linux/interrupt.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/linux/interrupt.h 2007-08-18 12:38:02.000000000 -0400
-@@ -207,6 +207,12 @@
-
- #endif /* CONFIG_GENERIC_HARDIRQS */
-
-+#ifdef CONFIG_HAVE_IRQ_IGNORE_UNHANDLED
-+int irq_ignore_unhandled(unsigned int irq);
-+#else
-+#define irq_ignore_unhandled(irq) 0
-+#endif
-+
- #ifndef __ARCH_SET_SOFTIRQ_PENDING
- #define set_softirq_pending(x) (local_softirq_pending() = (x))
- #define or_softirq_pending(x) (local_softirq_pending() |= (x))
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/linux/kexec.h ubuntu-gutsy-xen/include/linux/kexec.h
---- ubuntu-gutsy/include/linux/kexec.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/linux/kexec.h 2007-08-18 12:38:02.000000000 -0400
-@@ -46,6 +46,13 @@
- KEXEC_CORE_NOTE_NAME_BYTES + \
- KEXEC_CORE_NOTE_DESC_BYTES )
-
-+#ifndef KEXEC_ARCH_HAS_PAGE_MACROS
-+#define kexec_page_to_pfn(page) page_to_pfn(page)
-+#define kexec_pfn_to_page(pfn) pfn_to_page(pfn)
-+#define kexec_virt_to_phys(addr) virt_to_phys(addr)
-+#define kexec_phys_to_virt(addr) phys_to_virt(addr)
-+#endif
-+
- /*
- * This structure is used to hold the arguments that are used when loading
- * kernel binaries.
-@@ -106,6 +113,12 @@
- extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET;
- extern int machine_kexec_prepare(struct kimage *image);
- extern void machine_kexec_cleanup(struct kimage *image);
-+#ifdef CONFIG_XEN
-+extern int xen_machine_kexec_load(struct kimage *image);
-+extern void xen_machine_kexec_unload(struct kimage *image);
-+extern void xen_machine_kexec_setup_resources(void);
-+extern void xen_machine_kexec_register_resources(struct resource *res);
-+#endif
- extern asmlinkage long sys_kexec_load(unsigned long entry,
- unsigned long nr_segments,
- struct kexec_segment __user *segments,
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/linux/mm.h ubuntu-gutsy-xen/include/linux/mm.h
---- ubuntu-gutsy/include/linux/mm.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/linux/mm.h 2007-08-18 12:38:02.000000000 -0400
-@@ -169,6 +169,9 @@
- #define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */
- #define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */
- #define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */
-+#ifdef CONFIG_XEN
-+#define VM_FOREIGN 0x08000000 /* Has pages belonging to another VM */
-+#endif
-
- #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
- #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
-@@ -208,6 +211,12 @@
- /* notification that a previously read-only page is about to become
- * writable, if an error is returned it will cause a SIGBUS */
- int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
-+#ifdef CONFIG_XEN
-+ /* Area-specific function for clearing the PTE at @ptep. Returns the
-+ * original value of @ptep. */
-+ pte_t (*zap_pte)(struct vm_area_struct *vma,
-+ unsigned long addr, pte_t *ptep, int is_fullmm);
-+#endif
- #ifdef CONFIG_NUMA
- int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
- struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/linux/oprofile.h ubuntu-gutsy-xen/include/linux/oprofile.h
---- ubuntu-gutsy/include/linux/oprofile.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/linux/oprofile.h 2007-08-18 12:38:02.000000000 -0400
-@@ -16,6 +16,9 @@
- #include <linux/types.h>
- #include <linux/spinlock.h>
- #include <asm/atomic.h>
-+#ifdef CONFIG_XEN
-+#include <xen/interface/xenoprof.h>
-+#endif
-
- struct super_block;
- struct dentry;
-@@ -27,6 +30,12 @@
- /* create any necessary configuration files in the oprofile fs.
- * Optional. */
- int (*create_files)(struct super_block * sb, struct dentry * root);
-+#ifdef CONFIG_XEN
-+ /* setup active domains with Xen */
-+ int (*set_active)(int *active_domains, unsigned int adomains);
-+ /* setup passive domains with Xen */
-+ int (*set_passive)(int *passive_domains, unsigned int pdomains);
-+#endif
- /* Do any necessary interrupt setup. Optional. */
- int (*setup)(void);
- /* Do any necessary interrupt shutdown. Optional. */
-@@ -78,6 +87,8 @@
- /* add a backtrace entry, to be called from the ->backtrace callback */
- void oprofile_add_trace(unsigned long eip);
-
-+/* add a domain switch entry */
-+int oprofile_add_domain_switch(int32_t domain_id);
-
- /**
- * Create a file of the given name as a child of the given root, with
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/linux/page-flags.h ubuntu-gutsy-xen/include/linux/page-flags.h
---- ubuntu-gutsy/include/linux/page-flags.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/linux/page-flags.h 2007-08-18 12:38:02.000000000 -0400
-@@ -88,6 +88,7 @@
-
- #define PG_mappedtodisk 16 /* Has blocks allocated on-disk */
- #define PG_reclaim 17 /* To be reclaimed asap */
-+#define PG_foreign 18 /* Page is owned by foreign allocator. */
- #define PG_buddy 19 /* Page is free, on buddy lists */
-
- /* PG_owner_priv_1 users should have descriptive aliases */
-@@ -270,6 +271,18 @@
- #define SetPageUncached(page) set_bit(PG_uncached, &(page)->flags)
- #define ClearPageUncached(page) clear_bit(PG_uncached, &(page)->flags)
-
-+#define PageForeign(page) test_bit(PG_foreign, &(page)->flags)
-+#define SetPageForeign(page, dtor) do { \
-+ set_bit(PG_foreign, &(page)->flags); \
-+ (page)->index = (long)(dtor); \
-+} while (0)
-+#define ClearPageForeign(page) do { \
-+ clear_bit(PG_foreign, &(page)->flags); \
-+ (page)->index = 0; \
-+} while (0)
-+#define PageForeignDestructor(page) \
-+ ( (void (*) (struct page *)) (page)->index )(page)
-+
- struct page; /* forward declaration */
-
- extern void cancel_dirty_page(struct page *page, unsigned int account_size);
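The PageForeign machinery in the page-flags.h hunk above marks a page as owned by a foreign allocator and reuses page->index to stash a destructor pointer, which PageForeignDestructor() casts back to a function pointer and invokes. A standalone model of that cast-and-call pattern, not part of the patch, using an invented toy_page type; storing a function pointer in an unsigned long is implementation-defined but works on the platforms this patch targets:

#include <stdio.h>

struct toy_page {
    unsigned long flags;
    unsigned long index;   /* doubles as destructor storage, as in the patch */
};

static void toy_dtor(struct toy_page *pg)
{
    printf("destructor called for page with flags %lx\n", pg->flags);
}

#define set_foreign(pg, dtor)  ((pg)->index = (unsigned long)(dtor))
#define run_foreign_dtor(pg) \
        ((void (*)(struct toy_page *))(pg)->index)(pg)

int main(void)
{
    struct toy_page pg = { .flags = 0x18 };
    set_foreign(&pg, toy_dtor);
    run_foreign_dtor(&pg);     /* invokes toy_dtor(&pg) */
    return 0;
}
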
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/linux/sched.h ubuntu-gutsy-xen/include/linux/sched.h
---- ubuntu-gutsy/include/linux/sched.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/linux/sched.h 2007-08-18 12:38:02.000000000 -0400
-@@ -232,11 +232,16 @@
- extern void scheduler_tick(void);
-
- #ifdef CONFIG_DETECT_SOFTLOCKUP
-+extern unsigned long softlockup_get_next_event(void);
- extern void softlockup_tick(void);
- extern void spawn_softlockup_task(void);
- extern void touch_softlockup_watchdog(void);
- extern void touch_all_softlockup_watchdogs(void);
- #else
-+static inline unsigned long softlockup_get_next_event(void)
-+{
-+ return MAX_JIFFY_OFFSET;
-+}
- static inline void softlockup_tick(void)
- {
- }
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/linux/skbuff.h ubuntu-gutsy-xen/include/linux/skbuff.h
---- ubuntu-gutsy/include/linux/skbuff.h 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/include/linux/skbuff.h 2007-08-18 12:38:02.000000000 -0400
-@@ -212,6 +212,8 @@
- * @local_df: allow local fragmentation
- * @cloned: Head may be cloned (check refcnt to be sure)
- * @nohdr: Payload reference only, must not modify header
-+ * @proto_data_valid: Protocol data validated since arriving at localhost
-+ * @proto_csum_blank: Protocol csum must be added before leaving localhost
- * @pkt_type: Packet class
- * @fclone: skbuff clone status
- * @ip_summed: Driver fed us an IP checksum
-@@ -277,7 +279,13 @@
- nfctinfo:3;
- __u8 pkt_type:3,
- fclone:2,
-+#ifndef CONFIG_XEN
- ipvs_property:1;
-+#else
-+ ipvs_property:1,
-+ proto_data_valid:1,
-+ proto_csum_blank:1;
-+#endif
- __be16 protocol;
-
- void (*destructor)(struct sk_buff *skb);
-@@ -1721,5 +1729,11 @@
- skb->ip_summed = CHECKSUM_NONE;
- }
-
-+#ifdef CONFIG_XEN
-+int skb_checksum_setup(struct sk_buff *skb);
-+#else
-+static inline int skb_checksum_setup(struct sk_buff *skb) { return 0; }
-+#endif
-+
- #endif /* __KERNEL__ */
- #endif /* _LINUX_SKBUFF_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/balloon.h ubuntu-gutsy-xen/include/xen/balloon.h
---- ubuntu-gutsy/include/xen/balloon.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/balloon.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,61 @@
-+/******************************************************************************
-+ * balloon.h
-+ *
-+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
-+ *
-+ * Copyright (c) 2003, B Dragovic
-+ * Copyright (c) 2003-2004, M Williamson, K Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __ASM_BALLOON_H__
-+#define __ASM_BALLOON_H__
-+
-+/*
-+ * Inform the balloon driver that it should allow some slop for device-driver
-+ * memory activities.
-+ */
-+void balloon_update_driver_allowance(long delta);
-+
-+/* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */
-+struct page **alloc_empty_pages_and_pagevec(int nr_pages);
-+void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages);
-+
-+/* Free an empty page range (not allocated through
-+ alloc_empty_pages_and_pagevec), adding to the balloon. */
-+void free_empty_pages(struct page **pagevec, int nr_pages);
-+
-+void balloon_release_driver_page(struct page *page);
-+
-+/*
-+ * Prevent the balloon driver from changing the memory reservation during
-+ * a driver critical region.
-+ */
-+extern spinlock_t balloon_lock;
-+#define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags)
-+#define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
-+
-+#endif /* __ASM_BALLOON_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/blkif.h ubuntu-gutsy-xen/include/xen/blkif.h
---- ubuntu-gutsy/include/xen/blkif.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/blkif.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,97 @@
-+#ifndef __XEN_BLKIF_H__
-+#define __XEN_BLKIF_H__
-+
-+#include <xen/interface/io/ring.h>
-+#include <xen/interface/io/blkif.h>
-+#include <xen/interface/io/protocols.h>
-+
-+/* Not a real protocol. Used to generate ring structs which contain
-+ * the elements common to all protocols only. This way we get a
-+ * compiler-checkable way to use common struct elements, so we can
-+ * avoid using switch(protocol) in a number of places. */
-+struct blkif_common_request {
-+ char dummy;
-+};
-+struct blkif_common_response {
-+ char dummy;
-+};
-+
-+/* i386 protocol version */
-+#pragma pack(push, 4)
-+struct blkif_x86_32_request {
-+ uint8_t operation; /* BLKIF_OP_??? */
-+ uint8_t nr_segments; /* number of segments */
-+ blkif_vdev_t handle; /* only for read/write requests */
-+ uint64_t id; /* private guest value, echoed in resp */
-+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
-+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+};
-+struct blkif_x86_32_response {
-+ uint64_t id; /* copied from request */
-+ uint8_t operation; /* copied from request */
-+ int16_t status; /* BLKIF_RSP_??? */
-+};
-+typedef struct blkif_x86_32_request blkif_x86_32_request_t;
-+typedef struct blkif_x86_32_response blkif_x86_32_response_t;
-+#pragma pack(pop)
-+
-+/* x86_64 protocol version */
-+struct blkif_x86_64_request {
-+ uint8_t operation; /* BLKIF_OP_??? */
-+ uint8_t nr_segments; /* number of segments */
-+ blkif_vdev_t handle; /* only for read/write requests */
-+ uint64_t __attribute__((__aligned__(8))) id;
-+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
-+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+};
-+struct blkif_x86_64_response {
-+ uint64_t __attribute__((__aligned__(8))) id;
-+ uint8_t operation; /* copied from request */
-+ int16_t status; /* BLKIF_RSP_??? */
-+};
-+typedef struct blkif_x86_64_request blkif_x86_64_request_t;
-+typedef struct blkif_x86_64_response blkif_x86_64_response_t;
-+
-+DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response);
-+DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response);
-+DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response);
-+
-+union blkif_back_rings {
-+ blkif_back_ring_t native;
-+ blkif_common_back_ring_t common;
-+ blkif_x86_32_back_ring_t x86_32;
-+ blkif_x86_64_back_ring_t x86_64;
-+};
-+typedef union blkif_back_rings blkif_back_rings_t;
-+
-+enum blkif_protocol {
-+ BLKIF_PROTOCOL_NATIVE = 1,
-+ BLKIF_PROTOCOL_X86_32 = 2,
-+ BLKIF_PROTOCOL_X86_64 = 3,
-+};
-+
-+static void inline blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src)
-+{
-+ int i;
-+ dst->operation = src->operation;
-+ dst->nr_segments = src->nr_segments;
-+ dst->handle = src->handle;
-+ dst->id = src->id;
-+ dst->sector_number = src->sector_number;
-+ for (i = 0; i < src->nr_segments; i++)
-+ dst->seg[i] = src->seg[i];
-+}
-+
-+static void inline blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src)
-+{
-+ int i;
-+ dst->operation = src->operation;
-+ dst->nr_segments = src->nr_segments;
-+ dst->handle = src->handle;
-+ dst->id = src->id;
-+ dst->sector_number = src->sector_number;
-+ for (i = 0; i < src->nr_segments; i++)
-+ dst->seg[i] = src->seg[i];
-+}
-+
-+#endif /* __XEN_BLKIF_H__ */
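The two request layouts above differ only in how the 64-bit id field is aligned: the i386 ABI packs it on a 4-byte boundary (hence the #pragma pack(push, 4)), while x86_64 naturally aligns it to 8 bytes, which is why separate ring types and the copy helpers blkif_get_x86_32_req()/blkif_get_x86_64_req() are needed. A standalone demonstration of the layout difference, not part of the patch, using cut-down toy structs and assuming a GCC-compatible compiler targeting x86-64:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Toy versions of the two request layouts: only the alignment of the
 * 64-bit id differs, but that is enough to change offsets and sizes. */
#pragma pack(push, 4)
struct req32 { uint8_t op; uint8_t nseg; uint16_t handle; uint64_t id; };
#pragma pack(pop)

struct req64 { uint8_t op; uint8_t nseg; uint16_t handle;
               uint64_t __attribute__((__aligned__(8))) id; };

int main(void)
{
    printf("32-bit ABI: offsetof(id)=%zu size=%zu\n",
           offsetof(struct req32, id), sizeof(struct req32));   /* 4, 12 */
    printf("64-bit ABI: offsetof(id)=%zu size=%zu\n",
           offsetof(struct req64, id), sizeof(struct req64));   /* 8, 16 */
    return 0;
}
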
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/cpu_hotplug.h ubuntu-gutsy-xen/include/xen/cpu_hotplug.h
---- ubuntu-gutsy/include/xen/cpu_hotplug.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/cpu_hotplug.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,44 @@
-+#ifndef __XEN_CPU_HOTPLUG_H__
-+#define __XEN_CPU_HOTPLUG_H__
-+
-+#include <linux/kernel.h>
-+#include <linux/cpumask.h>
-+
-+#if defined(CONFIG_X86) && defined(CONFIG_SMP)
-+extern cpumask_t cpu_initialized_map;
-+#define cpu_set_initialized(cpu) cpu_set(cpu, cpu_initialized_map)
-+#else
-+#define cpu_set_initialized(cpu) ((void)0)
-+#endif
-+
-+#if defined(CONFIG_HOTPLUG_CPU)
-+
-+int cpu_up_check(unsigned int cpu);
-+void init_xenbus_allowed_cpumask(void);
-+int smp_suspend(void);
-+void smp_resume(void);
-+
-+void cpu_bringup(void);
-+
-+#else /* !defined(CONFIG_HOTPLUG_CPU) */
-+
-+#define cpu_up_check(cpu) (0)
-+#define init_xenbus_allowed_cpumask() ((void)0)
-+
-+static inline int smp_suspend(void)
-+{
-+ if (num_online_cpus() > 1) {
-+ printk(KERN_WARNING "Can't suspend SMP guests "
-+ "without CONFIG_HOTPLUG_CPU\n");
-+ return -EOPNOTSUPP;
-+ }
-+ return 0;
-+}
-+
-+static inline void smp_resume(void)
-+{
-+}
-+
-+#endif /* !defined(CONFIG_HOTPLUG_CPU) */
-+
-+#endif /* __XEN_CPU_HOTPLUG_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/driver_util.h ubuntu-gutsy-xen/include/xen/driver_util.h
---- ubuntu-gutsy/include/xen/driver_util.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/driver_util.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,14 @@
-+
-+#ifndef __ASM_XEN_DRIVER_UTIL_H__
-+#define __ASM_XEN_DRIVER_UTIL_H__
-+
-+#include <linux/vmalloc.h>
-+#include <linux/device.h>
-+
-+/* Allocate/destroy a 'vmalloc' VM area. */
-+extern struct vm_struct *alloc_vm_area(unsigned long size);
-+extern void free_vm_area(struct vm_struct *area);
-+
-+extern struct class *get_xen_class(void);
-+
-+#endif /* __ASM_XEN_DRIVER_UTIL_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/evtchn.h ubuntu-gutsy-xen/include/xen/evtchn.h
---- ubuntu-gutsy/include/xen/evtchn.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/evtchn.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,128 @@
-+/******************************************************************************
-+ * evtchn.h
-+ *
-+ * Communication via Xen event channels.
-+ * Also definitions for the device that demuxes notifications to userspace.
-+ *
-+ * Copyright (c) 2004-2005, K A Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __ASM_EVTCHN_H__
-+#define __ASM_EVTCHN_H__
-+
-+#include <linux/interrupt.h>
-+#include <asm/hypervisor.h>
-+#include <asm/ptrace.h>
-+#include <asm/synch_bitops.h>
-+#include <xen/interface/event_channel.h>
-+#include <linux/smp.h>
-+
-+/*
-+ * LOW-LEVEL DEFINITIONS
-+ */
-+
-+/*
-+ * Dynamically bind an event source to an IRQ-like callback handler.
-+ * On some platforms this may not be implemented via the Linux IRQ subsystem.
-+ * The IRQ argument passed to the callback handler is the same as returned
-+ * from the bind call. It may not correspond to a Linux IRQ number.
-+ * Returns IRQ or negative errno.
-+ */
-+int bind_caller_port_to_irqhandler(
-+ unsigned int caller_port,
-+ irq_handler_t handler,
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id);
-+int bind_listening_port_to_irqhandler(
-+ unsigned int remote_domain,
-+ irq_handler_t handler,
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id);
-+int bind_interdomain_evtchn_to_irqhandler(
-+ unsigned int remote_domain,
-+ unsigned int remote_port,
-+ irq_handler_t handler,
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id);
-+int bind_virq_to_irqhandler(
-+ unsigned int virq,
-+ unsigned int cpu,
-+ irq_handler_t handler,
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id);
-+int bind_ipi_to_irqhandler(
-+ unsigned int ipi,
-+ unsigned int cpu,
-+ irq_handler_t handler,
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id);
-+
-+/*
-+ * Common unbind function for all event sources. Takes IRQ to unbind from.
-+ * Automatically closes the underlying event channel (except for bindings
-+ * made with bind_caller_port_to_irqhandler()).
-+ */
-+void unbind_from_irqhandler(unsigned int irq, void *dev_id);
-+
-+void irq_resume(void);
-+
-+/* Entry point for notifications into Linux subsystems. */
-+asmlinkage void evtchn_do_upcall(struct pt_regs *regs);
-+
-+/* Entry point for notifications into the userland character device. */
-+void evtchn_device_upcall(int port);
-+
-+void mask_evtchn(int port);
-+void unmask_evtchn(int port);
-+
-+extern void mask_evtchn_local(void);
-+
-+static inline void clear_evtchn(int port)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ synch_clear_bit(port, s->evtchn_pending);
-+}
-+
-+static inline void notify_remote_via_evtchn(int port)
-+{
-+ struct evtchn_send send = { .port = port };
-+ (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
-+}
-+
-+/*
-+ * Use these to access the event channel underlying the IRQ handle returned
-+ * by bind_*_to_irqhandler().
-+ */
-+void notify_remote_via_irq(int irq);
-+int irq_to_evtchn_port(int irq);
-+
-+#endif /* __ASM_EVTCHN_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/features.h ubuntu-gutsy-xen/include/xen/features.h
---- ubuntu-gutsy/include/xen/features.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/features.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,20 @@
-+/******************************************************************************
-+ * features.h
-+ *
-+ * Query the features reported by Xen.
-+ *
-+ * Copyright (c) 2006, Ian Campbell
-+ */
-+
-+#ifndef __ASM_XEN_FEATURES_H__
-+#define __ASM_XEN_FEATURES_H__
-+
-+#include <xen/interface/version.h>
-+
-+extern void setup_xen_features(void);
-+
-+extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32];
-+
-+#define xen_feature(flag) (xen_features[flag])
-+
-+#endif /* __ASM_XEN_FEATURES_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/firmware.h ubuntu-gutsy-xen/include/xen/firmware.h
---- ubuntu-gutsy/include/xen/firmware.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/firmware.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,10 @@
-+#ifndef __XEN_FIRMWARE_H__
-+#define __XEN_FIRMWARE_H__
-+
-+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
-+void copy_edd(void);
-+#endif
-+
-+void copy_edid(void);
-+
-+#endif /* __XEN_FIRMWARE_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/gnttab.h ubuntu-gutsy-xen/include/xen/gnttab.h
---- ubuntu-gutsy/include/xen/gnttab.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/gnttab.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,166 @@
-+/******************************************************************************
-+ * gnttab.h
-+ *
-+ * Two sets of functionality:
-+ * 1. Granting foreign access to our memory reservation.
-+ * 2. Accessing others' memory reservations via grant references.
-+ * (i.e., mechanisms for both sender and recipient of grant references)
-+ *
-+ * Copyright (c) 2004-2005, K A Fraser
-+ * Copyright (c) 2005, Christopher Clark
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __ASM_GNTTAB_H__
-+#define __ASM_GNTTAB_H__
-+
-+#include <asm/hypervisor.h>
-+#include <asm/maddr.h> /* maddr_t */
-+#include <linux/mm.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/features.h>
-+
-+struct gnttab_free_callback {
-+ struct gnttab_free_callback *next;
-+ void (*fn)(void *);
-+ void *arg;
-+ u16 count;
-+};
-+
-+int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
-+ int readonly);
-+
-+/*
-+ * End access through the given grant reference, iff the grant entry is no
-+ * longer in use. Return 1 if the grant entry was freed, 0 if it is still in
-+ * use.
-+ */
-+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
-+
-+/*
-+ * Eventually end access through the given grant reference, and once that
-+ * access has been ended, free the given page too. Access will be ended
-+ * immediately iff the grant entry is not in use, otherwise it will happen
-+ * some time later. page may be 0, in which case no freeing will occur.
-+ */
-+void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
-+ unsigned long page);
-+
-+int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
-+
-+unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
-+unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
-+
-+int gnttab_query_foreign_access(grant_ref_t ref);
-+
-+/*
-+ * operations on reserved batches of grant references
-+ */
-+int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
-+
-+void gnttab_free_grant_reference(grant_ref_t ref);
-+
-+void gnttab_free_grant_references(grant_ref_t head);
-+
-+int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);
-+
-+int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
-+
-+void gnttab_release_grant_reference(grant_ref_t *private_head,
-+ grant_ref_t release);
-+
-+void gnttab_request_free_callback(struct gnttab_free_callback *callback,
-+ void (*fn)(void *), void *arg, u16 count);
-+void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
-+
-+void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
-+ unsigned long frame, int readonly);
-+
-+void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
-+ unsigned long pfn);
-+
-+int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep);
-+void __gnttab_dma_map_page(struct page *page);
-+static inline void __gnttab_dma_unmap_page(struct page *page)
-+{
-+}
-+
-+static inline void gnttab_reset_grant_page(struct page *page)
-+{
-+ init_page_count(page);
-+ reset_page_mapcount(page);
-+}
-+
-+int gnttab_suspend(void);
-+int gnttab_resume(void);
-+
-+static inline void
-+gnttab_set_map_op(struct gnttab_map_grant_ref *map, maddr_t addr,
-+ uint32_t flags, grant_ref_t ref, domid_t domid)
-+{
-+ if (flags & GNTMAP_contains_pte)
-+ map->host_addr = addr;
-+ else if (xen_feature(XENFEAT_auto_translated_physmap))
-+ map->host_addr = __pa(addr);
-+ else
-+ map->host_addr = addr;
-+
-+ map->flags = flags;
-+ map->ref = ref;
-+ map->dom = domid;
-+}
-+
-+static inline void
-+gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, maddr_t addr,
-+ uint32_t flags, grant_handle_t handle)
-+{
-+ if (flags & GNTMAP_contains_pte)
-+ unmap->host_addr = addr;
-+ else if (xen_feature(XENFEAT_auto_translated_physmap))
-+ unmap->host_addr = __pa(addr);
-+ else
-+ unmap->host_addr = addr;
-+
-+ unmap->handle = handle;
-+ unmap->dev_bus_addr = 0;
-+}
-+
-+static inline void
-+gnttab_set_replace_op(struct gnttab_unmap_and_replace *unmap, maddr_t addr,
-+ maddr_t new_addr, grant_handle_t handle)
-+{
-+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
-+ unmap->host_addr = __pa(addr);
-+ unmap->new_addr = __pa(new_addr);
-+ } else {
-+ unmap->host_addr = addr;
-+ unmap->new_addr = new_addr;
-+ }
-+
-+ unmap->handle = handle;
-+}
-+
-+#endif /* __ASM_GNTTAB_H__ */
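For context, a minimal sketch of how a split-driver backend might use the mapping helper above; the helper name, error codes, and surrounding driver state are illustrative assumptions, not part of the Suse patchset:

    #include <linux/errno.h>
    #include <xen/gnttab.h>
    #include <xen/interface/grant_table.h>
    #include <asm/hypercall.h>

    /* Hypothetical helper: map one grant from domain otherend_id at vaddr. */
    static int example_map_grant(void *vaddr, grant_ref_t gref, domid_t otherend_id)
    {
        struct gnttab_map_grant_ref op;

        /* host_addr/flags/ref/dom are filled in by the inline helper above. */
        gnttab_set_map_op(&op, (maddr_t)(unsigned long)vaddr,
                          GNTMAP_host_map, gref, otherend_id);

        if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
            return -EFAULT;        /* the hypercall itself failed */

        /* Each entry carries its own status; GNTST_okay (0) means success. */
        return op.status == GNTST_okay ? 0 : -EINVAL;
    }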
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/hvm.h ubuntu-gutsy-xen/include/xen/hvm.h
---- ubuntu-gutsy/include/xen/hvm.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/hvm.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,23 @@
-+/* Simple wrappers around HVM functions */
-+#ifndef XEN_HVM_H__
-+#define XEN_HVM_H__
-+
-+#include <xen/interface/hvm/params.h>
-+
-+static inline unsigned long hvm_get_parameter(int idx)
-+{
-+ struct xen_hvm_param xhv;
-+ int r;
-+
-+ xhv.domid = DOMID_SELF;
-+ xhv.index = idx;
-+ r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
-+ if (r < 0) {
-+ printk(KERN_ERR "cannot get hvm parameter %d: %d.\n",
-+ idx, r);
-+ return 0;
-+ }
-+ return xhv.value;
-+}
-+
-+#endif /* XEN_HVM_H__ */
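For context, a minimal sketch of reading a parameter through the wrapper above; HVM_PARAM_STORE_EVTCHN is the standard xenstore event-channel parameter from xen/interface/hvm/params.h, and the helper name is an illustrative assumption:

    #include <xen/hvm.h>
    #include <xen/interface/hvm/params.h>

    /* Returns the xenstore event-channel port, or 0 if the hypercall failed. */
    static unsigned long example_store_evtchn(void)
    {
        return hvm_get_parameter(HVM_PARAM_STORE_EVTCHN);
    }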
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/hypercall.h ubuntu-gutsy-xen/include/xen/hypercall.h
---- ubuntu-gutsy/include/xen/hypercall.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/hypercall.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,24 @@
-+#ifndef __XEN_HYPERCALL_H__
-+#define __XEN_HYPERCALL_H__
-+
-+#include <asm/hypercall.h>
-+
-+static inline int
-+HYPERVISOR_multicall_check(
-+ multicall_entry_t *call_list, int nr_calls,
-+ const unsigned long *rc_list)
-+{
-+ int rc = HYPERVISOR_multicall(call_list, nr_calls);
-+
-+ if (unlikely(rc < 0))
-+ return rc;
-+ BUG_ON(rc);
-+
-+ for ( ; nr_calls > 0; --nr_calls, ++call_list)
-+ if (unlikely(call_list->result != (rc_list ? *rc_list++ : 0)))
-+ return nr_calls;
-+
-+ return 0;
-+}
-+
-+#endif /* __XEN_HYPERCALL_H__ */
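For context, a sketch of batching two mmu_update hypercalls and checking their per-entry results with the helper above; the request arrays and helper name are illustrative assumptions:

    #include <xen/hypercall.h>
    #include <xen/interface/xen.h>

    /* Batch two mmu_update calls; returns 0 only if both entries succeeded. */
    static int example_batched_mmu_update(struct mmu_update *req1, unsigned int n1,
                                          struct mmu_update *req2, unsigned int n2)
    {
        multicall_entry_t mc[2];

        mc[0].op      = __HYPERVISOR_mmu_update;
        mc[0].args[0] = (unsigned long)req1;
        mc[0].args[1] = n1;
        mc[0].args[2] = 0;            /* no success-count pointer */
        mc[0].args[3] = DOMID_SELF;

        mc[1].op      = __HYPERVISOR_mmu_update;
        mc[1].args[0] = (unsigned long)req2;
        mc[1].args[1] = n2;
        mc[1].args[2] = 0;
        mc[1].args[3] = DOMID_SELF;

        /* Nonzero means some entry's result did not match the expected 0. */
        return HYPERVISOR_multicall_check(mc, 2, NULL);
    }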
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/hypervisor_sysfs.h ubuntu-gutsy-xen/include/xen/hypervisor_sysfs.h
---- ubuntu-gutsy/include/xen/hypervisor_sysfs.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/hypervisor_sysfs.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,30 @@
-+/*
-+ * copyright (c) 2006 IBM Corporation
-+ * Authored by: Mike D. Day <ncmike@us.ibm.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#ifndef _HYP_SYSFS_H_
-+#define _HYP_SYSFS_H_
-+
-+#include <linux/kobject.h>
-+#include <linux/sysfs.h>
-+
-+#define HYPERVISOR_ATTR_RO(_name) \
-+static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name)
-+
-+#define HYPERVISOR_ATTR_RW(_name) \
-+static struct hyp_sysfs_attr _name##_attr = \
-+ __ATTR(_name, 0644, _name##_show, _name##_store)
-+
-+struct hyp_sysfs_attr {
-+ struct attribute attr;
-+ ssize_t (*show)(struct hyp_sysfs_attr *, char *);
-+ ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t);
-+ void *hyp_attr_data;
-+};
-+
-+#endif /* _HYP_SYSFS_H_ */
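For context, a sketch of declaring a read-only attribute with the macro above; the attribute name "example" and its value are illustrative assumptions, and registration with the kobject layer is omitted:

    #include <linux/kernel.h>
    #include <xen/hypervisor_sysfs.h>

    /* show() callback matching the prototype in struct hyp_sysfs_attr. */
    static ssize_t example_show(struct hyp_sysfs_attr *attr, char *buffer)
    {
        return sprintf(buffer, "42\n");
    }

    /* Expands to: static struct hyp_sysfs_attr example_attr = __ATTR_RO(example); */
    HYPERVISOR_ATTR_RO(example);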
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/acm.h ubuntu-gutsy-xen/include/xen/interface/acm.h
---- ubuntu-gutsy/include/xen/interface/acm.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/acm.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,228 @@
-+/*
-+ * acm.h: Xen access control module interface definitions
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Reiner Sailer <sailer@watson.ibm.com>
-+ * Copyright (c) 2005, International Business Machines Corporation.
-+ */
-+
-+#ifndef _XEN_PUBLIC_ACM_H
-+#define _XEN_PUBLIC_ACM_H
-+
-+#include "xen.h"
-+
-+/* if ACM_DEBUG is defined, all hooks should
-+ * print a short trace message (comment it out
-+ * when not in testing mode)
-+ */
-+/* #define ACM_DEBUG */
-+
-+#ifdef ACM_DEBUG
-+# define printkd(fmt, args...) printk(fmt,## args)
-+#else
-+# define printkd(fmt, args...)
-+#endif
-+
-+/* default ssid reference value if not supplied */
-+#define ACM_DEFAULT_SSID 0x0
-+#define ACM_DEFAULT_LOCAL_SSID 0x0
-+
-+/* Internal ACM ERROR types */
-+#define ACM_OK 0
-+#define ACM_UNDEF -1
-+#define ACM_INIT_SSID_ERROR -2
-+#define ACM_INIT_SOID_ERROR -3
-+#define ACM_ERROR -4
-+
-+/* External ACCESS DECISIONS */
-+#define ACM_ACCESS_PERMITTED 0
-+#define ACM_ACCESS_DENIED -111
-+#define ACM_NULL_POINTER_ERROR -200
-+
-+/*
-+  Error codes reported when trying to test for a new policy
-+ These error codes are reported in an array of tuples where
-+ each error code is followed by a parameter describing the error
-+ more closely, such as a domain id.
-+*/
-+#define ACM_EVTCHN_SHARING_VIOLATION 0x100
-+#define ACM_GNTTAB_SHARING_VIOLATION 0x101
-+#define ACM_DOMAIN_LOOKUP 0x102
-+#define ACM_CHWALL_CONFLICT 0x103
-+#define ACM_SSIDREF_IN_USE 0x104
-+
-+
-+/* primary policy in lower 4 bits */
-+#define ACM_NULL_POLICY 0
-+#define ACM_CHINESE_WALL_POLICY 1
-+#define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2
-+#define ACM_POLICY_UNDEFINED 15
-+
-+/* combinations have secondary policy component in higher 4bit */
-+#define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \
-+ ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY)
-+
-+/* policy: */
-+#define ACM_POLICY_NAME(X) \
-+ ((X) == (ACM_NULL_POLICY)) ? "NULL" : \
-+ ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" : \
-+ ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \
-+ ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \
-+ "UNDEFINED"
-+
-+/* the following policy versions must be increased
-+ * whenever the interpretation of the related
-+ * policy's data structure changes
-+ */
-+#define ACM_POLICY_VERSION 3
-+#define ACM_CHWALL_VERSION 1
-+#define ACM_STE_VERSION 1
-+
-+/* defines a ssid reference used by xen */
-+typedef uint32_t ssidref_t;
-+
-+/* hooks that are known to domains */
-+#define ACMHOOK_none 0
-+#define ACMHOOK_sharing 1
-+
-+/* -------security policy relevant type definitions-------- */
-+
-+/* type identifier; compares to "equal" or "not equal" */
-+typedef uint16_t domaintype_t;
-+
-+/* CHINESE WALL POLICY DATA STRUCTURES
-+ *
-+ * current accumulated conflict type set:
-+ * When a domain is started and has a type that is in
-+ * a conflict set, the conflicting types are incremented in
-+ * the aggregate set. When a domain is destroyed, the
-+ * conflicting types to its type are decremented.
-+ * If a domain has multiple types, this procedure works over
-+ * all those types.
-+ *
-+ * conflict_aggregate_set[i] holds the number of
-+ * running domains that have a conflict with type i.
-+ *
-+ * running_types[i] holds the number of running domains
-+ * that include type i in their ssidref-referenced type set
-+ *
-+ * conflict_sets[i][j] is "0" if type j has no conflict
-+ * with type i and is "1" otherwise.
-+ */
-+/* high-16 = version, low-16 = check magic */
-+#define ACM_MAGIC 0x0001debc
-+
-+/* each offset is given in bytes from the start of the struct
-+ * it is part of */
-+
-+/* V3 of the policy buffer added a version structure */
-+struct acm_policy_version
-+{
-+ uint32_t major;
-+ uint32_t minor;
-+};
-+
-+
-+/* each buffer consists of all policy information for
-+ * the respective policy given in the policy code
-+ *
-+ * acm_policy_buffer, acm_chwall_policy_buffer,
-+ * and acm_ste_policy_buffer need to stay 32-bit aligned
-+ * because we create binary policies also with external
-+ * tools that assume packed representations (e.g. the java tool)
-+ */
-+struct acm_policy_buffer {
-+ uint32_t policy_version; /* ACM_POLICY_VERSION */
-+ uint32_t magic;
-+ uint32_t len;
-+ uint32_t policy_reference_offset;
-+ uint32_t primary_policy_code;
-+ uint32_t primary_buffer_offset;
-+ uint32_t secondary_policy_code;
-+ uint32_t secondary_buffer_offset;
-+ struct acm_policy_version xml_pol_version; /* add in V3 */
-+};
-+
-+
-+struct acm_policy_reference_buffer {
-+ uint32_t len;
-+};
-+
-+struct acm_chwall_policy_buffer {
-+ uint32_t policy_version; /* ACM_CHWALL_VERSION */
-+ uint32_t policy_code;
-+ uint32_t chwall_max_types;
-+ uint32_t chwall_max_ssidrefs;
-+ uint32_t chwall_max_conflictsets;
-+ uint32_t chwall_ssid_offset;
-+ uint32_t chwall_conflict_sets_offset;
-+ uint32_t chwall_running_types_offset;
-+ uint32_t chwall_conflict_aggregate_offset;
-+};
-+
-+struct acm_ste_policy_buffer {
-+ uint32_t policy_version; /* ACM_STE_VERSION */
-+ uint32_t policy_code;
-+ uint32_t ste_max_types;
-+ uint32_t ste_max_ssidrefs;
-+ uint32_t ste_ssid_offset;
-+};
-+
-+struct acm_stats_buffer {
-+ uint32_t magic;
-+ uint32_t len;
-+ uint32_t primary_policy_code;
-+ uint32_t primary_stats_offset;
-+ uint32_t secondary_policy_code;
-+ uint32_t secondary_stats_offset;
-+};
-+
-+struct acm_ste_stats_buffer {
-+ uint32_t ec_eval_count;
-+ uint32_t gt_eval_count;
-+ uint32_t ec_denied_count;
-+ uint32_t gt_denied_count;
-+ uint32_t ec_cachehit_count;
-+ uint32_t gt_cachehit_count;
-+};
-+
-+struct acm_ssid_buffer {
-+ uint32_t len;
-+ ssidref_t ssidref;
-+ uint32_t policy_reference_offset;
-+ uint32_t primary_policy_code;
-+ uint32_t primary_max_types;
-+ uint32_t primary_types_offset;
-+ uint32_t secondary_policy_code;
-+ uint32_t secondary_max_types;
-+ uint32_t secondary_types_offset;
-+};
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
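For context, a sketch of splitting a combined policy code into its primary (low 4 bits) and secondary (high 4 bits) components as described above; the helper name and printk formatting are illustrative assumptions:

    #include <linux/kernel.h>
    #include <xen/interface/acm.h>

    /* Decode a combined policy code: primary in bits 0-3, secondary in bits 4-7. */
    static void example_decode_policy(uint32_t code)
    {
        uint32_t primary   = code & 0xf;
        uint32_t secondary = (code >> 4) & 0xf;

        printk(KERN_INFO "policy \"%s\" (primary %u, secondary %u)\n",
               ACM_POLICY_NAME(code), primary, secondary);
    }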
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/acm_ops.h ubuntu-gutsy-xen/include/xen/interface/acm_ops.h
---- ubuntu-gutsy/include/xen/interface/acm_ops.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/acm_ops.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,166 @@
-+/*
-+ * acm_ops.h: Xen access control module hypervisor commands
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Reiner Sailer <sailer@watson.ibm.com>
-+ * Copyright (c) 2005,2006 International Business Machines Corporation.
-+ */
-+
-+#ifndef __XEN_PUBLIC_ACM_OPS_H__
-+#define __XEN_PUBLIC_ACM_OPS_H__
-+
-+#include "xen.h"
-+#include "acm.h"
-+
-+/*
-+ * Make sure you increment the interface version whenever you modify this file!
-+ * This ensures that old versions of acm tools will stop working in a
-+ * well-defined way (rather than crashing the machine, for instance).
-+ */
-+#define ACM_INTERFACE_VERSION 0xAAAA0009
-+
-+/************************************************************************/
-+
-+/*
-+ * Prototype for this hypercall is:
-+ * int acm_op(int cmd, void *args)
-+ * @cmd == ACMOP_??? (access control module operation).
-+ * @args == Operation-specific extra arguments (NULL if none).
-+ */
-+
-+
-+#define ACMOP_setpolicy 1
-+struct acm_setpolicy {
-+ /* IN */
-+ uint32_t interface_version;
-+ XEN_GUEST_HANDLE_64(void) pushcache;
-+ uint32_t pushcache_size;
-+};
-+
-+
-+#define ACMOP_getpolicy 2
-+struct acm_getpolicy {
-+ /* IN */
-+ uint32_t interface_version;
-+ XEN_GUEST_HANDLE_64(void) pullcache;
-+ uint32_t pullcache_size;
-+};
-+
-+
-+#define ACMOP_dumpstats 3
-+struct acm_dumpstats {
-+ /* IN */
-+ uint32_t interface_version;
-+ XEN_GUEST_HANDLE_64(void) pullcache;
-+ uint32_t pullcache_size;
-+};
-+
-+
-+#define ACMOP_getssid 4
-+#define ACM_GETBY_ssidref 1
-+#define ACM_GETBY_domainid 2
-+struct acm_getssid {
-+ /* IN */
-+ uint32_t interface_version;
-+ uint32_t get_ssid_by; /* ACM_GETBY_* */
-+ union {
-+ domaintype_t domainid;
-+ ssidref_t ssidref;
-+ } id;
-+ XEN_GUEST_HANDLE_64(void) ssidbuf;
-+ uint32_t ssidbuf_size;
-+};
-+
-+#define ACMOP_getdecision 5
-+struct acm_getdecision {
-+ /* IN */
-+ uint32_t interface_version;
-+ uint32_t get_decision_by1; /* ACM_GETBY_* */
-+ uint32_t get_decision_by2; /* ACM_GETBY_* */
-+ union {
-+ domaintype_t domainid;
-+ ssidref_t ssidref;
-+ } id1;
-+ union {
-+ domaintype_t domainid;
-+ ssidref_t ssidref;
-+ } id2;
-+ uint32_t hook;
-+ /* OUT */
-+ uint32_t acm_decision;
-+};
-+
-+
-+#define ACMOP_chgpolicy 6
-+struct acm_change_policy {
-+ /* IN */
-+ uint32_t interface_version;
-+ XEN_GUEST_HANDLE_64(void) policy_pushcache;
-+ uint32_t policy_pushcache_size;
-+ XEN_GUEST_HANDLE_64(void) del_array;
-+ uint32_t delarray_size;
-+ XEN_GUEST_HANDLE_64(void) chg_array;
-+ uint32_t chgarray_size;
-+ /* OUT */
-+ /* array with error code */
-+ XEN_GUEST_HANDLE_64(void) err_array;
-+ uint32_t errarray_size;
-+};
-+
-+#define ACMOP_relabeldoms 7
-+struct acm_relabel_doms {
-+ /* IN */
-+ uint32_t interface_version;
-+ XEN_GUEST_HANDLE_64(void) relabel_map;
-+ uint32_t relabel_map_size;
-+ /* OUT */
-+ XEN_GUEST_HANDLE_64(void) err_array;
-+ uint32_t errarray_size;
-+};
-+
-+/* future interface to Xen */
-+struct xen_acmctl {
-+ uint32_t cmd;
-+ uint32_t interface_version;
-+ union {
-+ struct acm_setpolicy setpolicy;
-+ struct acm_getpolicy getpolicy;
-+ struct acm_dumpstats dumpstats;
-+ struct acm_getssid getssid;
-+ struct acm_getdecision getdecision;
-+ struct acm_change_policy change_policy;
-+ struct acm_relabel_doms relabel_doms;
-+ } u;
-+};
-+
-+typedef struct xen_acmctl xen_acmctl_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t);
-+
-+#endif /* __XEN_PUBLIC_ACM_OPS_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/arch-ia64.h ubuntu-gutsy-xen/include/xen/interface/arch-ia64.h
---- ubuntu-gutsy/include/xen/interface/arch-ia64.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/arch-ia64.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,504 @@
-+/******************************************************************************
-+ * arch-ia64/hypervisor-if.h
-+ *
-+ * Guest OS interface to IA64 Xen.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ */
-+
-+#ifndef __HYPERVISOR_IF_IA64_H__
-+#define __HYPERVISOR_IF_IA64_H__
-+
-+/* Structural guest handles introduced in 0x00030201. */
-+#if __XEN_INTERFACE_VERSION__ >= 0x00030201
-+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
-+ typedef struct { type *p; } __guest_handle_ ## name
-+#else
-+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
-+ typedef type * __guest_handle_ ## name
-+#endif
-+
-+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
-+#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
-+#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
-+#ifdef __XEN_TOOLS__
-+#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
-+#endif
-+
-+#ifndef __ASSEMBLY__
-+/* Guest handles for primitive C types. */
-+__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
-+__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
-+__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
-+__DEFINE_XEN_GUEST_HANDLE(u64, unsigned long);
-+DEFINE_XEN_GUEST_HANDLE(char);
-+DEFINE_XEN_GUEST_HANDLE(int);
-+DEFINE_XEN_GUEST_HANDLE(long);
-+DEFINE_XEN_GUEST_HANDLE(void);
-+
-+typedef unsigned long xen_pfn_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
-+#endif
-+
-+/* Arch specific VIRQs definition */
-+#define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */
-+#define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */
-+#define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */
-+
-+/* Arch specific callback irq definition */
-+/* using Requester-ID(RID) as callback irq */
-+#define IA64_CALLBACK_IRQ_RID (1 << 31)
-+
-+/* Maximum number of virtual CPUs in multi-processor guests. */
-+/* WARNING: before changing this, check that shared_info fits on a page */
-+#define MAX_VIRT_CPUS 64
-+
-+#ifndef __ASSEMBLY__
-+
-+typedef unsigned long xen_ulong_t;
-+
-+#define INVALID_MFN (~0UL)
-+
-+#define MEM_G (1UL << 30)
-+#define MEM_M (1UL << 20)
-+
-+#define MMIO_START (3 * MEM_G)
-+#define MMIO_SIZE (512 * MEM_M)
-+
-+#define VGA_IO_START 0xA0000UL
-+#define VGA_IO_SIZE 0x20000
-+
-+#define LEGACY_IO_START (MMIO_START + MMIO_SIZE)
-+#define LEGACY_IO_SIZE (64*MEM_M)
-+
-+#define IO_PAGE_START (LEGACY_IO_START + LEGACY_IO_SIZE)
-+#define IO_PAGE_SIZE PAGE_SIZE
-+
-+#define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE)
-+#define STORE_PAGE_SIZE PAGE_SIZE
-+
-+#define BUFFER_IO_PAGE_START (STORE_PAGE_START+PAGE_SIZE)
-+#define BUFFER_IO_PAGE_SIZE PAGE_SIZE
-+
-+#define IO_SAPIC_START 0xfec00000UL
-+#define IO_SAPIC_SIZE 0x100000
-+
-+#define PIB_START 0xfee00000UL
-+#define PIB_SIZE 0x200000
-+
-+#define GFW_START (4*MEM_G -16*MEM_M)
-+#define GFW_SIZE (16*MEM_M)
-+
-+struct pt_fpreg {
-+ union {
-+ unsigned long bits[2];
-+ long double __dummy; /* force 16-byte alignment */
-+ } u;
-+};
-+
-+struct cpu_user_regs {
-+ /* The following registers are saved by SAVE_MIN: */
-+ unsigned long b6; /* scratch */
-+ unsigned long b7; /* scratch */
-+
-+ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
-+ unsigned long ar_ssd; /* reserved for future use (scratch) */
-+
-+ unsigned long r8; /* scratch (return value register 0) */
-+ unsigned long r9; /* scratch (return value register 1) */
-+ unsigned long r10; /* scratch (return value register 2) */
-+ unsigned long r11; /* scratch (return value register 3) */
-+
-+ unsigned long cr_ipsr; /* interrupted task's psr */
-+ unsigned long cr_iip; /* interrupted task's instruction pointer */
-+ unsigned long cr_ifs; /* interrupted task's function state */
-+
-+ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
-+ unsigned long ar_pfs; /* prev function state */
-+ unsigned long ar_rsc; /* RSE configuration */
-+ /* The following two are valid only if cr_ipsr.cpl > 0: */
-+ unsigned long ar_rnat; /* RSE NaT */
-+ unsigned long ar_bspstore; /* RSE bspstore */
-+
-+ unsigned long pr; /* 64 predicate registers (1 bit each) */
-+ unsigned long b0; /* return pointer (bp) */
-+ unsigned long loadrs; /* size of dirty partition << 16 */
-+
-+ unsigned long r1; /* the gp pointer */
-+ unsigned long r12; /* interrupted task's memory stack pointer */
-+ unsigned long r13; /* thread pointer */
-+
-+ unsigned long ar_fpsr; /* floating point status (preserved) */
-+ unsigned long r15; /* scratch */
-+
-+ /* The remaining registers are NOT saved for system calls. */
-+
-+ unsigned long r14; /* scratch */
-+ unsigned long r2; /* scratch */
-+ unsigned long r3; /* scratch */
-+ unsigned long r16; /* scratch */
-+ unsigned long r17; /* scratch */
-+ unsigned long r18; /* scratch */
-+ unsigned long r19; /* scratch */
-+ unsigned long r20; /* scratch */
-+ unsigned long r21; /* scratch */
-+ unsigned long r22; /* scratch */
-+ unsigned long r23; /* scratch */
-+ unsigned long r24; /* scratch */
-+ unsigned long r25; /* scratch */
-+ unsigned long r26; /* scratch */
-+ unsigned long r27; /* scratch */
-+ unsigned long r28; /* scratch */
-+ unsigned long r29; /* scratch */
-+ unsigned long r30; /* scratch */
-+ unsigned long r31; /* scratch */
-+ unsigned long ar_ccv; /* compare/exchange value (scratch) */
-+
-+ /*
-+ * Floating point registers that the kernel considers scratch:
-+ */
-+ struct pt_fpreg f6; /* scratch */
-+ struct pt_fpreg f7; /* scratch */
-+ struct pt_fpreg f8; /* scratch */
-+ struct pt_fpreg f9; /* scratch */
-+ struct pt_fpreg f10; /* scratch */
-+ struct pt_fpreg f11; /* scratch */
-+ unsigned long r4; /* preserved */
-+ unsigned long r5; /* preserved */
-+ unsigned long r6; /* preserved */
-+ unsigned long r7; /* preserved */
-+ unsigned long eml_unat; /* used for emulating instruction */
-+ unsigned long pad0; /* alignment pad */
-+
-+};
-+typedef struct cpu_user_regs cpu_user_regs_t;
-+
-+union vac {
-+ unsigned long value;
-+ struct {
-+ int a_int:1;
-+ int a_from_int_cr:1;
-+ int a_to_int_cr:1;
-+ int a_from_psr:1;
-+ int a_from_cpuid:1;
-+ int a_cover:1;
-+ int a_bsw:1;
-+ long reserved:57;
-+ };
-+};
-+typedef union vac vac_t;
-+
-+union vdc {
-+ unsigned long value;
-+ struct {
-+ int d_vmsw:1;
-+ int d_extint:1;
-+ int d_ibr_dbr:1;
-+ int d_pmc:1;
-+ int d_to_pmd:1;
-+ int d_itm:1;
-+ long reserved:58;
-+ };
-+};
-+typedef union vdc vdc_t;
-+
-+struct mapped_regs {
-+ union vac vac;
-+ union vdc vdc;
-+ unsigned long virt_env_vaddr;
-+ unsigned long reserved1[29];
-+ unsigned long vhpi;
-+ unsigned long reserved2[95];
-+ union {
-+ unsigned long vgr[16];
-+ unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
-+ };
-+ union {
-+ unsigned long vbgr[16];
-+ unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
-+ };
-+ unsigned long vnat;
-+ unsigned long vbnat;
-+ unsigned long vcpuid[5];
-+ unsigned long reserved3[11];
-+ unsigned long vpsr;
-+ unsigned long vpr;
-+ unsigned long reserved4[76];
-+ union {
-+ unsigned long vcr[128];
-+ struct {
-+ unsigned long dcr; // CR0
-+ unsigned long itm;
-+ unsigned long iva;
-+ unsigned long rsv1[5];
-+ unsigned long pta; // CR8
-+ unsigned long rsv2[7];
-+ unsigned long ipsr; // CR16
-+ unsigned long isr;
-+ unsigned long rsv3;
-+ unsigned long iip;
-+ unsigned long ifa;
-+ unsigned long itir;
-+ unsigned long iipa;
-+ unsigned long ifs;
-+ unsigned long iim; // CR24
-+ unsigned long iha;
-+ unsigned long rsv4[38];
-+ unsigned long lid; // CR64
-+ unsigned long ivr;
-+ unsigned long tpr;
-+ unsigned long eoi;
-+ unsigned long irr[4];
-+ unsigned long itv; // CR72
-+ unsigned long pmv;
-+ unsigned long cmcv;
-+ unsigned long rsv5[5];
-+ unsigned long lrr0; // CR80
-+ unsigned long lrr1;
-+ unsigned long rsv6[46];
-+ };
-+ };
-+ union {
-+ unsigned long reserved5[128];
-+ struct {
-+ unsigned long precover_ifs;
-+ unsigned long unat; // not sure if this is needed until NaT arch is done
-+ int interrupt_collection_enabled; // virtual psr.ic
-+ /* virtual interrupt deliverable flag is evtchn_upcall_mask in
-+ * shared info area now. interrupt_mask_addr is the address
-+ * of evtchn_upcall_mask for current vcpu
-+ */
-+ unsigned char *interrupt_mask_addr;
-+ int pending_interruption;
-+ unsigned char vpsr_pp;
-+ unsigned char reserved5_2[3];
-+ unsigned long reserved5_1[4];
-+ int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual
-+ int banknum; // 0 or 1, which virtual register bank is active
-+ unsigned long rrs[8]; // region registers
-+ unsigned long krs[8]; // kernel registers
-+ unsigned long pkrs[8]; // protection key registers
-+ unsigned long tmp[8]; // temp registers (e.g. for hyperprivops)
-+ };
-+ };
-+};
-+typedef struct mapped_regs mapped_regs_t;
-+
-+struct vpd {
-+ struct mapped_regs vpd_low;
-+ unsigned long reserved6[3456];
-+ unsigned long vmm_avail[128];
-+ unsigned long reserved7[4096];
-+};
-+typedef struct vpd vpd_t;
-+
-+struct arch_vcpu_info {
-+};
-+typedef struct arch_vcpu_info arch_vcpu_info_t;
-+
-+struct arch_shared_info {
-+ /* PFN of the start_info page. */
-+ unsigned long start_info_pfn;
-+
-+ /* Interrupt vector for event channel. */
-+ int evtchn_vector;
-+
-+ uint64_t pad[32];
-+};
-+typedef struct arch_shared_info arch_shared_info_t;
-+
-+typedef unsigned long xen_callback_t;
-+
-+struct ia64_tr_entry {
-+ unsigned long pte;
-+ unsigned long itir;
-+ unsigned long vadr;
-+ unsigned long rid;
-+};
-+
-+struct vcpu_extra_regs {
-+ struct ia64_tr_entry itrs[8];
-+ struct ia64_tr_entry dtrs[8];
-+ unsigned long iva;
-+ unsigned long dcr;
-+ unsigned long event_callback_ip;
-+};
-+
-+struct vcpu_guest_context {
-+#define VGCF_EXTRA_REGS (1<<1) /* Get/Set extra regs. */
-+ unsigned long flags; /* VGCF_* flags */
-+
-+ struct cpu_user_regs user_regs;
-+ struct vcpu_extra_regs extra_regs;
-+ unsigned long privregs_pfn;
-+};
-+typedef struct vcpu_guest_context vcpu_guest_context_t;
-+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
-+
-+/* dom0 vp op */
-+#define __HYPERVISOR_ia64_dom0vp_op __HYPERVISOR_arch_0
-+/* Map I/O space (machine addresses) into dom0's physical address space.
-+   Currently the assigned physical address equals the machine address. */
-+#define IA64_DOM0VP_ioremap 0
-+
-+/* Convert a pseudo physical page frame number to the corresponding
-+ machine page frame number. If no page is assigned, INVALID_MFN or
-+ GPFN_INV_MASK is returned depending on domain's non-vti/vti mode. */
-+#define IA64_DOM0VP_phystomach 1
-+
-+/* Convert a machine page frame number to the corresponding pseudo physical
-+ page frame number of the caller domain. */
-+#define IA64_DOM0VP_machtophys 3
-+
-+/* Reserved for future use. */
-+#define IA64_DOM0VP_iounmap 4
-+
-+/* Unmap and free pages contained in the specified pseudo physical region. */
-+#define IA64_DOM0VP_zap_physmap 5
-+
-+/* Assign machine page frame to dom0's pseudo physical address space. */
-+#define IA64_DOM0VP_add_physmap 6
-+
-+/* expose the p2m table into domain */
-+#define IA64_DOM0VP_expose_p2m 7
-+
-+/* xen perfmon */
-+#define IA64_DOM0VP_perfmon 8
-+
-+/* gmfn version of IA64_DOM0VP_add_physmap */
-+#define IA64_DOM0VP_add_physmap_with_gmfn 9
-+
-+// flags for page assignment to pseudo physical address space
-+#define _ASSIGN_readonly 0
-+#define ASSIGN_readonly (1UL << _ASSIGN_readonly)
-+#define ASSIGN_writable (0UL << _ASSIGN_readonly) // dummy flag
-+/* Internal only: memory attribute must be WC/UC/UCE. */
-+#define _ASSIGN_nocache 1
-+#define ASSIGN_nocache (1UL << _ASSIGN_nocache)
-+// tlb tracking
-+#define _ASSIGN_tlb_track 2
-+#define ASSIGN_tlb_track (1UL << _ASSIGN_tlb_track)
-+/* Internal only: associated with PGC_allocated bit */
-+#define _ASSIGN_pgc_allocated 3
-+#define ASSIGN_pgc_allocated (1UL << _ASSIGN_pgc_allocated)
-+
-+/* This structure has the same layout as struct ia64_boot_param, defined in
-+ <asm/system.h>. It is redefined here to ease use. */
-+struct xen_ia64_boot_param {
-+ unsigned long command_line; /* physical address of cmd line args */
-+ unsigned long efi_systab; /* physical address of EFI system table */
-+ unsigned long efi_memmap; /* physical address of EFI memory map */
-+ unsigned long efi_memmap_size; /* size of EFI memory map */
-+ unsigned long efi_memdesc_size; /* size of an EFI memory map descriptor */
-+ unsigned int efi_memdesc_version; /* memory descriptor version */
-+ struct {
-+ unsigned short num_cols; /* number of columns on console. */
-+ unsigned short num_rows; /* number of rows on console. */
-+ unsigned short orig_x; /* cursor's x position */
-+ unsigned short orig_y; /* cursor's y position */
-+ } console_info;
-+ unsigned long fpswa; /* physical address of the fpswa interface */
-+ unsigned long initrd_start;
-+ unsigned long initrd_size;
-+ unsigned long domain_start; /* va where the boot time domain begins */
-+ unsigned long domain_size; /* how big is the boot domain */
-+};
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+/* Size of the shared_info area (this is not related to page size). */
-+#define XSI_SHIFT 14
-+#define XSI_SIZE (1 << XSI_SHIFT)
-+/* Log size of mapped_regs area (64 KB - only 4KB is used). */
-+#define XMAPPEDREGS_SHIFT 12
-+#define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT)
-+/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */
-+#define XMAPPEDREGS_OFS XSI_SIZE
-+
-+/* Hyperprivops. */
-+#define HYPERPRIVOP_START 0x1
-+#define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0)
-+#define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1)
-+#define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2)
-+#define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3)
-+#define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4)
-+#define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5)
-+#define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6)
-+#define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7)
-+#define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8)
-+#define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9)
-+#define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa)
-+#define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb)
-+#define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc)
-+#define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd)
-+#define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe)
-+#define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf)
-+#define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10)
-+#define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11)
-+#define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12)
-+#define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13)
-+#define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14)
-+#define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15)
-+#define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16)
-+#define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17)
-+#define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18)
-+#define HYPERPRIVOP_MAX (0x19)
-+
-+/* Fast and light hypercalls. */
-+#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1
-+
-+/* Xencomm macros. */
-+#define XENCOMM_INLINE_MASK 0xf800000000000000UL
-+#define XENCOMM_INLINE_FLAG 0x8000000000000000UL
-+
-+#define XENCOMM_IS_INLINE(addr) \
-+ (((unsigned long)(addr) & XENCOMM_INLINE_MASK) == XENCOMM_INLINE_FLAG)
-+#define XENCOMM_INLINE_ADDR(addr) \
-+ ((unsigned long)(addr) & ~XENCOMM_INLINE_MASK)
-+
-+/* xen perfmon */
-+#ifdef XEN
-+#ifndef __ASSEMBLY__
-+#ifndef _ASM_IA64_PERFMON_H
-+
-+#include <xen/list.h> // asm/perfmon.h requires struct list_head
-+#include <asm/perfmon.h>
-+// for PFM_xxx and pfarg_features_t, pfarg_context_t, pfarg_reg_t, pfarg_load_t
-+
-+#endif /* _ASM_IA64_PERFMON_H */
-+
-+DEFINE_XEN_GUEST_HANDLE(pfarg_features_t);
-+DEFINE_XEN_GUEST_HANDLE(pfarg_context_t);
-+DEFINE_XEN_GUEST_HANDLE(pfarg_reg_t);
-+DEFINE_XEN_GUEST_HANDLE(pfarg_load_t);
-+#endif /* __ASSEMBLY__ */
-+#endif /* XEN */
-+
-+#endif /* __HYPERVISOR_IF_IA64_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/arch-powerpc.h ubuntu-gutsy-xen/include/xen/interface/arch-powerpc.h
---- ubuntu-gutsy/include/xen/interface/arch-powerpc.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/arch-powerpc.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,121 @@
-+/*
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (C) IBM Corp. 2005, 2006
-+ *
-+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_ARCH_PPC_64_H__
-+#define __XEN_PUBLIC_ARCH_PPC_64_H__
-+
-+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
-+ typedef struct { \
-+ int __pad[(sizeof (long long) - sizeof (void *)) / sizeof (int)]; \
-+ type *p; \
-+ } __attribute__((__aligned__(8))) __guest_handle_ ## name
-+
-+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
-+#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
-+#define set_xen_guest_handle(hnd, val) \
-+ do { \
-+ if (sizeof ((hnd).__pad)) \
-+ (hnd).__pad[0] = 0; \
-+ (hnd).p = val; \
-+ } while (0)
-+
-+#ifdef __XEN_TOOLS__
-+#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
-+#endif
-+
-+#ifndef __ASSEMBLY__
-+/* Guest handles for primitive C types. */
-+__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
-+__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
-+__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
-+DEFINE_XEN_GUEST_HANDLE(char);
-+DEFINE_XEN_GUEST_HANDLE(int);
-+DEFINE_XEN_GUEST_HANDLE(long);
-+DEFINE_XEN_GUEST_HANDLE(void);
-+
-+typedef unsigned long long xen_pfn_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
-+#endif
-+
-+/*
-+ * Pointers and other address fields inside interface structures are padded to
-+ * 64 bits. This means that field alignments aren't different between 32- and
-+ * 64-bit architectures.
-+ */
-+/* NB. Multi-level macro ensures __LINE__ is expanded before concatenation. */
-+#define __MEMORY_PADDING(_X)
-+#define _MEMORY_PADDING(_X) __MEMORY_PADDING(_X)
-+#define MEMORY_PADDING _MEMORY_PADDING(__LINE__)
-+
-+/* And the trap vector is... */
-+#define TRAP_INSTR "li 0,-1; sc" /* XXX just "sc"? */
-+
-+#ifndef __ASSEMBLY__
-+
-+#define XENCOMM_INLINE_FLAG (1UL << 63)
-+
-+typedef uint64_t xen_ulong_t;
-+
-+/* User-accessible registers: need to be saved/restored for every nested Xen
-+ * invocation. */
-+struct cpu_user_regs
-+{
-+ uint64_t gprs[32];
-+ uint64_t lr;
-+ uint64_t ctr;
-+ uint64_t srr0;
-+ uint64_t srr1;
-+ uint64_t pc;
-+ uint64_t msr;
-+ uint64_t fpscr;
-+ uint64_t xer;
-+ uint64_t hid4;
-+ uint32_t cr;
-+ uint32_t entry_vector;
-+};
-+typedef struct cpu_user_regs cpu_user_regs_t;
-+
-+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* XXX timebase */
-+
-+/* ONLY used to communicate with dom0! See also struct exec_domain. */
-+struct vcpu_guest_context {
-+ cpu_user_regs_t user_regs; /* User-level CPU registers */
-+ uint64_t sdr1; /* Pagetable base */
-+ /* XXX etc */
-+};
-+typedef struct vcpu_guest_context vcpu_guest_context_t;
-+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
-+
-+struct arch_shared_info {
-+ uint64_t pad[32];
-+};
-+
-+struct arch_vcpu_info {
-+};
-+
-+/* Support for multi-processor guests. */
-+#define MAX_VIRT_CPUS 32
-+#endif
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/arch-x86/xen.h ubuntu-gutsy-xen/include/xen/interface/arch-x86/xen.h
---- ubuntu-gutsy/include/xen/interface/arch-x86/xen.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/arch-x86/xen.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,204 @@
-+/******************************************************************************
-+ * arch-x86/xen.h
-+ *
-+ * Guest OS interface to x86 Xen.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2004-2006, K A Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__
-+#define __XEN_PUBLIC_ARCH_X86_XEN_H__
-+
-+/* Structural guest handles introduced in 0x00030201. */
-+#if __XEN_INTERFACE_VERSION__ >= 0x00030201
-+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
-+ typedef struct { type *p; } __guest_handle_ ## name
-+#else
-+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
-+ typedef type * __guest_handle_ ## name
-+#endif
-+
-+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
-+#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
-+#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
-+#ifdef __XEN_TOOLS__
-+#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
-+#endif
-+
-+#if defined(__i386__)
-+#include "xen-x86_32.h"
-+#elif defined(__x86_64__)
-+#include "xen-x86_64.h"
-+#endif
-+
-+#ifndef __ASSEMBLY__
-+/* Guest handles for primitive C types. */
-+__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
-+__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
-+__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
-+DEFINE_XEN_GUEST_HANDLE(char);
-+DEFINE_XEN_GUEST_HANDLE(int);
-+DEFINE_XEN_GUEST_HANDLE(long);
-+DEFINE_XEN_GUEST_HANDLE(void);
-+
-+typedef unsigned long xen_pfn_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
-+#define PRI_xen_pfn "lx"
-+#endif
-+
-+/*
-+ * SEGMENT DESCRIPTOR TABLES
-+ */
-+/*
-+ * A number of GDT entries are reserved by Xen. These are not situated at the
-+ * start of the GDT because some stupid OSes export hard-coded selector values
-+ * in their ABI. These hard-coded values are always near the start of the GDT,
-+ * so Xen places itself out of the way, at the far end of the GDT.
-+ */
-+#define FIRST_RESERVED_GDT_PAGE 14
-+#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
-+#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
-+
-+/* Maximum number of virtual CPUs in multi-processor guests. */
-+#define MAX_VIRT_CPUS 32
-+
-+#ifndef __ASSEMBLY__
-+
-+typedef unsigned long xen_ulong_t;
-+
-+/*
-+ * Send an array of these to HYPERVISOR_set_trap_table().
-+ * The privilege level specifies which modes may enter a trap via a software
-+ * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
-+ * privilege levels as follows:
-+ * Level == 0: No one may enter
-+ * Level == 1: Kernel may enter
-+ * Level == 2: Kernel may enter
-+ * Level == 3: Everyone may enter
-+ */
-+#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
-+#define TI_GET_IF(_ti) ((_ti)->flags & 4)
-+#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
-+#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
-+struct trap_info {
-+ uint8_t vector; /* exception vector */
-+ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
-+ uint16_t cs; /* code selector */
-+ unsigned long address; /* code offset */
-+};
-+typedef struct trap_info trap_info_t;
-+DEFINE_XEN_GUEST_HANDLE(trap_info_t);
-+
-+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
-+
-+/*
-+ * The following is all CPU context. Note that the fpu_ctxt block is filled
-+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
-+ */
-+struct vcpu_guest_context {
-+ /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
-+ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
-+#define VGCF_I387_VALID (1<<0)
-+#define VGCF_IN_KERNEL (1<<2)
-+#define _VGCF_i387_valid 0
-+#define VGCF_i387_valid (1<<_VGCF_i387_valid)
-+#define _VGCF_in_kernel 2
-+#define VGCF_in_kernel (1<<_VGCF_in_kernel)
-+#define _VGCF_failsafe_disables_events 3
-+#define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events)
-+#define _VGCF_syscall_disables_events 4
-+#define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events)
-+#define _VGCF_online 5
-+#define VGCF_online (1<<_VGCF_online)
-+ unsigned long flags; /* VGCF_* flags */
-+ struct cpu_user_regs user_regs; /* User-level CPU registers */
-+ struct trap_info trap_ctxt[256]; /* Virtual IDT */
-+ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
-+ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
-+ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
-+ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
-+ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
-+ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
-+#ifdef __i386__
-+ unsigned long event_callback_cs; /* CS:EIP of event callback */
-+ unsigned long event_callback_eip;
-+ unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
-+ unsigned long failsafe_callback_eip;
-+#else
-+ unsigned long event_callback_eip;
-+ unsigned long failsafe_callback_eip;
-+#ifdef __XEN__
-+ union {
-+ unsigned long syscall_callback_eip;
-+ struct {
-+ unsigned int event_callback_cs; /* compat CS of event cb */
-+ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */
-+ };
-+ };
-+#else
-+ unsigned long syscall_callback_eip;
-+#endif
-+#endif
-+ unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
-+#ifdef __x86_64__
-+ /* Segment base addresses. */
-+ uint64_t fs_base;
-+ uint64_t gs_base_kernel;
-+ uint64_t gs_base_user;
-+#endif
-+};
-+typedef struct vcpu_guest_context vcpu_guest_context_t;
-+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
-+
-+struct arch_shared_info {
-+ unsigned long max_pfn; /* max pfn that appears in table */
-+ /* Frame containing list of mfns containing list of mfns containing p2m. */
-+ xen_pfn_t pfn_to_mfn_frame_list_list;
-+ unsigned long nmi_reason;
-+ uint64_t pad[32];
-+};
-+typedef struct arch_shared_info arch_shared_info_t;
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+/*
-+ * Prefix forces emulation of some non-trapping instructions.
-+ * Currently only CPUID.
-+ */
-+#ifdef __ASSEMBLY__
-+#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
-+#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
-+#else
-+#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
-+#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
-+#endif
-+
-+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
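For context, a sketch of wrapping a kernel buffer in a typed guest handle before a memory-op hypercall; the structure and hypercall are the standard XENMEM_machphys_mfn_list interface, while the helper name is an illustrative assumption:

    #include <xen/interface/xen.h>
    #include <xen/interface/memory.h>
    #include <asm/hypercall.h>

    /* Ask Xen for up to `max` frames of the machine-to-phys frame list. */
    static long example_fill_mfn_list(xen_pfn_t *frames, unsigned int max)
    {
        struct xen_machphys_mfn_list req = {
            .max_extents = max,
        };

        /* Wrap the raw pointer in the opaque handle type the ABI expects. */
        set_xen_guest_handle(req.extent_start, frames);

        return HYPERVISOR_memory_op(XENMEM_machphys_mfn_list, &req);
    }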
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/arch-x86/xen-x86_32.h ubuntu-gutsy-xen/include/xen/interface/arch-x86/xen-x86_32.h
---- ubuntu-gutsy/include/xen/interface/arch-x86/xen-x86_32.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/arch-x86/xen-x86_32.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,168 @@
-+/******************************************************************************
-+ * xen-x86_32.h
-+ *
-+ * Guest OS interface to x86 32-bit Xen.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2004-2007, K A Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
-+#define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
-+
-+/*
-+ * Hypercall interface:
-+ * Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5)
-+ * Output: %eax
-+ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
-+ * call hypercall_page + hypercall-number * 32
-+ * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx)
-+ */
-+
-+#if __XEN_INTERFACE_VERSION__ < 0x00030203
-+/*
-+ * Legacy hypercall interface:
-+ * As above, except the entry sequence to the hypervisor is:
-+ * mov $hypercall-number*32,%eax ; int $0x82
-+ */
-+#define TRAP_INSTR "int $0x82"
-+#endif
-+
-+/*
-+ * These flat segments are in the Xen-private section of every GDT. Since these
-+ * are also present in the initial GDT, many OSes will be able to avoid
-+ * installing their own GDT.
-+ */
-+#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
-+#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
-+#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
-+#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
-+#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
-+#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
-+
-+#define FLAT_KERNEL_CS FLAT_RING1_CS
-+#define FLAT_KERNEL_DS FLAT_RING1_DS
-+#define FLAT_KERNEL_SS FLAT_RING1_SS
-+#define FLAT_USER_CS FLAT_RING3_CS
-+#define FLAT_USER_DS FLAT_RING3_DS
-+#define FLAT_USER_SS FLAT_RING3_SS
-+
-+/*
-+ * Virtual addresses beyond this are not modifiable by guest OSes. The
-+ * machine->physical mapping table starts at this address, read-only.
-+ */
-+#ifdef CONFIG_X86_PAE
-+#define __HYPERVISOR_VIRT_START 0xF5800000
-+#define __MACH2PHYS_VIRT_START 0xF5800000
-+#define __MACH2PHYS_VIRT_END 0xF6800000
-+#else
-+#define __HYPERVISOR_VIRT_START 0xFC000000
-+#define __MACH2PHYS_VIRT_START 0xFC000000
-+#define __MACH2PHYS_VIRT_END 0xFC400000
-+#endif
-+
-+#ifndef HYPERVISOR_VIRT_START
-+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
-+#endif
-+
-+#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
-+#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
-+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2)
-+#ifndef machine_to_phys_mapping
-+#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START)
-+#endif
-+
-+/* 32-/64-bit invariability for control interfaces (domctl/sysctl). */
-+#if defined(__XEN__) || defined(__XEN_TOOLS__)
-+#undef __DEFINE_XEN_GUEST_HANDLE
-+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
-+ typedef struct { type *p; } \
-+ __guest_handle_ ## name; \
-+ typedef struct { union { type *p; uint64_aligned_t q; }; } \
-+ __guest_handle_64_ ## name
-+#undef set_xen_guest_handle
-+#define set_xen_guest_handle(hnd, val) \
-+ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \
-+ (hnd).p = val; \
-+ } while ( 0 )
-+#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
-+#define XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name
-+#endif
-+
-+#ifndef __ASSEMBLY__
-+
-+struct cpu_user_regs {
-+ uint32_t ebx;
-+ uint32_t ecx;
-+ uint32_t edx;
-+ uint32_t esi;
-+ uint32_t edi;
-+ uint32_t ebp;
-+ uint32_t eax;
-+ uint16_t error_code; /* private */
-+ uint16_t entry_vector; /* private */
-+ uint32_t eip;
-+ uint16_t cs;
-+ uint8_t saved_upcall_mask;
-+ uint8_t _pad0;
-+ uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
-+ uint32_t esp;
-+ uint16_t ss, _pad1;
-+ uint16_t es, _pad2;
-+ uint16_t ds, _pad3;
-+ uint16_t fs, _pad4;
-+ uint16_t gs, _pad5;
-+};
-+typedef struct cpu_user_regs cpu_user_regs_t;
-+DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
-+
-+/*
-+ * Page-directory addresses above 4GB do not fit into architectural %cr3.
-+ * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
-+ * must use the following accessor macros to pack/unpack valid MFNs.
-+ */
-+#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
-+#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
-+
-+struct arch_vcpu_info {
-+ unsigned long cr2;
-+ unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */
-+};
-+typedef struct arch_vcpu_info arch_vcpu_info_t;
-+
-+struct xen_callback {
-+ unsigned long cs;
-+ unsigned long eip;
-+};
-+typedef struct xen_callback xen_callback_t;
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
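For context, a short sketch of the cr3 accessors above: packing rotates the 32-bit MFN left by 12 bits so a PAE page directory above 4GB still fits the register image, and unpacking reverses the rotation; the helper name is an illustrative assumption:

    #include <xen/interface/xen.h>

    /* Returns 1 if packing and unpacking an MFN through cr3 is lossless. */
    static inline int example_cr3_roundtrip(unsigned long mfn)
    {
        unsigned long cr3 = xen_pfn_to_cr3(mfn);

        return xen_cr3_to_pfn(cr3) == mfn;
    }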
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/arch-x86/xen-x86_64.h ubuntu-gutsy-xen/include/xen/interface/arch-x86/xen-x86_64.h
---- ubuntu-gutsy/include/xen/interface/arch-x86/xen-x86_64.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/arch-x86/xen-x86_64.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,211 @@
-+/******************************************************************************
-+ * xen-x86_64.h
-+ *
-+ * Guest OS interface to x86 64-bit Xen.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2004-2006, K A Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
-+#define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
-+
-+/*
-+ * Hypercall interface:
-+ * Input: %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5)
-+ * Output: %rax
-+ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
-+ * call hypercall_page + hypercall-number * 32
-+ * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi)
-+ */
-+
-+#if __XEN_INTERFACE_VERSION__ < 0x00030203
-+/*
-+ * Legacy hypercall interface:
-+ * As above, except the entry sequence to the hypervisor is:
-+ * mov $hypercall-number*32,%eax ; syscall
-+ * Clobbered: %rcx, %r11, argument registers (as above)
-+ */
-+#define TRAP_INSTR "syscall"
-+#endif
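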
-+
-+/*
-+ * 64-bit segment selectors
-+ * These flat segments are in the Xen-private section of every GDT. Since these
-+ * are also present in the initial GDT, many OSes will be able to avoid
-+ * installing their own GDT.
-+ */
-+
-+#define FLAT_RING3_CS32 0xe023 /* GDT index 260 */
-+#define FLAT_RING3_CS64 0xe033 /* GDT index 261 */
-+#define FLAT_RING3_DS32 0xe02b /* GDT index 262 */
-+#define FLAT_RING3_DS64 0x0000 /* NULL selector */
-+#define FLAT_RING3_SS32 0xe02b /* GDT index 262 */
-+#define FLAT_RING3_SS64 0xe02b /* GDT index 262 */
-+
-+#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
-+#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
-+#define FLAT_KERNEL_DS FLAT_KERNEL_DS64
-+#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
-+#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
-+#define FLAT_KERNEL_CS FLAT_KERNEL_CS64
-+#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
-+#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
-+#define FLAT_KERNEL_SS FLAT_KERNEL_SS64
-+
-+#define FLAT_USER_DS64 FLAT_RING3_DS64
-+#define FLAT_USER_DS32 FLAT_RING3_DS32
-+#define FLAT_USER_DS FLAT_USER_DS64
-+#define FLAT_USER_CS64 FLAT_RING3_CS64
-+#define FLAT_USER_CS32 FLAT_RING3_CS32
-+#define FLAT_USER_CS FLAT_USER_CS64
-+#define FLAT_USER_SS64 FLAT_RING3_SS64
-+#define FLAT_USER_SS32 FLAT_RING3_SS32
-+#define FLAT_USER_SS FLAT_USER_SS64
-+
-+#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
-+#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
-+#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
-+#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
-+
-+#ifndef HYPERVISOR_VIRT_START
-+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
-+#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
-+#endif
-+
-+#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
-+#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
-+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
-+#ifndef machine_to_phys_mapping
-+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
-+#endif
-+
-+#ifndef __ASSEMBLY__
-+
-+/*
-+ * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
-+ * @which == SEGBASE_* ; @base == 64-bit base address
-+ * Returns 0 on success.
-+ */
-+#define SEGBASE_FS 0
-+#define SEGBASE_GS_USER 1
-+#define SEGBASE_GS_KERNEL 2
-+#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
-+
-+/*
-+ * int HYPERVISOR_iret(void)
-+ * All arguments are on the kernel stack, in the following format.
-+ * Never returns if successful. Current kernel context is lost.
-+ * The saved CS is mapped as follows:
-+ * RING0 -> RING3 kernel mode.
-+ * RING1 -> RING3 kernel mode.
-+ * RING2 -> RING3 kernel mode.
-+ * RING3 -> RING3 user mode.
-+ * However RING0 indicates that the guest kernel should return to itself
-+ * directly with
-+ * orb $3,1*8(%rsp)
-+ * iretq
-+ * If flags contains VGCF_in_syscall:
-+ * Restore RAX, RIP, RFLAGS, RSP.
-+ * Discard R11, RCX, CS, SS.
-+ * Otherwise:
-+ * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
-+ * All other registers are saved on hypercall entry and restored to user.
-+ */
-+/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
-+#define _VGCF_in_syscall 8
-+#define VGCF_in_syscall (1<<_VGCF_in_syscall)
-+#define VGCF_IN_SYSCALL VGCF_in_syscall
-+struct iret_context {
-+ /* Top of stack (%rsp at point of hypercall). */
-+ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
-+ /* Bottom of iret stack frame. */
-+};
-+
-+#ifdef __GNUC__
-+/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
-+#define __DECL_REG(name) union { \
-+ uint64_t r ## name, e ## name; \
-+ uint32_t _e ## name; \
-+}
-+#else
-+/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
-+#define __DECL_REG(name) uint64_t r ## name
-+#endif
-+
-+struct cpu_user_regs {
-+ uint64_t r15;
-+ uint64_t r14;
-+ uint64_t r13;
-+ uint64_t r12;
-+ __DECL_REG(bp);
-+ __DECL_REG(bx);
-+ uint64_t r11;
-+ uint64_t r10;
-+ uint64_t r9;
-+ uint64_t r8;
-+ __DECL_REG(ax);
-+ __DECL_REG(cx);
-+ __DECL_REG(dx);
-+ __DECL_REG(si);
-+ __DECL_REG(di);
-+ uint32_t error_code; /* private */
-+ uint32_t entry_vector; /* private */
-+ __DECL_REG(ip);
-+ uint16_t cs, _pad0[1];
-+ uint8_t saved_upcall_mask;
-+ uint8_t _pad1[3];
-+ __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */
-+ __DECL_REG(sp);
-+ uint16_t ss, _pad2[3];
-+ uint16_t es, _pad3[3];
-+ uint16_t ds, _pad4[3];
-+ uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */
-+ uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */
-+};
-+typedef struct cpu_user_regs cpu_user_regs_t;
-+DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
-+
-+#undef __DECL_REG
-+
-+#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
-+#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
-+
-+struct arch_vcpu_info {
-+ unsigned long cr2;
-+ unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
-+};
-+typedef struct arch_vcpu_info arch_vcpu_info_t;
-+
-+typedef unsigned long xen_callback_t;
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
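A small sketch of what the __DECL_REG() anonymous union in the header above buys GNU C consumers: the same register slot can be read under its 64-bit or low-32-bit name. This assumes a little-endian x86_64 build and a compiler accepting anonymous unions (GCC/C11); the struct and function names below are illustrative only, not part of the interface.

#include <stdint.h>

#define __DECL_REG(name) union { \
    uint64_t r ## name, e ## name; \
    uint32_t _e ## name; \
}

struct regs_demo {                  /* made-up, cut-down stand-in for cpu_user_regs */
    __DECL_REG(ax);
    __DECL_REG(ip);
};

static uint32_t low32_of_rax(const struct regs_demo *regs)
{
    return regs->_eax;              /* low 32 bits of the same storage as regs->rax */
}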
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/arch-x86_32.h ubuntu-gutsy-xen/include/xen/interface/arch-x86_32.h
---- ubuntu-gutsy/include/xen/interface/arch-x86_32.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/arch-x86_32.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,27 @@
-+/******************************************************************************
-+ * arch-x86_32.h
-+ *
-+ * Guest OS interface to x86 32-bit Xen.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2004-2006, K A Fraser
-+ */
-+
-+#include "arch-x86/xen.h"
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/arch-x86_64.h ubuntu-gutsy-xen/include/xen/interface/arch-x86_64.h
---- ubuntu-gutsy/include/xen/interface/arch-x86_64.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/arch-x86_64.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,27 @@
-+/******************************************************************************
-+ * arch-x86_64.h
-+ *
-+ * Guest OS interface to x86 64-bit Xen.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2004-2006, K A Fraser
-+ */
-+
-+#include "arch-x86/xen.h"
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/callback.h ubuntu-gutsy-xen/include/xen/interface/callback.h
---- ubuntu-gutsy/include/xen/interface/callback.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/callback.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,92 @@
-+/******************************************************************************
-+ * callback.h
-+ *
-+ * Register guest OS callbacks with Xen.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2006, Ian Campbell
-+ */
-+
-+#ifndef __XEN_PUBLIC_CALLBACK_H__
-+#define __XEN_PUBLIC_CALLBACK_H__
-+
-+#include "xen.h"
-+
-+/*
-+ * Prototype for this hypercall is:
-+ * long callback_op(int cmd, void *extra_args)
-+ * @cmd == CALLBACKOP_??? (callback operation).
-+ * @extra_args == Operation-specific extra arguments (NULL if none).
-+ */
-+
-+#define CALLBACKTYPE_event 0
-+#define CALLBACKTYPE_failsafe 1
-+#define CALLBACKTYPE_syscall 2 /* x86_64 only */
-+/*
-+ * sysenter is only available on x86_32 with the
-+ * supervisor_mode_kernel option enabled.
-+ */
-+#define CALLBACKTYPE_sysenter 3
-+#define CALLBACKTYPE_nmi 4
-+
-+/*
-+ * Disable event delivery during callback? This flag is ignored for event and
-+ * NMI callbacks: event delivery is unconditionally disabled.
-+ */
-+#define _CALLBACKF_mask_events 0
-+#define CALLBACKF_mask_events (1U << _CALLBACKF_mask_events)
-+
-+/*
-+ * Register a callback.
-+ */
-+#define CALLBACKOP_register 0
-+struct callback_register {
-+ uint16_t type;
-+ uint16_t flags;
-+ xen_callback_t address;
-+};
-+typedef struct callback_register callback_register_t;
-+DEFINE_XEN_GUEST_HANDLE(callback_register_t);
-+
-+/*
-+ * Unregister a callback.
-+ *
-+ * Not all callbacks can be unregistered. -EINVAL will be returned if
-+ * you attempt to unregister such a callback.
-+ */
-+#define CALLBACKOP_unregister 1
-+struct callback_unregister {
-+ uint16_t type;
-+ uint16_t _unused;
-+};
-+typedef struct callback_unregister callback_unregister_t;
-+DEFINE_XEN_GUEST_HANDLE(callback_unregister_t);
-+
-+#endif /* __XEN_PUBLIC_CALLBACK_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
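To make the registration flow concrete, a hedged sketch of how a 64-bit guest might register its event callback using the structures above, assuming the definitions from callback.h are in scope. HYPERVISOR_callback_op() and xen_event_handler are stand-ins for whatever hypercall wrapper and entry point the kernel actually provides; on x86_32 the address field would instead be a {cs, eip} pair.

extern void xen_event_handler(void);                     /* hypothetical entry point */
extern long HYPERVISOR_callback_op(int cmd, void *arg);  /* assumed hypercall wrapper */

static long register_event_callback(void)
{
    struct callback_register cb = {
        .type    = CALLBACKTYPE_event,
        .flags   = 0,   /* CALLBACKF_mask_events is ignored for event callbacks */
        .address = (xen_callback_t)xen_event_handler,
    };

    return HYPERVISOR_callback_op(CALLBACKOP_register, &cb);
}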
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/COPYING ubuntu-gutsy-xen/include/xen/interface/COPYING
---- ubuntu-gutsy/include/xen/interface/COPYING 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/COPYING 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,38 @@
-+XEN NOTICE
-+==========
-+
-+This copyright applies to all files within this subdirectory and its
-+subdirectories:
-+ include/public/*.h
-+ include/public/hvm/*.h
-+ include/public/io/*.h
-+
-+The intention is that these files can be freely copied into the source
-+tree of an operating system when porting that OS to run on Xen. Doing
-+so does *not* cause the OS to become subject to the terms of the GPL.
-+
-+All other files in the Xen source distribution are covered by version
-+2 of the GNU General Public License except where explicitly stated
-+otherwise within individual source files.
-+
-+ -- Keir Fraser (on behalf of the Xen team)
-+
-+=====================================================================
-+
-+Permission is hereby granted, free of charge, to any person obtaining a copy
-+of this software and associated documentation files (the "Software"), to
-+deal in the Software without restriction, including without limitation the
-+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+sell copies of the Software, and to permit persons to whom the Software is
-+furnished to do so, subject to the following conditions:
-+
-+The above copyright notice and this permission notice shall be included in
-+all copies or substantial portions of the Software.
-+
-+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+DEALINGS IN THE SOFTWARE.
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/domctl.h ubuntu-gutsy-xen/include/xen/interface/domctl.h
---- ubuntu-gutsy/include/xen/interface/domctl.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/domctl.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,478 @@
-+/******************************************************************************
-+ * domctl.h
-+ *
-+ * Domain management operations. For use by node control stack.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2002-2003, B Dragovic
-+ * Copyright (c) 2002-2006, K Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_DOMCTL_H__
-+#define __XEN_PUBLIC_DOMCTL_H__
-+
-+#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
-+#error "domctl operations are intended for use by node control tools only"
-+#endif
-+
-+#include "xen.h"
-+
-+#define XEN_DOMCTL_INTERFACE_VERSION 0x00000005
-+
-+struct xenctl_cpumap {
-+ XEN_GUEST_HANDLE_64(uint8_t) bitmap;
-+ uint32_t nr_cpus;
-+};
-+
-+/*
-+ * NB. xen_domctl.domain is an IN/OUT parameter for this operation.
-+ * If it is specified as zero, an id is auto-allocated and returned.
-+ */
-+#define XEN_DOMCTL_createdomain 1
-+struct xen_domctl_createdomain {
-+ /* IN parameters */
-+ uint32_t ssidref;
-+ xen_domain_handle_t handle;
-+ /* Is this an HVM guest (as opposed to a PV guest)? */
-+#define _XEN_DOMCTL_CDF_hvm_guest 0
-+#define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest)
-+ uint32_t flags;
-+};
-+typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
-+
-+#define XEN_DOMCTL_destroydomain 2
-+#define XEN_DOMCTL_pausedomain 3
-+#define XEN_DOMCTL_unpausedomain 4
-+#define XEN_DOMCTL_resumedomain 27
-+
-+#define XEN_DOMCTL_getdomaininfo 5
-+struct xen_domctl_getdomaininfo {
-+ /* OUT variables. */
-+ domid_t domain; /* Also echoed in domctl.domain */
-+ /* Domain is scheduled to die. */
-+#define _XEN_DOMINF_dying 0
-+#define XEN_DOMINF_dying (1U<<_XEN_DOMINF_dying)
-+ /* Domain is an HVM guest (as opposed to a PV guest). */
-+#define _XEN_DOMINF_hvm_guest 1
-+#define XEN_DOMINF_hvm_guest (1U<<_XEN_DOMINF_hvm_guest)
-+ /* The guest OS has shut down. */
-+#define _XEN_DOMINF_shutdown 2
-+#define XEN_DOMINF_shutdown (1U<<_XEN_DOMINF_shutdown)
-+ /* Currently paused by control software. */
-+#define _XEN_DOMINF_paused 3
-+#define XEN_DOMINF_paused (1U<<_XEN_DOMINF_paused)
-+ /* Currently blocked pending an event. */
-+#define _XEN_DOMINF_blocked 4
-+#define XEN_DOMINF_blocked (1U<<_XEN_DOMINF_blocked)
-+ /* Domain is currently running. */
-+#define _XEN_DOMINF_running 5
-+#define XEN_DOMINF_running (1U<<_XEN_DOMINF_running)
-+ /* CPU to which this domain is bound. */
-+#define XEN_DOMINF_cpumask 255
-+#define XEN_DOMINF_cpushift 8
-+ /* XEN_DOMINF_shutdown guest-supplied code. */
-+#define XEN_DOMINF_shutdownmask 255
-+#define XEN_DOMINF_shutdownshift 16
-+ uint32_t flags; /* XEN_DOMINF_* */
-+ uint64_aligned_t tot_pages;
-+ uint64_aligned_t max_pages;
-+ uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */
-+ uint64_aligned_t cpu_time;
-+ uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
-+ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
-+ uint32_t ssidref;
-+ xen_domain_handle_t handle;
-+};
-+typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t);
-+
-+
-+#define XEN_DOMCTL_getmemlist 6
-+struct xen_domctl_getmemlist {
-+ /* IN variables. */
-+ /* Max entries to write to output buffer. */
-+ uint64_aligned_t max_pfns;
-+ /* Start index in guest's page list. */
-+ uint64_aligned_t start_pfn;
-+ XEN_GUEST_HANDLE_64(uint64_t) buffer;
-+ /* OUT variables. */
-+ uint64_aligned_t num_pfns;
-+};
-+typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t);
-+
-+
-+#define XEN_DOMCTL_getpageframeinfo 7
-+
-+#define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28
-+#define XEN_DOMCTL_PFINFO_NOTAB (0x0U<<28)
-+#define XEN_DOMCTL_PFINFO_L1TAB (0x1U<<28)
-+#define XEN_DOMCTL_PFINFO_L2TAB (0x2U<<28)
-+#define XEN_DOMCTL_PFINFO_L3TAB (0x3U<<28)
-+#define XEN_DOMCTL_PFINFO_L4TAB (0x4U<<28)
-+#define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28)
-+#define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
-+#define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */
-+#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
-+
-+struct xen_domctl_getpageframeinfo {
-+ /* IN variables. */
-+ uint64_aligned_t gmfn; /* GMFN to query */
-+ /* OUT variables. */
-+ /* Is the page PINNED to a type? */
-+ uint32_t type; /* see above type defs */
-+};
-+typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t);
-+
-+
-+#define XEN_DOMCTL_getpageframeinfo2 8
-+struct xen_domctl_getpageframeinfo2 {
-+ /* IN variables. */
-+ uint64_aligned_t num;
-+ /* IN/OUT variables. */
-+ XEN_GUEST_HANDLE_64(uint32_t) array;
-+};
-+typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t);
-+
-+
-+/*
-+ * Control shadow pagetables operation
-+ */
-+#define XEN_DOMCTL_shadow_op 10
-+
-+/* Disable shadow mode. */
-+#define XEN_DOMCTL_SHADOW_OP_OFF 0
-+
-+/* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */
-+#define XEN_DOMCTL_SHADOW_OP_ENABLE 32
-+
-+/* Log-dirty bitmap operations. */
-+ /* Return the bitmap and clean internal copy for next round. */
-+#define XEN_DOMCTL_SHADOW_OP_CLEAN 11
-+ /* Return the bitmap but do not modify internal copy. */
-+#define XEN_DOMCTL_SHADOW_OP_PEEK 12
-+
-+/* Memory allocation accessors. */
-+#define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION 30
-+#define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION 31
-+
-+/* Legacy enable operations. */
-+ /* Equiv. to ENABLE with no mode flags. */
-+#define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST 1
-+ /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */
-+#define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY 2
-+ /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */
-+#define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE 3
-+
-+/* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */
-+ /*
-+ * Shadow pagetables are refcounted: guest does not use explicit mmu
-+ * operations nor write-protect its pagetables.
-+ */
-+#define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT (1 << 1)
-+ /*
-+ * Log pages in a bitmap as they are dirtied.
-+ * Used for live relocation to determine which pages must be re-sent.
-+ */
-+#define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2)
-+ /*
-+ * Automatically translate GPFNs into MFNs.
-+ */
-+#define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3)
-+ /*
-+ * Xen does not steal virtual address space from the guest.
-+ * Requires HVM support.
-+ */
-+#define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL (1 << 4)
-+
-+struct xen_domctl_shadow_op_stats {
-+ uint32_t fault_count;
-+ uint32_t dirty_count;
-+};
-+typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t);
-+
-+struct xen_domctl_shadow_op {
-+ /* IN variables. */
-+ uint32_t op; /* XEN_DOMCTL_SHADOW_OP_* */
-+
-+ /* OP_ENABLE */
-+ uint32_t mode; /* XEN_DOMCTL_SHADOW_ENABLE_* */
-+
-+ /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */
-+ uint32_t mb; /* Shadow memory allocation in MB */
-+
-+ /* OP_PEEK / OP_CLEAN */
-+ XEN_GUEST_HANDLE_64(uint8_t) dirty_bitmap;
-+ uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */
-+ struct xen_domctl_shadow_op_stats stats;
-+};
-+typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t);
-+
-+
-+#define XEN_DOMCTL_max_mem 11
-+struct xen_domctl_max_mem {
-+ /* IN variables. */
-+ uint64_aligned_t max_memkb;
-+};
-+typedef struct xen_domctl_max_mem xen_domctl_max_mem_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t);
-+
-+
-+#define XEN_DOMCTL_setvcpucontext 12
-+#define XEN_DOMCTL_getvcpucontext 13
-+struct xen_domctl_vcpucontext {
-+ uint32_t vcpu; /* IN */
-+ XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */
-+};
-+typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t);
-+
-+
-+#define XEN_DOMCTL_getvcpuinfo 14
-+struct xen_domctl_getvcpuinfo {
-+ /* IN variables. */
-+ uint32_t vcpu;
-+ /* OUT variables. */
-+ uint8_t online; /* currently online (not hotplugged)? */
-+ uint8_t blocked; /* blocked waiting for an event? */
-+ uint8_t running; /* currently scheduled on its CPU? */
-+ uint64_aligned_t cpu_time; /* total cpu time consumed (ns) */
-+ uint32_t cpu; /* current mapping */
-+};
-+typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t);
-+
-+
-+/* Get/set which physical cpus a vcpu can execute on. */
-+#define XEN_DOMCTL_setvcpuaffinity 9
-+#define XEN_DOMCTL_getvcpuaffinity 25
-+struct xen_domctl_vcpuaffinity {
-+ uint32_t vcpu; /* IN */
-+ struct xenctl_cpumap cpumap; /* IN/OUT */
-+};
-+typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
-+
-+
-+#define XEN_DOMCTL_max_vcpus 15
-+struct xen_domctl_max_vcpus {
-+ uint32_t max; /* maximum number of vcpus */
-+};
-+typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
-+
-+
-+#define XEN_DOMCTL_scheduler_op 16
-+/* Scheduler types. */
-+#define XEN_SCHEDULER_SEDF 4
-+#define XEN_SCHEDULER_CREDIT 5
-+/* Set or get info? */
-+#define XEN_DOMCTL_SCHEDOP_putinfo 0
-+#define XEN_DOMCTL_SCHEDOP_getinfo 1
-+struct xen_domctl_scheduler_op {
-+ uint32_t sched_id; /* XEN_SCHEDULER_* */
-+ uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */
-+ union {
-+ struct xen_domctl_sched_sedf {
-+ uint64_aligned_t period;
-+ uint64_aligned_t slice;
-+ uint64_aligned_t latency;
-+ uint32_t extratime;
-+ uint32_t weight;
-+ } sedf;
-+ struct xen_domctl_sched_credit {
-+ uint16_t weight;
-+ uint16_t cap;
-+ } credit;
-+ } u;
-+};
-+typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t);
-+
-+
-+#define XEN_DOMCTL_setdomainhandle 17
-+struct xen_domctl_setdomainhandle {
-+ xen_domain_handle_t handle;
-+};
-+typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t);
-+
-+
-+#define XEN_DOMCTL_setdebugging 18
-+struct xen_domctl_setdebugging {
-+ uint8_t enable;
-+};
-+typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t);
-+
-+
-+#define XEN_DOMCTL_irq_permission 19
-+struct xen_domctl_irq_permission {
-+ uint8_t pirq;
-+ uint8_t allow_access; /* flag to specify enable/disable of IRQ access */
-+};
-+typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t);
-+
-+
-+#define XEN_DOMCTL_iomem_permission 20
-+struct xen_domctl_iomem_permission {
-+ uint64_aligned_t first_mfn;/* first page (physical page number) in range */
-+ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */
-+ uint8_t allow_access; /* allow (!0) or deny (0) access to range? */
-+};
-+typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t);
-+
-+
-+#define XEN_DOMCTL_ioport_permission 21
-+struct xen_domctl_ioport_permission {
-+ uint32_t first_port; /* first port in range */
-+ uint32_t nr_ports; /* size of port range */
-+ uint8_t allow_access; /* allow or deny access to range? */
-+};
-+typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t);
-+
-+
-+#define XEN_DOMCTL_hypercall_init 22
-+struct xen_domctl_hypercall_init {
-+ uint64_aligned_t gmfn; /* GMFN to be initialised */
-+};
-+typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t);
-+
-+
-+#define XEN_DOMCTL_arch_setup 23
-+#define _XEN_DOMAINSETUP_hvm_guest 0
-+#define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest)
-+#define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */
-+#define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query)
-+typedef struct xen_domctl_arch_setup {
-+ uint64_aligned_t flags; /* XEN_DOMAINSETUP_* */
-+#ifdef __ia64__
-+ uint64_aligned_t bp; /* mpaddr of boot param area */
-+ uint64_aligned_t maxmem; /* Highest memory address for MDT. */
-+ uint64_aligned_t xsi_va; /* Xen shared_info area virtual address. */
-+ uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */
-+#endif
-+} xen_domctl_arch_setup_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t);
-+
-+
-+#define XEN_DOMCTL_settimeoffset 24
-+struct xen_domctl_settimeoffset {
-+ int32_t time_offset_seconds; /* applied to domain wallclock time */
-+};
-+typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
-+
-+
-+#define XEN_DOMCTL_gethvmcontext 33
-+#define XEN_DOMCTL_sethvmcontext 34
-+typedef struct xen_domctl_hvmcontext {
-+ uint32_t size; /* IN/OUT: size of buffer / bytes filled */
-+ XEN_GUEST_HANDLE_64(uint8_t) buffer; /* IN/OUT: data, or call
-+ * gethvmcontext with NULL
-+ * buffer to get size
-+ * req'd */
-+} xen_domctl_hvmcontext_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);
-+
-+
-+#define XEN_DOMCTL_set_address_size 35
-+#define XEN_DOMCTL_get_address_size 36
-+typedef struct xen_domctl_address_size {
-+ uint32_t size;
-+} xen_domctl_address_size_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t);
-+
-+
-+#define XEN_DOMCTL_real_mode_area 26
-+struct xen_domctl_real_mode_area {
-+ uint32_t log; /* log2 of Real Mode Area size */
-+};
-+typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t);
-+
-+
-+#define XEN_DOMCTL_sendtrigger 28
-+#define XEN_DOMCTL_SENDTRIGGER_NMI 0
-+#define XEN_DOMCTL_SENDTRIGGER_RESET 1
-+#define XEN_DOMCTL_SENDTRIGGER_INIT 2
-+struct xen_domctl_sendtrigger {
-+ uint32_t trigger; /* IN */
-+ uint32_t vcpu; /* IN */
-+};
-+typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t);
-+
-+
-+struct xen_domctl {
-+ uint32_t cmd;
-+ uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
-+ domid_t domain;
-+ union {
-+ struct xen_domctl_createdomain createdomain;
-+ struct xen_domctl_getdomaininfo getdomaininfo;
-+ struct xen_domctl_getmemlist getmemlist;
-+ struct xen_domctl_getpageframeinfo getpageframeinfo;
-+ struct xen_domctl_getpageframeinfo2 getpageframeinfo2;
-+ struct xen_domctl_vcpuaffinity vcpuaffinity;
-+ struct xen_domctl_shadow_op shadow_op;
-+ struct xen_domctl_max_mem max_mem;
-+ struct xen_domctl_vcpucontext vcpucontext;
-+ struct xen_domctl_getvcpuinfo getvcpuinfo;
-+ struct xen_domctl_max_vcpus max_vcpus;
-+ struct xen_domctl_scheduler_op scheduler_op;
-+ struct xen_domctl_setdomainhandle setdomainhandle;
-+ struct xen_domctl_setdebugging setdebugging;
-+ struct xen_domctl_irq_permission irq_permission;
-+ struct xen_domctl_iomem_permission iomem_permission;
-+ struct xen_domctl_ioport_permission ioport_permission;
-+ struct xen_domctl_hypercall_init hypercall_init;
-+ struct xen_domctl_arch_setup arch_setup;
-+ struct xen_domctl_settimeoffset settimeoffset;
-+ struct xen_domctl_real_mode_area real_mode_area;
-+ struct xen_domctl_hvmcontext hvmcontext;
-+ struct xen_domctl_address_size address_size;
-+ struct xen_domctl_sendtrigger sendtrigger;
-+ uint8_t pad[128];
-+ } u;
-+};
-+typedef struct xen_domctl xen_domctl_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_domctl_t);
-+
-+#endif /* __XEN_PUBLIC_DOMCTL_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
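For orientation, a hedged sketch of how a control tool might use these definitions to query a domain, assuming domctl.h (and xen.h) are in scope. do_domctl() is a placeholder for the real issuing path (privcmd ioctl / libxc wrapper), not something defined by this header.

extern int do_domctl(struct xen_domctl *domctl);   /* hypothetical issuing helper */

static int query_domain(domid_t dom, struct xen_domctl_getdomaininfo *info)
{
    struct xen_domctl domctl = {
        .cmd               = XEN_DOMCTL_getdomaininfo,
        .interface_version = XEN_DOMCTL_INTERFACE_VERSION,
        .domain            = dom,                  /* IN; echoed back in the reply */
    };
    int rc = do_domctl(&domctl);

    if (rc == 0)
        *info = domctl.u.getdomaininfo;            /* OUT fields live in the union */
    return rc;
}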
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/elfnote.h ubuntu-gutsy-xen/include/xen/interface/elfnote.h
---- ubuntu-gutsy/include/xen/interface/elfnote.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/elfnote.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,233 @@
-+/******************************************************************************
-+ * elfnote.h
-+ *
-+ * Definitions used for the Xen ELF notes.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2006, Ian Campbell, XenSource Ltd.
-+ */
-+
-+#ifndef __XEN_PUBLIC_ELFNOTE_H__
-+#define __XEN_PUBLIC_ELFNOTE_H__
-+
-+/*
-+ * The notes should live in a PT_NOTE segment and have "Xen" in the
-+ * name field.
-+ *
-+ * Numeric types are either 4 or 8 bytes depending on the content of
-+ * the desc field.
-+ *
-+ * LEGACY indicates the fields in the legacy __xen_guest string which
-+ * this note type replaces.
-+ */
-+
-+/*
-+ * NAME=VALUE pair (string).
-+ */
-+#define XEN_ELFNOTE_INFO 0
-+
-+/*
-+ * The virtual address of the entry point (numeric).
-+ *
-+ * LEGACY: VIRT_ENTRY
-+ */
-+#define XEN_ELFNOTE_ENTRY 1
-+
-+/* The virtual address of the hypercall transfer page (numeric).
-+ *
-+ * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page
-+ * number not a virtual address)
-+ */
-+#define XEN_ELFNOTE_HYPERCALL_PAGE 2
-+
-+/* The virtual address where the kernel image should be mapped (numeric).
-+ *
-+ * Defaults to 0.
-+ *
-+ * LEGACY: VIRT_BASE
-+ */
-+#define XEN_ELFNOTE_VIRT_BASE 3
-+
-+/*
-+ * The offset of the ELF paddr field from the actual required
-+ * pseudo-physical address (numeric).
-+ *
-+ * This is used to maintain backwards compatibility with older kernels
-+ * which wrote __PAGE_OFFSET into that field. This field defaults to 0
-+ * if not present.
-+ *
-+ * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE)
-+ */
-+#define XEN_ELFNOTE_PADDR_OFFSET 4
-+
-+/*
-+ * The version of Xen that we work with (string).
-+ *
-+ * LEGACY: XEN_VER
-+ */
-+#define XEN_ELFNOTE_XEN_VERSION 5
-+
-+/*
-+ * The name of the guest operating system (string).
-+ *
-+ * LEGACY: GUEST_OS
-+ */
-+#define XEN_ELFNOTE_GUEST_OS 6
-+
-+/*
-+ * The version of the guest operating system (string).
-+ *
-+ * LEGACY: GUEST_VER
-+ */
-+#define XEN_ELFNOTE_GUEST_VERSION 7
-+
-+/*
-+ * The loader type (string).
-+ *
-+ * LEGACY: LOADER
-+ */
-+#define XEN_ELFNOTE_LOADER 8
-+
-+/*
-+ * The kernel supports PAE (x86/32 only, string = "yes", "no" or
-+ * "bimodal").
-+ *
-+ * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting
-+ * may be given as "yes,bimodal" which will cause older Xen to treat
-+ * this kernel as PAE.
-+ *
-+ * LEGACY: PAE (n.b. The legacy interface included a provision to
-+ * indicate 'extended-cr3' support allowing L3 page tables to be
-+ * placed above 4G. It is assumed that any kernel new enough to use
-+ * these ELF notes will include this and therefore "yes" here is
-+ * equivalent to "yes[extended-cr3]" in the __xen_guest interface.)
-+ */
-+#define XEN_ELFNOTE_PAE_MODE 9
-+
-+/*
-+ * The features supported/required by this kernel (string).
-+ *
-+ * The string must consist of a list of feature names (as given in
-+ * features.h, without the "XENFEAT_" prefix) separated by '|'
-+ * characters. If a feature is required for the kernel to function
-+ * then the feature name must be preceded by a '!' character.
-+ *
-+ * LEGACY: FEATURES
-+ */
-+#define XEN_ELFNOTE_FEATURES 10
-+
-+/*
-+ * The kernel requires the symbol table to be loaded (string = "yes" or "no")
-+ * LEGACY: BSD_SYMTAB (n.b. The legacy treated the presence or absence
-+ * of this string as a boolean flag rather than requiring "yes" or
-+ * "no".
-+ */
-+#define XEN_ELFNOTE_BSD_SYMTAB 11
-+
-+/*
-+ * The lowest address the hypervisor hole can begin at (numeric).
-+ *
-+ * This must not be set higher than HYPERVISOR_VIRT_START. Its presence
-+ * also indicates to the hypervisor that the kernel can deal with the
-+ * hole starting at a higher address.
-+ */
-+#define XEN_ELFNOTE_HV_START_LOW 12
-+
-+/*
-+ * List of maddr_t-sized mask/value pairs describing how to recognize
-+ * (non-present) L1 page table entries carrying valid MFNs (numeric).
-+ */
-+#define XEN_ELFNOTE_L1_MFN_VALID 13
-+
-+/*
-+ * Whether or not the guest supports cooperative suspend cancellation.
-+ */
-+#define XEN_ELFNOTE_SUSPEND_CANCEL 14
-+
-+/*
-+ * The number of the highest elfnote defined.
-+ */
-+#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUSPEND_CANCEL
-+
-+/*
-+ * System information exported through crash notes.
-+ *
-+ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO
-+ * note in case of a system crash. This note will contain various
-+ * information about the system, see xen/include/xen/elfcore.h.
-+ */
-+#define XEN_ELFNOTE_CRASH_INFO 0x1000001
-+
-+/*
-+ * System registers exported through crash notes.
-+ *
-+ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS
-+ * note per cpu in case of a system crash. This note is architecture
-+ * specific and will contain registers not saved in the "CORE" note.
-+ * See xen/include/xen/elfcore.h for more information.
-+ */
-+#define XEN_ELFNOTE_CRASH_REGS 0x1000002
-+
-+
-+/*
-+ * xen dump-core none note.
-+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE
-+ * in its dump file to indicate that the file is a xen dump-core
-+ * file. This note doesn't have any other information.
-+ * See tools/libxc/xc_core.h for more information.
-+ */
-+#define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000
-+
-+/*
-+ * xen dump-core header note.
-+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER
-+ * in its dump file.
-+ * See tools/libxc/xc_core.h for more information.
-+ */
-+#define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001
-+
-+/*
-+ * xen dump-core xen version note.
-+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION
-+ * in its dump file. It contains the xen version obtained via the
-+ * XENVER hypercall.
-+ * See tools/libxc/xc_core.h for more information.
-+ */
-+#define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002
-+
-+/*
-+ * xen dump-core format version note.
-+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION
-+ * in its dump file. It contains a format version identifier.
-+ * See tools/libxc/xc_core.h for more information.
-+ */
-+#define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003
-+
-+#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
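To illustrate the note format these constants describe, a self-contained sketch of the byte layout of a single XEN_ELFNOTE_GUEST_OS note. The struct is purely illustrative (not a Xen-defined type); real kernels emit such notes from assembler/linker machinery into a PT_NOTE segment whose name field is "Xen".

#include <stdint.h>

struct xen_elfnote_example {        /* illustrative only; not part of the interface */
    uint32_t namesz;                /* length of "Xen" including the NUL */
    uint32_t descsz;                /* length of the payload including the NUL */
    uint32_t type;                  /* one of the XEN_ELFNOTE_* numbers above */
    char     name[4];               /* "Xen", padded to a 4-byte boundary */
    char     desc[8];               /* payload, padded to a 4-byte boundary */
};

static const struct xen_elfnote_example guest_os_note = {
    .namesz = 4,
    .descsz = 6,
    .type   = 6,                    /* XEN_ELFNOTE_GUEST_OS */
    .name   = "Xen",
    .desc   = "linux",
};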
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/elfstructs.h ubuntu-gutsy-xen/include/xen/interface/elfstructs.h
---- ubuntu-gutsy/include/xen/interface/elfstructs.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/elfstructs.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,527 @@
-+#ifndef __XEN_PUBLIC_ELFSTRUCTS_H__
-+#define __XEN_PUBLIC_ELFSTRUCTS_H__ 1
-+/*
-+ * Copyright (c) 1995, 1996 Erik Theisen. All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ * 1. Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * 2. Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * 3. The name of the author may not be used to endorse or promote products
-+ * derived from this software without specific prior written permission
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+typedef uint8_t Elf_Byte;
-+
-+typedef uint32_t Elf32_Addr; /* Unsigned program address */
-+typedef uint32_t Elf32_Off; /* Unsigned file offset */
-+typedef int32_t Elf32_Sword; /* Signed large integer */
-+typedef uint32_t Elf32_Word; /* Unsigned large integer */
-+typedef uint16_t Elf32_Half; /* Unsigned medium integer */
-+
-+typedef uint64_t Elf64_Addr;
-+typedef uint64_t Elf64_Off;
-+typedef int32_t Elf64_Shalf;
-+
-+typedef int32_t Elf64_Sword;
-+typedef uint32_t Elf64_Word;
-+
-+typedef int64_t Elf64_Sxword;
-+typedef uint64_t Elf64_Xword;
-+
-+typedef uint32_t Elf64_Half;
-+typedef uint16_t Elf64_Quarter;
-+
-+/*
-+ * e_ident[] identification indexes
-+ * See http://www.caldera.com/developers/gabi/2000-07-17/ch4.eheader.html
-+ */
-+#define EI_MAG0 0 /* file ID */
-+#define EI_MAG1 1 /* file ID */
-+#define EI_MAG2 2 /* file ID */
-+#define EI_MAG3 3 /* file ID */
-+#define EI_CLASS 4 /* file class */
-+#define EI_DATA 5 /* data encoding */
-+#define EI_VERSION 6 /* ELF header version */
-+#define EI_OSABI 7 /* OS/ABI ID */
-+#define EI_ABIVERSION 8 /* ABI version */
-+#define EI_PAD 9 /* start of pad bytes */
-+#define EI_NIDENT 16 /* Size of e_ident[] */
-+
-+/* e_ident[] magic number */
-+#define ELFMAG0 0x7f /* e_ident[EI_MAG0] */
-+#define ELFMAG1 'E' /* e_ident[EI_MAG1] */
-+#define ELFMAG2 'L' /* e_ident[EI_MAG2] */
-+#define ELFMAG3 'F' /* e_ident[EI_MAG3] */
-+#define ELFMAG "\177ELF" /* magic */
-+#define SELFMAG 4 /* size of magic */
-+
-+/* e_ident[] file class */
-+#define ELFCLASSNONE 0 /* invalid */
-+#define ELFCLASS32 1 /* 32-bit objs */
-+#define ELFCLASS64 2 /* 64-bit objs */
-+#define ELFCLASSNUM 3 /* number of classes */
-+
-+/* e_ident[] data encoding */
-+#define ELFDATANONE 0 /* invalid */
-+#define ELFDATA2LSB 1 /* Little-Endian */
-+#define ELFDATA2MSB 2 /* Big-Endian */
-+#define ELFDATANUM 3 /* number of data encode defines */
-+
-+/* e_ident[] Operating System/ABI */
-+#define ELFOSABI_SYSV 0 /* UNIX System V ABI */
-+#define ELFOSABI_HPUX 1 /* HP-UX operating system */
-+#define ELFOSABI_NETBSD 2 /* NetBSD */
-+#define ELFOSABI_LINUX 3 /* GNU/Linux */
-+#define ELFOSABI_HURD 4 /* GNU/Hurd */
-+#define ELFOSABI_86OPEN 5 /* 86Open common IA32 ABI */
-+#define ELFOSABI_SOLARIS 6 /* Solaris */
-+#define ELFOSABI_MONTEREY 7 /* Monterey */
-+#define ELFOSABI_IRIX 8 /* IRIX */
-+#define ELFOSABI_FREEBSD 9 /* FreeBSD */
-+#define ELFOSABI_TRU64 10 /* TRU64 UNIX */
-+#define ELFOSABI_MODESTO 11 /* Novell Modesto */
-+#define ELFOSABI_OPENBSD 12 /* OpenBSD */
-+#define ELFOSABI_ARM 97 /* ARM */
-+#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */
-+
-+/* e_ident */
-+#define IS_ELF(ehdr) ((ehdr).e_ident[EI_MAG0] == ELFMAG0 && \
-+ (ehdr).e_ident[EI_MAG1] == ELFMAG1 && \
-+ (ehdr).e_ident[EI_MAG2] == ELFMAG2 && \
-+ (ehdr).e_ident[EI_MAG3] == ELFMAG3)
-+
-+/* ELF Header */
-+typedef struct elfhdr {
-+ unsigned char e_ident[EI_NIDENT]; /* ELF Identification */
-+ Elf32_Half e_type; /* object file type */
-+ Elf32_Half e_machine; /* machine */
-+ Elf32_Word e_version; /* object file version */
-+ Elf32_Addr e_entry; /* virtual entry point */
-+ Elf32_Off e_phoff; /* program header table offset */
-+ Elf32_Off e_shoff; /* section header table offset */
-+ Elf32_Word e_flags; /* processor-specific flags */
-+ Elf32_Half e_ehsize; /* ELF header size */
-+ Elf32_Half e_phentsize; /* program header entry size */
-+ Elf32_Half e_phnum; /* number of program header entries */
-+ Elf32_Half e_shentsize; /* section header entry size */
-+ Elf32_Half e_shnum; /* number of section header entries */
-+ Elf32_Half e_shstrndx; /* section header table's "section
-+ header string table" entry offset */
-+} Elf32_Ehdr;
-+
-+typedef struct {
-+ unsigned char e_ident[EI_NIDENT]; /* Id bytes */
-+ Elf64_Quarter e_type; /* file type */
-+ Elf64_Quarter e_machine; /* machine type */
-+ Elf64_Half e_version; /* version number */
-+ Elf64_Addr e_entry; /* entry point */
-+ Elf64_Off e_phoff; /* Program hdr offset */
-+ Elf64_Off e_shoff; /* Section hdr offset */
-+ Elf64_Half e_flags; /* Processor flags */
-+ Elf64_Quarter e_ehsize; /* sizeof ehdr */
-+ Elf64_Quarter e_phentsize; /* Program header entry size */
-+ Elf64_Quarter e_phnum; /* Number of program headers */
-+ Elf64_Quarter e_shentsize; /* Section header entry size */
-+ Elf64_Quarter e_shnum; /* Number of section headers */
-+ Elf64_Quarter e_shstrndx; /* String table index */
-+} Elf64_Ehdr;
-+
-+/* e_type */
-+#define ET_NONE 0 /* No file type */
-+#define ET_REL 1 /* relocatable file */
-+#define ET_EXEC 2 /* executable file */
-+#define ET_DYN 3 /* shared object file */
-+#define ET_CORE 4 /* core file */
-+#define ET_NUM 5 /* number of types */
-+#define ET_LOPROC 0xff00 /* reserved range for processor */
-+#define ET_HIPROC 0xffff /* specific e_type */
-+
-+/* e_machine */
-+#define EM_NONE 0 /* No Machine */
-+#define EM_M32 1 /* AT&T WE 32100 */
-+#define EM_SPARC 2 /* SPARC */
-+#define EM_386 3 /* Intel 80386 */
-+#define EM_68K 4 /* Motorola 68000 */
-+#define EM_88K 5 /* Motorola 88000 */
-+#define EM_486 6 /* Intel 80486 - unused? */
-+#define EM_860 7 /* Intel 80860 */
-+#define EM_MIPS 8 /* MIPS R3000 Big-Endian only */
-+/*
-+ * Don't know if EM_MIPS_RS4_BE,
-+ * EM_SPARC64, EM_PARISC,
-+ * or EM_PPC are ABI compliant
-+ */
-+#define EM_MIPS_RS4_BE 10 /* MIPS R4000 Big-Endian */
-+#define EM_SPARC64 11 /* SPARC v9 64-bit unofficial */
-+#define EM_PARISC 15 /* HPPA */
-+#define EM_SPARC32PLUS 18 /* Enhanced instruction set SPARC */
-+#define EM_PPC 20 /* PowerPC */
-+#define EM_PPC64 21 /* PowerPC 64-bit */
-+#define EM_ARM 40 /* Advanced RISC Machines ARM */
-+#define EM_ALPHA 41 /* DEC ALPHA */
-+#define EM_SPARCV9 43 /* SPARC version 9 */
-+#define EM_ALPHA_EXP 0x9026 /* DEC ALPHA */
-+#define EM_IA_64 50 /* Intel Merced */
-+#define EM_X86_64 62 /* AMD x86-64 architecture */
-+#define EM_VAX 75 /* DEC VAX */
-+
-+/* Version */
-+#define EV_NONE 0 /* Invalid */
-+#define EV_CURRENT 1 /* Current */
-+#define EV_NUM 2 /* number of versions */
-+
-+/* Section Header */
-+typedef struct {
-+ Elf32_Word sh_name; /* name - index into section header
-+ string table section */
-+ Elf32_Word sh_type; /* type */
-+ Elf32_Word sh_flags; /* flags */
-+ Elf32_Addr sh_addr; /* address */
-+ Elf32_Off sh_offset; /* file offset */
-+ Elf32_Word sh_size; /* section size */
-+ Elf32_Word sh_link; /* section header table index link */
-+ Elf32_Word sh_info; /* extra information */
-+ Elf32_Word sh_addralign; /* address alignment */
-+ Elf32_Word sh_entsize; /* section entry size */
-+} Elf32_Shdr;
-+
-+typedef struct {
-+ Elf64_Half sh_name; /* section name */
-+ Elf64_Half sh_type; /* section type */
-+ Elf64_Xword sh_flags; /* section flags */
-+ Elf64_Addr sh_addr; /* virtual address */
-+ Elf64_Off sh_offset; /* file offset */
-+ Elf64_Xword sh_size; /* section size */
-+ Elf64_Half sh_link; /* link to another */
-+ Elf64_Half sh_info; /* misc info */
-+ Elf64_Xword sh_addralign; /* memory alignment */
-+ Elf64_Xword sh_entsize; /* table entry size */
-+} Elf64_Shdr;
-+
-+/* Special Section Indexes */
-+#define SHN_UNDEF 0 /* undefined */
-+#define SHN_LORESERVE 0xff00 /* lower bounds of reserved indexes */
-+#define SHN_LOPROC 0xff00 /* reserved range for processor */
-+#define SHN_HIPROC 0xff1f /* specific section indexes */
-+#define SHN_ABS 0xfff1 /* absolute value */
-+#define SHN_COMMON 0xfff2 /* common symbol */
-+#define SHN_HIRESERVE 0xffff /* upper bounds of reserved indexes */
-+
-+/* sh_type */
-+#define SHT_NULL 0 /* inactive */
-+#define SHT_PROGBITS 1 /* program defined information */
-+#define SHT_SYMTAB 2 /* symbol table section */
-+#define SHT_STRTAB 3 /* string table section */
-+#define SHT_RELA 4 /* relocation section with addends*/
-+#define SHT_HASH 5 /* symbol hash table section */
-+#define SHT_DYNAMIC 6 /* dynamic section */
-+#define SHT_NOTE 7 /* note section */
-+#define SHT_NOBITS 8 /* no space section */
-+#define SHT_REL 9 /* relocation section without addends */
-+#define SHT_SHLIB 10 /* reserved - purpose unknown */
-+#define SHT_DYNSYM 11 /* dynamic symbol table section */
-+#define SHT_NUM 12 /* number of section types */
-+#define SHT_LOPROC 0x70000000 /* reserved range for processor */
-+#define SHT_HIPROC 0x7fffffff /* specific section header types */
-+#define SHT_LOUSER 0x80000000 /* reserved range for application */
-+#define SHT_HIUSER 0xffffffff /* specific indexes */
-+
-+/* Section names */
-+#define ELF_BSS ".bss" /* uninitialized data */
-+#define ELF_DATA ".data" /* initialized data */
-+#define ELF_DEBUG ".debug" /* debug */
-+#define ELF_DYNAMIC ".dynamic" /* dynamic linking information */
-+#define ELF_DYNSTR ".dynstr" /* dynamic string table */
-+#define ELF_DYNSYM ".dynsym" /* dynamic symbol table */
-+#define ELF_FINI ".fini" /* termination code */
-+#define ELF_GOT ".got" /* global offset table */
-+#define ELF_HASH ".hash" /* symbol hash table */
-+#define ELF_INIT ".init" /* initialization code */
-+#define ELF_REL_DATA ".rel.data" /* relocation data */
-+#define ELF_REL_FINI ".rel.fini" /* relocation termination code */
-+#define ELF_REL_INIT ".rel.init" /* relocation initialization code */
-+#define ELF_REL_DYN ".rel.dyn" /* relocation dynamic link info */
-+#define ELF_REL_RODATA ".rel.rodata" /* relocation read-only data */
-+#define ELF_REL_TEXT ".rel.text" /* relocation code */
-+#define ELF_RODATA ".rodata" /* read-only data */
-+#define ELF_SHSTRTAB ".shstrtab" /* section header string table */
-+#define ELF_STRTAB ".strtab" /* string table */
-+#define ELF_SYMTAB ".symtab" /* symbol table */
-+#define ELF_TEXT ".text" /* code */
-+
-+
-+/* Section Attribute Flags - sh_flags */
-+#define SHF_WRITE 0x1 /* Writable */
-+#define SHF_ALLOC 0x2 /* occupies memory */
-+#define SHF_EXECINSTR 0x4 /* executable */
-+#define SHF_MASKPROC 0xf0000000 /* reserved bits for processor */
-+ /* specific section attributes */
-+
-+/* Symbol Table Entry */
-+typedef struct elf32_sym {
-+ Elf32_Word st_name; /* name - index into string table */
-+ Elf32_Addr st_value; /* symbol value */
-+ Elf32_Word st_size; /* symbol size */
-+ unsigned char st_info; /* type and binding */
-+ unsigned char st_other; /* 0 - no defined meaning */
-+ Elf32_Half st_shndx; /* section header index */
-+} Elf32_Sym;
-+
-+typedef struct {
-+ Elf64_Half st_name; /* Symbol name index in str table */
-+ Elf_Byte st_info; /* type / binding attrs */
-+ Elf_Byte st_other; /* unused */
-+ Elf64_Quarter st_shndx; /* section index of symbol */
-+ Elf64_Xword st_value; /* value of symbol */
-+ Elf64_Xword st_size; /* size of symbol */
-+} Elf64_Sym;
-+
-+/* Symbol table index */
-+#define STN_UNDEF 0 /* undefined */
-+
-+/* Extract symbol info - st_info */
-+#define ELF32_ST_BIND(x) ((x) >> 4)
-+#define ELF32_ST_TYPE(x) (((unsigned int) x) & 0xf)
-+#define ELF32_ST_INFO(b,t) (((b) << 4) + ((t) & 0xf))
-+
-+#define ELF64_ST_BIND(x) ((x) >> 4)
-+#define ELF64_ST_TYPE(x) (((unsigned int) x) & 0xf)
-+#define ELF64_ST_INFO(b,t) (((b) << 4) + ((t) & 0xf))
-+
-+/* Symbol Binding - ELF32_ST_BIND - st_info */
-+#define STB_LOCAL 0 /* Local symbol */
-+#define STB_GLOBAL 1 /* Global symbol */
-+#define STB_WEAK 2 /* like global - lower precedence */
-+#define STB_NUM 3 /* number of symbol bindings */
-+#define STB_LOPROC 13 /* reserved range for processor */
-+#define STB_HIPROC 15 /* specific symbol bindings */
-+
-+/* Symbol type - ELF32_ST_TYPE - st_info */
-+#define STT_NOTYPE 0 /* not specified */
-+#define STT_OBJECT 1 /* data object */
-+#define STT_FUNC 2 /* function */
-+#define STT_SECTION 3 /* section */
-+#define STT_FILE 4 /* file */
-+#define STT_NUM 5 /* number of symbol types */
-+#define STT_LOPROC 13 /* reserved range for processor */
-+#define STT_HIPROC 15 /* specific symbol types */
-+
-+/* Relocation entry with implicit addend */
-+typedef struct {
-+ Elf32_Addr r_offset; /* offset of relocation */
-+ Elf32_Word r_info; /* symbol table index and type */
-+} Elf32_Rel;
-+
-+/* Relocation entry with explicit addend */
-+typedef struct {
-+ Elf32_Addr r_offset; /* offset of relocation */
-+ Elf32_Word r_info; /* symbol table index and type */
-+ Elf32_Sword r_addend;
-+} Elf32_Rela;
-+
-+/* Extract relocation info - r_info */
-+#define ELF32_R_SYM(i) ((i) >> 8)
-+#define ELF32_R_TYPE(i) ((unsigned char) (i))
-+#define ELF32_R_INFO(s,t) (((s) << 8) + (unsigned char)(t))
-+
-+typedef struct {
-+ Elf64_Xword r_offset; /* where to do it */
-+ Elf64_Xword r_info; /* index & type of relocation */
-+} Elf64_Rel;
-+
-+typedef struct {
-+ Elf64_Xword r_offset; /* where to do it */
-+ Elf64_Xword r_info; /* index & type of relocation */
-+ Elf64_Sxword r_addend; /* adjustment value */
-+} Elf64_Rela;
-+
-+#define ELF64_R_SYM(info) ((info) >> 32)
-+#define ELF64_R_TYPE(info) ((info) & 0xFFFFFFFF)
-+#define ELF64_R_INFO(s,t) (((s) << 32) + (u_int32_t)(t))
-+
-+/* Program Header */
-+typedef struct {
-+ Elf32_Word p_type; /* segment type */
-+ Elf32_Off p_offset; /* segment offset */
-+ Elf32_Addr p_vaddr; /* virtual address of segment */
-+ Elf32_Addr p_paddr; /* physical address - ignored? */
-+ Elf32_Word p_filesz; /* number of bytes in file for seg. */
-+ Elf32_Word p_memsz; /* number of bytes in mem. for seg. */
-+ Elf32_Word p_flags; /* flags */
-+ Elf32_Word p_align; /* memory alignment */
-+} Elf32_Phdr;
-+
-+typedef struct {
-+ Elf64_Half p_type; /* entry type */
-+ Elf64_Half p_flags; /* flags */
-+ Elf64_Off p_offset; /* offset */
-+ Elf64_Addr p_vaddr; /* virtual address */
-+ Elf64_Addr p_paddr; /* physical address */
-+ Elf64_Xword p_filesz; /* file size */
-+ Elf64_Xword p_memsz; /* memory size */
-+ Elf64_Xword p_align; /* memory & file alignment */
-+} Elf64_Phdr;
-+
-+/* Segment types - p_type */
-+#define PT_NULL 0 /* unused */
-+#define PT_LOAD 1 /* loadable segment */
-+#define PT_DYNAMIC 2 /* dynamic linking section */
-+#define PT_INTERP 3 /* the RTLD */
-+#define PT_NOTE 4 /* auxiliary information */
-+#define PT_SHLIB 5 /* reserved - purpose undefined */
-+#define PT_PHDR 6 /* program header */
-+#define PT_NUM 7 /* Number of segment types */
-+#define PT_LOPROC 0x70000000 /* reserved range for processor */
-+#define PT_HIPROC 0x7fffffff /* specific segment types */
-+
-+/* Segment flags - p_flags */
-+#define PF_X 0x1 /* Executable */
-+#define PF_W 0x2 /* Writable */
-+#define PF_R 0x4 /* Readable */
-+#define PF_MASKPROC 0xf0000000 /* reserved bits for processor */
-+ /* specific segment flags */
-+
-+/* Dynamic structure */
-+typedef struct {
-+ Elf32_Sword d_tag; /* controls meaning of d_val */
-+ union {
-+ Elf32_Word d_val; /* Multiple meanings - see d_tag */
-+ Elf32_Addr d_ptr; /* program virtual address */
-+ } d_un;
-+} Elf32_Dyn;
-+
-+typedef struct {
-+ Elf64_Xword d_tag; /* controls meaning of d_val */
-+ union {
-+ Elf64_Addr d_ptr;
-+ Elf64_Xword d_val;
-+ } d_un;
-+} Elf64_Dyn;
-+
-+/* Dynamic Array Tags - d_tag */
-+#define DT_NULL 0 /* marks end of _DYNAMIC array */
-+#define DT_NEEDED 1 /* string table offset of needed lib */
-+#define DT_PLTRELSZ 2 /* size of relocation entries in PLT */
-+#define DT_PLTGOT 3 /* address PLT/GOT */
-+#define DT_HASH 4 /* address of symbol hash table */
-+#define DT_STRTAB 5 /* address of string table */
-+#define DT_SYMTAB 6 /* address of symbol table */
-+#define DT_RELA 7 /* address of relocation table */
-+#define DT_RELASZ 8 /* size of relocation table */
-+#define DT_RELAENT 9 /* size of relocation entry */
-+#define DT_STRSZ 10 /* size of string table */
-+#define DT_SYMENT 11 /* size of symbol table entry */
-+#define DT_INIT 12 /* address of initialization func. */
-+#define DT_FINI 13 /* address of termination function */
-+#define DT_SONAME 14 /* string table offset of shared obj */
-+#define DT_RPATH 15 /* string table offset of library
-+ search path */
-+#define DT_SYMBOLIC 16 /* start sym search in shared obj. */
-+#define DT_REL 17 /* address of rel. tbl. w addends */
-+#define DT_RELSZ 18 /* size of DT_REL relocation table */
-+#define DT_RELENT 19 /* size of DT_REL relocation entry */
-+#define DT_PLTREL 20 /* PLT referenced relocation entry */
-+#define DT_DEBUG 21 /* debugger */
-+#define DT_TEXTREL 22 /* Allow rel. mod. to unwritable seg */
-+#define DT_JMPREL 23 /* add. of PLT's relocation entries */
-+#define DT_BIND_NOW 24 /* Bind now regardless of env setting */
-+#define DT_NUM 25 /* Number used. */
-+#define DT_LOPROC 0x70000000 /* reserved range for processor */
-+#define DT_HIPROC 0x7fffffff /* specific dynamic array tags */
-+
-+/* Standard ELF hashing function */
-+unsigned int elf_hash(const unsigned char *name);
-+
-+/*
-+ * Note Definitions
-+ */
-+typedef struct {
-+ Elf32_Word namesz;
-+ Elf32_Word descsz;
-+ Elf32_Word type;
-+} Elf32_Note;
-+
-+typedef struct {
-+ Elf64_Half namesz;
-+ Elf64_Half descsz;
-+ Elf64_Half type;
-+} Elf64_Note;
-+
-+
-+#if defined(ELFSIZE)
-+#define CONCAT(x,y) __CONCAT(x,y)
-+#define ELFNAME(x) CONCAT(elf,CONCAT(ELFSIZE,CONCAT(_,x)))
-+#define ELFNAME2(x,y) CONCAT(x,CONCAT(_elf,CONCAT(ELFSIZE,CONCAT(_,y))))
-+#define ELFNAMEEND(x) CONCAT(x,CONCAT(_elf,ELFSIZE))
-+#define ELFDEFNNAME(x) CONCAT(ELF,CONCAT(ELFSIZE,CONCAT(_,x)))
-+#endif
-+
-+#if defined(ELFSIZE) && (ELFSIZE == 32)
-+#define Elf_Ehdr Elf32_Ehdr
-+#define Elf_Phdr Elf32_Phdr
-+#define Elf_Shdr Elf32_Shdr
-+#define Elf_Sym Elf32_Sym
-+#define Elf_Rel Elf32_Rel
-+#define Elf_RelA Elf32_Rela
-+#define Elf_Dyn Elf32_Dyn
-+#define Elf_Word Elf32_Word
-+#define Elf_Sword Elf32_Sword
-+#define Elf_Addr Elf32_Addr
-+#define Elf_Off Elf32_Off
-+#define Elf_Nhdr Elf32_Nhdr
-+#define Elf_Note Elf32_Note
-+
-+#define ELF_R_SYM ELF32_R_SYM
-+#define ELF_R_TYPE ELF32_R_TYPE
-+#define ELF_R_INFO ELF32_R_INFO
-+#define ELFCLASS ELFCLASS32
-+
-+#define ELF_ST_BIND ELF32_ST_BIND
-+#define ELF_ST_TYPE ELF32_ST_TYPE
-+#define ELF_ST_INFO ELF32_ST_INFO
-+
-+#define AuxInfo Aux32Info
-+#elif defined(ELFSIZE) && (ELFSIZE == 64)
-+#define Elf_Ehdr Elf64_Ehdr
-+#define Elf_Phdr Elf64_Phdr
-+#define Elf_Shdr Elf64_Shdr
-+#define Elf_Sym Elf64_Sym
-+#define Elf_Rel Elf64_Rel
-+#define Elf_RelA Elf64_Rela
-+#define Elf_Dyn Elf64_Dyn
-+#define Elf_Word Elf64_Word
-+#define Elf_Sword Elf64_Sword
-+#define Elf_Addr Elf64_Addr
-+#define Elf_Off Elf64_Off
-+#define Elf_Nhdr Elf64_Nhdr
-+#define Elf_Note Elf64_Note
-+
-+#define ELF_R_SYM ELF64_R_SYM
-+#define ELF_R_TYPE ELF64_R_TYPE
-+#define ELF_R_INFO ELF64_R_INFO
-+#define ELFCLASS ELFCLASS64
-+
-+#define ELF_ST_BIND ELF64_ST_BIND
-+#define ELF_ST_TYPE ELF64_ST_TYPE
-+#define ELF_ST_INFO ELF64_ST_INFO
-+
-+#define AuxInfo Aux64Info
-+#endif
-+
-+#endif /* __XEN_PUBLIC_ELFSTRUCTS_H__ */
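As a concrete illustration of the elf_hash() declaration above, here is a minimal sketch of the classic System V ELF hashing function; the assumption that this is the intended algorithm comes from the usual ELF specification, not from the patch itself.

    /* Sketch of the standard System V ELF hash declared above.
     * Assumption: the conventional published algorithm is intended. */
    unsigned int elf_hash(const unsigned char *name)
    {
        unsigned int h = 0, g;

        while (*name) {
            h = (h << 4) + *name++;
            g = h & 0xf0000000;
            if (g)
                h ^= g >> 24;
            h &= ~g;
        }
        return h;
    }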
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/event_channel.h ubuntu-gutsy-xen/include/xen/interface/event_channel.h
---- ubuntu-gutsy/include/xen/interface/event_channel.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/event_channel.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,264 @@
-+/******************************************************************************
-+ * event_channel.h
-+ *
-+ * Event channels between domains.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2003-2004, K A Fraser.
-+ */
-+
-+#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
-+#define __XEN_PUBLIC_EVENT_CHANNEL_H__
-+
-+/*
-+ * Prototype for this hypercall is:
-+ * int event_channel_op(int cmd, void *args)
-+ * @cmd == EVTCHNOP_??? (event-channel operation).
-+ * @args == Operation-specific extra arguments (NULL if none).
-+ */
-+
-+typedef uint32_t evtchn_port_t;
-+DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
-+
-+/*
-+ * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
-+ * accepting interdomain bindings from domain <remote_dom>. A fresh port
-+ * is allocated in <dom> and returned as <port>.
-+ * NOTES:
-+ * 1. If the caller is unprivileged then <dom> must be DOMID_SELF.
-+ * 2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
-+ */
-+#define EVTCHNOP_alloc_unbound 6
-+struct evtchn_alloc_unbound {
-+ /* IN parameters */
-+ domid_t dom, remote_dom;
-+ /* OUT parameters */
-+ evtchn_port_t port;
-+};
-+typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
-+
-+/*
-+ * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
-+ * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
-+ * a port that is unbound and marked as accepting bindings from the calling
-+ * domain. A fresh port is allocated in the calling domain and returned as
-+ * <local_port>.
-+ * NOTES:
-+ * 1. <remote_dom> may be DOMID_SELF, allowing loopback connections.
-+ */
-+#define EVTCHNOP_bind_interdomain 0
-+struct evtchn_bind_interdomain {
-+ /* IN parameters. */
-+ domid_t remote_dom;
-+ evtchn_port_t remote_port;
-+ /* OUT parameters. */
-+ evtchn_port_t local_port;
-+};
-+typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t;
-+
-+/*
-+ * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
-+ * vcpu.
-+ * NOTES:
-+ * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list
-+ * in xen.h for the classification of each VIRQ.
-+ * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be
-+ * re-bound via EVTCHNOP_bind_vcpu.
-+ * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu.
-+ * The allocated event channel is bound to the specified vcpu and the
-+ * binding cannot be changed.
-+ */
-+#define EVTCHNOP_bind_virq 1
-+struct evtchn_bind_virq {
-+ /* IN parameters. */
-+ uint32_t virq;
-+ uint32_t vcpu;
-+ /* OUT parameters. */
-+ evtchn_port_t port;
-+};
-+typedef struct evtchn_bind_virq evtchn_bind_virq_t;
-+
-+/*
-+ * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>.
-+ * NOTES:
-+ * 1. A physical IRQ may be bound to at most one event channel per domain.
-+ * 2. Only a sufficiently-privileged domain may bind to a physical IRQ.
-+ */
-+#define EVTCHNOP_bind_pirq 2
-+struct evtchn_bind_pirq {
-+ /* IN parameters. */
-+ uint32_t pirq;
-+#define BIND_PIRQ__WILL_SHARE 1
-+ uint32_t flags; /* BIND_PIRQ__* */
-+ /* OUT parameters. */
-+ evtchn_port_t port;
-+};
-+typedef struct evtchn_bind_pirq evtchn_bind_pirq_t;
-+
-+/*
-+ * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
-+ * NOTES:
-+ * 1. The allocated event channel is bound to the specified vcpu. The binding
-+ * may not be changed.
-+ */
-+#define EVTCHNOP_bind_ipi 7
-+struct evtchn_bind_ipi {
-+ uint32_t vcpu;
-+ /* OUT parameters. */
-+ evtchn_port_t port;
-+};
-+typedef struct evtchn_bind_ipi evtchn_bind_ipi_t;
-+
-+/*
-+ * EVTCHNOP_close: Close a local event channel <port>. If the channel is
-+ * interdomain then the remote end is placed in the unbound state
-+ * (EVTCHNSTAT_unbound), awaiting a new connection.
-+ */
-+#define EVTCHNOP_close 3
-+struct evtchn_close {
-+ /* IN parameters. */
-+ evtchn_port_t port;
-+};
-+typedef struct evtchn_close evtchn_close_t;
-+
-+/*
-+ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
-+ * endpoint is <port>.
-+ */
-+#define EVTCHNOP_send 4
-+struct evtchn_send {
-+ /* IN parameters. */
-+ evtchn_port_t port;
-+};
-+typedef struct evtchn_send evtchn_send_t;
-+
-+/*
-+ * EVTCHNOP_status: Get the current status of the communication channel which
-+ * has an endpoint at <dom, port>.
-+ * NOTES:
-+ * 1. <dom> may be specified as DOMID_SELF.
-+ * 2. Only a sufficiently-privileged domain may obtain the status of an event
-+ * channel for which <dom> is not DOMID_SELF.
-+ */
-+#define EVTCHNOP_status 5
-+struct evtchn_status {
-+ /* IN parameters */
-+ domid_t dom;
-+ evtchn_port_t port;
-+ /* OUT parameters */
-+#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
-+#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/
-+#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
-+#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
-+#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
-+#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
-+ uint32_t status;
-+ uint32_t vcpu; /* VCPU to which this channel is bound. */
-+ union {
-+ struct {
-+ domid_t dom;
-+ } unbound; /* EVTCHNSTAT_unbound */
-+ struct {
-+ domid_t dom;
-+ evtchn_port_t port;
-+ } interdomain; /* EVTCHNSTAT_interdomain */
-+ uint32_t pirq; /* EVTCHNSTAT_pirq */
-+ uint32_t virq; /* EVTCHNSTAT_virq */
-+ } u;
-+};
-+typedef struct evtchn_status evtchn_status_t;
-+
-+/*
-+ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
-+ * event is pending.
-+ * NOTES:
-+ * 1. IPI-bound channels always notify the vcpu specified at bind time.
-+ * This binding cannot be changed.
-+ * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time.
-+ * This binding cannot be changed.
-+ * 3. All other channels notify vcpu0 by default. This default is set when
-+ * the channel is allocated (a port that is freed and subsequently reused
-+ * has its binding reset to vcpu0).
-+ */
-+#define EVTCHNOP_bind_vcpu 8
-+struct evtchn_bind_vcpu {
-+ /* IN parameters. */
-+ evtchn_port_t port;
-+ uint32_t vcpu;
-+};
-+typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t;
-+
-+/*
-+ * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
-+ * a notification to the appropriate VCPU if an event is pending.
-+ */
-+#define EVTCHNOP_unmask 9
-+struct evtchn_unmask {
-+ /* IN parameters. */
-+ evtchn_port_t port;
-+};
-+typedef struct evtchn_unmask evtchn_unmask_t;
-+
-+/*
-+ * EVTCHNOP_reset: Close all event channels associated with specified domain.
-+ * NOTES:
-+ * 1. <dom> may be specified as DOMID_SELF.
-+ * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
-+ */
-+#define EVTCHNOP_reset 10
-+struct evtchn_reset {
-+ /* IN parameters. */
-+ domid_t dom;
-+};
-+typedef struct evtchn_reset evtchn_reset_t;
-+
-+/*
-+ * Argument to event_channel_op_compat() hypercall. Superseded by new
-+ * event_channel_op() hypercall since 0x00030202.
-+ */
-+struct evtchn_op {
-+ uint32_t cmd; /* EVTCHNOP_* */
-+ union {
-+ struct evtchn_alloc_unbound alloc_unbound;
-+ struct evtchn_bind_interdomain bind_interdomain;
-+ struct evtchn_bind_virq bind_virq;
-+ struct evtchn_bind_pirq bind_pirq;
-+ struct evtchn_bind_ipi bind_ipi;
-+ struct evtchn_close close;
-+ struct evtchn_send send;
-+ struct evtchn_status status;
-+ struct evtchn_bind_vcpu bind_vcpu;
-+ struct evtchn_unmask unmask;
-+ } u;
-+};
-+typedef struct evtchn_op evtchn_op_t;
-+DEFINE_XEN_GUEST_HANDLE(evtchn_op_t);
-+
-+#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
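To show how the structures above are normally driven, a hedged sketch that allocates an unbound port with EVTCHNOP_alloc_unbound follows; it assumes the HYPERVISOR_event_channel_op(cmd, arg) wrapper and the DOMID_SELF constant that other headers in this patchset provide.

    /* Hedged sketch: allocate a port in the calling domain that accepts
     * bindings from remote_dom.  HYPERVISOR_event_channel_op() and
     * DOMID_SELF are assumed to come from the patchset's other headers. */
    #include <xen/interface/xen.h>
    #include <xen/interface/event_channel.h>

    static int alloc_unbound_port(domid_t remote_dom, evtchn_port_t *port)
    {
        struct evtchn_alloc_unbound op = {
            .dom        = DOMID_SELF,
            .remote_dom = remote_dom,
        };
        int rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);

        if (rc == 0)
            *port = op.port;
        return rc;
    }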
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/features.h ubuntu-gutsy-xen/include/xen/interface/features.h
---- ubuntu-gutsy/include/xen/interface/features.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/features.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,71 @@
-+/******************************************************************************
-+ * features.h
-+ *
-+ * Feature flags, reported by XENVER_get_features.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2006, Keir Fraser <keir@xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_FEATURES_H__
-+#define __XEN_PUBLIC_FEATURES_H__
-+
-+/*
-+ * If set, the guest does not need to write-protect its pagetables, and can
-+ * update them via direct writes.
-+ */
-+#define XENFEAT_writable_page_tables 0
-+
-+/*
-+ * If set, the guest does not need to write-protect its segment descriptor
-+ * tables, and can update them via direct writes.
-+ */
-+#define XENFEAT_writable_descriptor_tables 1
-+
-+/*
-+ * If set, translation between the guest's 'pseudo-physical' address space
-+ * and the host's machine address space are handled by the hypervisor. In this
-+ * mode the guest does not need to perform phys-to/from-machine translations
-+ * when performing page table operations.
-+ */
-+#define XENFEAT_auto_translated_physmap 2
-+
-+/* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */
-+#define XENFEAT_supervisor_mode_kernel 3
-+
-+/*
-+ * If set, the guest does not need to allocate x86 PAE page directories
-+ * below 4GB. This flag is usually implied by auto_translated_physmap.
-+ */
-+#define XENFEAT_pae_pgdir_above_4gb 4
-+
-+#define XENFEAT_NR_SUBMAPS 1
-+
-+#endif /* __XEN_PUBLIC_FEATURES_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
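A common pattern is to cache these feature bits once at start-of-day; the sketch below assumes the XENVER_get_features sub-op and struct xen_feature_info from the companion version.h header plus a HYPERVISOR_xen_version() wrapper, none of which appear in this hunk.

    /* Hedged sketch: cache XENFEAT_* bits into a byte array.
     * XENVER_get_features, struct xen_feature_info and
     * HYPERVISOR_xen_version() are assumed from other headers. */
    #include <xen/interface/features.h>
    #include <xen/interface/version.h>

    static uint8_t xen_features[XENFEAT_NR_SUBMAPS * 32];

    static void setup_xen_features(void)
    {
        struct xen_feature_info fi;
        int i, j;

        for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) {
            fi.submap_idx = i;
            if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
                break;
            for (j = 0; j < 32; j++)
                xen_features[i * 32 + j] = !!(fi.submap & (1U << j));
        }
    }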
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/grant_table.h ubuntu-gutsy-xen/include/xen/interface/grant_table.h
---- ubuntu-gutsy/include/xen/interface/grant_table.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/grant_table.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,422 @@
-+/******************************************************************************
-+ * grant_table.h
-+ *
-+ * Interface for granting foreign access to page frames, and receiving
-+ * page-ownership transfers.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2004, K A Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_GRANT_TABLE_H__
-+#define __XEN_PUBLIC_GRANT_TABLE_H__
-+
-+
-+/***********************************
-+ * GRANT TABLE REPRESENTATION
-+ */
-+
-+/* Some rough guidelines on accessing and updating grant-table entries
-+ * in a concurrency-safe manner. For more information, Linux contains a
-+ * reference implementation for guest OSes (arch/xen/kernel/grant_table.c).
-+ *
-+ * NB. WMB is a no-op on current-generation x86 processors. However, a
-+ * compiler barrier will still be required.
-+ *
-+ * Introducing a valid entry into the grant table:
-+ * 1. Write ent->domid.
-+ * 2. Write ent->frame:
-+ * GTF_permit_access: Frame to which access is permitted.
-+ * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
-+ * frame, or zero if none.
-+ * 3. Write memory barrier (WMB).
-+ * 4. Write ent->flags, inc. valid type.
-+ *
-+ * Invalidating an unused GTF_permit_access entry:
-+ * 1. flags = ent->flags.
-+ * 2. Observe that !(flags & (GTF_reading|GTF_writing)).
-+ * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
-+ * NB. No need for WMB as reuse of entry is control-dependent on success of
-+ * step 3, and all architectures guarantee ordering of ctrl-dep writes.
-+ *
-+ * Invalidating an in-use GTF_permit_access entry:
-+ * This cannot be done directly. Request assistance from the domain controller
-+ * which can set a timeout on the use of a grant entry and take necessary
-+ * action. (NB. This is not yet implemented!).
-+ *
-+ * Invalidating an unused GTF_accept_transfer entry:
-+ * 1. flags = ent->flags.
-+ * 2. Observe that !(flags & GTF_transfer_committed). [*]
-+ * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
-+ * NB. No need for WMB as reuse of entry is control-dependent on success of
-+ * step 3, and all architectures guarantee ordering of ctrl-dep writes.
-+ * [*] If GTF_transfer_committed is set then the grant entry is 'committed'.
-+ * The guest must /not/ modify the grant entry until the address of the
-+ * transferred frame is written. It is safe for the guest to spin waiting
-+ * for this to occur (detect by observing GTF_transfer_completed in
-+ * ent->flags).
-+ *
-+ * Invalidating a committed GTF_accept_transfer entry:
-+ * 1. Wait for (ent->flags & GTF_transfer_completed).
-+ *
-+ * Changing a GTF_permit_access from writable to read-only:
-+ * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing.
-+ *
-+ * Changing a GTF_permit_access from read-only to writable:
-+ * Use SMP-safe bit-setting instruction.
-+ */
-+
-+/*
-+ * A grant table comprises a packed array of grant entries in one or more
-+ * page frames shared between Xen and a guest.
-+ * [XEN]: This field is written by Xen and read by the sharing guest.
-+ * [GST]: This field is written by the guest and read by Xen.
-+ */
-+struct grant_entry {
-+ /* GTF_xxx: various type and flag information. [XEN,GST] */
-+ uint16_t flags;
-+ /* The domain being granted foreign privileges. [GST] */
-+ domid_t domid;
-+ /*
-+ * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
-+ * GTF_accept_transfer: Frame whose ownership is transferred by @domid. [XEN]
-+ */
-+ uint32_t frame;
-+};
-+typedef struct grant_entry grant_entry_t;
-+
-+/*
-+ * Type of grant entry.
-+ * GTF_invalid: This grant entry grants no privileges.
-+ * GTF_permit_access: Allow @domid to map/access @frame.
-+ * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
-+ * to this guest. Xen writes the page number to @frame.
-+ */
-+#define GTF_invalid (0U<<0)
-+#define GTF_permit_access (1U<<0)
-+#define GTF_accept_transfer (2U<<0)
-+#define GTF_type_mask (3U<<0)
-+
-+/*
-+ * Subflags for GTF_permit_access.
-+ * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
-+ * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
-+ * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
-+ */
-+#define _GTF_readonly (2)
-+#define GTF_readonly (1U<<_GTF_readonly)
-+#define _GTF_reading (3)
-+#define GTF_reading (1U<<_GTF_reading)
-+#define _GTF_writing (4)
-+#define GTF_writing (1U<<_GTF_writing)
-+
-+/*
-+ * Subflags for GTF_accept_transfer:
-+ * GTF_transfer_committed: Xen sets this flag to indicate that it is committed
-+ * to transferring ownership of a page frame. When a guest sees this flag
-+ * it must /not/ modify the grant entry until GTF_transfer_completed is
-+ * set by Xen.
-+ * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag
-+ * after reading GTF_transfer_committed. Xen will always write the frame
-+ * address, followed by ORing this flag, in a timely manner.
-+ */
-+#define _GTF_transfer_committed (2)
-+#define GTF_transfer_committed (1U<<_GTF_transfer_committed)
-+#define _GTF_transfer_completed (3)
-+#define GTF_transfer_completed (1U<<_GTF_transfer_completed)
-+
-+
-+/***********************************
-+ * GRANT TABLE QUERIES AND USES
-+ */
-+
-+/*
-+ * Reference to a grant entry in a specified domain's grant table.
-+ */
-+typedef uint32_t grant_ref_t;
-+
-+/*
-+ * Handle to track a mapping created via a grant reference.
-+ */
-+typedef uint32_t grant_handle_t;
-+
-+/*
-+ * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
-+ * by devices and/or host CPUs. If successful, <handle> is a tracking number
-+ * that must be presented later to destroy the mapping(s). On error, <handle>
-+ * is a negative status code.
-+ * NOTES:
-+ * 1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
-+ * via which I/O devices may access the granted frame.
-+ * 2. If GNTMAP_host_map is specified then a mapping will be added at
-+ * either a host virtual address in the current address space, or at
-+ * a PTE at the specified machine address. The type of mapping to
-+ * perform is selected through the GNTMAP_contains_pte flag, and the
-+ * address is specified in <host_addr>.
-+ * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
-+ * host mapping is destroyed by other means then it is *NOT* guaranteed
-+ * to be accounted to the correct grant reference!
-+ */
-+#define GNTTABOP_map_grant_ref 0
-+struct gnttab_map_grant_ref {
-+ /* IN parameters. */
-+ uint64_t host_addr;
-+ uint32_t flags; /* GNTMAP_* */
-+ grant_ref_t ref;
-+ domid_t dom;
-+ /* OUT parameters. */
-+ int16_t status; /* GNTST_* */
-+ grant_handle_t handle;
-+ uint64_t dev_bus_addr;
-+};
-+typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t;
-+DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t);
-+
-+/*
-+ * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
-+ * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
-+ * field is ignored. If non-zero, they must refer to a device/host mapping
-+ * that is tracked by <handle>.
-+ * NOTES:
-+ * 1. The call may fail in an undefined manner if either mapping is not
-+ * tracked by <handle>.
-+ * 2. After executing a batch of unmaps, it is guaranteed that no stale
-+ * mappings will remain in the device or host TLBs.
-+ */
-+#define GNTTABOP_unmap_grant_ref 1
-+struct gnttab_unmap_grant_ref {
-+ /* IN parameters. */
-+ uint64_t host_addr;
-+ uint64_t dev_bus_addr;
-+ grant_handle_t handle;
-+ /* OUT parameters. */
-+ int16_t status; /* GNTST_* */
-+};
-+typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t;
-+DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t);
-+
-+/*
-+ * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
-+ * <nr_frames> pages. The frame addresses are written to the <frame_list>.
-+ * Only <nr_frames> addresses are written, even if the table is larger.
-+ * NOTES:
-+ * 1. <dom> may be specified as DOMID_SELF.
-+ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
-+ * 3. Xen may not support more than a single grant-table page per domain.
-+ */
-+#define GNTTABOP_setup_table 2
-+struct gnttab_setup_table {
-+ /* IN parameters. */
-+ domid_t dom;
-+ uint32_t nr_frames;
-+ /* OUT parameters. */
-+ int16_t status; /* GNTST_* */
-+ XEN_GUEST_HANDLE(ulong) frame_list;
-+};
-+typedef struct gnttab_setup_table gnttab_setup_table_t;
-+DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
-+
-+/*
-+ * GNTTABOP_dump_table: Dump the contents of the grant table to the
-+ * xen console. Debugging use only.
-+ */
-+#define GNTTABOP_dump_table 3
-+struct gnttab_dump_table {
-+ /* IN parameters. */
-+ domid_t dom;
-+ /* OUT parameters. */
-+ int16_t status; /* GNTST_* */
-+};
-+typedef struct gnttab_dump_table gnttab_dump_table_t;
-+DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);
-+
-+/*
-+ * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
-+ * foreign domain has previously registered its interest in the transfer via
-+ * <domid, ref>.
-+ *
-+ * Note that, even if the transfer fails, the specified page no longer belongs
-+ * to the calling domain *unless* the error is GNTST_bad_page.
-+ */
-+#define GNTTABOP_transfer 4
-+struct gnttab_transfer {
-+ /* IN parameters. */
-+ xen_pfn_t mfn;
-+ domid_t domid;
-+ grant_ref_t ref;
-+ /* OUT parameters. */
-+ int16_t status;
-+};
-+typedef struct gnttab_transfer gnttab_transfer_t;
-+DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t);
-+
-+
-+/*
-+ * GNTTABOP_copy: Hypervisor-based copy.
-+ * Source and destination can be either MFNs or, for foreign domains,
-+ * grant references. The foreign domain has to grant read/write access
-+ * in its grant table.
-+ *
-+ * The flags specify what type source and destinations are (either MFN
-+ * or grant reference).
-+ *
-+ * Note that this can also be used to copy data between two domains
-+ * via a third party if the source and destination domains had previously
-+ * granted appropriate access to their pages to the third party.
-+ *
-+ * source_offset specifies an offset in the source frame, dest_offset
-+ * the offset in the target frame and len specifies the number of
-+ * bytes to be copied.
-+ */
-+
-+#define _GNTCOPY_source_gref (0)
-+#define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref)
-+#define _GNTCOPY_dest_gref (1)
-+#define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref)
-+
-+#define GNTTABOP_copy 5
-+typedef struct gnttab_copy {
-+ /* IN parameters. */
-+ struct {
-+ union {
-+ grant_ref_t ref;
-+ xen_pfn_t gmfn;
-+ } u;
-+ domid_t domid;
-+ uint16_t offset;
-+ } source, dest;
-+ uint16_t len;
-+ uint16_t flags; /* GNTCOPY_* */
-+ /* OUT parameters. */
-+ int16_t status;
-+} gnttab_copy_t;
-+DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t);
-+
-+/*
-+ * GNTTABOP_query_size: Query the current and maximum sizes of the shared
-+ * grant table.
-+ * NOTES:
-+ * 1. <dom> may be specified as DOMID_SELF.
-+ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
-+ */
-+#define GNTTABOP_query_size 6
-+struct gnttab_query_size {
-+ /* IN parameters. */
-+ domid_t dom;
-+ /* OUT parameters. */
-+ uint32_t nr_frames;
-+ uint32_t max_nr_frames;
-+ int16_t status; /* GNTST_* */
-+};
-+typedef struct gnttab_query_size gnttab_query_size_t;
-+DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
-+
-+/*
-+ * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
-+ * tracked by <handle> but atomically replace the page table entry with one
-+ * pointing to the machine address under <new_addr>. <new_addr> will be
-+ * redirected to the null entry.
-+ * NOTES:
-+ * 1. The call may fail in an undefined manner if either mapping is not
-+ * tracked by <handle>.
-+ * 2. After executing a batch of unmaps, it is guaranteed that no stale
-+ * mappings will remain in the device or host TLBs.
-+ */
-+#define GNTTABOP_unmap_and_replace 7
-+struct gnttab_unmap_and_replace {
-+ /* IN parameters. */
-+ uint64_t host_addr;
-+ uint64_t new_addr;
-+ grant_handle_t handle;
-+ /* OUT parameters. */
-+ int16_t status; /* GNTST_* */
-+};
-+typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t;
-+DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
-+
-+
-+/*
-+ * Bitfield values for update_pin_status.flags.
-+ */
-+ /* Map the grant entry for access by I/O devices. */
-+#define _GNTMAP_device_map (0)
-+#define GNTMAP_device_map (1<<_GNTMAP_device_map)
-+ /* Map the grant entry for access by host CPUs. */
-+#define _GNTMAP_host_map (1)
-+#define GNTMAP_host_map (1<<_GNTMAP_host_map)
-+ /* Accesses to the granted frame will be restricted to read-only access. */
-+#define _GNTMAP_readonly (2)
-+#define GNTMAP_readonly (1<<_GNTMAP_readonly)
-+ /*
-+ * GNTMAP_host_map subflag:
-+ * 0 => The host mapping is usable only by the guest OS.
-+ * 1 => The host mapping is usable by guest OS + current application.
-+ */
-+#define _GNTMAP_application_map (3)
-+#define GNTMAP_application_map (1<<_GNTMAP_application_map)
-+
-+ /*
-+ * GNTMAP_contains_pte subflag:
-+ * 0 => This map request contains a host virtual address.
-+ * 1 => This map request contains the machine address of the PTE to update.
-+ */
-+#define _GNTMAP_contains_pte (4)
-+#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
-+
-+/*
-+ * Values for error status returns. All errors are -ve.
-+ */
-+#define GNTST_okay (0) /* Normal return. */
-+#define GNTST_general_error (-1) /* General undefined error. */
-+#define GNTST_bad_domain (-2) /* Unrecognised domain id. */
-+#define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */
-+#define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */
-+#define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */
-+#define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/
-+#define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */
-+#define GNTST_permission_denied (-8) /* Not enough privilege for operation. */
-+#define GNTST_bad_page (-9) /* Specified page was invalid for op. */
-+#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary */
-+
-+#define GNTTABOP_error_msgs { \
-+ "okay", \
-+ "undefined error", \
-+ "unrecognised domain id", \
-+ "invalid grant reference", \
-+ "invalid mapping handle", \
-+ "invalid virtual address", \
-+ "invalid device address", \
-+ "no spare translation slot in the I/O MMU", \
-+ "permission denied", \
-+ "bad page", \
-+ "copy arguments cross page boundary" \
-+}
-+
-+#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
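The "Introducing a valid entry" ordering described at the top of this header maps directly onto a small helper; this is a minimal sketch, assuming a guest-side wmb() write-barrier macro.

    /* Hedged sketch: publish a GTF_permit_access entry following the
     * documented order: domid, frame, write barrier, then flags.
     * wmb() is assumed to be the guest kernel's write barrier. */
    #include <xen/interface/grant_table.h>

    static void grant_foreign_access(grant_entry_t *ent, domid_t domid,
                                     unsigned long frame, int readonly)
    {
        ent->domid = domid;
        ent->frame = frame;
        wmb();          /* order the fields before the type/flags word */
        ent->flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
    }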
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/hvm/e820.h ubuntu-gutsy-xen/include/xen/interface/hvm/e820.h
---- ubuntu-gutsy/include/xen/interface/hvm/e820.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/hvm/e820.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,47 @@
-+
-+/*
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#ifndef __XEN_PUBLIC_HVM_E820_H__
-+#define __XEN_PUBLIC_HVM_E820_H__
-+
-+/* PC BIOS standard E820 types. */
-+#define E820_RAM 1
-+#define E820_RESERVED 2
-+#define E820_ACPI 3
-+#define E820_NVS 4
-+
-+/* E820 location in HVM virtual address space. */
-+#define E820_MAP_PAGE 0x00090000
-+#define E820_MAP_NR_OFFSET 0x000001E8
-+#define E820_MAP_OFFSET 0x000002D0
-+
-+struct e820entry {
-+ uint64_t addr;
-+ uint64_t size;
-+ uint32_t type;
-+} __attribute__((packed));
-+
-+#define HVM_BELOW_4G_RAM_END 0xF0000000
-+
-+#define HVM_BELOW_4G_MMIO_START HVM_BELOW_4G_RAM_END
-+#define HVM_BELOW_4G_MMIO_LENGTH ((1ULL << 32) - HVM_BELOW_4G_MMIO_START)
-+
-+#endif /* __XEN_PUBLIC_HVM_E820_H__ */
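For illustration, a sketch that walks an E820 map laid out according to the constants above and totals the RAM regions; treating the entry count at E820_MAP_NR_OFFSET as a single byte, and taking 'page' as an already-established mapping of the E820_MAP_PAGE frame, are both assumptions.

    /* Hedged sketch: sum E820_RAM bytes.  'page' is assumed to map the
     * guest frame at E820_MAP_PAGE; the entry count is assumed to be a
     * single byte at E820_MAP_NR_OFFSET. */
    #include <stdint.h>

    static uint64_t e820_ram_bytes(const uint8_t *page)
    {
        const struct e820entry *map =
            (const struct e820entry *)(page + E820_MAP_OFFSET);
        unsigned int i, nr = page[E820_MAP_NR_OFFSET];
        uint64_t total = 0;

        for (i = 0; i < nr; i++)
            if (map[i].type == E820_RAM)
                total += map[i].size;
        return total;
    }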
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/hvm/hvm_info_table.h ubuntu-gutsy-xen/include/xen/interface/hvm/hvm_info_table.h
---- ubuntu-gutsy/include/xen/interface/hvm/hvm_info_table.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/hvm/hvm_info_table.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,41 @@
-+/******************************************************************************
-+ * hvm/hvm_info_table.h
-+ *
-+ * HVM parameter and information table, written into guest memory map.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
-+#define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
-+
-+#define HVM_INFO_PFN 0x09F
-+#define HVM_INFO_OFFSET 0x800
-+#define HVM_INFO_PADDR ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET)
-+
-+struct hvm_info_table {
-+ char signature[8]; /* "HVM INFO" */
-+ uint32_t length;
-+ uint8_t checksum;
-+ uint8_t acpi_enabled;
-+ uint8_t apic_mode;
-+ uint32_t nr_vcpus;
-+};
-+
-+#endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */
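A consumer would typically sanity-check this table before trusting it; the sketch below assumes the ACPI-style convention that the bytes covered by 'length' (checksum included) sum to zero, which the header itself does not spell out.

    /* Hedged sketch: validate an hvm_info_table.  The zero-sum checksum
     * convention is an assumption borrowed from ACPI-style tables. */
    #include <stdint.h>
    #include <string.h>

    static int hvm_info_table_ok(const struct hvm_info_table *t)
    {
        const uint8_t *p = (const uint8_t *)t;
        uint8_t sum = 0;
        uint32_t i;

        if (memcmp(t->signature, "HVM INFO", 8) != 0)
            return 0;
        for (i = 0; i < t->length; i++)
            sum += p[i];
        return sum == 0;
    }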
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/hvm/hvm_op.h ubuntu-gutsy-xen/include/xen/interface/hvm/hvm_op.h
---- ubuntu-gutsy/include/xen/interface/hvm/hvm_op.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/hvm/hvm_op.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,73 @@
-+/*
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
-+#define __XEN_PUBLIC_HVM_HVM_OP_H__
-+
-+/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
-+#define HVMOP_set_param 0
-+#define HVMOP_get_param 1
-+struct xen_hvm_param {
-+ domid_t domid; /* IN */
-+ uint32_t index; /* IN */
-+ uint64_t value; /* IN/OUT */
-+};
-+typedef struct xen_hvm_param xen_hvm_param_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
-+
-+/* Set the logical level of one of a domain's PCI INTx wires. */
-+#define HVMOP_set_pci_intx_level 2
-+struct xen_hvm_set_pci_intx_level {
-+ /* Domain to be updated. */
-+ domid_t domid;
-+ /* PCI INTx identification in PCI topology (domain:bus:device:intx). */
-+ uint8_t domain, bus, device, intx;
-+ /* Assertion level (0 = unasserted, 1 = asserted). */
-+ uint8_t level;
-+};
-+typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t);
-+
-+/* Set the logical level of one of a domain's ISA IRQ wires. */
-+#define HVMOP_set_isa_irq_level 3
-+struct xen_hvm_set_isa_irq_level {
-+ /* Domain to be updated. */
-+ domid_t domid;
-+ /* ISA device identification, by ISA IRQ (0-15). */
-+ uint8_t isa_irq;
-+ /* Assertion level (0 = unasserted, 1 = asserted). */
-+ uint8_t level;
-+};
-+typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t);
-+
-+#define HVMOP_set_pci_link_route 4
-+struct xen_hvm_set_pci_link_route {
-+ /* Domain to be updated. */
-+ domid_t domid;
-+ /* PCI link identifier (0-3). */
-+ uint8_t link;
-+ /* ISA IRQ (1-15), or 0 (disable link). */
-+ uint8_t isa_irq;
-+};
-+typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
-+
-+#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
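Reading a parameter back is the simplest use of this interface; the sketch assumes the HYPERVISOR_hvm_op(op, arg) wrapper and the DOMID_SELF constant defined elsewhere in the patchset.

    /* Hedged sketch: fetch one HVM parameter for the calling domain.
     * HYPERVISOR_hvm_op() and DOMID_SELF are assumed from other headers. */
    #include <xen/interface/xen.h>
    #include <xen/interface/hvm/hvm_op.h>

    static int hvm_get_param(uint32_t index, uint64_t *value)
    {
        struct xen_hvm_param p = {
            .domid = DOMID_SELF,
            .index = index,
        };
        int rc = HYPERVISOR_hvm_op(HVMOP_get_param, &p);

        if (rc == 0)
            *value = p.value;
        return rc;
    }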
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/hvm/ioreq.h ubuntu-gutsy-xen/include/xen/interface/hvm/ioreq.h
---- ubuntu-gutsy/include/xen/interface/hvm/ioreq.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/hvm/ioreq.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,122 @@
-+/*
-+ * ioreq.h: I/O request definitions for device models
-+ * Copyright (c) 2004, Intel Corporation.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#ifndef _IOREQ_H_
-+#define _IOREQ_H_
-+
-+#define IOREQ_READ 1
-+#define IOREQ_WRITE 0
-+
-+#define STATE_IOREQ_NONE 0
-+#define STATE_IOREQ_READY 1
-+#define STATE_IOREQ_INPROCESS 2
-+#define STATE_IORESP_READY 3
-+
-+#define IOREQ_TYPE_PIO 0 /* pio */
-+#define IOREQ_TYPE_COPY 1 /* mmio ops */
-+#define IOREQ_TYPE_AND 2
-+#define IOREQ_TYPE_OR 3
-+#define IOREQ_TYPE_XOR 4
-+#define IOREQ_TYPE_XCHG 5
-+#define IOREQ_TYPE_ADD 6
-+#define IOREQ_TYPE_TIMEOFFSET 7
-+#define IOREQ_TYPE_INVALIDATE 8 /* mapcache */
-+#define IOREQ_TYPE_SUB 9
-+
-+/*
-+ * VMExit dispatcher should cooperate with instruction decoder to
-+ * prepare this structure and notify service OS and DM by sending
-+ * virq
-+ */
-+struct ioreq {
-+ uint64_t addr; /* physical address */
-+ uint64_t size; /* size in bytes */
-+ uint64_t count; /* for rep prefixes */
-+ uint64_t data; /* data (or paddr of data) */
-+ uint8_t state:4;
-+ uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr
-+ * of the real data to use. */
-+ uint8_t dir:1; /* 1=read, 0=write */
-+ uint8_t df:1;
-+ uint8_t type; /* I/O type */
-+ uint8_t _pad0[6];
-+ uint64_t io_count; /* How many IO done on a vcpu */
-+};
-+typedef struct ioreq ioreq_t;
-+
-+struct vcpu_iodata {
-+ struct ioreq vp_ioreq;
-+ /* Event channel port, used for notifications to/from the device model. */
-+ uint32_t vp_eport;
-+ uint32_t _pad0;
-+};
-+typedef struct vcpu_iodata vcpu_iodata_t;
-+
-+struct shared_iopage {
-+ struct vcpu_iodata vcpu_iodata[1];
-+};
-+typedef struct shared_iopage shared_iopage_t;
-+
-+#define IOREQ_BUFFER_SLOT_NUM 80
-+struct buffered_iopage {
-+ unsigned int read_pointer;
-+ unsigned int write_pointer;
-+ ioreq_t ioreq[IOREQ_BUFFER_SLOT_NUM];
-+}; /* NB. Size of this structure must be no greater than one page. */
-+typedef struct buffered_iopage buffered_iopage_t;
-+
-+#if defined(__ia64__)
-+struct pio_buffer {
-+ uint32_t page_offset;
-+ uint32_t pointer;
-+ uint32_t data_end;
-+ uint32_t buf_size;
-+ void *opaque;
-+};
-+
-+#define PIO_BUFFER_IDE_PRIMARY 0 /* I/O port = 0x1F0 */
-+#define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */
-+#define PIO_BUFFER_ENTRY_NUM 2
-+struct buffered_piopage {
-+ struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM];
-+ uint8_t buffer[1];
-+};
-+#endif /* defined(__ia64__) */
-+
-+#if defined(__i386__) || defined(__x86_64__)
-+#define ACPI_PM1A_EVT_BLK_ADDRESS 0x0000000000001f40
-+#define ACPI_PM1A_CNT_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04)
-+#define ACPI_PM_TMR_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08)
-+#endif /* defined(__i386__) || defined(__x86_64__) */
-+
-+#endif /* _IOREQ_H_ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
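The state constants above imply a simple handshake on the device-model side: wait for STATE_IOREQ_READY, emulate, publish the response, then notify the guest. The sketch below uses hypothetical emulate_one() and notify_via_evtchn() helpers, and omits the memory barriers a real implementation needs around the state transitions.

    /* Hedged sketch of the device-model half of the ioreq handshake.
     * emulate_one() and notify_via_evtchn() are hypothetical helpers;
     * real code also needs barriers around the state updates. */
    #include <xen/interface/hvm/ioreq.h>

    extern void emulate_one(ioreq_t *req);          /* hypothetical */
    extern void notify_via_evtchn(uint32_t port);   /* hypothetical */

    static void service_vcpu_ioreq(struct vcpu_iodata *vio)
    {
        ioreq_t *req = &vio->vp_ioreq;

        if (req->state != STATE_IOREQ_READY)
            return;                         /* nothing pending */

        req->state = STATE_IOREQ_INPROCESS;
        emulate_one(req);                   /* fills req->data for reads */
        req->state = STATE_IORESP_READY;
        notify_via_evtchn(vio->vp_eport);   /* wake the waiting vcpu */
    }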
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/hvm/params.h ubuntu-gutsy-xen/include/xen/interface/hvm/params.h
---- ubuntu-gutsy/include/xen/interface/hvm/params.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/hvm/params.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,55 @@
-+/*
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
-+#define __XEN_PUBLIC_HVM_PARAMS_H__
-+
-+#include "hvm_op.h"
-+
-+/*
-+ * Parameter space for HVMOP_{set,get}_param.
-+ */
-+
-+/*
-+ * How should CPU0 event-channel notifications be delivered?
-+ * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt).
-+ * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
-+ * Domain = val[47:32], Bus = val[31:16],
-+ * DevFn = val[15: 8], IntX = val[ 1: 0]
-+ * If val == 0 then CPU0 event-channel notifications are not delivered.
-+ */
-+#define HVM_PARAM_CALLBACK_IRQ 0
-+
-+/*
-+ * These are not used by Xen. They are here for convenience of HVM-guest
-+ * xenbus implementations.
-+ */
-+#define HVM_PARAM_STORE_PFN 1
-+#define HVM_PARAM_STORE_EVTCHN 2
-+
-+#define HVM_PARAM_PAE_ENABLED 4
-+
-+#define HVM_PARAM_IOREQ_PFN 5
-+
-+#define HVM_PARAM_BUFIOREQ_PFN 6
-+
-+#define HVM_NR_PARAMS 7
-+
-+#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
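The HVM_PARAM_CALLBACK_IRQ bit layout documented above can be packed by a small helper; the function name is illustrative only.

    /* Hedged sketch: build the "deliver via PCI INTx" encoding of
     * HVM_PARAM_CALLBACK_IRQ exactly as documented above. */
    #include <stdint.h>

    static uint64_t callback_via_pci_intx(uint16_t domain, uint16_t bus,
                                          uint8_t devfn, uint8_t intx)
    {
        return ((uint64_t)1      << 56) |   /* val[63:56] == 1: PCI INTx */
               ((uint64_t)domain << 32) |   /* val[47:32] */
               ((uint64_t)bus    << 16) |   /* val[31:16] */
               ((uint64_t)devfn  <<  8) |   /* val[15:8]  */
               (uint64_t)(intx & 3);        /* val[1:0]   */
    }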
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/hvm/save.h ubuntu-gutsy-xen/include/xen/interface/hvm/save.h
---- ubuntu-gutsy/include/xen/interface/hvm/save.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/hvm/save.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,462 @@
-+/*
-+ * hvm/save.h
-+ *
-+ * Structure definitions for HVM state that is held by Xen and must
-+ * be saved along with the domain's memory and device-model state.
-+ *
-+ *
-+ * Copyright (c) 2007 XenSource Ltd.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#ifndef __XEN_PUBLIC_HVM_SAVE_H__
-+#define __XEN_PUBLIC_HVM_SAVE_H__
-+
-+/*
-+ * Structures in this header *must* have the same layout in 32bit
-+ * and 64bit environments: this means that all fields must be explicitly
-+ * sized types and aligned to their sizes, and the structs must be
-+ * a multiple of eight bytes long.
-+ *
-+ * Only the state necessary for saving and restoring (i.e. fields
-+ * that are analogous to actual hardware state) should go in this file.
-+ * Internal mechanisms should be kept in Xen-private headers.
-+ */
-+
-+/*
-+ * Each entry is preceded by a descriptor giving its type and length
-+ */
-+struct hvm_save_descriptor {
-+ uint16_t typecode; /* Used to demux the various types below */
-+ uint16_t instance; /* Further demux within a type */
-+ uint32_t length; /* In bytes, *not* including this descriptor */
-+};
-+
-+
-+/*
-+ * Each entry has a datatype associated with it: for example, the CPU state
-+ * is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU),
-+ * and is identified by a descriptor with typecode HVM_SAVE_CODE(CPU).
-+ * DECLARE_HVM_SAVE_TYPE binds these things together with some type-system
-+ * ugliness.
-+ */
-+
-+#define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \
-+ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; }
-+
-+#define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t)
-+#define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x)))
-+#define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c))
-+
-+
-+/*
-+ * Save/restore header: general info about the save file.
-+ */
-+
-+#define HVM_FILE_MAGIC 0x54381286
-+#define HVM_FILE_VERSION 0x00000001
-+
-+struct hvm_save_header {
-+ uint32_t magic; /* Must be HVM_FILE_MAGIC */
-+ uint32_t version; /* File format version */
-+ uint64_t changeset; /* Version of Xen that saved this file */
-+ uint32_t cpuid; /* CPUID[0x01][%eax] on the saving machine */
-+ uint32_t pad0;
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
-+
-+
-+/*
-+ * Processor
-+ */
-+
-+struct hvm_hw_cpu {
-+ uint8_t fpu_regs[512];
-+
-+ uint64_t rax;
-+ uint64_t rbx;
-+ uint64_t rcx;
-+ uint64_t rdx;
-+ uint64_t rbp;
-+ uint64_t rsi;
-+ uint64_t rdi;
-+ uint64_t rsp;
-+ uint64_t r8;
-+ uint64_t r9;
-+ uint64_t r10;
-+ uint64_t r11;
-+ uint64_t r12;
-+ uint64_t r13;
-+ uint64_t r14;
-+ uint64_t r15;
-+
-+ uint64_t rip;
-+ uint64_t rflags;
-+
-+ uint64_t cr0;
-+ uint64_t cr2;
-+ uint64_t cr3;
-+ uint64_t cr4;
-+
-+ uint64_t dr0;
-+ uint64_t dr1;
-+ uint64_t dr2;
-+ uint64_t dr3;
-+ uint64_t dr6;
-+ uint64_t dr7;
-+
-+ uint32_t cs_sel;
-+ uint32_t ds_sel;
-+ uint32_t es_sel;
-+ uint32_t fs_sel;
-+ uint32_t gs_sel;
-+ uint32_t ss_sel;
-+ uint32_t tr_sel;
-+ uint32_t ldtr_sel;
-+
-+ uint32_t cs_limit;
-+ uint32_t ds_limit;
-+ uint32_t es_limit;
-+ uint32_t fs_limit;
-+ uint32_t gs_limit;
-+ uint32_t ss_limit;
-+ uint32_t tr_limit;
-+ uint32_t ldtr_limit;
-+ uint32_t idtr_limit;
-+ uint32_t gdtr_limit;
-+
-+ uint64_t cs_base;
-+ uint64_t ds_base;
-+ uint64_t es_base;
-+ uint64_t fs_base;
-+ uint64_t gs_base;
-+ uint64_t ss_base;
-+ uint64_t tr_base;
-+ uint64_t ldtr_base;
-+ uint64_t idtr_base;
-+ uint64_t gdtr_base;
-+
-+ uint32_t cs_arbytes;
-+ uint32_t ds_arbytes;
-+ uint32_t es_arbytes;
-+ uint32_t fs_arbytes;
-+ uint32_t gs_arbytes;
-+ uint32_t ss_arbytes;
-+ uint32_t tr_arbytes;
-+ uint32_t ldtr_arbytes;
-+
-+ uint32_t sysenter_cs;
-+ uint32_t padding0;
-+
-+ uint64_t sysenter_esp;
-+ uint64_t sysenter_eip;
-+
-+ /* msr for em64t */
-+ uint64_t shadow_gs;
-+
-+ /* msr content saved/restored. */
-+ uint64_t msr_flags;
-+ uint64_t msr_lstar;
-+ uint64_t msr_star;
-+ uint64_t msr_cstar;
-+ uint64_t msr_syscall_mask;
-+ uint64_t msr_efer;
-+
-+ /* guest's idea of what rdtsc() would return */
-+ uint64_t tsc;
-+
-+ /* pending event, if any */
-+ union {
-+ uint32_t pending_event;
-+ struct {
-+ uint8_t pending_vector:8;
-+ uint8_t pending_type:3;
-+ uint8_t pending_error_valid:1;
-+ uint32_t pending_reserved:19;
-+ uint8_t pending_valid:1;
-+ };
-+ };
-+ /* error code for pending event */
-+ uint32_t error_code;
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_cpu);
-+
-+
-+/*
-+ * PIC
-+ */
-+
-+struct hvm_hw_vpic {
-+ /* IR line bitmasks. */
-+ uint8_t irr;
-+ uint8_t imr;
-+ uint8_t isr;
-+
-+ /* Line IRx maps to IRQ irq_base+x */
-+ uint8_t irq_base;
-+
-+ /*
-+ * Where are we in ICW2-4 initialisation (0 means no init in progress)?
-+ * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1).
-+ * Bit 2: ICW1.IC4 (1 == ICW4 included in init sequence)
-+ * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence)
-+ */
-+ uint8_t init_state:4;
-+
-+ /* IR line with highest priority. */
-+ uint8_t priority_add:4;
-+
-+ /* Reads from A=0 obtain ISR or IRR? */
-+ uint8_t readsel_isr:1;
-+
-+ /* Reads perform a polling read? */
-+ uint8_t poll:1;
-+
-+ /* Automatically clear IRQs from the ISR during INTA? */
-+ uint8_t auto_eoi:1;
-+
-+ /* Automatically rotate IRQ priorities during AEOI? */
-+ uint8_t rotate_on_auto_eoi:1;
-+
-+ /* Exclude slave inputs when considering in-service IRQs? */
-+ uint8_t special_fully_nested_mode:1;
-+
-+ /* Special mask mode excludes masked IRs from AEOI and priority checks. */
-+ uint8_t special_mask_mode:1;
-+
-+ /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */
-+ uint8_t is_master:1;
-+
-+ /* Edge/trigger selection. */
-+ uint8_t elcr;
-+
-+ /* Virtual INT output. */
-+ uint8_t int_output;
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic);
-+
-+
-+/*
-+ * IO-APIC
-+ */
-+
-+#ifdef __ia64__
-+#define VIOAPIC_IS_IOSAPIC 1
-+#define VIOAPIC_NUM_PINS 24
-+#else
-+#define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */
-+#endif
-+
-+struct hvm_hw_vioapic {
-+ uint64_t base_address;
-+ uint32_t ioregsel;
-+ uint32_t id;
-+ union vioapic_redir_entry
-+ {
-+ uint64_t bits;
-+ struct {
-+ uint8_t vector;
-+ uint8_t delivery_mode:3;
-+ uint8_t dest_mode:1;
-+ uint8_t delivery_status:1;
-+ uint8_t polarity:1;
-+ uint8_t remote_irr:1;
-+ uint8_t trig_mode:1;
-+ uint8_t mask:1;
-+ uint8_t reserve:7;
-+#if !VIOAPIC_IS_IOSAPIC
-+ uint8_t reserved[4];
-+ uint8_t dest_id;
-+#else
-+ uint8_t reserved[3];
-+ uint16_t dest_id;
-+#endif
-+ } fields;
-+ } redirtbl[VIOAPIC_NUM_PINS];
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic);
-+
-+
-+/*
-+ * LAPIC
-+ */
-+
-+struct hvm_hw_lapic {
-+ uint64_t apic_base_msr;
-+ uint32_t disabled; /* VLAPIC_xx_DISABLED */
-+ uint32_t timer_divisor;
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic);
-+
-+struct hvm_hw_lapic_regs {
-+ /* A 4k page of register state */
-+ uint8_t data[0x400];
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs);
-+
-+
-+/*
-+ * IRQs
-+ */
-+
-+struct hvm_hw_pci_irqs {
-+ /*
-+ * Virtual interrupt wires for a single PCI bus.
-+ * Indexed by: device*4 + INTx#.
-+ */
-+ union {
-+ DECLARE_BITMAP(i, 32*4);
-+ uint64_t pad[2];
-+ };
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs);
-+
-+struct hvm_hw_isa_irqs {
-+ /*
-+ * Virtual interrupt wires for ISA devices.
-+ * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing).
-+ */
-+ union {
-+ DECLARE_BITMAP(i, 16);
-+ uint64_t pad[1];
-+ };
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs);
-+
-+struct hvm_hw_pci_link {
-+ /*
-+ * PCI-ISA interrupt router.
-+ * Each PCI <device:INTx#> is 'wire-ORed' into one of four links using
-+ * the traditional 'barber's pole' mapping ((device + INTx#) & 3).
-+ * The router provides a programmable mapping from each link to a GSI.
-+ */
-+ uint8_t route[4];
-+ uint8_t pad0[4];
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link);
-+
-+/*
-+ * PIT
-+ */
-+
-+struct hvm_hw_pit {
-+ struct hvm_hw_pit_channel {
-+ uint32_t count; /* can be 65536 */
-+ uint16_t latched_count;
-+ uint8_t count_latched;
-+ uint8_t status_latched;
-+ uint8_t status;
-+ uint8_t read_state;
-+ uint8_t write_state;
-+ uint8_t write_latch;
-+ uint8_t rw_mode;
-+ uint8_t mode;
-+ uint8_t bcd; /* not supported */
-+ uint8_t gate; /* timer start */
-+ } channels[3]; /* 3 x 16 bytes */
-+ uint32_t speaker_data_on;
-+ uint32_t pad0;
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit);
-+
-+
-+/*
-+ * RTC
-+ */
-+
-+#define RTC_CMOS_SIZE 14
-+struct hvm_hw_rtc {
-+ /* CMOS bytes */
-+ uint8_t cmos_data[RTC_CMOS_SIZE];
-+ /* Index register for 2-part operations */
-+ uint8_t cmos_index;
-+ uint8_t pad0;
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc);
-+
-+
-+/*
-+ * HPET
-+ */
-+
-+#define HPET_TIMER_NUM 3 /* 3 timers supported now */
-+struct hvm_hw_hpet {
-+ /* Memory-mapped, software visible registers */
-+ uint64_t capability; /* capabilities */
-+ uint64_t res0; /* reserved */
-+ uint64_t config; /* configuration */
-+ uint64_t res1; /* reserved */
-+ uint64_t isr; /* interrupt status reg */
-+ uint64_t res2[25]; /* reserved */
-+ uint64_t mc64; /* main counter */
-+ uint64_t res3; /* reserved */
-+ struct { /* timers */
-+ uint64_t config; /* configuration/cap */
-+ uint64_t cmp; /* comparator */
-+ uint64_t fsb; /* FSB route, not supported now */
-+ uint64_t res4; /* reserved */
-+ } timers[HPET_TIMER_NUM];
-+ uint64_t res5[4*(24-HPET_TIMER_NUM)]; /* reserved, up to 0x3ff */
-+
-+ /* Hidden register state */
-+ uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet);
-+
-+
-+/*
-+ * PM timer
-+ */
-+
-+struct hvm_hw_pmtimer {
-+ uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */
-+ uint16_t pm1a_sts; /* PM1a_EVT_BLK.PM1a_STS: status register */
-+ uint16_t pm1a_en; /* PM1a_EVT_BLK.PM1a_EN: enable register */
-+};
-+
-+DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer);
-+
-+/*
-+ * Largest type-code in use
-+ */
-+#define HVM_SAVE_CODE_MAX 13
-+
-+
-+/*
-+ * The series of save records is terminated by a zero-type, zero-length
-+ * descriptor.
-+ */
-+
-+struct hvm_save_end {};
-+DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end);
-+
-+#endif /* __XEN_PUBLIC_HVM_SAVE_H__ */
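As a rough illustration of the PCI-ISA router wiring that the hvm_hw_pci_link comment above describes, the link a given device/INTx pair lands on can be computed as below. This is an illustrative sketch only; the helper name is hypothetical and not part of the saved-state format.

    /* Which of the four router links a PCI device/INTx pair is wire-ORed
     * into, per the "barber's pole" mapping (device + INTx#) & 3 described
     * in hvm_hw_pci_link. The GSI it finally raises is then route[link]. */
    static inline unsigned int pci_intx_to_link(unsigned int device,
                                                unsigned int intx /* 0..3 */)
    {
        return (device + intx) & 3;
    }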
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/hvm/vmx_assist.h ubuntu-gutsy-xen/include/xen/interface/hvm/vmx_assist.h
---- ubuntu-gutsy/include/xen/interface/hvm/vmx_assist.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/hvm/vmx_assist.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,116 @@
-+/*
-+ * vmx_assist.h: Context definitions for the VMXASSIST world switch.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Leendert van Doorn, leendert@watson.ibm.com
-+ * Copyright (c) 2005, International Business Machines Corporation.
-+ */
-+
-+#ifndef _VMX_ASSIST_H_
-+#define _VMX_ASSIST_H_
-+
-+#define VMXASSIST_BASE 0xD0000
-+#define VMXASSIST_MAGIC 0x17101966
-+#define VMXASSIST_MAGIC_OFFSET (VMXASSIST_BASE+8)
-+
-+#define VMXASSIST_NEW_CONTEXT (VMXASSIST_BASE + 12)
-+#define VMXASSIST_OLD_CONTEXT (VMXASSIST_NEW_CONTEXT + 4)
-+
-+#ifndef __ASSEMBLY__
-+
-+union vmcs_arbytes {
-+ struct arbyte_fields {
-+ unsigned int seg_type : 4,
-+ s : 1,
-+ dpl : 2,
-+ p : 1,
-+ reserved0 : 4,
-+ avl : 1,
-+ reserved1 : 1,
-+ default_ops_size: 1,
-+ g : 1,
-+ null_bit : 1,
-+ reserved2 : 15;
-+ } fields;
-+ unsigned int bytes;
-+};
-+
-+/*
-+ * World switch state
-+ */
-+struct vmx_assist_context {
-+ uint32_t eip; /* execution pointer */
-+ uint32_t esp; /* stack pointer */
-+ uint32_t eflags; /* flags register */
-+ uint32_t cr0;
-+ uint32_t cr3; /* page table directory */
-+ uint32_t cr4;
-+ uint32_t idtr_limit; /* idt */
-+ uint32_t idtr_base;
-+ uint32_t gdtr_limit; /* gdt */
-+ uint32_t gdtr_base;
-+ uint32_t cs_sel; /* cs selector */
-+ uint32_t cs_limit;
-+ uint32_t cs_base;
-+ union vmcs_arbytes cs_arbytes;
-+ uint32_t ds_sel; /* ds selector */
-+ uint32_t ds_limit;
-+ uint32_t ds_base;
-+ union vmcs_arbytes ds_arbytes;
-+ uint32_t es_sel; /* es selector */
-+ uint32_t es_limit;
-+ uint32_t es_base;
-+ union vmcs_arbytes es_arbytes;
-+ uint32_t ss_sel; /* ss selector */
-+ uint32_t ss_limit;
-+ uint32_t ss_base;
-+ union vmcs_arbytes ss_arbytes;
-+ uint32_t fs_sel; /* fs selector */
-+ uint32_t fs_limit;
-+ uint32_t fs_base;
-+ union vmcs_arbytes fs_arbytes;
-+ uint32_t gs_sel; /* gs selector */
-+ uint32_t gs_limit;
-+ uint32_t gs_base;
-+ union vmcs_arbytes gs_arbytes;
-+ uint32_t tr_sel; /* task selector */
-+ uint32_t tr_limit;
-+ uint32_t tr_base;
-+ union vmcs_arbytes tr_arbytes;
-+ uint32_t ldtr_sel; /* ldtr selector */
-+ uint32_t ldtr_limit;
-+ uint32_t ldtr_base;
-+ union vmcs_arbytes ldtr_arbytes;
-+};
-+typedef struct vmx_assist_context vmx_assist_context_t;
-+
-+#endif /* __ASSEMBLY__ */
-+
-+#endif /* _VMX_ASSIST_H_ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/io/blkif.h ubuntu-gutsy-xen/include/xen/interface/io/blkif.h
---- ubuntu-gutsy/include/xen/interface/io/blkif.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/io/blkif.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,128 @@
-+/******************************************************************************
-+ * blkif.h
-+ *
-+ * Unified block-device I/O interface for Xen guest OSes.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_BLKIF_H__
-+#define __XEN_PUBLIC_IO_BLKIF_H__
-+
-+#include "ring.h"
-+#include "../grant_table.h"
-+
-+/*
-+ * Front->back notifications: When enqueuing a new request, sending a
-+ * notification can be made conditional on req_event (i.e., the generic
-+ * hold-off mechanism provided by the ring macros). Backends must set
-+ * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
-+ *
-+ * Back->front notifications: When enqueuing a new response, sending a
-+ * notification can be made conditional on rsp_event (i.e., the generic
-+ * hold-off mechanism provided by the ring macros). Frontends must set
-+ * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
-+ */
-+
-+#ifndef blkif_vdev_t
-+#define blkif_vdev_t uint16_t
-+#endif
-+#define blkif_sector_t uint64_t
-+
-+/*
-+ * REQUEST CODES.
-+ */
-+#define BLKIF_OP_READ 0
-+#define BLKIF_OP_WRITE 1
-+/*
-+ * Recognised only if "feature-barrier" is present in backend xenbus info.
-+ * The "feature_barrier" node contains a boolean indicating whether barrier
-+ * requests are likely to succeed or fail. Either way, a barrier request
-+ * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
-+ * the underlying block-device hardware. The boolean simply indicates whether
-+ * or not it is worthwhile for the frontend to attempt barrier requests.
-+ * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not*
-+ * create the "feature-barrier" node!
-+ */
-+#define BLKIF_OP_WRITE_BARRIER 2
-+
-+/*
-+ * Maximum scatter/gather segments per request.
-+ * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
-+ * NB. This could be 12 if the ring indexes weren't stored in the same page.
-+ */
-+#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
-+
-+struct blkif_request_segment {
-+ grant_ref_t gref; /* reference to I/O buffer frame */
-+ /* @first_sect: first sector in frame to transfer (inclusive). */
-+ /* @last_sect: last sector in frame to transfer (inclusive). */
-+ uint8_t first_sect, last_sect;
-+};
-+
-+struct blkif_request {
-+ uint8_t operation; /* BLKIF_OP_??? */
-+ uint8_t nr_segments; /* number of segments */
-+ blkif_vdev_t handle; /* only for read/write requests */
-+ uint64_t id; /* private guest value, echoed in resp */
-+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
-+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+};
-+typedef struct blkif_request blkif_request_t;
-+
-+struct blkif_response {
-+ uint64_t id; /* copied from request */
-+ uint8_t operation; /* copied from request */
-+ int16_t status; /* BLKIF_RSP_??? */
-+};
-+typedef struct blkif_response blkif_response_t;
-+
-+/*
-+ * STATUS RETURN CODES.
-+ */
-+ /* Operation not supported (only happens on barrier writes). */
-+#define BLKIF_RSP_EOPNOTSUPP -2
-+ /* Operation failed for some unspecified reason (-EIO). */
-+#define BLKIF_RSP_ERROR -1
-+ /* Operation completed successfully. */
-+#define BLKIF_RSP_OKAY 0
-+
-+/*
-+ * Generate blkif ring structures and types.
-+ */
-+
-+DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
-+
-+#define VDISK_CDROM 0x1
-+#define VDISK_REMOVABLE 0x2
-+#define VDISK_READONLY 0x4
-+
-+#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
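To make the request layout above concrete, a frontend would fill a struct blkif_request roughly as follows before pushing it onto the shared ring. This is a sketch only: it assumes blkif.h is included, the grant reference and ring slot come from the frontend's own bookkeeping, and the helper name is hypothetical.

    #include <string.h>

    /* Fill in a single-segment read covering one 4 KiB page.
     * 'req' would normally come from RING_GET_REQUEST() on the front ring. */
    static void blkif_fill_read(struct blkif_request *req, grant_ref_t gref,
                                uint64_t id, blkif_sector_t sector)
    {
        memset(req, 0, sizeof(*req));
        req->operation         = BLKIF_OP_READ;
        req->handle            = 0;        /* virtual device handle */
        req->id                = id;       /* echoed back in blkif_response.id */
        req->sector_number     = sector;   /* start sector on the virtual disk */
        req->nr_segments       = 1;
        req->seg[0].gref       = gref;     /* grant reference of the buffer frame */
        req->seg[0].first_sect = 0;
        req->seg[0].last_sect  = 7;        /* 8 x 512-byte sectors = one 4 KiB page */
    }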
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/io/console.h ubuntu-gutsy-xen/include/xen/interface/io/console.h
---- ubuntu-gutsy/include/xen/interface/io/console.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/io/console.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,51 @@
-+/******************************************************************************
-+ * console.h
-+ *
-+ * Console I/O interface for Xen guest OSes.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2005, Keir Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_CONSOLE_H__
-+#define __XEN_PUBLIC_IO_CONSOLE_H__
-+
-+typedef uint32_t XENCONS_RING_IDX;
-+
-+#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))
-+
-+struct xencons_interface {
-+ char in[1024];
-+ char out[2048];
-+ XENCONS_RING_IDX in_cons, in_prod;
-+ XENCONS_RING_IDX out_cons, out_prod;
-+};
-+
-+#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
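A minimal sketch of how a guest pushes one character into the out ring defined above, assuming 'intf' maps the shared console page; the write barrier and event-channel notification that real code needs are only noted in comments, and the helper name is hypothetical.

    static void xencons_put_char(struct xencons_interface *intf, char c)
    {
        XENCONS_RING_IDX prod = intf->out_prod;

        intf->out[MASK_XENCONS_IDX(prod, intf->out)] = c;
        /* wmb() here in real code, so the backend sees the data
         * before it sees the updated producer index */
        intf->out_prod = prod + 1;
        /* then notify the backend via the console event channel */
    }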
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/io/fbif.h ubuntu-gutsy-xen/include/xen/interface/io/fbif.h
---- ubuntu-gutsy/include/xen/interface/io/fbif.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/io/fbif.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,138 @@
-+/*
-+ * fbif.h -- Xen virtual frame buffer device
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
-+ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_FBIF_H__
-+#define __XEN_PUBLIC_IO_FBIF_H__
-+
-+/* Out events (frontend -> backend) */
-+
-+/*
-+ * Out events may be sent only when requested by backend, and receipt
-+ * of an unknown out event is an error.
-+ */
-+
-+/* Event type 1 currently not used */
-+/*
-+ * Framebuffer update notification event
-+ * Capable frontend sets feature-update in xenstore.
-+ * Backend requests it by setting request-update in xenstore.
-+ */
-+#define XENFB_TYPE_UPDATE 2
-+
-+struct xenfb_update
-+{
-+ uint8_t type; /* XENFB_TYPE_UPDATE */
-+ int32_t x; /* source x */
-+ int32_t y; /* source y */
-+ int32_t width; /* rect width */
-+ int32_t height; /* rect height */
-+};
-+
-+#define XENFB_OUT_EVENT_SIZE 40
-+
-+union xenfb_out_event
-+{
-+ uint8_t type;
-+ struct xenfb_update update;
-+ char pad[XENFB_OUT_EVENT_SIZE];
-+};
-+
-+/* In events (backend -> frontend) */
-+
-+/*
-+ * Frontends should ignore unknown in events.
-+ * No in events currently defined.
-+ */
-+
-+#define XENFB_IN_EVENT_SIZE 40
-+
-+union xenfb_in_event
-+{
-+ uint8_t type;
-+ char pad[XENFB_IN_EVENT_SIZE];
-+};
-+
-+/* shared page */
-+
-+#define XENFB_IN_RING_SIZE 1024
-+#define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE)
-+#define XENFB_IN_RING_OFFS 1024
-+#define XENFB_IN_RING(page) \
-+ ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
-+#define XENFB_IN_RING_REF(page, idx) \
-+ (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
-+
-+#define XENFB_OUT_RING_SIZE 2048
-+#define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE)
-+#define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE)
-+#define XENFB_OUT_RING(page) \
-+ ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
-+#define XENFB_OUT_RING_REF(page, idx) \
-+ (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
-+
-+struct xenfb_page
-+{
-+ uint32_t in_cons, in_prod;
-+ uint32_t out_cons, out_prod;
-+
-+ int32_t width; /* the width of the framebuffer (in pixels) */
-+ int32_t height; /* the height of the framebuffer (in pixels) */
-+ uint32_t line_length; /* the length of a row of pixels (in bytes) */
-+ uint32_t mem_length; /* the length of the framebuffer (in bytes) */
-+ uint8_t depth; /* the depth of a pixel (in bits) */
-+
-+ /*
-+ * Framebuffer page directory
-+ *
-+ * Each directory page holds PAGE_SIZE / sizeof(*pd)
-+ * framebuffer pages, and can thus map up to PAGE_SIZE *
-+ * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and
-+ * sizeof(unsigned long) == 4, that's 4 Megs. Two directory
-+ * pages should be enough for a while.
-+ */
-+ unsigned long pd[2];
-+};
-+
-+/*
-+ * Wart: xenkbd needs to know resolution. Put it here until a better
-+ * solution is found, but don't leak it to the backend.
-+ */
-+#ifdef __KERNEL__
-+#define XENFB_WIDTH 800
-+#define XENFB_HEIGHT 600
-+#define XENFB_DEPTH 32
-+#endif
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
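As an illustrative sketch of the out-event ring above, queueing a XENFB_TYPE_UPDATE event could look like the following. It assumes the backend has requested updates (request-update in xenstore), that 'page' is the mapped xenfb_page, and it omits the full-ring check, barriers and notification; the helper name is hypothetical.

    static void xenfb_send_update(struct xenfb_page *page,
                                  int32_t x, int32_t y, int32_t w, int32_t h)
    {
        uint32_t prod = page->out_prod;
        union xenfb_out_event *ev = &XENFB_OUT_RING_REF(page, prod);

        ev->update.type   = XENFB_TYPE_UPDATE;
        ev->update.x      = x;
        ev->update.y      = y;
        ev->update.width  = w;
        ev->update.height = h;
        /* wmb() in real code before publishing the new producer index */
        page->out_prod = prod + 1;
    }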
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/io/kbdif.h ubuntu-gutsy-xen/include/xen/interface/io/kbdif.h
---- ubuntu-gutsy/include/xen/interface/io/kbdif.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/io/kbdif.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,130 @@
-+/*
-+ * kbdif.h -- Xen virtual keyboard/mouse
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
-+ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_KBDIF_H__
-+#define __XEN_PUBLIC_IO_KBDIF_H__
-+
-+/* In events (backend -> frontend) */
-+
-+/*
-+ * Frontends should ignore unknown in events.
-+ */
-+
-+/* Pointer movement event */
-+#define XENKBD_TYPE_MOTION 1
-+/* Event type 2 currently not used */
-+/* Key event (includes pointer buttons) */
-+#define XENKBD_TYPE_KEY 3
-+/*
-+ * Pointer position event
-+ * Capable backend sets feature-abs-pointer in xenstore.
-+ * Frontend requests it instead of XENKBD_TYPE_MOTION by setting
-+ * request-abs-update in xenstore.
-+ */
-+#define XENKBD_TYPE_POS 4
-+
-+struct xenkbd_motion
-+{
-+ uint8_t type; /* XENKBD_TYPE_MOTION */
-+ int32_t rel_x; /* relative X motion */
-+ int32_t rel_y; /* relative Y motion */
-+};
-+
-+struct xenkbd_key
-+{
-+ uint8_t type; /* XENKBD_TYPE_KEY */
-+ uint8_t pressed; /* 1 if pressed; 0 otherwise */
-+ uint32_t keycode; /* KEY_* from linux/input.h */
-+};
-+
-+struct xenkbd_position
-+{
-+ uint8_t type; /* XENKBD_TYPE_POS */
-+ int32_t abs_x; /* absolute X position (in FB pixels) */
-+ int32_t abs_y; /* absolute Y position (in FB pixels) */
-+};
-+
-+#define XENKBD_IN_EVENT_SIZE 40
-+
-+union xenkbd_in_event
-+{
-+ uint8_t type;
-+ struct xenkbd_motion motion;
-+ struct xenkbd_key key;
-+ struct xenkbd_position pos;
-+ char pad[XENKBD_IN_EVENT_SIZE];
-+};
-+
-+/* Out events (frontend -> backend) */
-+
-+/*
-+ * Out events may be sent only when requested by backend, and receipt
-+ * of an unknown out event is an error.
-+ * No out events currently defined.
-+ */
-+
-+#define XENKBD_OUT_EVENT_SIZE 40
-+
-+union xenkbd_out_event
-+{
-+ uint8_t type;
-+ char pad[XENKBD_OUT_EVENT_SIZE];
-+};
-+
-+/* shared page */
-+
-+#define XENKBD_IN_RING_SIZE 2048
-+#define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
-+#define XENKBD_IN_RING_OFFS 1024
-+#define XENKBD_IN_RING(page) \
-+ ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
-+#define XENKBD_IN_RING_REF(page, idx) \
-+ (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
-+
-+#define XENKBD_OUT_RING_SIZE 1024
-+#define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE)
-+#define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE)
-+#define XENKBD_OUT_RING(page) \
-+ ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
-+#define XENKBD_OUT_RING_REF(page, idx) \
-+ (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
-+
-+struct xenkbd_page
-+{
-+ uint32_t in_cons, in_prod;
-+ uint32_t out_cons, out_prod;
-+};
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
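A hedged sketch of how a frontend drains pending in events from the shared keyboard page defined above; 'handle_event' is a hypothetical callback and the read/write barriers of real code are only noted in comments.

    static void xenkbd_poll(struct xenkbd_page *page,
                            void (*handle_event)(union xenkbd_in_event *))
    {
        uint32_t cons = page->in_cons;
        uint32_t prod = page->in_prod;
        /* rmb() in real code: read events only after the producer index */

        while (cons != prod) {
            union xenkbd_in_event *ev = &XENKBD_IN_RING_REF(page, cons);
            handle_event(ev);        /* dispatch on ev->type */
            cons++;
        }

        page->in_cons = cons;
        /* mb() and an event-channel notification would follow in real code */
    }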
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/io/netif.h ubuntu-gutsy-xen/include/xen/interface/io/netif.h
---- ubuntu-gutsy/include/xen/interface/io/netif.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/io/netif.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,184 @@
-+/******************************************************************************
-+ * netif.h
-+ *
-+ * Unified network-device I/O interface for Xen guest OSes.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_NETIF_H__
-+#define __XEN_PUBLIC_IO_NETIF_H__
-+
-+#include "ring.h"
-+#include "../grant_table.h"
-+
-+/*
-+ * Notifications after enqueuing any type of message should be conditional on
-+ * the appropriate req_event or rsp_event field in the shared ring.
-+ * If the client sends notification for rx requests then it should specify
-+ * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume
-+ * that it cannot safely queue packets (as it may not be kicked to send them).
-+ */
-+
-+/*
-+ * This is the 'wire' format for packets:
-+ * Request 1: netif_tx_request -- NETTXF_* (any flags)
-+ * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info)
-+ * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE)
-+ * Request 4: netif_tx_request -- NETTXF_more_data
-+ * Request 5: netif_tx_request -- NETTXF_more_data
-+ * ...
-+ * Request N: netif_tx_request -- 0
-+ */
-+
-+/* Protocol checksum field is blank in the packet (hardware offload)? */
-+#define _NETTXF_csum_blank (0)
-+#define NETTXF_csum_blank (1U<<_NETTXF_csum_blank)
-+
-+/* Packet data has been validated against protocol checksum. */
-+#define _NETTXF_data_validated (1)
-+#define NETTXF_data_validated (1U<<_NETTXF_data_validated)
-+
-+/* Packet continues in the next request descriptor. */
-+#define _NETTXF_more_data (2)
-+#define NETTXF_more_data (1U<<_NETTXF_more_data)
-+
-+/* Packet to be followed by extra descriptor(s). */
-+#define _NETTXF_extra_info (3)
-+#define NETTXF_extra_info (1U<<_NETTXF_extra_info)
-+
-+struct netif_tx_request {
-+ grant_ref_t gref; /* Reference to buffer page */
-+ uint16_t offset; /* Offset within buffer page */
-+ uint16_t flags; /* NETTXF_* */
-+ uint16_t id; /* Echoed in response message. */
-+ uint16_t size; /* Packet size in bytes. */
-+};
-+typedef struct netif_tx_request netif_tx_request_t;
-+
-+/* Types of netif_extra_info descriptors. */
-+#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
-+#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
-+#define XEN_NETIF_EXTRA_TYPE_MAX (2)
-+
-+/* netif_extra_info flags. */
-+#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
-+#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
-+
-+/* GSO types - only TCPv4 currently supported. */
-+#define XEN_NETIF_GSO_TYPE_TCPV4 (1)
-+
-+/*
-+ * This structure needs to fit within both netif_tx_request and
-+ * netif_rx_response for compatibility.
-+ */
-+struct netif_extra_info {
-+ uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */
-+ uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
-+
-+ union {
-+ struct {
-+ /*
-+ * Maximum payload size of each segment. For example, for TCP this
-+ * is just the path MSS.
-+ */
-+ uint16_t size;
-+
-+ /*
-+ * GSO type. This determines the protocol of the packet and any
-+ * extra features required to segment the packet properly.
-+ */
-+ uint8_t type; /* XEN_NETIF_GSO_TYPE_* */
-+
-+ /* Future expansion. */
-+ uint8_t pad;
-+
-+ /*
-+ * GSO features. This specifies any extra GSO features required
-+ * to process this packet, such as ECN support for TCPv4.
-+ */
-+ uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
-+ } gso;
-+
-+ uint16_t pad[3];
-+ } u;
-+};
-+
-+struct netif_tx_response {
-+ uint16_t id;
-+ int16_t status; /* NETIF_RSP_* */
-+};
-+typedef struct netif_tx_response netif_tx_response_t;
-+
-+struct netif_rx_request {
-+ uint16_t id; /* Echoed in response message. */
-+ grant_ref_t gref; /* Reference to incoming granted frame */
-+};
-+typedef struct netif_rx_request netif_rx_request_t;
-+
-+/* Packet data has been validated against protocol checksum. */
-+#define _NETRXF_data_validated (0)
-+#define NETRXF_data_validated (1U<<_NETRXF_data_validated)
-+
-+/* Protocol checksum field is blank in the packet (hardware offload)? */
-+#define _NETRXF_csum_blank (1)
-+#define NETRXF_csum_blank (1U<<_NETRXF_csum_blank)
-+
-+/* Packet continues in the next request descriptor. */
-+#define _NETRXF_more_data (2)
-+#define NETRXF_more_data (1U<<_NETRXF_more_data)
-+
-+/* Packet to be followed by extra descriptor(s). */
-+#define _NETRXF_extra_info (3)
-+#define NETRXF_extra_info (1U<<_NETRXF_extra_info)
-+
-+struct netif_rx_response {
-+ uint16_t id;
-+ uint16_t offset; /* Offset in page of start of received packet */
-+ uint16_t flags; /* NETRXF_* */
-+ int16_t status; /* -ve: NETIF_RSP_* ; +ve: Rx'ed pkt size. */
-+};
-+typedef struct netif_rx_response netif_rx_response_t;
-+
-+/*
-+ * Generate netif ring structures and types.
-+ */
-+
-+DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response);
-+DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response);
-+
-+#define NETIF_RSP_DROPPED -2
-+#define NETIF_RSP_ERROR -1
-+#define NETIF_RSP_OKAY 0
-+/* No response: used for auxiliary requests (e.g., netif_tx_extra). */
-+#define NETIF_RSP_NULL 1
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
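To illustrate the transmit wire format above, a frontend fills a single-slot netif_tx_request for a packet that fits in one granted page roughly as below. This is a sketch only: multi-slot packets (NETTXF_more_data) and GSO extra-info descriptors are omitted, and the helper name is hypothetical.

    static void netif_fill_tx(struct netif_tx_request *req, grant_ref_t gref,
                              uint16_t offset, uint16_t id, uint16_t len)
    {
        req->gref   = gref;                /* granted frame holding the packet */
        req->offset = offset;              /* start of packet within that frame */
        req->flags  = NETTXF_csum_blank |  /* checksum left to backend/NIC */
                      NETTXF_data_validated;
        req->id     = id;                  /* echoed in netif_tx_response.id */
        req->size   = len;                 /* total packet size in bytes */
    }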
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/io/pciif.h ubuntu-gutsy-xen/include/xen/interface/io/pciif.h
---- ubuntu-gutsy/include/xen/interface/io/pciif.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/io/pciif.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,83 @@
-+/*
-+ * PCI Backend/Frontend Common Data Structures & Macros
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#ifndef __XEN_PCI_COMMON_H__
-+#define __XEN_PCI_COMMON_H__
-+
-+/* Be sure to bump this number if you change this file */
-+#define XEN_PCI_MAGIC "7"
-+
-+/* xen_pci_sharedinfo flags */
-+#define _XEN_PCIF_active (0)
-+#define XEN_PCIF_active (1<<_XEN_PCIF_active)
-+
-+/* xen_pci_op commands */
-+#define XEN_PCI_OP_conf_read (0)
-+#define XEN_PCI_OP_conf_write (1)
-+
-+/* xen_pci_op error numbers */
-+#define XEN_PCI_ERR_success (0)
-+#define XEN_PCI_ERR_dev_not_found (-1)
-+#define XEN_PCI_ERR_invalid_offset (-2)
-+#define XEN_PCI_ERR_access_denied (-3)
-+#define XEN_PCI_ERR_not_implemented (-4)
-+/* XEN_PCI_ERR_op_failed - backend failed to complete the operation */
-+#define XEN_PCI_ERR_op_failed (-5)
-+
-+struct xen_pci_op {
-+ /* IN: what action to perform: XEN_PCI_OP_* */
-+ uint32_t cmd;
-+
-+ /* OUT: will contain an error number (if any) from errno.h */
-+ int32_t err;
-+
-+ /* IN: which device to touch */
-+ uint32_t domain; /* PCI Domain/Segment */
-+ uint32_t bus;
-+ uint32_t devfn;
-+
-+ /* IN: which configuration registers to touch */
-+ int32_t offset;
-+ int32_t size;
-+
-+ /* IN/OUT: Contains the result after a READ or the value to WRITE */
-+ uint32_t value;
-+};
-+
-+struct xen_pci_sharedinfo {
-+ /* flags - XEN_PCIF_* */
-+ uint32_t flags;
-+ struct xen_pci_op op;
-+};
-+
-+#endif /* __XEN_PCI_COMMON_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
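As a sketch of the xen_pci_op structure above, preparing a configuration-space read request could look like this. In the real frontend the structure lives in the shared xen_pci_sharedinfo page and completion is signalled through the 'flags' word plus an event channel; both are elided here and the helper name is hypothetical.

    static void pcifront_prepare_conf_read(struct xen_pci_op *op,
                                           uint32_t domain, uint32_t bus,
                                           uint32_t devfn, int32_t offset,
                                           int32_t size)
    {
        op->cmd    = XEN_PCI_OP_conf_read;
        op->domain = domain;   /* PCI domain/segment */
        op->bus    = bus;
        op->devfn  = devfn;
        op->offset = offset;   /* config-space register offset */
        op->size   = size;     /* 1, 2 or 4 bytes */
        op->value  = 0;        /* backend fills this in on success */
        op->err    = XEN_PCI_ERR_success;
    }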
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/io/protocols.h ubuntu-gutsy-xen/include/xen/interface/io/protocols.h
---- ubuntu-gutsy/include/xen/interface/io/protocols.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/io/protocols.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,21 @@
-+#ifndef __XEN_PROTOCOLS_H__
-+#define __XEN_PROTOCOLS_H__
-+
-+#define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
-+#define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
-+#define XEN_IO_PROTO_ABI_IA64 "ia64-abi"
-+#define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi"
-+
-+#if defined(__i386__)
-+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
-+#elif defined(__x86_64__)
-+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
-+#elif defined(__ia64__)
-+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
-+#elif defined(__powerpc64__)
-+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64
-+#else
-+# error arch fixup needed here
-+#endif
-+
-+#endif
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/io/ring.h ubuntu-gutsy-xen/include/xen/interface/io/ring.h
---- ubuntu-gutsy/include/xen/interface/io/ring.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/io/ring.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,299 @@
-+/******************************************************************************
-+ * ring.h
-+ *
-+ * Shared producer-consumer ring macros.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Tim Deegan and Andrew Warfield November 2004.
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_RING_H__
-+#define __XEN_PUBLIC_IO_RING_H__
-+
-+typedef unsigned int RING_IDX;
-+
-+/* Round a 32-bit unsigned constant down to the nearest power of two. */
-+#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
-+#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
-+#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
-+#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
-+#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
-+
-+/*
-+ * Calculate size of a shared ring, given the total available space for the
-+ * ring and indexes (_sz), and the name tag of the request/response structure.
-+ * A ring contains as many entries as will fit, rounded down to the nearest
-+ * power of two (so we can mask with (size-1) to loop around).
-+ */
-+#define __RING_SIZE(_s, _sz) \
-+ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
-+
-+/*
-+ * Macros to make the correct C datatypes for a new kind of ring.
-+ *
-+ * To make a new ring datatype, you need to have two message structures,
-+ * let's say request_t, and response_t already defined.
-+ *
-+ * In a header where you want the ring datatype declared, you then do:
-+ *
-+ * DEFINE_RING_TYPES(mytag, request_t, response_t);
-+ *
-+ * These expand out to give you a set of types, as you can see below.
-+ * The most important of these are:
-+ *
-+ * mytag_sring_t - The shared ring.
-+ * mytag_front_ring_t - The 'front' half of the ring.
-+ * mytag_back_ring_t - The 'back' half of the ring.
-+ *
-+ * To initialize a ring in your code you need to know the location and size
-+ * of the shared memory area (PAGE_SIZE, for instance). To initialise
-+ * the front half:
-+ *
-+ * mytag_front_ring_t front_ring;
-+ * SHARED_RING_INIT((mytag_sring_t *)shared_page);
-+ * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
-+ *
-+ * Initializing the back follows similarly (note that only the front
-+ * initializes the shared ring):
-+ *
-+ * mytag_back_ring_t back_ring;
-+ * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
-+ */
-+
-+#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
-+ \
-+/* Shared ring entry */ \
-+union __name##_sring_entry { \
-+ __req_t req; \
-+ __rsp_t rsp; \
-+}; \
-+ \
-+/* Shared ring page */ \
-+struct __name##_sring { \
-+ RING_IDX req_prod, req_event; \
-+ RING_IDX rsp_prod, rsp_event; \
-+ uint8_t pad[48]; \
-+ union __name##_sring_entry ring[1]; /* variable-length */ \
-+}; \
-+ \
-+/* "Front" end's private variables */ \
-+struct __name##_front_ring { \
-+ RING_IDX req_prod_pvt; \
-+ RING_IDX rsp_cons; \
-+ unsigned int nr_ents; \
-+ struct __name##_sring *sring; \
-+}; \
-+ \
-+/* "Back" end's private variables */ \
-+struct __name##_back_ring { \
-+ RING_IDX rsp_prod_pvt; \
-+ RING_IDX req_cons; \
-+ unsigned int nr_ents; \
-+ struct __name##_sring *sring; \
-+}; \
-+ \
-+/* Syntactic sugar */ \
-+typedef struct __name##_sring __name##_sring_t; \
-+typedef struct __name##_front_ring __name##_front_ring_t; \
-+typedef struct __name##_back_ring __name##_back_ring_t
-+
-+/*
-+ * Macros for manipulating rings.
-+ *
-+ * FRONT_RING_whatever works on the "front end" of a ring: here
-+ * requests are pushed on to the ring and responses taken off it.
-+ *
-+ * BACK_RING_whatever works on the "back end" of a ring: here
-+ * requests are taken off the ring and responses put on.
-+ *
-+ * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
-+ * This is OK in 1-for-1 request-response situations where the
-+ * requestor (front end) never has more than RING_SIZE()-1
-+ * outstanding requests.
-+ */
-+
-+/* Initialising empty rings */
-+#define SHARED_RING_INIT(_s) do { \
-+ (_s)->req_prod = (_s)->rsp_prod = 0; \
-+ (_s)->req_event = (_s)->rsp_event = 1; \
-+ memset((_s)->pad, 0, sizeof((_s)->pad)); \
-+} while(0)
-+
-+#define FRONT_RING_INIT(_r, _s, __size) do { \
-+ (_r)->req_prod_pvt = 0; \
-+ (_r)->rsp_cons = 0; \
-+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
-+ (_r)->sring = (_s); \
-+} while (0)
-+
-+#define BACK_RING_INIT(_r, _s, __size) do { \
-+ (_r)->rsp_prod_pvt = 0; \
-+ (_r)->req_cons = 0; \
-+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
-+ (_r)->sring = (_s); \
-+} while (0)
-+
-+/* Initialize to existing shared indexes -- for recovery */
-+#define FRONT_RING_ATTACH(_r, _s, __size) do { \
-+ (_r)->sring = (_s); \
-+ (_r)->req_prod_pvt = (_s)->req_prod; \
-+ (_r)->rsp_cons = (_s)->rsp_prod; \
-+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
-+} while (0)
-+
-+#define BACK_RING_ATTACH(_r, _s, __size) do { \
-+ (_r)->sring = (_s); \
-+ (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
-+ (_r)->req_cons = (_s)->req_prod; \
-+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
-+} while (0)
-+
-+/* How big is this ring? */
-+#define RING_SIZE(_r) \
-+ ((_r)->nr_ents)
-+
-+/* Number of free requests (for use on front side only). */
-+#define RING_FREE_REQUESTS(_r) \
-+ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
-+
-+/* Test if there is an empty slot available on the front ring.
-+ * (This is only meaningful from the front.)
-+ */
-+#define RING_FULL(_r) \
-+ (RING_FREE_REQUESTS(_r) == 0)
-+
-+/* Test if there are outstanding messages to be processed on a ring. */
-+#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
-+ ((_r)->sring->rsp_prod - (_r)->rsp_cons)
-+
-+#ifdef __GNUC__
-+#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
-+ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
-+ unsigned int rsp = RING_SIZE(_r) - \
-+ ((_r)->req_cons - (_r)->rsp_prod_pvt); \
-+ req < rsp ? req : rsp; \
-+})
-+#else
-+/* Same as above, but without the nice GCC ({ ... }) syntax. */
-+#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
-+ ((((_r)->sring->req_prod - (_r)->req_cons) < \
-+ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \
-+ ((_r)->sring->req_prod - (_r)->req_cons) : \
-+ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
-+#endif
-+
-+/* Direct access to individual ring elements, by index. */
-+#define RING_GET_REQUEST(_r, _idx) \
-+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
-+
-+#define RING_GET_RESPONSE(_r, _idx) \
-+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
-+
-+/* Loop termination condition: Would the specified index overflow the ring? */
-+#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
-+ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
-+
-+#define RING_PUSH_REQUESTS(_r) do { \
-+ wmb(); /* back sees requests /before/ updated producer index */ \
-+ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
-+} while (0)
-+
-+#define RING_PUSH_RESPONSES(_r) do { \
-+ wmb(); /* front sees responses /before/ updated producer index */ \
-+ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
-+} while (0)
-+
-+/*
-+ * Notification hold-off (req_event and rsp_event):
-+ *
-+ * When queueing requests or responses on a shared ring, it may not always be
-+ * necessary to notify the remote end. For example, if requests are in flight
-+ * in a backend, the front may be able to queue further requests without
-+ * notifying the back (if the back checks for new requests when it queues
-+ * responses).
-+ *
-+ * When enqueuing requests or responses:
-+ *
-+ * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
-+ * is a boolean return value. True indicates that the receiver requires an
-+ * asynchronous notification.
-+ *
-+ * After dequeuing requests or responses (before sleeping the connection):
-+ *
-+ * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
-+ * The second argument is a boolean return value. True indicates that there
-+ * are pending messages on the ring (i.e., the connection should not be put
-+ * to sleep).
-+ *
-+ * These macros will set the req_event/rsp_event field to trigger a
-+ * notification on the very next message that is enqueued. If you want to
-+ * create batches of work (i.e., only receive a notification after several
-+ * messages have been enqueued) then you will need to create a customised
-+ * version of the FINAL_CHECK macro in your own code, which sets the event
-+ * field appropriately.
-+ */
-+
-+#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
-+ RING_IDX __old = (_r)->sring->req_prod; \
-+ RING_IDX __new = (_r)->req_prod_pvt; \
-+ wmb(); /* back sees requests /before/ updated producer index */ \
-+ (_r)->sring->req_prod = __new; \
-+ mb(); /* back sees new requests /before/ we check req_event */ \
-+ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
-+ (RING_IDX)(__new - __old)); \
-+} while (0)
-+
-+#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
-+ RING_IDX __old = (_r)->sring->rsp_prod; \
-+ RING_IDX __new = (_r)->rsp_prod_pvt; \
-+ wmb(); /* front sees responses /before/ updated producer index */ \
-+ (_r)->sring->rsp_prod = __new; \
-+ mb(); /* front sees new responses /before/ we check rsp_event */ \
-+ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
-+ (RING_IDX)(__new - __old)); \
-+} while (0)
-+
-+#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
-+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
-+ if (_work_to_do) break; \
-+ (_r)->sring->req_event = (_r)->req_cons + 1; \
-+ mb(); \
-+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
-+} while (0)
-+
-+#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
-+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
-+ if (_work_to_do) break; \
-+ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
-+ mb(); \
-+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
-+} while (0)
-+
-+#endif /* __XEN_PUBLIC_IO_RING_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
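Building on the initialisation example already given in the comments above, producing a request and using the notification hold-off macros looks roughly like this. It assumes DEFINE_RING_TYPES(mytag, struct mytag_request, struct mytag_response) has been instantiated elsewhere; notify_backend() stands in for an event-channel kick, and all the names here are hypothetical.

    static void mytag_submit(struct mytag_front_ring *ring,
                             const struct mytag_request *src,
                             void (*notify_backend)(void))
    {
        int notify;
        struct mytag_request *req =
            RING_GET_REQUEST(ring, ring->req_prod_pvt);

        *req = *src;                 /* copy the request into its shared slot */
        ring->req_prod_pvt++;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
        if (notify)
            notify_backend();        /* only kick when the backend asked for it */
    }

A real frontend would also check RING_FULL() before claiming a slot, since these macros do no flow control of their own.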
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/io/tpmif.h ubuntu-gutsy-xen/include/xen/interface/io/tpmif.h
---- ubuntu-gutsy/include/xen/interface/io/tpmif.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/io/tpmif.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,77 @@
-+/******************************************************************************
-+ * tpmif.h
-+ *
-+ * TPM I/O interface for Xen guest OSes.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2005, IBM Corporation
-+ *
-+ * Author: Stefan Berger, stefanb@us.ibm.com
-+ * Grant table support: Mahadevan Gomathisankaran
-+ *
-+ * This code has been derived from tools/libxc/xen/io/netif.h
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_TPMIF_H__
-+#define __XEN_PUBLIC_IO_TPMIF_H__
-+
-+#include "../grant_table.h"
-+
-+struct tpmif_tx_request {
-+ unsigned long addr; /* Machine address of packet. */
-+ grant_ref_t ref; /* grant table access reference */
-+ uint16_t unused;
-+ uint16_t size; /* Packet size in bytes. */
-+};
-+typedef struct tpmif_tx_request tpmif_tx_request_t;
-+
-+/*
-+ * The TPMIF_TX_RING_SIZE defines the number of pages the
-+ * front-end and backend can exchange (= size of array).
-+ */
-+typedef uint32_t TPMIF_RING_IDX;
-+
-+#define TPMIF_TX_RING_SIZE 1
-+
-+/* This structure must fit in a memory page. */
-+
-+struct tpmif_ring {
-+ struct tpmif_tx_request req;
-+};
-+typedef struct tpmif_ring tpmif_ring_t;
-+
-+struct tpmif_tx_interface {
-+ struct tpmif_ring ring[TPMIF_TX_RING_SIZE];
-+};
-+typedef struct tpmif_tx_interface tpmif_tx_interface_t;
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/io/xenbus.h ubuntu-gutsy-xen/include/xen/interface/io/xenbus.h
---- ubuntu-gutsy/include/xen/interface/io/xenbus.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/io/xenbus.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,73 @@
-+/*****************************************************************************
-+ * xenbus.h
-+ *
-+ * Xenbus protocol details.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (C) 2005 XenSource Ltd.
-+ */
-+
-+#ifndef _XEN_PUBLIC_IO_XENBUS_H
-+#define _XEN_PUBLIC_IO_XENBUS_H
-+
-+/*
-+ * The state of either end of the Xenbus, i.e. the current communication
-+ * status of initialisation across the bus. States here imply nothing about
-+ * the state of the connection between the driver and the kernel's device
-+ * layers.
-+ */
-+enum xenbus_state {
-+ XenbusStateUnknown = 0,
-+
-+ XenbusStateInitialising = 1,
-+
-+ /*
-+ * InitWait: Finished early initialisation but waiting for information
-+ * from the peer or hotplug scripts.
-+ */
-+ XenbusStateInitWait = 2,
-+
-+ /*
-+ * Initialised: Waiting for a connection from the peer.
-+ */
-+ XenbusStateInitialised = 3,
-+
-+ XenbusStateConnected = 4,
-+
-+ /*
-+ * Closing: The device is being closed due to an error or an unplug event.
-+ */
-+ XenbusStateClosing = 5,
-+
-+ XenbusStateClosed = 6
-+};
-+typedef enum xenbus_state XenbusState;
-+
-+#endif /* _XEN_PUBLIC_IO_XENBUS_H */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/io/xs_wire.h ubuntu-gutsy-xen/include/xen/interface/io/xs_wire.h
---- ubuntu-gutsy/include/xen/interface/io/xs_wire.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/io/xs_wire.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,117 @@
-+/*
-+ * Details of the "wire" protocol between Xen Store Daemon and client
-+ * library or guest kernel.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (C) 2005 Rusty Russell IBM Corporation
-+ */
-+
-+#ifndef _XS_WIRE_H
-+#define _XS_WIRE_H
-+
-+enum xsd_sockmsg_type
-+{
-+ XS_DEBUG,
-+ XS_DIRECTORY,
-+ XS_READ,
-+ XS_GET_PERMS,
-+ XS_WATCH,
-+ XS_UNWATCH,
-+ XS_TRANSACTION_START,
-+ XS_TRANSACTION_END,
-+ XS_INTRODUCE,
-+ XS_RELEASE,
-+ XS_GET_DOMAIN_PATH,
-+ XS_WRITE,
-+ XS_MKDIR,
-+ XS_RM,
-+ XS_SET_PERMS,
-+ XS_WATCH_EVENT,
-+ XS_ERROR,
-+ XS_IS_DOMAIN_INTRODUCED,
-+ XS_RESUME
-+};
-+
-+#define XS_WRITE_NONE "NONE"
-+#define XS_WRITE_CREATE "CREATE"
-+#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
-+
-+/* We hand errors as strings, for portability. */
-+struct xsd_errors
-+{
-+ int errnum;
-+ const char *errstring;
-+};
-+#define XSD_ERROR(x) { x, #x }
-+static struct xsd_errors xsd_errors[] __attribute__((unused)) = {
-+ XSD_ERROR(EINVAL),
-+ XSD_ERROR(EACCES),
-+ XSD_ERROR(EEXIST),
-+ XSD_ERROR(EISDIR),
-+ XSD_ERROR(ENOENT),
-+ XSD_ERROR(ENOMEM),
-+ XSD_ERROR(ENOSPC),
-+ XSD_ERROR(EIO),
-+ XSD_ERROR(ENOTEMPTY),
-+ XSD_ERROR(ENOSYS),
-+ XSD_ERROR(EROFS),
-+ XSD_ERROR(EBUSY),
-+ XSD_ERROR(EAGAIN),
-+ XSD_ERROR(EISCONN)
-+};
-+
-+struct xsd_sockmsg
-+{
-+ uint32_t type; /* XS_??? */
-+ uint32_t req_id;/* Request identifier, echoed in daemon's response. */
-+ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */
-+ uint32_t len; /* Length of data following this. */
-+
-+ /* Generally followed by nul-terminated string(s). */
-+};
-+
-+enum xs_watch_type
-+{
-+ XS_WATCH_PATH = 0,
-+ XS_WATCH_TOKEN
-+};
-+
-+/* Inter-domain shared memory communications. */
-+#define XENSTORE_RING_SIZE 1024
-+typedef uint32_t XENSTORE_RING_IDX;
-+#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
-+struct xenstore_domain_interface {
-+ char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
-+ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
-+ XENSTORE_RING_IDX req_cons, req_prod;
-+ XENSTORE_RING_IDX rsp_cons, rsp_prod;
-+};
-+
-+#endif /* _XS_WIRE_H */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
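As an illustration of the wire format above, an XS_READ message is a struct xsd_sockmsg header followed by the nul-terminated path, copied into the 'req' ring of xenstore_domain_interface. The sketch below only lays out the bytes; ring-index handling and notification are elided, and the helper name is hypothetical.

    #include <string.h>

    static size_t xs_build_read(char *buf, uint32_t req_id, const char *path)
    {
        struct xsd_sockmsg msg = {
            .type   = XS_READ,
            .req_id = req_id,                       /* echoed in the reply */
            .tx_id  = 0,                            /* not in a transaction */
            .len    = (uint32_t)(strlen(path) + 1), /* path incl. its nul */
        };

        memcpy(buf, &msg, sizeof(msg));
        memcpy(buf + sizeof(msg), path, msg.len);
        return sizeof(msg) + msg.len;
    }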
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/kexec.h ubuntu-gutsy-xen/include/xen/interface/kexec.h
---- ubuntu-gutsy/include/xen/interface/kexec.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/kexec.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,137 @@
-+/******************************************************************************
-+ * kexec.h - Public portion
-+ *
-+ * Xen port written by:
-+ * - Simon 'Horms' Horman <horms@verge.net.au>
-+ * - Magnus Damm <magnus@valinux.co.jp>
-+ */
-+
-+#ifndef _XEN_PUBLIC_KEXEC_H
-+#define _XEN_PUBLIC_KEXEC_H
-+
-+
-+/* This file describes the Kexec / Kdump hypercall interface for Xen.
-+ *
-+ * Kexec under vanilla Linux allows a user to reboot the physical machine
-+ * into a new user-specified kernel. The Xen port extends this idea
-+ * to allow rebooting of the machine from dom0. When kexec for dom0
-+ * is used to reboot, both the hypervisor and the domains get replaced
-+ * with some other kernel. It is possible to kexec between vanilla
-+ * Linux and Xen and back again. Xen to Xen works well too.
-+ *
-+ * The hypercall interface for kexec can be divided into three main
-+ * types of hypercall operations:
-+ *
-+ * 1) Range information:
-+ * This is used by the dom0 kernel to ask the hypervisor about various
-+ * address information. This information is needed to allow kexec-tools
-+ * to fill in the ELF headers for /proc/vmcore properly.
-+ *
-+ * 2) Load and unload of images:
-+ * There are no big surprises here, the kexec binary from kexec-tools
-+ * runs in userspace in dom0. The tool loads/unloads data into the
-+ * dom0 kernel such as new kernel, initramfs and hypervisor. When
-+ * loaded the dom0 kernel performs a load hypercall operation, and
-+ * before releasing all page references the dom0 kernel calls unload.
-+ *
-+ * 3) Kexec operation:
-+ * This is used to start a previously loaded kernel.
-+ */
-+
-+#include "xen.h"
-+
-+#if defined(__i386__) || defined(__x86_64__)
-+#define KEXEC_XEN_NO_PAGES 17
-+#endif
-+
-+/*
-+ * Prototype for this hypercall is:
-+ * int kexec_op(int cmd, void *args)
-+ * @cmd == KEXEC_CMD_...
-+ * KEXEC operation to perform
-+ * @args == Operation-specific extra arguments (NULL if none).
-+ */
-+
-+/*
-+ * Kexec supports two types of operation:
-+ * - kexec into a regular kernel, very similar to a standard reboot
-+ * - KEXEC_TYPE_DEFAULT is used to specify this type
-+ * - kexec into a special "crash kernel", aka kexec-on-panic
-+ * - KEXEC_TYPE_CRASH is used to specify this type
-+ * - parts of our system may be broken at kexec-on-panic time
-+ * - the code should be kept as simple and self-contained as possible
-+ */
-+
-+#define KEXEC_TYPE_DEFAULT 0
-+#define KEXEC_TYPE_CRASH 1
-+
-+
-+/* The kexec implementation for Xen allows the user to load two
-+ * types of kernels, KEXEC_TYPE_DEFAULT and KEXEC_TYPE_CRASH.
-+ * All data needed for a kexec reboot is kept in one xen_kexec_image_t
-+ * per "instance". The data mainly consists of machine address lists to pages
-+ * together with destination addresses. The data in xen_kexec_image_t
-+ * is passed to the "code page" which is one page of code that performs
-+ * the final relocations before jumping to the new kernel.
-+ */
-+
-+typedef struct xen_kexec_image {
-+#if defined(__i386__) || defined(__x86_64__)
-+ unsigned long page_list[KEXEC_XEN_NO_PAGES];
-+#endif
-+ unsigned long indirection_page;
-+ unsigned long start_address;
-+} xen_kexec_image_t;
-+
-+/*
-+ * Perform kexec having previously loaded a kexec or kdump kernel
-+ * as appropriate.
-+ * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
-+ */
-+#define KEXEC_CMD_kexec 0
-+typedef struct xen_kexec_exec {
-+ int type;
-+} xen_kexec_exec_t;
-+
-+/*
-+ * Load/Unload kernel image for kexec or kdump.
-+ * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
-+ * image == relocation information for kexec (ignored for unload) [in]
-+ */
-+#define KEXEC_CMD_kexec_load 1
-+#define KEXEC_CMD_kexec_unload 2
-+typedef struct xen_kexec_load {
-+ int type;
-+ xen_kexec_image_t image;
-+} xen_kexec_load_t;
-+
-+#define KEXEC_RANGE_MA_CRASH 0 /* machine address and size of crash area */
-+#define KEXEC_RANGE_MA_XEN 1 /* machine address and size of Xen itself */
-+#define KEXEC_RANGE_MA_CPU 2 /* machine address and size of a CPU note */
-+
-+/*
-+ * Find the address and size of certain memory areas
-+ * range == KEXEC_RANGE_... [in]
-+ * nr == physical CPU number (starting from 0) if KEXEC_RANGE_MA_CPU [in]
-+ * size == number of bytes reserved in window [out]
-+ * start == address of the first byte in the window [out]
-+ */
-+#define KEXEC_CMD_kexec_get_range 3
-+typedef struct xen_kexec_range {
-+ int range;
-+ int nr;
-+ unsigned long size;
-+ unsigned long start;
-+} xen_kexec_range_t;
-+
-+#endif /* _XEN_PUBLIC_KEXEC_H */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
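A minimal usage sketch for the kexec interface above, assuming a dom0 HYPERVISOR_kexec_op() wrapper with the int kexec_op(int cmd, void *args) prototype documented in this header; the wrapper name and error handling are illustrative, not part of the patch:

/* Sketch only: HYPERVISOR_kexec_op() is an assumed dom0 wrapper around
 * the kexec_op hypercall described above. */
extern int HYPERVISOR_kexec_op(int cmd, void *args);

static int query_crash_range(unsigned long *start, unsigned long *size)
{
    xen_kexec_range_t range = {
        .range = KEXEC_RANGE_MA_CRASH,
        .nr    = 0,
    };
    int rc = HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range);

    if (rc == 0) {
        *start = range.start;   /* first byte of the crash window */
        *size  = range.size;    /* bytes reserved for the crash kernel */
    }
    return rc;
}

static int do_crash_kexec(void)
{
    xen_kexec_exec_t exec = { .type = KEXEC_TYPE_CRASH };

    /* Only returns on failure; on success the crash kernel takes over. */
    return HYPERVISOR_kexec_op(KEXEC_CMD_kexec, &exec);
}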
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/libelf.h ubuntu-gutsy-xen/include/xen/interface/libelf.h
---- ubuntu-gutsy/include/xen/interface/libelf.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/libelf.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,241 @@
-+#ifndef __XC_LIBELF__
-+#define __XC_LIBELF__ 1
-+
-+#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
-+#define XEN_ELF_LITTLE_ENDIAN
-+#elif defined(__powerpc__)
-+#define XEN_ELF_BIG_ENDIAN
-+#else
-+#error define architectural endianness
-+#endif
-+
-+#undef ELFSIZE
-+#include "elfnote.h"
-+#include "elfstructs.h"
-+#include "features.h"
-+
-+/* ------------------------------------------------------------------------ */
-+
-+typedef union {
-+ Elf32_Ehdr e32;
-+ Elf64_Ehdr e64;
-+} elf_ehdr;
-+
-+typedef union {
-+ Elf32_Phdr e32;
-+ Elf64_Phdr e64;
-+} elf_phdr;
-+
-+typedef union {
-+ Elf32_Shdr e32;
-+ Elf64_Shdr e64;
-+} elf_shdr;
-+
-+typedef union {
-+ Elf32_Sym e32;
-+ Elf64_Sym e64;
-+} elf_sym;
-+
-+typedef union {
-+ Elf32_Rel e32;
-+ Elf64_Rel e64;
-+} elf_rel;
-+
-+typedef union {
-+ Elf32_Rela e32;
-+ Elf64_Rela e64;
-+} elf_rela;
-+
-+typedef union {
-+ Elf32_Note e32;
-+ Elf64_Note e64;
-+} elf_note;
-+
-+struct elf_binary {
-+ /* elf binary */
-+ const char *image;
-+ size_t size;
-+ char class;
-+ char data;
-+
-+ const elf_ehdr *ehdr;
-+ const char *sec_strtab;
-+ const elf_shdr *sym_tab;
-+ const char *sym_strtab;
-+
-+ /* loaded to */
-+ char *dest;
-+ uint64_t pstart;
-+ uint64_t pend;
-+ uint64_t reloc_offset;
-+
-+#ifndef __XEN__
-+ /* misc */
-+ FILE *log;
-+#endif
-+ int verbose;
-+};
-+
-+/* ------------------------------------------------------------------------ */
-+/* accessing elf header fields */
-+
-+#ifdef XEN_ELF_BIG_ENDIAN
-+# define NATIVE_ELFDATA ELFDATA2MSB
-+#else
-+# define NATIVE_ELFDATA ELFDATA2LSB
-+#endif
-+
-+#define elf_32bit(elf) (ELFCLASS32 == (elf)->class)
-+#define elf_64bit(elf) (ELFCLASS64 == (elf)->class)
-+#define elf_msb(elf) (ELFDATA2MSB == (elf)->data)
-+#define elf_lsb(elf) (ELFDATA2LSB == (elf)->data)
-+#define elf_swap(elf) (NATIVE_ELFDATA != (elf)->data)
-+
-+#define elf_uval(elf, str, elem) \
-+ ((ELFCLASS64 == (elf)->class) \
-+ ? elf_access_unsigned((elf), (str), \
-+ offsetof(typeof(*(str)),e64.elem), \
-+ sizeof((str)->e64.elem)) \
-+ : elf_access_unsigned((elf), (str), \
-+ offsetof(typeof(*(str)),e32.elem), \
-+ sizeof((str)->e32.elem)))
-+
-+#define elf_sval(elf, str, elem) \
-+ ((ELFCLASS64 == (elf)->class) \
-+ ? elf_access_signed((elf), (str), \
-+ offsetof(typeof(*(str)),e64.elem), \
-+ sizeof((str)->e64.elem)) \
-+ : elf_access_signed((elf), (str), \
-+ offsetof(typeof(*(str)),e32.elem), \
-+ sizeof((str)->e32.elem)))
-+
-+#define elf_size(elf, str) \
-+ ((ELFCLASS64 == (elf)->class) \
-+ ? sizeof((str)->e64) \
-+ : sizeof((str)->e32))
-+
-+uint64_t elf_access_unsigned(struct elf_binary *elf, const void *ptr,
-+ uint64_t offset, size_t size);
-+int64_t elf_access_signed(struct elf_binary *elf, const void *ptr,
-+ uint64_t offset, size_t size);
-+
-+uint64_t elf_round_up(struct elf_binary *elf, uint64_t addr);
-+
-+/* ------------------------------------------------------------------------ */
-+/* xc_libelf_tools.c */
-+
-+int elf_shdr_count(struct elf_binary *elf);
-+int elf_phdr_count(struct elf_binary *elf);
-+
-+const elf_shdr *elf_shdr_by_name(struct elf_binary *elf, const char *name);
-+const elf_shdr *elf_shdr_by_index(struct elf_binary *elf, int index);
-+const elf_phdr *elf_phdr_by_index(struct elf_binary *elf, int index);
-+
-+const char *elf_section_name(struct elf_binary *elf, const elf_shdr * shdr);
-+const void *elf_section_start(struct elf_binary *elf, const elf_shdr * shdr);
-+const void *elf_section_end(struct elf_binary *elf, const elf_shdr * shdr);
-+
-+const void *elf_segment_start(struct elf_binary *elf, const elf_phdr * phdr);
-+const void *elf_segment_end(struct elf_binary *elf, const elf_phdr * phdr);
-+
-+const elf_sym *elf_sym_by_name(struct elf_binary *elf, const char *symbol);
-+const elf_sym *elf_sym_by_index(struct elf_binary *elf, int index);
-+
-+const char *elf_note_name(struct elf_binary *elf, const elf_note * note);
-+const void *elf_note_desc(struct elf_binary *elf, const elf_note * note);
-+uint64_t elf_note_numeric(struct elf_binary *elf, const elf_note * note);
-+const elf_note *elf_note_next(struct elf_binary *elf, const elf_note * note);
-+
-+int elf_is_elfbinary(const void *image);
-+int elf_phdr_is_loadable(struct elf_binary *elf, const elf_phdr * phdr);
-+
-+/* ------------------------------------------------------------------------ */
-+/* xc_libelf_loader.c */
-+
-+int elf_init(struct elf_binary *elf, const char *image, size_t size);
-+#ifdef __XEN__
-+void elf_set_verbose(struct elf_binary *elf);
-+#else
-+void elf_set_logfile(struct elf_binary *elf, FILE * log, int verbose);
-+#endif
-+
-+void elf_parse_binary(struct elf_binary *elf);
-+void elf_load_binary(struct elf_binary *elf);
-+
-+void *elf_get_ptr(struct elf_binary *elf, unsigned long addr);
-+uint64_t elf_lookup_addr(struct elf_binary *elf, const char *symbol);
-+
-+/* ------------------------------------------------------------------------ */
-+/* xc_libelf_relocate.c */
-+
-+int elf_reloc(struct elf_binary *elf);
-+
-+/* ------------------------------------------------------------------------ */
-+/* xc_libelf_dominfo.c */
-+
-+#define UNSET_ADDR ((uint64_t)-1)
-+
-+enum xen_elfnote_type {
-+ XEN_ENT_NONE = 0,
-+ XEN_ENT_LONG = 1,
-+ XEN_ENT_STR = 2
-+};
-+
-+struct xen_elfnote {
-+ enum xen_elfnote_type type;
-+ const char *name;
-+ union {
-+ const char *str;
-+ uint64_t num;
-+ } data;
-+};
-+
-+struct elf_dom_parms {
-+ /* raw */
-+ const char *guest_info;
-+ const void *elf_note_start;
-+ const void *elf_note_end;
-+ struct xen_elfnote elf_notes[XEN_ELFNOTE_MAX + 1];
-+
-+ /* parsed */
-+ char guest_os[16];
-+ char guest_ver[16];
-+ char xen_ver[16];
-+ char loader[16];
-+ int pae;
-+ int bsd_symtab;
-+ uint64_t virt_base;
-+ uint64_t virt_entry;
-+ uint64_t virt_hypercall;
-+ uint64_t virt_hv_start_low;
-+ uint64_t elf_paddr_offset;
-+ uint32_t f_supported[XENFEAT_NR_SUBMAPS];
-+ uint32_t f_required[XENFEAT_NR_SUBMAPS];
-+
-+ /* calculated */
-+ uint64_t virt_offset;
-+ uint64_t virt_kstart;
-+ uint64_t virt_kend;
-+};
-+
-+static inline void elf_xen_feature_set(int nr, uint32_t * addr)
-+{
-+ addr[nr >> 5] |= 1 << (nr & 31);
-+}
-+static inline int elf_xen_feature_get(int nr, uint32_t * addr)
-+{
-+ return !!(addr[nr >> 5] & (1 << (nr & 31)));
-+}
-+
-+int elf_xen_parse_features(const char *features,
-+ uint32_t *supported,
-+ uint32_t *required);
-+int elf_xen_parse_note(struct elf_binary *elf,
-+ struct elf_dom_parms *parms,
-+ const elf_note *note);
-+int elf_xen_parse_guest_info(struct elf_binary *elf,
-+ struct elf_dom_parms *parms);
-+int elf_xen_parse(struct elf_binary *elf,
-+ struct elf_dom_parms *parms);
-+
-+#endif /* __XC_LIBELF__ */
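A small sketch of the libelf helpers declared above, assuming the ELF image has already been read into memory and this header (with elfstructs.h) is included; the function name is illustrative only:

/* Sketch only: parse an in-memory ELF image and read one header field
 * in a class/endian-agnostic way using elf_uval(). */
static int inspect_image(const char *image, size_t size)
{
    struct elf_binary elf;
    uint64_t entry;

    if (!elf_is_elfbinary(image))
        return -1;
    if (elf_init(&elf, image, size) != 0)
        return -1;

    elf_parse_binary(&elf);            /* fills pstart/pend from the phdrs */

    /* elf_uval() picks the e32 or e64 member according to elf.class. */
    entry = elf_uval(&elf, elf.ehdr, e_entry);

    return (entry != 0 && elf_phdr_count(&elf) > 0) ? 0 : -1;
}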
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/memory.h ubuntu-gutsy-xen/include/xen/interface/memory.h
---- ubuntu-gutsy/include/xen/interface/memory.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/memory.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,281 @@
-+/******************************************************************************
-+ * memory.h
-+ *
-+ * Memory reservation and information.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_MEMORY_H__
-+#define __XEN_PUBLIC_MEMORY_H__
-+
-+/*
-+ * Increase or decrease the specified domain's memory reservation. Returns the
-+ * number of extents successfully allocated or freed.
-+ * arg == addr of struct xen_memory_reservation.
-+ */
-+#define XENMEM_increase_reservation 0
-+#define XENMEM_decrease_reservation 1
-+#define XENMEM_populate_physmap 6
-+struct xen_memory_reservation {
-+
-+ /*
-+ * XENMEM_increase_reservation:
-+ * OUT: MFN (*not* GMFN) bases of extents that were allocated
-+ * XENMEM_decrease_reservation:
-+ * IN: GMFN bases of extents to free
-+ * XENMEM_populate_physmap:
-+ * IN: GPFN bases of extents to populate with memory
-+ * OUT: GMFN bases of extents that were allocated
-+ * (NB. This command also updates the mach_to_phys translation table)
-+ */
-+ XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
-+
-+ /* Number of extents, and size/alignment of each (2^extent_order pages). */
-+ xen_ulong_t nr_extents;
-+ unsigned int extent_order;
-+
-+ /*
-+ * Maximum # bits addressable by the user of the allocated region (e.g.,
-+ * I/O devices often have a 32-bit limitation even in 64-bit systems). If
-+ * zero then the user has no addressing restriction.
-+ * This field is not used by XENMEM_decrease_reservation.
-+ */
-+ unsigned int address_bits;
-+
-+ /*
-+ * Domain whose reservation is being changed.
-+ * Unprivileged domains can specify only DOMID_SELF.
-+ */
-+ domid_t domid;
-+};
-+typedef struct xen_memory_reservation xen_memory_reservation_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
-+
-+/*
-+ * An atomic exchange of memory pages. If return code is zero then
-+ * @out.extent_list provides GMFNs of the newly-allocated memory.
-+ * Returns zero on complete success, otherwise a negative error code.
-+ * On complete success then always @nr_exchanged == @in.nr_extents.
-+ * On partial success @nr_exchanged indicates how much work was done.
-+ */
-+#define XENMEM_exchange 11
-+struct xen_memory_exchange {
-+ /*
-+ * [IN] Details of memory extents to be exchanged (GMFN bases).
-+ * Note that @in.address_bits is ignored and unused.
-+ */
-+ struct xen_memory_reservation in;
-+
-+ /*
-+ * [IN/OUT] Details of new memory extents.
-+ * We require that:
-+ * 1. @in.domid == @out.domid
-+ * 2. @in.nr_extents << @in.extent_order ==
-+ * @out.nr_extents << @out.extent_order
-+ * 3. @in.extent_start and @out.extent_start lists must not overlap
-+ * 4. @out.extent_start lists GPFN bases to be populated
-+ * 5. @out.extent_start is overwritten with allocated GMFN bases
-+ */
-+ struct xen_memory_reservation out;
-+
-+ /*
-+ * [OUT] Number of input extents that were successfully exchanged:
-+ * 1. The first @nr_exchanged input extents were successfully
-+ * deallocated.
-+ * 2. The corresponding first entries in the output extent list correctly
-+ * indicate the GMFNs that were successfully exchanged.
-+ * 3. All other input and output extents are untouched.
-+ * 4. If not all input extents are exchanged then the return code of this
-+ * command will be non-zero.
-+ * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
-+ */
-+ xen_ulong_t nr_exchanged;
-+};
-+typedef struct xen_memory_exchange xen_memory_exchange_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
-+
-+/*
-+ * Returns the maximum machine frame number of mapped RAM in this system.
-+ * This command always succeeds (it never returns an error code).
-+ * arg == NULL.
-+ */
-+#define XENMEM_maximum_ram_page 2
-+
-+/*
-+ * Returns the current or maximum memory reservation, in pages, of the
-+ * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
-+ * arg == addr of domid_t.
-+ */
-+#define XENMEM_current_reservation 3
-+#define XENMEM_maximum_reservation 4
-+
-+/*
-+ * Returns the maximum GPFN in use by the guest, or -ve errcode on failure.
-+ */
-+#define XENMEM_maximum_gpfn 14
-+
-+/*
-+ * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
-+ * mapping table. Architectures which do not have a m2p table do not implement
-+ * this command.
-+ * arg == addr of xen_machphys_mfn_list_t.
-+ */
-+#define XENMEM_machphys_mfn_list 5
-+struct xen_machphys_mfn_list {
-+ /*
-+ * Size of the 'extent_start' array. Fewer entries will be filled if the
-+ * machphys table is smaller than max_extents * 2MB.
-+ */
-+ unsigned int max_extents;
-+
-+ /*
-+ * Pointer to buffer to fill with list of extent starts. If there are
-+ * any large discontiguities in the machine address space, 2MB gaps in
-+ * the machphys table will be represented by an MFN base of zero.
-+ */
-+ XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
-+
-+ /*
-+ * Number of extents written to the above array. This will be smaller
-+ * than 'max_extents' if the machphys table is smaller than max_e * 2MB.
-+ */
-+ unsigned int nr_extents;
-+};
-+typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
-+
-+/*
-+ * Returns the location in virtual address space of the machine_to_phys
-+ * mapping table. Architectures which do not have a m2p table, or which do not
-+ * map it by default into guest address space, do not implement this command.
-+ * arg == addr of xen_machphys_mapping_t.
-+ */
-+#define XENMEM_machphys_mapping 12
-+struct xen_machphys_mapping {
-+ xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */
-+ xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */
-+};
-+typedef struct xen_machphys_mapping xen_machphys_mapping_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
-+
-+/*
-+ * Sets the GPFN at which a particular page appears in the specified guest's
-+ * pseudophysical address space.
-+ * arg == addr of xen_add_to_physmap_t.
-+ */
-+#define XENMEM_add_to_physmap 7
-+struct xen_add_to_physmap {
-+ /* Which domain to change the mapping for. */
-+ domid_t domid;
-+
-+ /* Source mapping space. */
-+#define XENMAPSPACE_shared_info 0 /* shared info page */
-+#define XENMAPSPACE_grant_table 1 /* grant table page */
-+ unsigned int space;
-+
-+ /* Index into source mapping space. */
-+ xen_ulong_t idx;
-+
-+ /* GPFN where the source mapping page should appear. */
-+ xen_pfn_t gpfn;
-+};
-+typedef struct xen_add_to_physmap xen_add_to_physmap_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
-+
-+/*
-+ * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
-+ * code on failure. This call only works for auto-translated guests.
-+ */
-+#define XENMEM_translate_gpfn_list 8
-+struct xen_translate_gpfn_list {
-+ /* Which domain to translate for? */
-+ domid_t domid;
-+
-+ /* Length of list. */
-+ xen_ulong_t nr_gpfns;
-+
-+ /* List of GPFNs to translate. */
-+ XEN_GUEST_HANDLE(xen_pfn_t) gpfn_list;
-+
-+ /*
-+ * Output list to contain MFN translations. May be the same as the input
-+ * list (in which case each input GPFN is overwritten with the output MFN).
-+ */
-+ XEN_GUEST_HANDLE(xen_pfn_t) mfn_list;
-+};
-+typedef struct xen_translate_gpfn_list xen_translate_gpfn_list_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_translate_gpfn_list_t);
-+
-+/*
-+ * Returns the pseudo-physical memory map as it was when the domain
-+ * was started (specified by XENMEM_set_memory_map).
-+ * arg == addr of xen_memory_map_t.
-+ */
-+#define XENMEM_memory_map 9
-+struct xen_memory_map {
-+ /*
-+ * On call the number of entries which can be stored in buffer. On
-+ * return the number of entries which have been stored in
-+ * buffer.
-+ */
-+ unsigned int nr_entries;
-+
-+ /*
-+ * Entries in the buffer are in the same format as returned by the
-+ * BIOS INT 0x15 EAX=0xE820 call.
-+ */
-+ XEN_GUEST_HANDLE(void) buffer;
-+};
-+typedef struct xen_memory_map xen_memory_map_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
-+
-+/*
-+ * Returns the real physical memory map. Passes the same structure as
-+ * XENMEM_memory_map.
-+ * arg == addr of xen_memory_map_t.
-+ */
-+#define XENMEM_machine_memory_map 10
-+
-+/*
-+ * Set the pseudo-physical memory map of a domain, as returned by
-+ * XENMEM_memory_map.
-+ * arg == addr of xen_foreign_memory_map_t.
-+ */
-+#define XENMEM_set_memory_map 13
-+struct xen_foreign_memory_map {
-+ domid_t domid;
-+ struct xen_memory_map map;
-+};
-+typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
-+
-+#endif /* __XEN_PUBLIC_MEMORY_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
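A usage sketch for the reservation interface above, assuming the usual guest-kernel HYPERVISOR_memory_op() wrapper and a set_xen_guest_handle() helper for filling guest handles (both assumptions, not defined in this header):

/* Sketch only: give one guest page back to Xen via
 * XENMEM_decrease_reservation. HYPERVISOR_memory_op() and
 * set_xen_guest_handle() are assumed guest-kernel helpers. */
static int release_one_page(xen_pfn_t gmfn)
{
    struct xen_memory_reservation reservation = {
        .nr_extents   = 1,
        .extent_order = 0,           /* one page per extent */
        .address_bits = 0,           /* ignored for decrease */
        .domid        = DOMID_SELF,
    };

    set_xen_guest_handle(reservation.extent_start, &gmfn);

    /* Return value is the number of extents actually freed. */
    return HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                                &reservation) == 1 ? 0 : -1;
}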
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/nmi.h ubuntu-gutsy-xen/include/xen/interface/nmi.h
---- ubuntu-gutsy/include/xen/interface/nmi.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/nmi.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,78 @@
-+/******************************************************************************
-+ * nmi.h
-+ *
-+ * NMI callback registration and reason codes.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_NMI_H__
-+#define __XEN_PUBLIC_NMI_H__
-+
-+/*
-+ * NMI reason codes:
-+ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
-+ */
-+ /* I/O-check error reported via ISA port 0x61, bit 6. */
-+#define _XEN_NMIREASON_io_error 0
-+#define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error)
-+ /* Parity error reported via ISA port 0x61, bit 7. */
-+#define _XEN_NMIREASON_parity_error 1
-+#define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error)
-+ /* Unknown hardware-generated NMI. */
-+#define _XEN_NMIREASON_unknown 2
-+#define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown)
-+
-+/*
-+ * long nmi_op(unsigned int cmd, void *arg)
-+ * NB. All ops return zero on success, else a negative error code.
-+ */
-+
-+/*
-+ * Register NMI callback for this (calling) VCPU. Currently this only makes
-+ * sense for domain 0, vcpu 0. All other callers will be returned EINVAL.
-+ * arg == pointer to xennmi_callback structure.
-+ */
-+#define XENNMI_register_callback 0
-+struct xennmi_callback {
-+ unsigned long handler_address;
-+ unsigned long pad;
-+};
-+typedef struct xennmi_callback xennmi_callback_t;
-+DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t);
-+
-+/*
-+ * Deregister NMI callback for this (calling) VCPU.
-+ * arg == NULL.
-+ */
-+#define XENNMI_unregister_callback 1
-+
-+#endif /* __XEN_PUBLIC_NMI_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
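A minimal sketch of the callback registration above, assuming a HYPERVISOR_nmi_op() wrapper for the nmi_op hypercall; only dom0/vcpu0 may call this, as noted in the header:

/* Sketch only: dom0/vcpu0 installing its NMI callback; any other caller
 * gets EINVAL. HYPERVISOR_nmi_op() is an assumed wrapper. */
static int install_nmi_callback(unsigned long handler)
{
    struct xennmi_callback cb = {
        .handler_address = handler,
        .pad             = 0,
    };

    return HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
}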
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/physdev.h ubuntu-gutsy-xen/include/xen/interface/physdev.h
---- ubuntu-gutsy/include/xen/interface/physdev.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/physdev.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,169 @@
-+/*
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#ifndef __XEN_PUBLIC_PHYSDEV_H__
-+#define __XEN_PUBLIC_PHYSDEV_H__
-+
-+/*
-+ * Prototype for this hypercall is:
-+ * int physdev_op(int cmd, void *args)
-+ * @cmd == PHYSDEVOP_??? (physdev operation).
-+ * @args == Operation-specific extra arguments (NULL if none).
-+ */
-+
-+/*
-+ * Notify end-of-interrupt (EOI) for the specified IRQ.
-+ * @arg == pointer to physdev_eoi structure.
-+ */
-+#define PHYSDEVOP_eoi 12
-+struct physdev_eoi {
-+ /* IN */
-+ uint32_t irq;
-+};
-+typedef struct physdev_eoi physdev_eoi_t;
-+DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
-+
-+/*
-+ * Query the status of an IRQ line.
-+ * @arg == pointer to physdev_irq_status_query structure.
-+ */
-+#define PHYSDEVOP_irq_status_query 5
-+struct physdev_irq_status_query {
-+ /* IN */
-+ uint32_t irq;
-+ /* OUT */
-+ uint32_t flags; /* XENIRQSTAT_* */
-+};
-+typedef struct physdev_irq_status_query physdev_irq_status_query_t;
-+DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t);
-+
-+/* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */
-+#define _XENIRQSTAT_needs_eoi (0)
-+#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi)
-+
-+/* IRQ shared by multiple guests? */
-+#define _XENIRQSTAT_shared (1)
-+#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared)
-+
-+/*
-+ * Set the current VCPU's I/O privilege level.
-+ * @arg == pointer to physdev_set_iopl structure.
-+ */
-+#define PHYSDEVOP_set_iopl 6
-+struct physdev_set_iopl {
-+ /* IN */
-+ uint32_t iopl;
-+};
-+typedef struct physdev_set_iopl physdev_set_iopl_t;
-+DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t);
-+
-+/*
-+ * Set the current VCPU's I/O-port permissions bitmap.
-+ * @arg == pointer to physdev_set_iobitmap structure.
-+ */
-+#define PHYSDEVOP_set_iobitmap 7
-+struct physdev_set_iobitmap {
-+ /* IN */
-+ XEN_GUEST_HANDLE_00030205(uint8_t) bitmap;
-+ uint32_t nr_ports;
-+};
-+typedef struct physdev_set_iobitmap physdev_set_iobitmap_t;
-+DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t);
-+
-+/*
-+ * Read or write an IO-APIC register.
-+ * @arg == pointer to physdev_apic structure.
-+ */
-+#define PHYSDEVOP_apic_read 8
-+#define PHYSDEVOP_apic_write 9
-+struct physdev_apic {
-+ /* IN */
-+ unsigned long apic_physbase;
-+ uint32_t reg;
-+ /* IN or OUT */
-+ uint32_t value;
-+};
-+typedef struct physdev_apic physdev_apic_t;
-+DEFINE_XEN_GUEST_HANDLE(physdev_apic_t);
-+
-+/*
-+ * Allocate or free a physical upcall vector for the specified IRQ line.
-+ * @arg == pointer to physdev_irq structure.
-+ */
-+#define PHYSDEVOP_alloc_irq_vector 10
-+#define PHYSDEVOP_free_irq_vector 11
-+struct physdev_irq {
-+ /* IN */
-+ uint32_t irq;
-+ /* IN or OUT */
-+ uint32_t vector;
-+};
-+typedef struct physdev_irq physdev_irq_t;
-+DEFINE_XEN_GUEST_HANDLE(physdev_irq_t);
-+
-+/*
-+ * Argument to physdev_op_compat() hypercall. Superseded by new physdev_op()
-+ * hypercall since 0x00030202.
-+ */
-+struct physdev_op {
-+ uint32_t cmd;
-+ union {
-+ struct physdev_irq_status_query irq_status_query;
-+ struct physdev_set_iopl set_iopl;
-+ struct physdev_set_iobitmap set_iobitmap;
-+ struct physdev_apic apic_op;
-+ struct physdev_irq irq_op;
-+ } u;
-+};
-+typedef struct physdev_op physdev_op_t;
-+DEFINE_XEN_GUEST_HANDLE(physdev_op_t);
-+
-+/*
-+ * Notify that some PIRQ-bound event channels have been unmasked.
-+ * ** This command is obsolete since interface version 0x00030202 and is **
-+ * ** unsupported by newer versions of Xen. **
-+ */
-+#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
-+
-+/*
-+ * These all-capitals physdev operation names are superseded by the new names
-+ * (defined above) since interface version 0x00030202.
-+ */
-+#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
-+#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
-+#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap
-+#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read
-+#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write
-+#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector
-+#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
-+#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
-+#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
-+
-+#endif /* __XEN_PUBLIC_PHYSDEV_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
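A sketch of the status-query/EOI pattern implied above, assuming a HYPERVISOR_physdev_op() wrapper matching the int physdev_op(int cmd, void *args) prototype documented in this header:

/* Sketch only: query an IRQ line and send an EOI when Xen asks for one. */
static void irq_maybe_eoi(uint32_t irq)
{
    struct physdev_irq_status_query query = { .irq = irq };

    if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &query) == 0 &&
        (query.flags & XENIRQSTAT_needs_eoi)) {
        struct physdev_eoi eoi = { .irq = irq };

        HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);     /* ack the line */
    }
}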
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/platform.h ubuntu-gutsy-xen/include/xen/interface/platform.h
---- ubuntu-gutsy/include/xen/interface/platform.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/platform.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,183 @@
-+/******************************************************************************
-+ * platform.h
-+ *
-+ * Hardware platform operations. Intended for use by domain-0 kernel.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2002-2006, K Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_PLATFORM_H__
-+#define __XEN_PUBLIC_PLATFORM_H__
-+
-+#include "xen.h"
-+
-+#define XENPF_INTERFACE_VERSION 0x03000001
-+
-+/*
-+ * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
-+ * 1 January, 1970 if the current system time was <system_time>.
-+ */
-+#define XENPF_settime 17
-+struct xenpf_settime {
-+ /* IN variables. */
-+ uint32_t secs;
-+ uint32_t nsecs;
-+ uint64_t system_time;
-+};
-+typedef struct xenpf_settime xenpf_settime_t;
-+DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t);
-+
-+/*
-+ * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type.
-+ * On x86, @type is an architecture-defined MTRR memory type.
-+ * On success, returns the MTRR that was used (@reg) and a handle that can
-+ * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting.
-+ * (x86-specific).
-+ */
-+#define XENPF_add_memtype 31
-+struct xenpf_add_memtype {
-+ /* IN variables. */
-+ xen_pfn_t mfn;
-+ uint64_t nr_mfns;
-+ uint32_t type;
-+ /* OUT variables. */
-+ uint32_t handle;
-+ uint32_t reg;
-+};
-+typedef struct xenpf_add_memtype xenpf_add_memtype_t;
-+DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t);
-+
-+/*
-+ * Tear down an existing memory-range type. If @handle is remembered then it
-+ * should be passed in to accurately tear down the correct setting (in case
-+ * of overlapping memory regions with differing types). If it is not known
-+ * then @handle should be set to zero. In all cases @reg must be set.
-+ * (x86-specific).
-+ */
-+#define XENPF_del_memtype 32
-+struct xenpf_del_memtype {
-+ /* IN variables. */
-+ uint32_t handle;
-+ uint32_t reg;
-+};
-+typedef struct xenpf_del_memtype xenpf_del_memtype_t;
-+DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t);
-+
-+/* Read current type of an MTRR (x86-specific). */
-+#define XENPF_read_memtype 33
-+struct xenpf_read_memtype {
-+ /* IN variables. */
-+ uint32_t reg;
-+ /* OUT variables. */
-+ xen_pfn_t mfn;
-+ uint64_t nr_mfns;
-+ uint32_t type;
-+};
-+typedef struct xenpf_read_memtype xenpf_read_memtype_t;
-+DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t);
-+
-+#define XENPF_microcode_update 35
-+struct xenpf_microcode_update {
-+ /* IN variables. */
-+ XEN_GUEST_HANDLE(void) data; /* Pointer to microcode data */
-+ uint32_t length; /* Length of microcode data. */
-+};
-+typedef struct xenpf_microcode_update xenpf_microcode_update_t;
-+DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t);
-+
-+#define XENPF_platform_quirk 39
-+#define QUIRK_NOIRQBALANCING 1 /* Do not restrict IO-APIC RTE targets */
-+#define QUIRK_IOAPIC_BAD_REGSEL 2 /* IO-APIC REGSEL forgets its value */
-+#define QUIRK_IOAPIC_GOOD_REGSEL 3 /* IO-APIC REGSEL behaves properly */
-+struct xenpf_platform_quirk {
-+ /* IN variables. */
-+ uint32_t quirk_id;
-+};
-+typedef struct xenpf_platform_quirk xenpf_platform_quirk_t;
-+DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t);
-+
-+#define XENPF_firmware_info 50
-+#define XEN_FW_DISK_INFO 1 /* from int 13 AH=08/41/48 */
-+#define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */
-+#define XEN_FW_VBEDDC_INFO 3 /* from int 10 AX=4f15 */
-+struct xenpf_firmware_info {
-+ /* IN variables. */
-+ uint32_t type;
-+ uint32_t index;
-+ /* OUT variables. */
-+ union {
-+ struct {
-+ /* Int13, Fn48: Check Extensions Present. */
-+ uint8_t device; /* %dl: bios device number */
-+ uint8_t version; /* %ah: major version */
-+ uint16_t interface_support; /* %cx: support bitmap */
-+ /* Int13, Fn08: Legacy Get Device Parameters. */
-+ uint16_t legacy_max_cylinder; /* %cl[7:6]:%ch: max cyl # */
-+ uint8_t legacy_max_head; /* %dh: max head # */
-+ uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector # */
-+ /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */
-+ /* NB. First uint16_t of buffer must be set to buffer size. */
-+ XEN_GUEST_HANDLE(void) edd_params;
-+ } disk_info; /* XEN_FW_DISK_INFO */
-+ struct {
-+ uint8_t device; /* bios device number */
-+ uint32_t mbr_signature; /* offset 0x1b8 in mbr */
-+ } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */
-+ struct {
-+ /* Int10, AX=4F15: Get EDID info. */
-+ uint8_t capabilities;
-+ uint8_t edid_transfer_time;
-+ /* must refer to 128-byte buffer */
-+ XEN_GUEST_HANDLE(uint8_t) edid;
-+ } vbeddc_info; /* XEN_FW_VBEDDC_INFO */
-+ } u;
-+};
-+typedef struct xenpf_firmware_info xenpf_firmware_info_t;
-+DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t);
-+
-+struct xen_platform_op {
-+ uint32_t cmd;
-+ uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
-+ union {
-+ struct xenpf_settime settime;
-+ struct xenpf_add_memtype add_memtype;
-+ struct xenpf_del_memtype del_memtype;
-+ struct xenpf_read_memtype read_memtype;
-+ struct xenpf_microcode_update microcode;
-+ struct xenpf_platform_quirk platform_quirk;
-+ struct xenpf_firmware_info firmware_info;
-+ uint8_t pad[128];
-+ } u;
-+};
-+typedef struct xen_platform_op xen_platform_op_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t);
-+
-+#endif /* __XEN_PUBLIC_PLATFORM_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
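A sketch of one platform operation, assuming a dom0 HYPERVISOR_platform_op() wrapper that takes a struct xen_platform_op pointer (the wrapper name is an assumption):

/* Sketch only: dom0 pushing wallclock time into Xen with XENPF_settime. */
static int set_xen_wallclock(uint32_t secs, uint32_t nsecs,
                             uint64_t system_time)
{
    struct xen_platform_op op = {
        .cmd               = XENPF_settime,
        .interface_version = XENPF_INTERFACE_VERSION,
    };

    op.u.settime.secs        = secs;
    op.u.settime.nsecs       = nsecs;
    op.u.settime.system_time = system_time;

    return HYPERVISOR_platform_op(&op);   /* assumed wrapper */
}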
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/sched.h ubuntu-gutsy-xen/include/xen/interface/sched.h
---- ubuntu-gutsy/include/xen/interface/sched.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/sched.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,121 @@
-+/******************************************************************************
-+ * sched.h
-+ *
-+ * Scheduler state interactions
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_SCHED_H__
-+#define __XEN_PUBLIC_SCHED_H__
-+
-+#include "event_channel.h"
-+
-+/*
-+ * The prototype for this hypercall is:
-+ * long sched_op(int cmd, void *arg)
-+ * @cmd == SCHEDOP_??? (scheduler operation).
-+ * @arg == Operation-specific extra argument(s), as described below.
-+ *
-+ * Versions of Xen prior to 3.0.2 provided only the following legacy version
-+ * of this hypercall, supporting only the commands yield, block and shutdown:
-+ * long sched_op(int cmd, unsigned long arg)
-+ * @cmd == SCHEDOP_??? (scheduler operation).
-+ * @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
-+ * == SHUTDOWN_* code (SCHEDOP_shutdown)
-+ * This legacy version is available to new guests as sched_op_compat().
-+ */
-+
-+/*
-+ * Voluntarily yield the CPU.
-+ * @arg == NULL.
-+ */
-+#define SCHEDOP_yield 0
-+
-+/*
-+ * Block execution of this VCPU until an event is received for processing.
-+ * If called with event upcalls masked, this operation will atomically
-+ * reenable event delivery and check for pending events before blocking the
-+ * VCPU. This avoids a "wakeup waiting" race.
-+ * @arg == NULL.
-+ */
-+#define SCHEDOP_block 1
-+
-+/*
-+ * Halt execution of this domain (all VCPUs) and notify the system controller.
-+ * @arg == pointer to sched_shutdown structure.
-+ */
-+#define SCHEDOP_shutdown 2
-+struct sched_shutdown {
-+ unsigned int reason; /* SHUTDOWN_* */
-+};
-+typedef struct sched_shutdown sched_shutdown_t;
-+DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
-+
-+/*
-+ * Poll a set of event-channel ports. Return when one or more are pending. An
-+ * optional timeout may be specified.
-+ * @arg == pointer to sched_poll structure.
-+ */
-+#define SCHEDOP_poll 3
-+struct sched_poll {
-+ XEN_GUEST_HANDLE(evtchn_port_t) ports;
-+ unsigned int nr_ports;
-+ uint64_t timeout;
-+};
-+typedef struct sched_poll sched_poll_t;
-+DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
-+
-+/*
-+ * Declare a shutdown for another domain. The main use of this function is
-+ * in interpreting shutdown requests and reasons for fully-virtualized
-+ * domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
-+ * @arg == pointer to sched_remote_shutdown structure.
-+ */
-+#define SCHEDOP_remote_shutdown 4
-+struct sched_remote_shutdown {
-+ domid_t domain_id; /* Remote domain ID */
-+ unsigned int reason; /* SHUTDOWN_xxx reason */
-+};
-+typedef struct sched_remote_shutdown sched_remote_shutdown_t;
-+DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
-+
-+/*
-+ * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
-+ * software to determine the appropriate action. For the most part, Xen does
-+ * not care about the shutdown code.
-+ */
-+#define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */
-+#define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
-+#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
-+#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
-+
-+#endif /* __XEN_PUBLIC_SCHED_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
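A sketch of two scheduler operations described above, assuming guest-side HYPERVISOR_sched_op() and set_xen_guest_handle() helpers; the hypercall itself is the long sched_op(int, void *) form documented in this header:

/* Sketch only: a clean poweroff and a poll on a single event channel. */
static void guest_poweroff(void)
{
    struct sched_shutdown shutdown = { .reason = SHUTDOWN_poweroff };

    HYPERVISOR_sched_op(SCHEDOP_shutdown, &shutdown);
}

static long wait_for_port(evtchn_port_t port)
{
    struct sched_poll poll = {
        .nr_ports = 1,
        .timeout  = 0,        /* 0 == no timeout: block until a port is pending */
    };

    set_xen_guest_handle(poll.ports, &port);
    return HYPERVISOR_sched_op(SCHEDOP_poll, &poll);
}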
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/sysctl.h ubuntu-gutsy-xen/include/xen/interface/sysctl.h
---- ubuntu-gutsy/include/xen/interface/sysctl.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/sysctl.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,182 @@
-+/******************************************************************************
-+ * sysctl.h
-+ *
-+ * System management operations. For use by node control stack.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2002-2006, K Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_SYSCTL_H__
-+#define __XEN_PUBLIC_SYSCTL_H__
-+
-+#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
-+#error "sysctl operations are intended for use by node control tools only"
-+#endif
-+
-+#include "xen.h"
-+#include "domctl.h"
-+
-+#define XEN_SYSCTL_INTERFACE_VERSION 0x00000003
-+
-+/*
-+ * Read console content from Xen buffer ring.
-+ */
-+#define XEN_SYSCTL_readconsole 1
-+struct xen_sysctl_readconsole {
-+ /* IN variables. */
-+ uint32_t clear; /* Non-zero -> clear after reading. */
-+ XEN_GUEST_HANDLE_64(char) buffer; /* Buffer start */
-+ /* IN/OUT variables. */
-+ uint32_t count; /* In: Buffer size; Out: Used buffer size */
-+};
-+typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t);
-+
-+/* Get trace buffers machine base address */
-+#define XEN_SYSCTL_tbuf_op 2
-+struct xen_sysctl_tbuf_op {
-+ /* IN variables */
-+#define XEN_SYSCTL_TBUFOP_get_info 0
-+#define XEN_SYSCTL_TBUFOP_set_cpu_mask 1
-+#define XEN_SYSCTL_TBUFOP_set_evt_mask 2
-+#define XEN_SYSCTL_TBUFOP_set_size 3
-+#define XEN_SYSCTL_TBUFOP_enable 4
-+#define XEN_SYSCTL_TBUFOP_disable 5
-+ uint32_t cmd;
-+ /* IN/OUT variables */
-+ struct xenctl_cpumap cpu_mask;
-+ uint32_t evt_mask;
-+ /* OUT variables */
-+ uint64_aligned_t buffer_mfn;
-+ uint32_t size;
-+};
-+typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
-+
-+/*
-+ * Get physical information about the host machine
-+ */
-+#define XEN_SYSCTL_physinfo 3
-+struct xen_sysctl_physinfo {
-+ uint32_t threads_per_core;
-+ uint32_t cores_per_socket;
-+ uint32_t sockets_per_node;
-+ uint32_t nr_nodes;
-+ uint32_t cpu_khz;
-+ uint64_aligned_t total_pages;
-+ uint64_aligned_t free_pages;
-+ uint64_aligned_t scrub_pages;
-+ uint32_t hw_cap[8];
-+};
-+typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
-+
-+/*
-+ * Get the ID of the current scheduler.
-+ */
-+#define XEN_SYSCTL_sched_id 4
-+struct xen_sysctl_sched_id {
-+ /* OUT variable */
-+ uint32_t sched_id;
-+};
-+typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t);
-+
-+/* Interface for controlling Xen software performance counters. */
-+#define XEN_SYSCTL_perfc_op 5
-+/* Sub-operations: */
-+#define XEN_SYSCTL_PERFCOP_reset 1 /* Reset all counters to zero. */
-+#define XEN_SYSCTL_PERFCOP_query 2 /* Get perfctr information. */
-+struct xen_sysctl_perfc_desc {
-+ char name[80]; /* name of perf counter */
-+ uint32_t nr_vals; /* number of values for this counter */
-+};
-+typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t);
-+typedef uint32_t xen_sysctl_perfc_val_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t);
-+
-+struct xen_sysctl_perfc_op {
-+ /* IN variables. */
-+ uint32_t cmd; /* XEN_SYSCTL_PERFCOP_??? */
-+ /* OUT variables. */
-+ uint32_t nr_counters; /* number of counters description */
-+ uint32_t nr_vals; /* number of values */
-+ /* counter information (or NULL) */
-+ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc;
-+ /* counter values (or NULL) */
-+ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val;
-+};
-+typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
-+
-+#define XEN_SYSCTL_getdomaininfolist 6
-+struct xen_sysctl_getdomaininfolist {
-+ /* IN variables. */
-+ domid_t first_domain;
-+ uint32_t max_domains;
-+ XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer;
-+ /* OUT variables. */
-+ uint32_t num_domains;
-+};
-+typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
-+
-+/*
-+ * Inject debug keys into Xen.
-+ */
-+#define XEN_SYSCTL_debug_keys 7
-+struct xen_sysctl_debug_keys {
-+ /* IN variables. */
-+ XEN_GUEST_HANDLE_64(char) keys;
-+ uint32_t nr_keys;
-+};
-+typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t);
-+
-+struct xen_sysctl {
-+ uint32_t cmd;
-+ uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
-+ union {
-+ struct xen_sysctl_readconsole readconsole;
-+ struct xen_sysctl_tbuf_op tbuf_op;
-+ struct xen_sysctl_physinfo physinfo;
-+ struct xen_sysctl_sched_id sched_id;
-+ struct xen_sysctl_perfc_op perfc_op;
-+ struct xen_sysctl_getdomaininfolist getdomaininfolist;
-+ struct xen_sysctl_debug_keys debug_keys;
-+ uint8_t pad[128];
-+ } u;
-+};
-+typedef struct xen_sysctl xen_sysctl_t;
-+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t);
-+
-+#endif /* __XEN_PUBLIC_SYSCTL_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
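A tool-side sketch of one sysctl above; do_sysctl() stands in for whatever privcmd ioctl wrapper the control stack provides and is purely illustrative:

/* Sketch only: a control tool reading host information through
 * XEN_SYSCTL_physinfo via an assumed do_sysctl() wrapper. */
static int host_cpu_khz(uint32_t *khz)
{
    struct xen_sysctl sysctl = {
        .cmd               = XEN_SYSCTL_physinfo,
        .interface_version = XEN_SYSCTL_INTERFACE_VERSION,
    };

    if (do_sysctl(&sysctl) != 0)
        return -1;

    *khz = sysctl.u.physinfo.cpu_khz;
    return 0;
}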
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/trace.h ubuntu-gutsy-xen/include/xen/interface/trace.h
---- ubuntu-gutsy/include/xen/interface/trace.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/trace.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,119 @@
-+/******************************************************************************
-+ * include/public/trace.h
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Mark Williamson, (C) 2004 Intel Research Cambridge
-+ * Copyright (C) 2005 Bin Ren
-+ */
-+
-+#ifndef __XEN_PUBLIC_TRACE_H__
-+#define __XEN_PUBLIC_TRACE_H__
-+
-+/* Trace classes */
-+#define TRC_CLS_SHIFT 16
-+#define TRC_GEN 0x0001f000 /* General trace */
-+#define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */
-+#define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */
-+#define TRC_HVM 0x0008f000 /* Xen HVM trace */
-+#define TRC_MEM 0x0010f000 /* Xen memory trace */
-+#define TRC_ALL 0xfffff000
-+
-+/* Trace subclasses */
-+#define TRC_SUBCLS_SHIFT 12
-+
-+/* trace subclasses for SVM */
-+#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */
-+#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
-+
-+/* Trace events per class */
-+#define TRC_LOST_RECORDS (TRC_GEN + 1)
-+
-+#define TRC_SCHED_DOM_ADD (TRC_SCHED + 1)
-+#define TRC_SCHED_DOM_REM (TRC_SCHED + 2)
-+#define TRC_SCHED_SLEEP (TRC_SCHED + 3)
-+#define TRC_SCHED_WAKE (TRC_SCHED + 4)
-+#define TRC_SCHED_YIELD (TRC_SCHED + 5)
-+#define TRC_SCHED_BLOCK (TRC_SCHED + 6)
-+#define TRC_SCHED_SHUTDOWN (TRC_SCHED + 7)
-+#define TRC_SCHED_CTL (TRC_SCHED + 8)
-+#define TRC_SCHED_ADJDOM (TRC_SCHED + 9)
-+#define TRC_SCHED_SWITCH (TRC_SCHED + 10)
-+#define TRC_SCHED_S_TIMER_FN (TRC_SCHED + 11)
-+#define TRC_SCHED_T_TIMER_FN (TRC_SCHED + 12)
-+#define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED + 13)
-+#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED + 14)
-+#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED + 15)
-+
-+#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1)
-+#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2)
-+#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
-+
-+/* trace events per subclass */
-+#define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01)
-+#define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02)
-+#define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01)
-+#define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02)
-+#define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03)
-+#define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04)
-+#define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05)
-+#define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06)
-+#define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07)
-+#define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08)
-+#define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09)
-+#define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A)
-+#define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B)
-+#define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C)
-+#define TRC_HVM_MSR_WRITE (TRC_HVM_HANDLER + 0x0D)
-+#define TRC_HVM_CPUID (TRC_HVM_HANDLER + 0x0E)
-+#define TRC_HVM_INTR (TRC_HVM_HANDLER + 0x0F)
-+#define TRC_HVM_NMI (TRC_HVM_HANDLER + 0x10)
-+#define TRC_HVM_SMI (TRC_HVM_HANDLER + 0x11)
-+#define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12)
-+#define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13)
-+#define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14)
-+
-+/* This structure represents a single trace buffer record. */
-+struct t_rec {
-+ uint64_t cycles; /* cycle counter timestamp */
-+ uint32_t event; /* event ID */
-+ unsigned long data[5]; /* event data items */
-+};
-+
-+/*
-+ * This structure contains the metadata for a single trace buffer. The head
-+ * field indexes into an array of struct t_rec's.
-+ */
-+struct t_buf {
-+ uint32_t cons; /* Next item to be consumed by control tools. */
-+ uint32_t prod; /* Next item to be produced by Xen. */
-+ /* 'nr_recs' records follow immediately after the meta-data header. */
-+};
-+
-+#endif /* __XEN_PUBLIC_TRACE_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
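A consumer-side sketch of the trace buffer layout described above; the t_rec array is taken to start right after the t_buf header, while the mapping of the buffer, the ring size and handle_record() are assumptions:

/* Sketch only: drain one trace buffer ring. */
static void drain_trace_buffer(struct t_buf *meta, uint32_t nr_recs)
{
    struct t_rec *recs = (struct t_rec *)(meta + 1);   /* records follow header */

    /* cons/prod are free-running counters; index them modulo the ring size. */
    while (meta->cons != meta->prod) {
        handle_record(&recs[meta->cons % nr_recs]);    /* hypothetical callback */
        meta->cons++;
    }
}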
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/vcpu.h ubuntu-gutsy-xen/include/xen/interface/vcpu.h
---- ubuntu-gutsy/include/xen/interface/vcpu.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/vcpu.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,192 @@
-+/******************************************************************************
-+ * vcpu.h
-+ *
-+ * VCPU initialisation, query, and hotplug.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_VCPU_H__
-+#define __XEN_PUBLIC_VCPU_H__
-+
-+/*
-+ * Prototype for this hypercall is:
-+ * int vcpu_op(int cmd, int vcpuid, void *extra_args)
-+ * @cmd == VCPUOP_??? (VCPU operation).
-+ * @vcpuid == VCPU to operate on.
-+ * @extra_args == Operation-specific extra arguments (NULL if none).
-+ */
-+
-+/*
-+ * Initialise a VCPU. Each VCPU can be initialised only once. A
-+ * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
-+ *
-+ * @extra_arg == pointer to vcpu_guest_context structure containing initial
-+ * state for the VCPU.
-+ */
-+#define VCPUOP_initialise 0
-+
-+/*
-+ * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
-+ * if the VCPU has not been initialised (VCPUOP_initialise).
-+ */
-+#define VCPUOP_up 1
-+
-+/*
-+ * Bring down a VCPU (i.e., make it non-runnable).
-+ * There are a few caveats that callers should observe:
-+ * 1. This operation may return, and VCPU_is_up may return false, before the
-+ * VCPU stops running (i.e., the command is asynchronous). It is a good
-+ * idea to ensure that the VCPU has entered a non-critical loop before
-+ * bringing it down. Alternatively, this operation is guaranteed
-+ * synchronous if invoked by the VCPU itself.
-+ * 2. After a VCPU is initialised, there is currently no way to drop all its
-+ * references to domain memory. Even a VCPU that is down still holds
-+ * memory references via its pagetable base pointer and GDT. It is good
-+ * practice to move a VCPU onto an 'idle' or default page table, LDT and
-+ * GDT before bringing it down.
-+ */
-+#define VCPUOP_down 2
-+
-+/* Returns 1 if the given VCPU is up. */
-+#define VCPUOP_is_up 3
-+
-+/*
-+ * Return information about the state and running time of a VCPU.
-+ * @extra_arg == pointer to vcpu_runstate_info structure.
-+ */
-+#define VCPUOP_get_runstate_info 4
-+struct vcpu_runstate_info {
-+ /* VCPU's current state (RUNSTATE_*). */
-+ int state;
-+ /* When was current state entered (system time, ns)? */
-+ uint64_t state_entry_time;
-+ /*
-+ * Time spent in each RUNSTATE_* (ns). The sum of these times is
-+ * guaranteed not to drift from system time.
-+ */
-+ uint64_t time[4];
-+};
-+typedef struct vcpu_runstate_info vcpu_runstate_info_t;
-+DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t);
-+
-+/* VCPU is currently running on a physical CPU. */
-+#define RUNSTATE_running 0
-+
-+/* VCPU is runnable, but not currently scheduled on any physical CPU. */
-+#define RUNSTATE_runnable 1
-+
-+/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
-+#define RUNSTATE_blocked 2
-+
-+/*
-+ * VCPU is not runnable, but it is not blocked.
-+ * This is a 'catch all' state for things like hotplug and pauses by the
-+ * system administrator (or for critical sections in the hypervisor).
-+ * RUNSTATE_blocked dominates this state (it is the preferred state).
-+ */
-+#define RUNSTATE_offline 3
-+
-+/*
-+ * Register a shared memory area from which the guest may obtain its own
-+ * runstate information without needing to execute a hypercall.
-+ * Notes:
-+ * 1. The registered address may be virtual or physical or guest handle,
-+ * depending on the platform. Virtual address or guest handle should be
-+ * registered on x86 systems.
-+ * 2. Only one shared area may be registered per VCPU. The shared area is
-+ * updated by the hypervisor each time the VCPU is scheduled. Thus
-+ * runstate.state will always be RUNSTATE_running and
-+ * runstate.state_entry_time will indicate the system time at which the
-+ * VCPU was last scheduled to run.
-+ * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
-+ */
-+#define VCPUOP_register_runstate_memory_area 5
-+struct vcpu_register_runstate_memory_area {
-+ union {
-+ XEN_GUEST_HANDLE(vcpu_runstate_info_t) h;
-+ struct vcpu_runstate_info *v;
-+ uint64_t p;
-+ } addr;
-+};
-+typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t;
-+DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t);
-+
-+/*
-+ * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
-+ * which can be set via these commands. Periods smaller than one millisecond
-+ * may not be supported.
-+ */
-+#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */
-+#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */
-+struct vcpu_set_periodic_timer {
-+ uint64_t period_ns;
-+};
-+typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t;
-+DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t);
-+
-+/*
-+ * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
-+ * timer which can be set via these commands.
-+ */
-+#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */
-+#define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
-+struct vcpu_set_singleshot_timer {
-+ uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */
-+ uint32_t flags; /* VCPU_SSHOTTMR_??? */
-+};
-+typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t;
-+DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t);
-+
-+/* Flags to VCPUOP_set_singleshot_timer. */
-+ /* Require the timeout to be in the future (return -ETIME if it's passed). */
-+#define _VCPU_SSHOTTMR_future (0)
-+#define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future)
-+
-+/*
-+ * Register a memory location in the guest address space for the
-+ * vcpu_info structure. This allows the guest to place the vcpu_info
-+ * structure in a convenient place, such as in a per-cpu data area.
-+ * The pointer need not be page aligned, but the structure must not
-+ * cross a page boundary.
-+ *
-+ * If the specified mfn is INVALID_MFN, then it reverts to using the
-+ * vcpu_info structure in the shared_info page.
-+ */
-+#define VCPUOP_register_vcpu_info 10 /* arg == struct vcpu_info */
-+struct vcpu_register_vcpu_info {
-+ xen_pfn_t mfn; /* mfn of page to place vcpu_info */
-+ uint32_t offset; /* offset within page */
-+};
-+typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t;
-+DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);
-+
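A minimal sketch of how note 1 is applied on x86 (virt_to_mfn() and the HYPERVISOR_vcpu_op wrapper are assumed from the arch headers; the helper name is made up):

static DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info) __attribute__((aligned(64)));

/* 64-byte alignment of the 64-byte structure keeps it within one page. */
static int relocate_vcpu_info(unsigned int cpu)
{
    struct vcpu_info *v = &per_cpu(xen_vcpu_info, cpu);
    struct vcpu_register_vcpu_info info = {
        .mfn    = virt_to_mfn(v),
        .offset = (unsigned long)v & ~PAGE_MASK,
    };

    return HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
}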
-+#endif /* __XEN_PUBLIC_VCPU_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/version.h ubuntu-gutsy-xen/include/xen/interface/version.h
---- ubuntu-gutsy/include/xen/interface/version.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/version.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,91 @@
-+/******************************************************************************
-+ * version.h
-+ *
-+ * Xen version, type, and compile information.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2005, Nguyen Anh Quynh <aquynh@gmail.com>
-+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_VERSION_H__
-+#define __XEN_PUBLIC_VERSION_H__
-+
-+/* NB. All ops return zero on success, except XENVER_{version,pagesize} */
-+
-+/* arg == NULL; returns major:minor (16:16). */
-+#define XENVER_version 0
-+
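For example, the packed return value can be decoded like this (a sketch assuming the HYPERVISOR_xen_version hypercall wrapper):

static void print_xen_version(void)
{
    int v = HYPERVISOR_xen_version(XENVER_version, NULL);

    printk(KERN_INFO "Xen version %d.%d\n", v >> 16, v & 0xffff);
}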
-+/* arg == xen_extraversion_t. */
-+#define XENVER_extraversion 1
-+typedef char xen_extraversion_t[16];
-+#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t))
-+
-+/* arg == xen_compile_info_t. */
-+#define XENVER_compile_info 2
-+struct xen_compile_info {
-+ char compiler[64];
-+ char compile_by[16];
-+ char compile_domain[32];
-+ char compile_date[32];
-+};
-+typedef struct xen_compile_info xen_compile_info_t;
-+
-+#define XENVER_capabilities 3
-+typedef char xen_capabilities_info_t[1024];
-+#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t))
-+
-+#define XENVER_changeset 4
-+typedef char xen_changeset_info_t[64];
-+#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t))
-+
-+#define XENVER_platform_parameters 5
-+struct xen_platform_parameters {
-+ unsigned long virt_start;
-+};
-+typedef struct xen_platform_parameters xen_platform_parameters_t;
-+
-+#define XENVER_get_features 6
-+struct xen_feature_info {
-+ unsigned int submap_idx; /* IN: which 32-bit submap to return */
-+ uint32_t submap; /* OUT: 32-bit submap */
-+};
-+typedef struct xen_feature_info xen_feature_info_t;
-+
-+/* Declares the features reported by XENVER_get_features. */
-+#include "features.h"
-+
-+/* arg == NULL; returns host memory page size. */
-+#define XENVER_pagesize 7
-+
-+/* arg == xen_domain_handle_t. */
-+#define XENVER_guest_handle 8
-+
-+#endif /* __XEN_PUBLIC_VERSION_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/xencomm.h ubuntu-gutsy-xen/include/xen/interface/xencomm.h
---- ubuntu-gutsy/include/xen/interface/xencomm.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/xencomm.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,41 @@
-+/*
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (C) IBM Corp. 2006
-+ */
-+
-+#ifndef _XEN_XENCOMM_H_
-+#define _XEN_XENCOMM_H_
-+
-+/* A xencomm descriptor is a scatter/gather list containing physical
-+ * addresses corresponding to a virtually contiguous memory area. The
-+ * hypervisor translates these physical addresses to machine addresses to copy
-+ * to and from the virtually contiguous area.
-+ */
-+
-+#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
-+#define XENCOMM_INVALID (~0UL)
-+
-+struct xencomm_desc {
-+ uint32_t magic;
-+ uint32_t nr_addrs; /* the number of entries in address[] */
-+ uint64_t address[0];
-+};
-+
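A guest-side helper that fills in such a descriptor might look roughly like this (a sketch only; the caller must allocate a descriptor large enough for the buffer, and real code also needs error handling):

/* Record one physical address per page of a virtually contiguous buffer. */
static void xencomm_fill(struct xencomm_desc *desc, void *buf, size_t len)
{
    unsigned long va = (unsigned long)buf;
    uint32_t i = 0;

    desc->magic = XENCOMM_MAGIC;
    while (len) {
        size_t chunk = min_t(size_t, len, PAGE_SIZE - (va & ~PAGE_MASK));

        desc->address[i++] = virt_to_phys((void *)va);
        va  += chunk;
        len -= chunk;
    }
    desc->nr_addrs = i;
}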
-+#endif /* _XEN_XENCOMM_H_ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/xen-compat.h ubuntu-gutsy-xen/include/xen/interface/xen-compat.h
---- ubuntu-gutsy/include/xen/interface/xen-compat.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/xen-compat.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,51 @@
-+/******************************************************************************
-+ * xen-compat.h
-+ *
-+ * Guest OS interface to Xen. Compatibility layer.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2006, Christian Limpach
-+ */
-+
-+#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
-+#define __XEN_PUBLIC_XEN_COMPAT_H__
-+
-+#define __XEN_LATEST_INTERFACE_VERSION__ 0x00030205
-+
-+#if defined(__XEN__) || defined(__XEN_TOOLS__)
-+/* Xen is built with matching headers and implements the latest interface. */
-+#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__
-+#elif !defined(__XEN_INTERFACE_VERSION__)
-+/* Guests which do not specify a version get the legacy interface. */
-+#define __XEN_INTERFACE_VERSION__ 0x00000000
-+#endif
-+
-+#if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__
-+#error "These header files do not support the requested interface version."
-+#endif
-+
-+/* Fields defined as a Xen guest handle since 0x00030205. */
-+#if __XEN_INTERFACE_VERSION__ >= 0x00030205
-+#define XEN_GUEST_HANDLE_00030205(type) XEN_GUEST_HANDLE(type)
-+#else
-+#define XEN_GUEST_HANDLE_00030205(type) type *
-+#endif
-+
-+#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/xen.h ubuntu-gutsy-xen/include/xen/interface/xen.h
---- ubuntu-gutsy/include/xen/interface/xen.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/xen.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,610 @@
-+/******************************************************************************
-+ * xen.h
-+ *
-+ * Guest OS interface to Xen.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (c) 2004, K A Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_XEN_H__
-+#define __XEN_PUBLIC_XEN_H__
-+
-+#include "xen-compat.h"
-+
-+#if defined(__i386__) || defined(__x86_64__)
-+#include "arch-x86/xen.h"
-+#elif defined(__ia64__)
-+#include "arch-ia64.h"
-+#elif defined(__powerpc__)
-+#include "arch-powerpc.h"
-+#else
-+#error "Unsupported architecture"
-+#endif
-+
-+/*
-+ * HYPERCALLS
-+ */
-+
-+#define __HYPERVISOR_set_trap_table 0
-+#define __HYPERVISOR_mmu_update 1
-+#define __HYPERVISOR_set_gdt 2
-+#define __HYPERVISOR_stack_switch 3
-+#define __HYPERVISOR_set_callbacks 4
-+#define __HYPERVISOR_fpu_taskswitch 5
-+#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */
-+#define __HYPERVISOR_platform_op 7
-+#define __HYPERVISOR_set_debugreg 8
-+#define __HYPERVISOR_get_debugreg 9
-+#define __HYPERVISOR_update_descriptor 10
-+#define __HYPERVISOR_memory_op 12
-+#define __HYPERVISOR_multicall 13
-+#define __HYPERVISOR_update_va_mapping 14
-+#define __HYPERVISOR_set_timer_op 15
-+#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */
-+#define __HYPERVISOR_xen_version 17
-+#define __HYPERVISOR_console_io 18
-+#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */
-+#define __HYPERVISOR_grant_table_op 20
-+#define __HYPERVISOR_vm_assist 21
-+#define __HYPERVISOR_update_va_mapping_otherdomain 22
-+#define __HYPERVISOR_iret 23 /* x86 only */
-+#define __HYPERVISOR_vcpu_op 24
-+#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
-+#define __HYPERVISOR_mmuext_op 26
-+#define __HYPERVISOR_acm_op 27
-+#define __HYPERVISOR_nmi_op 28
-+#define __HYPERVISOR_sched_op 29
-+#define __HYPERVISOR_callback_op 30
-+#define __HYPERVISOR_xenoprof_op 31
-+#define __HYPERVISOR_event_channel_op 32
-+#define __HYPERVISOR_physdev_op 33
-+#define __HYPERVISOR_hvm_op 34
-+#define __HYPERVISOR_sysctl 35
-+#define __HYPERVISOR_domctl 36
-+#define __HYPERVISOR_kexec_op 37
-+
-+/* Architecture-specific hypercall definitions. */
-+#define __HYPERVISOR_arch_0 48
-+#define __HYPERVISOR_arch_1 49
-+#define __HYPERVISOR_arch_2 50
-+#define __HYPERVISOR_arch_3 51
-+#define __HYPERVISOR_arch_4 52
-+#define __HYPERVISOR_arch_5 53
-+#define __HYPERVISOR_arch_6 54
-+#define __HYPERVISOR_arch_7 55
-+
-+/*
-+ * HYPERCALL COMPATIBILITY.
-+ */
-+
-+/* New sched_op hypercall introduced in 0x00030101. */
-+#if __XEN_INTERFACE_VERSION__ < 0x00030101
-+#undef __HYPERVISOR_sched_op
-+#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat
-+#endif
-+
-+/* New event-channel and physdev hypercalls introduced in 0x00030202. */
-+#if __XEN_INTERFACE_VERSION__ < 0x00030202
-+#undef __HYPERVISOR_event_channel_op
-+#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat
-+#undef __HYPERVISOR_physdev_op
-+#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
-+#endif
-+
-+/* New platform_op hypercall introduced in 0x00030204. */
-+#if __XEN_INTERFACE_VERSION__ < 0x00030204
-+#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
-+#endif
-+
-+/*
-+ * VIRTUAL INTERRUPTS
-+ *
-+ * Virtual interrupts that a guest OS may receive from Xen.
-+ *
-+ * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
-+ * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
-+ * The latter can be allocated only once per guest: they must initially be
-+ * allocated to VCPU0 but can subsequently be re-bound.
-+ */
-+#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
-+#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
-+#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
-+#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */
-+#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */
-+#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
-+#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
-+#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
-+
-+/* Architecture-specific VIRQ definitions. */
-+#define VIRQ_ARCH_0 16
-+#define VIRQ_ARCH_1 17
-+#define VIRQ_ARCH_2 18
-+#define VIRQ_ARCH_3 19
-+#define VIRQ_ARCH_4 20
-+#define VIRQ_ARCH_5 21
-+#define VIRQ_ARCH_6 22
-+#define VIRQ_ARCH_7 23
-+
-+#define NR_VIRQS 24
-+
-+/*
-+ * MMU-UPDATE REQUESTS
-+ *
-+ * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
-+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
-+ * Where the FD has some effect, it is described below.
-+ * ptr[1:0] specifies the appropriate MMU_* command.
-+ *
-+ * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
-+ * Updates an entry in a page table. If updating an L1 table, and the new
-+ * table entry is valid/present, the mapped frame must belong to the FD, if
-+ * an FD has been specified. If attempting to map an I/O page then the
-+ * caller assumes the privilege of the FD.
-+ * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
-+ * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
-+ * ptr[:2] -- Machine address of the page-table entry to modify.
-+ * val -- Value to write.
-+ *
-+ * ptr[1:0] == MMU_MACHPHYS_UPDATE:
-+ * Updates an entry in the machine->pseudo-physical mapping table.
-+ * ptr[:2] -- Machine address within the frame whose mapping to modify.
-+ * The frame must belong to the FD, if one is specified.
-+ * val -- Value to write into the mapping entry.
-+ */
-+#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
-+#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
-+
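Concretely, a single checked PTE write packs the command into the low bits of ptr; a sketch, assuming the HYPERVISOR_mmu_update wrapper from the hypercall headers:

static int write_pte_machine(uint64_t pte_machine_addr, uint64_t new_val)
{
    struct mmu_update u = {
        .ptr = pte_machine_addr | MMU_NORMAL_PT_UPDATE,
        .val = new_val,
    };

    /* One request, no success count needed, no foreign domain. */
    return HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
}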
-+/*
-+ * MMU EXTENDED OPERATIONS
-+ *
-+ * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
-+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
-+ * Where the FD has some effect, it is described below.
-+ *
-+ * cmd: MMUEXT_(UN)PIN_*_TABLE
-+ * mfn: Machine frame number to be (un)pinned as a p.t. page.
-+ * The frame must belong to the FD, if one is specified.
-+ *
-+ * cmd: MMUEXT_NEW_BASEPTR
-+ * mfn: Machine frame number of new page-table base to install in MMU.
-+ *
-+ * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
-+ * mfn: Machine frame number of new page-table base to install in MMU
-+ * when in user space.
-+ *
-+ * cmd: MMUEXT_TLB_FLUSH_LOCAL
-+ * No additional arguments. Flushes local TLB.
-+ *
-+ * cmd: MMUEXT_INVLPG_LOCAL
-+ * linear_addr: Linear address to be flushed from the local TLB.
-+ *
-+ * cmd: MMUEXT_TLB_FLUSH_MULTI
-+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
-+ *
-+ * cmd: MMUEXT_INVLPG_MULTI
-+ * linear_addr: Linear address to be flushed.
-+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
-+ *
-+ * cmd: MMUEXT_TLB_FLUSH_ALL
-+ * No additional arguments. Flushes all VCPUs' TLBs.
-+ *
-+ * cmd: MMUEXT_INVLPG_ALL
-+ * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
-+ *
-+ * cmd: MMUEXT_FLUSH_CACHE
-+ * No additional arguments. Writes back and flushes cache contents.
-+ *
-+ * cmd: MMUEXT_SET_LDT
-+ * linear_addr: Linear address of LDT base (NB. must be page-aligned).
-+ * nr_ents: Number of entries in LDT.
-+ */
-+#define MMUEXT_PIN_L1_TABLE 0
-+#define MMUEXT_PIN_L2_TABLE 1
-+#define MMUEXT_PIN_L3_TABLE 2
-+#define MMUEXT_PIN_L4_TABLE 3
-+#define MMUEXT_UNPIN_TABLE 4
-+#define MMUEXT_NEW_BASEPTR 5
-+#define MMUEXT_TLB_FLUSH_LOCAL 6
-+#define MMUEXT_INVLPG_LOCAL 7
-+#define MMUEXT_TLB_FLUSH_MULTI 8
-+#define MMUEXT_INVLPG_MULTI 9
-+#define MMUEXT_TLB_FLUSH_ALL 10
-+#define MMUEXT_INVLPG_ALL 11
-+#define MMUEXT_FLUSH_CACHE 12
-+#define MMUEXT_SET_LDT 13
-+#define MMUEXT_NEW_USER_BASEPTR 15
-+
-+#ifndef __ASSEMBLY__
-+struct mmuext_op {
-+ unsigned int cmd;
-+ union {
-+ /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
-+ xen_pfn_t mfn;
-+ /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
-+ unsigned long linear_addr;
-+ } arg1;
-+ union {
-+ /* SET_LDT */
-+ unsigned int nr_ents;
-+ /* TLB_FLUSH_MULTI, INVLPG_MULTI */
-+ XEN_GUEST_HANDLE_00030205(void) vcpumask;
-+ } arg2;
-+};
-+typedef struct mmuext_op mmuext_op_t;
-+DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
-+#endif
-+
-+/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
-+/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
-+/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
-+#define UVMF_NONE (0UL<<0) /* No flushing at all. */
-+#define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */
-+#define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */
-+#define UVMF_FLUSHTYPE_MASK (3UL<<0)
-+#define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */
-+#define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */
-+#define UVMF_ALL (1UL<<2) /* Flush all TLBs. */
-+
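For instance, remapping one page and flushing only that entry from the local TLB combines the flags as follows (a sketch, assuming the HYPERVISOR_update_va_mapping wrapper and pte_t from the arch headers):

static int remap_one_page(unsigned long va, pte_t new_pte)
{
    return HYPERVISOR_update_va_mapping(va, new_pte, UVMF_INVLPG | UVMF_LOCAL);
}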
-+/*
-+ * Commands to HYPERVISOR_console_io().
-+ */
-+#define CONSOLEIO_write 0
-+#define CONSOLEIO_read 1
-+
-+/*
-+ * Commands to HYPERVISOR_vm_assist().
-+ */
-+#define VMASST_CMD_enable 0
-+#define VMASST_CMD_disable 1
-+
-+/* x86/32 guests: simulate full 4GB segment limits. */
-+#define VMASST_TYPE_4gb_segments 0
-+
-+/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
-+#define VMASST_TYPE_4gb_segments_notify 1
-+
-+/*
-+ * x86 guests: support writes to bottom-level PTEs.
-+ * NB1. Page-directory entries cannot be written.
-+ * NB2. Guest must continue to remove all writable mappings of PTEs.
-+ */
-+#define VMASST_TYPE_writable_pagetables 2
-+
-+/* x86/PAE guests: support PDPTs above 4GB. */
-+#define VMASST_TYPE_pae_extended_cr3 3
-+
-+#define MAX_VMASST_TYPE 3
-+
-+#ifndef __ASSEMBLY__
-+
-+typedef uint16_t domid_t;
-+
-+/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
-+#define DOMID_FIRST_RESERVED (0x7FF0U)
-+
-+/* DOMID_SELF is used in certain contexts to refer to oneself. */
-+#define DOMID_SELF (0x7FF0U)
-+
-+/*
-+ * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
-+ * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
-+ * is useful to ensure that no mappings to the OS's own heap are accidentally
-+ * installed. (e.g., in Linux this could cause havoc as reference counts
-+ * aren't adjusted on the I/O-mapping code path).
-+ * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
-+ * be specified by any calling domain.
-+ */
-+#define DOMID_IO (0x7FF1U)
-+
-+/*
-+ * DOMID_XEN is used to allow privileged domains to map restricted parts of
-+ * Xen's heap space (e.g., the machine_to_phys table).
-+ * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
-+ * the caller is privileged.
-+ */
-+#define DOMID_XEN (0x7FF2U)
-+
-+/*
-+ * Send an array of these to HYPERVISOR_mmu_update().
-+ * NB. The fields are natural pointer/address size for this architecture.
-+ */
-+struct mmu_update {
-+ uint64_t ptr; /* Machine address of PTE. */
-+ uint64_t val; /* New contents of PTE. */
-+};
-+typedef struct mmu_update mmu_update_t;
-+DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
-+
-+/*
-+ * Send an array of these to HYPERVISOR_multicall().
-+ * NB. The fields are natural register size for this architecture.
-+ */
-+struct multicall_entry {
-+ unsigned long op, result;
-+ unsigned long args[6];
-+};
-+typedef struct multicall_entry multicall_entry_t;
-+DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);
-+
-+/*
-+ * Event channel endpoints per domain:
-+ * 1024 if a long is 32 bits; 4096 if a long is 64 bits.
-+ */
-+#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
-+
-+struct vcpu_time_info {
-+ /*
-+ * Updates to the following values are preceded and followed by an
-+ * increment of 'version'. The guest can therefore detect updates by
-+ * looking for changes to 'version'. If the least-significant bit of
-+ * the version number is set then an update is in progress and the guest
-+ * must wait to read a consistent set of values.
-+ * The correct way to interact with the version number is similar to
-+ * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
-+ */
-+ uint32_t version;
-+ uint32_t pad0;
-+ uint64_t tsc_timestamp; /* TSC at last update of time vals. */
-+ uint64_t system_time; /* Time, in nanosecs, since boot. */
-+ /*
-+ * Current system time:
-+ * system_time +
-+ * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
-+ * CPU frequency (Hz):
-+ * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
-+ */
-+ uint32_t tsc_to_system_mul;
-+ int8_t tsc_shift;
-+ int8_t pad1[3];
-+}; /* 32 bytes */
-+typedef struct vcpu_time_info vcpu_time_info_t;
-+
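The read side of that protocol behaves like a seqlock reader; a minimal sketch:

static void read_vcpu_time(const struct vcpu_time_info *src,
                           struct vcpu_time_info *dst)
{
    uint32_t ver;

    do {
        ver = src->version;
        rmb();                  /* fetch version before the payload */
        *dst = *src;
        rmb();                  /* fetch payload before re-checking version */
    } while ((ver & 1) || (ver != src->version));
}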
-+struct vcpu_info {
-+ /*
-+ * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
-+ * a pending notification for a particular VCPU. It is then cleared
-+ * by the guest OS /before/ checking for pending work, thus avoiding
-+ * a set-and-check race. Note that the mask is only accessed by Xen
-+ * on the CPU that is currently hosting the VCPU. This means that the
-+ * pending and mask flags can be updated by the guest without special
-+ * synchronisation (i.e., no need for the x86 LOCK prefix).
-+ * This may seem suboptimal because if the pending flag is set by
-+ * a different CPU then an IPI may be scheduled even when the mask
-+ * is set. However, note:
-+ * 1. The task of 'interrupt holdoff' is covered by the per-event-
-+ * channel mask bits. A 'noisy' event that is continually being
-+ * triggered can be masked at source at this very precise
-+ * granularity.
-+ * 2. The main purpose of the per-VCPU mask is therefore to restrict
-+ * reentrant execution: whether for concurrency control, or to
-+ * prevent unbounded stack usage. Whatever the purpose, we expect
-+ * that the mask will be asserted only for short periods at a time,
-+ * and so the likelihood of a 'spurious' IPI is suitably small.
-+ * The mask is read before making an event upcall to the guest: a
-+ * non-zero mask therefore guarantees that the VCPU will not receive
-+ * an upcall activation. The mask is cleared when the VCPU requests
-+ * to block: this avoids wakeup-waiting races.
-+ */
-+ uint8_t evtchn_upcall_pending;
-+ uint8_t evtchn_upcall_mask;
-+ unsigned long evtchn_pending_sel;
-+ struct arch_vcpu_info arch;
-+ struct vcpu_time_info time;
-+}; /* 64 bytes (x86) */
-+#ifndef __XEN__
-+typedef struct vcpu_info vcpu_info_t;
-+#endif
-+
-+/*
-+ * Xen/kernel shared data -- pointer provided in start_info.
-+ *
-+ * This structure is defined to be both smaller than a page, and the
-+ * only data on the shared page, but may vary in actual size even within
-+ * compatible Xen versions; guests should not rely on the size
-+ * of this structure remaining constant.
-+ */
-+struct shared_info {
-+ struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
-+
-+ /*
-+ * A domain can create "event channels" on which it can send and receive
-+ * asynchronous event notifications. There are three classes of event that
-+ * are delivered by this mechanism:
-+ * 1. Bi-directional inter- and intra-domain connections. Domains must
-+ * arrange out-of-band to set up a connection (usually by allocating
-+ * an unbound 'listener' port and advertising that via a storage service
-+ * such as xenstore).
-+ * 2. Physical interrupts. A domain with suitable hardware-access
-+ * privileges can bind an event-channel port to a physical interrupt
-+ * source.
-+ * 3. Virtual interrupts ('events'). A domain can bind an event-channel
-+ * port to a virtual interrupt source, such as the virtual-timer
-+ * device or the emergency console.
-+ *
-+ * Event channels are addressed by a "port index". Each channel is
-+ * associated with two bits of information:
-+ * 1. PENDING -- notifies the domain that there is a pending notification
-+ * to be processed. This bit is cleared by the guest.
-+ * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
-+ * will cause an asynchronous upcall to be scheduled. This bit is only
-+ * updated by the guest. It is read-only within Xen. If a channel
-+ * becomes pending while the channel is masked then the 'edge' is lost
-+ * (i.e., when the channel is unmasked, the guest must manually handle
-+ * pending notifications as no upcall will be scheduled by Xen).
-+ *
-+ * To expedite scanning of pending notifications, any 0->1 pending
-+ * transition on an unmasked channel causes a corresponding bit in a
-+ * per-vcpu selector word to be set. Each bit in the selector covers a
-+ * 'C long' in the PENDING bitfield array.
-+ */
-+ unsigned long evtchn_pending[sizeof(unsigned long) * 8];
-+ unsigned long evtchn_mask[sizeof(unsigned long) * 8];
-+
-+ /*
-+ * Wallclock time: updated only by control software. Guests should base
-+ * their gettimeofday() syscall on this wallclock-base value.
-+ */
-+ uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
-+ uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
-+ uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
-+
-+ struct arch_shared_info arch;
-+
-+};
-+#ifndef __XEN__
-+typedef struct shared_info shared_info_t;
-+#endif
-+
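Putting the selector word and the two bit arrays together, a guest's upcall handler scans for work roughly like this (a sketch; dispatch of the port is left as a comment):

static void scan_pending_events(struct shared_info *s, struct vcpu_info *v)
{
    unsigned long sel = xchg(&v->evtchn_pending_sel, 0);

    while (sel) {
        unsigned int word = __ffs(sel);
        unsigned long pending = s->evtchn_pending[word] & ~s->evtchn_mask[word];

        sel &= ~(1UL << word);
        while (pending) {
            unsigned int bit  = __ffs(pending);
            unsigned int port = word * BITS_PER_LONG + bit;

            pending &= ~(1UL << bit);
            clear_bit(bit, &s->evtchn_pending[word]);   /* PENDING is cleared by the guest */
            (void)port;         /* dispatch the handler bound to 'port' here */
        }
    }
}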
-+/*
-+ * Start-of-day memory layout:
-+ * 1. The domain is started within a contiguous virtual-memory region.
-+ * 2. The contiguous region ends on an aligned 4MB boundary.
-+ * 3. This is the order of bootstrap elements in the initial virtual region:
-+ * a. relocated kernel image
-+ * b. initial ram disk [mod_start, mod_len]
-+ * c. list of allocated page frames [mfn_list, nr_pages]
-+ * d. start_info_t structure [register ESI (x86)]
-+ * e. bootstrap page tables [pt_base, CR3 (x86)]
-+ * f. bootstrap stack [register ESP (x86)]
-+ * 4. Bootstrap elements are packed together, but each is 4kB-aligned.
-+ * 5. The initial ram disk may be omitted.
-+ * 6. The list of page frames forms a contiguous 'pseudo-physical' memory
-+ * layout for the domain. In particular, the bootstrap virtual-memory
-+ * region is a 1:1 mapping to the first section of the pseudo-physical map.
-+ * 7. All bootstrap elements are mapped read-writable for the guest OS. The
-+ * only exception is the bootstrap page table, which is mapped read-only.
-+ * 8. There is guaranteed to be at least 512kB padding after the final
-+ * bootstrap element. If necessary, the bootstrap virtual region is
-+ * extended by an extra 4MB to ensure this.
-+ */
-+
-+#define MAX_GUEST_CMDLINE 1024
-+struct start_info {
-+ /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
-+ char magic[32]; /* "xen-<version>-<platform>". */
-+ unsigned long nr_pages; /* Total pages allocated to this domain. */
-+ unsigned long shared_info; /* MACHINE address of shared info struct. */
-+ uint32_t flags; /* SIF_xxx flags. */
-+ xen_pfn_t store_mfn; /* MACHINE page number of shared page. */
-+ uint32_t store_evtchn; /* Event channel for store communication. */
-+ union {
-+ struct {
-+ xen_pfn_t mfn; /* MACHINE page number of console page. */
-+ uint32_t evtchn; /* Event channel for console page. */
-+ } domU;
-+ struct {
-+ uint32_t info_off; /* Offset of console_info struct. */
-+ uint32_t info_size; /* Size of console_info struct from start.*/
-+ } dom0;
-+ } console;
-+ /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */
-+ unsigned long pt_base; /* VIRTUAL address of page directory. */
-+ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */
-+ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */
-+ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
-+ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
-+ int8_t cmd_line[MAX_GUEST_CMDLINE];
-+};
-+typedef struct start_info start_info_t;
-+
-+/* New console union for dom0 introduced in 0x00030203. */
-+#if __XEN_INTERFACE_VERSION__ < 0x00030203
-+#define console_mfn console.domU.mfn
-+#define console_evtchn console.domU.evtchn
-+#endif
-+
-+/* These flags are passed in the 'flags' field of start_info_t. */
-+#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
-+#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
-+
-+typedef struct dom0_vga_console_info {
-+ uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */
-+#define XEN_VGATYPE_TEXT_MODE_3 0x03
-+#define XEN_VGATYPE_VESA_LFB 0x23
-+
-+ union {
-+ struct {
-+ /* Font height, in pixels. */
-+ uint16_t font_height;
-+ /* Cursor location (column, row). */
-+ uint16_t cursor_x, cursor_y;
-+ /* Number of rows and columns (dimensions in characters). */
-+ uint16_t rows, columns;
-+ } text_mode_3;
-+
-+ struct {
-+ /* Width and height, in pixels. */
-+ uint16_t width, height;
-+ /* Bytes per scan line. */
-+ uint16_t bytes_per_line;
-+ /* Bits per pixel. */
-+ uint16_t bits_per_pixel;
-+ /* LFB physical address, and size (in units of 64kB). */
-+ uint32_t lfb_base;
-+ uint32_t lfb_size;
-+ /* RGB mask offsets and sizes, as defined by VBE 1.2+ */
-+ uint8_t red_pos, red_size;
-+ uint8_t green_pos, green_size;
-+ uint8_t blue_pos, blue_size;
-+ uint8_t rsvd_pos, rsvd_size;
-+ } vesa_lfb;
-+ } u;
-+} dom0_vga_console_info_t;
-+
-+typedef uint8_t xen_domain_handle_t[16];
-+
-+/* Turn a plain number into a C unsigned long constant. */
-+#define __mk_unsigned_long(x) x ## UL
-+#define mk_unsigned_long(x) __mk_unsigned_long(x)
-+
-+DEFINE_XEN_GUEST_HANDLE(uint8_t);
-+DEFINE_XEN_GUEST_HANDLE(uint16_t);
-+DEFINE_XEN_GUEST_HANDLE(uint32_t);
-+DEFINE_XEN_GUEST_HANDLE(uint64_t);
-+
-+#else /* __ASSEMBLY__ */
-+
-+/* In assembly code we cannot use C numeric constant suffixes. */
-+#define mk_unsigned_long(x) x
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+/* Default definitions for macros used by domctl/sysctl. */
-+#if defined(__XEN__) || defined(__XEN_TOOLS__)
-+#ifndef uint64_aligned_t
-+#define uint64_aligned_t uint64_t
-+#endif
-+#ifndef XEN_GUEST_HANDLE_64
-+#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name)
-+#endif
-+#endif
-+
-+#endif /* __XEN_PUBLIC_XEN_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/interface/xenoprof.h ubuntu-gutsy-xen/include/xen/interface/xenoprof.h
---- ubuntu-gutsy/include/xen/interface/xenoprof.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/interface/xenoprof.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,132 @@
-+/******************************************************************************
-+ * xenoprof.h
-+ *
-+ * Interface for enabling system-wide profiling based on hardware performance
-+ * counters.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to
-+ * deal in the Software without restriction, including without limitation the
-+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Copyright (C) 2005 Hewlett-Packard Co.
-+ * Written by Aravind Menon & Jose Renato Santos
-+ */
-+
-+#ifndef __XEN_PUBLIC_XENOPROF_H__
-+#define __XEN_PUBLIC_XENOPROF_H__
-+
-+#include "xen.h"
-+
-+/*
-+ * Commands to HYPERVISOR_xenoprof_op().
-+ */
-+#define XENOPROF_init 0
-+#define XENOPROF_reset_active_list 1
-+#define XENOPROF_reset_passive_list 2
-+#define XENOPROF_set_active 3
-+#define XENOPROF_set_passive 4
-+#define XENOPROF_reserve_counters 5
-+#define XENOPROF_counter 6
-+#define XENOPROF_setup_events 7
-+#define XENOPROF_enable_virq 8
-+#define XENOPROF_start 9
-+#define XENOPROF_stop 10
-+#define XENOPROF_disable_virq 11
-+#define XENOPROF_release_counters 12
-+#define XENOPROF_shutdown 13
-+#define XENOPROF_get_buffer 14
-+#define XENOPROF_last_op 14
-+
-+#define MAX_OPROF_EVENTS 32
-+#define MAX_OPROF_DOMAINS 25
-+#define XENOPROF_CPU_TYPE_SIZE 64
-+
-+/* Xenoprof performance events (not Xen events) */
-+struct event_log {
-+ uint64_t eip;
-+ uint8_t mode;
-+ uint8_t event;
-+};
-+
-+/* Xenoprof buffer shared between Xen and domain - 1 per VCPU */
-+struct xenoprof_buf {
-+ uint32_t event_head;
-+ uint32_t event_tail;
-+ uint32_t event_size;
-+ uint32_t vcpu_id;
-+ uint64_t xen_samples;
-+ uint64_t kernel_samples;
-+ uint64_t user_samples;
-+ uint64_t lost_samples;
-+ struct event_log event_log[1];
-+};
-+#ifndef __XEN__
-+typedef struct xenoprof_buf xenoprof_buf_t;
-+DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t);
-+#endif
-+
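Consuming the shared buffer is a head/tail walk; a sketch of the domain-side drain loop, assuming event_head and event_tail index event_log[] and wrap at event_size:

static void drain_samples(struct xenoprof_buf *buf)
{
    uint32_t tail = buf->event_tail;

    while (tail != buf->event_head) {
        struct event_log *sample = &buf->event_log[tail];

        (void)sample;   /* hand sample->eip, ->mode, ->event to the profiler */
        tail = (tail + 1) % buf->event_size;
    }
    buf->event_tail = tail;
}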
-+struct xenoprof_init {
-+ int32_t num_events;
-+ int32_t is_primary;
-+ char cpu_type[XENOPROF_CPU_TYPE_SIZE];
-+};
-+typedef struct xenoprof_init xenoprof_init_t;
-+DEFINE_XEN_GUEST_HANDLE(xenoprof_init_t);
-+
-+struct xenoprof_get_buffer {
-+ int32_t max_samples;
-+ int32_t nbuf;
-+ int32_t bufsize;
-+ uint64_t buf_gmaddr;
-+};
-+typedef struct xenoprof_get_buffer xenoprof_get_buffer_t;
-+DEFINE_XEN_GUEST_HANDLE(xenoprof_get_buffer_t);
-+
-+struct xenoprof_counter {
-+ uint32_t ind;
-+ uint64_t count;
-+ uint32_t enabled;
-+ uint32_t event;
-+ uint32_t hypervisor;
-+ uint32_t kernel;
-+ uint32_t user;
-+ uint64_t unit_mask;
-+};
-+typedef struct xenoprof_counter xenoprof_counter_t;
-+DEFINE_XEN_GUEST_HANDLE(xenoprof_counter_t);
-+
-+typedef struct xenoprof_passive {
-+ uint16_t domain_id;
-+ int32_t max_samples;
-+ int32_t nbuf;
-+ int32_t bufsize;
-+ uint64_t buf_gmaddr;
-+} xenoprof_passive_t;
-+DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t);
-+
-+
-+#endif /* __XEN_PUBLIC_XENOPROF_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/pcifront.h ubuntu-gutsy-xen/include/xen/pcifront.h
---- ubuntu-gutsy/include/xen/pcifront.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/pcifront.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,76 @@
-+/*
-+ * PCI Frontend - arch-dependent declarations
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#ifndef __XEN_ASM_PCIFRONT_H__
-+#define __XEN_ASM_PCIFRONT_H__
-+
-+#include <linux/spinlock.h>
-+
-+#ifdef __KERNEL__
-+
-+#ifndef __ia64__
-+
-+struct pcifront_device;
-+struct pci_bus;
-+
-+struct pcifront_sd {
-+ int domain;
-+ struct pcifront_device *pdev;
-+};
-+
-+static inline struct pcifront_device *
-+pcifront_get_pdev(struct pcifront_sd *sd)
-+{
-+ return sd->pdev;
-+}
-+
-+static inline void pcifront_init_sd(struct pcifront_sd *sd, int domain,
-+ struct pcifront_device *pdev)
-+{
-+ sd->domain = domain;
-+ sd->pdev = pdev;
-+}
-+
-+#if defined(CONFIG_PCI_DOMAINS)
-+static inline int pci_domain_nr(struct pci_bus *bus)
-+{
-+ struct pcifront_sd *sd = bus->sysdata;
-+ return sd->domain;
-+}
-+static inline int pci_proc_domain(struct pci_bus *bus)
-+{
-+ return pci_domain_nr(bus);
-+}
-+#endif /* CONFIG_PCI_DOMAINS */
-+
-+#else /* __ia64__ */
-+
-+#include <asm/pci.h>
-+#define pcifront_sd pci_controller
-+
-+static inline struct pcifront_device *
-+pcifront_get_pdev(struct pcifront_sd *sd)
-+{
-+ return (struct pcifront_device *)sd->platform_data;
-+}
-+
-+static inline void pcifront_init_sd(struct pcifront_sd *sd, int domain,
-+ struct pcifront_device *pdev)
-+{
-+ sd->segment = domain;
-+ sd->acpi_handle = NULL;
-+ sd->iommu = NULL;
-+ sd->windows = 0;
-+ sd->window = NULL;
-+ sd->platform_data = pdev;
-+}
-+
-+#endif /* __ia64__ */
-+
-+extern struct rw_semaphore pci_bus_sem;
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /* __XEN_ASM_PCIFRONT_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/public/evtchn.h ubuntu-gutsy-xen/include/xen/public/evtchn.h
---- ubuntu-gutsy/include/xen/public/evtchn.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/public/evtchn.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,88 @@
-+/******************************************************************************
-+ * evtchn.h
-+ *
-+ * Interface to /dev/xen/evtchn.
-+ *
-+ * Copyright (c) 2003-2005, K A Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __LINUX_PUBLIC_EVTCHN_H__
-+#define __LINUX_PUBLIC_EVTCHN_H__
-+
-+/*
-+ * Bind a fresh port to VIRQ @virq.
-+ * Return allocated port.
-+ */
-+#define IOCTL_EVTCHN_BIND_VIRQ \
-+ _IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq))
-+struct ioctl_evtchn_bind_virq {
-+ unsigned int virq;
-+};
-+
-+/*
-+ * Bind a fresh port to remote <@remote_domain, @remote_port>.
-+ * Return allocated port.
-+ */
-+#define IOCTL_EVTCHN_BIND_INTERDOMAIN \
-+ _IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain))
-+struct ioctl_evtchn_bind_interdomain {
-+ unsigned int remote_domain, remote_port;
-+};
-+
-+/*
-+ * Allocate a fresh port for binding to @remote_domain.
-+ * Return allocated port.
-+ */
-+#define IOCTL_EVTCHN_BIND_UNBOUND_PORT \
-+ _IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port))
-+struct ioctl_evtchn_bind_unbound_port {
-+ unsigned int remote_domain;
-+};
-+
-+/*
-+ * Unbind previously allocated @port.
-+ */
-+#define IOCTL_EVTCHN_UNBIND \
-+ _IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind))
-+struct ioctl_evtchn_unbind {
-+ unsigned int port;
-+};
-+
-+/*
-+ * Send an event to the remote end of the channel whose local endpoint is @port.
-+ */
-+#define IOCTL_EVTCHN_NOTIFY \
-+ _IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify))
-+struct ioctl_evtchn_notify {
-+ unsigned int port;
-+};
-+
-+/* Clear and reinitialise the event buffer. Clear error condition. */
-+#define IOCTL_EVTCHN_RESET \
-+ _IOC(_IOC_NONE, 'E', 5, 0)
-+
-+#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
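Putting the ioctls together, a minimal user-space consumer might look like this (a sketch; the device node path and the choice of VIRQ_DEBUG are assumptions, and the ioctl definitions come from this header). Pending ports are read back from the file descriptor as 32-bit values:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
    struct ioctl_evtchn_bind_virq bind = { .virq = 1 /* VIRQ_DEBUG */ };
    unsigned int port;
    int fd = open("/dev/xen/evtchn", O_RDWR);

    if (fd < 0)
        return 1;
    port = ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);    /* returns the new port */
    read(fd, &port, sizeof(port));                      /* block for a pending port */
    close(fd);
    return 0;
}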
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/public/gntdev.h ubuntu-gutsy-xen/include/xen/public/gntdev.h
---- ubuntu-gutsy/include/xen/public/gntdev.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/public/gntdev.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,105 @@
-+/******************************************************************************
-+ * gntdev.h
-+ *
-+ * Interface to /dev/xen/gntdev.
-+ *
-+ * Copyright (c) 2007, D G Murray
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __LINUX_PUBLIC_GNTDEV_H__
-+#define __LINUX_PUBLIC_GNTDEV_H__
-+
-+struct ioctl_gntdev_grant_ref {
-+ /* The domain ID of the grant to be mapped. */
-+ uint32_t domid;
-+ /* The grant reference of the grant to be mapped. */
-+ uint32_t ref;
-+};
-+
-+/*
-+ * Inserts the grant references into the mapping table of an instance
-+ * of gntdev. N.B. This does not perform the mapping, which is deferred
-+ * until mmap() is called with @index as the offset.
-+ */
-+#define IOCTL_GNTDEV_MAP_GRANT_REF \
-+_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
-+struct ioctl_gntdev_map_grant_ref {
-+ /* IN parameters */
-+ /* The number of grants to be mapped. */
-+ uint32_t count;
-+ uint32_t pad;
-+ /* OUT parameters */
-+ /* The offset to be used on a subsequent call to mmap(). */
-+ uint64_t index;
-+ /* Variable IN parameter. */
-+ /* Array of grant references, of size @count. */
-+ struct ioctl_gntdev_grant_ref refs[1];
-+};
-+
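Tying the two steps together, a user-space mapping of a single grant might proceed as follows (a sketch; the domain ID and grant reference are placeholders, and the returned file descriptor is left open while the mapping is in use):

#include <stdint.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static void *map_one_grant(uint32_t domid, uint32_t ref)
{
    struct ioctl_gntdev_map_grant_ref map = {
        .count = 1,
        .refs  = { { .domid = domid, .ref = ref } },
    };
    int fd = open("/dev/xen/gntdev", O_RDWR);

    if (fd < 0 || ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map))
        return NULL;
    /* The mapping itself happens here, keyed by the returned index. */
    return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, map.index);
}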
-+/*
-+ * Removes the grant references from the mapping table of an instance of
-+ * gntdev. N.B. munmap() must be called on the relevant virtual address(es)
-+ * before this ioctl is called, or an error will result.
-+ */
-+#define IOCTL_GNTDEV_UNMAP_GRANT_REF \
-+_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref))
-+struct ioctl_gntdev_unmap_grant_ref {
-+ /* IN parameters */
-+ /* The offset returned by the corresponding map operation. */
-+ uint64_t index;
-+ /* The number of pages to be unmapped. */
-+ uint32_t count;
-+ uint32_t pad;
-+};
-+
-+/*
-+ * Returns the offset in the driver's address space that corresponds
-+ * to @vaddr. This can be used to perform a munmap(), followed by an
-+ * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by
-+ * the caller. The number of pages that were allocated at the same time as
-+ * @vaddr is returned in @count.
-+ *
-+ * N.B. Where more than one page has been mapped into a contiguous range, the
-+ * supplied @vaddr must correspond to the start of the range; otherwise
-+ * an error will result. It is only possible to munmap() the entire
-+ * contiguously-allocated range at once, and not any subrange thereof.
-+ */
-+#define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \
-+_IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr))
-+struct ioctl_gntdev_get_offset_for_vaddr {
-+ /* IN parameters */
-+ /* The virtual address of the first mapped page in a range. */
-+ uint64_t vaddr;
-+ /* OUT parameters */
-+ /* The offset that was used in the initial mmap() operation. */
-+ uint64_t offset;
-+ /* The number of pages mapped in the VM area that begins at @vaddr. */
-+ uint32_t count;
-+ uint32_t pad;
-+};
-+
-+#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/public/privcmd.h ubuntu-gutsy-xen/include/xen/public/privcmd.h
---- ubuntu-gutsy/include/xen/public/privcmd.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/public/privcmd.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,79 @@
-+/******************************************************************************
-+ * privcmd.h
-+ *
-+ * Interface to /proc/xen/privcmd.
-+ *
-+ * Copyright (c) 2003-2005, K A Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __LINUX_PUBLIC_PRIVCMD_H__
-+#define __LINUX_PUBLIC_PRIVCMD_H__
-+
-+#include <linux/types.h>
-+
-+#ifndef __user
-+#define __user
-+#endif
-+
-+typedef struct privcmd_hypercall
-+{
-+ __u64 op;
-+ __u64 arg[5];
-+} privcmd_hypercall_t;
-+
-+typedef struct privcmd_mmap_entry {
-+ __u64 va;
-+ __u64 mfn;
-+ __u64 npages;
-+} privcmd_mmap_entry_t;
-+
-+typedef struct privcmd_mmap {
-+ int num;
-+ domid_t dom; /* target domain */
-+ privcmd_mmap_entry_t __user *entry;
-+} privcmd_mmap_t;
-+
-+typedef struct privcmd_mmapbatch {
-+ int num; /* number of pages to populate */
-+ domid_t dom; /* target domain */
-+ __u64 addr; /* virtual address */
-+ xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */
-+} privcmd_mmapbatch_t;
-+
-+/*
-+ * @cmd: IOCTL_PRIVCMD_HYPERCALL
-+ * @arg: &privcmd_hypercall_t
-+ * Return: Value returned from execution of the specified hypercall.
-+ */
-+#define IOCTL_PRIVCMD_HYPERCALL \
-+ _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
-+#define IOCTL_PRIVCMD_MMAP \
-+ _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
-+#define IOCTL_PRIVCMD_MMAPBATCH \
-+ _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
-+
-+#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
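As an example of the hypercall ioctl, a privileged process can ask Xen for its version (op 17 is __HYPERVISOR_xen_version and command 0 is XENVER_version in the interface headers above); a sketch:

#include <fcntl.h>
#include <sys/ioctl.h>

static int xen_version_via_privcmd(void)
{
    privcmd_hypercall_t call = { .op = 17, .arg = { 0, 0 } };
    int fd = open("/proc/xen/privcmd", O_RDWR);

    if (fd < 0)
        return -1;
    return ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);   /* major:minor on success */
}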
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/xenbus.h ubuntu-gutsy-xen/include/xen/xenbus.h
---- ubuntu-gutsy/include/xen/xenbus.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/xenbus.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,303 @@
-+/******************************************************************************
-+ * xenbus.h
-+ *
-+ * Talks to Xen Store to figure out what devices we have.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ * Copyright (C) 2005 XenSource Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef _XEN_XENBUS_H
-+#define _XEN_XENBUS_H
-+
-+#include <linux/device.h>
-+#include <linux/notifier.h>
-+#include <linux/mutex.h>
-+#include <linux/completion.h>
-+#include <linux/init.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/interface/io/xenbus.h>
-+#include <xen/interface/io/xs_wire.h>
-+
-+/* Register callback to watch this node. */
-+struct xenbus_watch
-+{
-+ struct list_head list;
-+
-+ /* Path being watched. */
-+ const char *node;
-+
-+ /* Callback (executed in a process context with no locks held). */
-+ void (*callback)(struct xenbus_watch *,
-+ const char **vec, unsigned int len);
-+
-+ /* See XBWF_ definitions below. */
-+ unsigned long flags;
-+};
-+
-+/*
-+ * Execute callback in its own kthread. Useful if the callback is long
-+ * running or heavily serialised, to avoid taking out the main xenwatch thread
-+ * for a long period of time (or even unwittingly causing a deadlock).
-+ */
-+#define XBWF_new_thread 1
-+
-+/* A xenbus device. */
-+struct xenbus_device {
-+ const char *devicetype;
-+ const char *nodename;
-+ const char *otherend;
-+ int otherend_id;
-+ struct xenbus_watch otherend_watch;
-+ struct device dev;
-+ enum xenbus_state state;
-+ struct completion down;
-+};
-+
-+static inline struct xenbus_device *to_xenbus_device(struct device *dev)
-+{
-+ return container_of(dev, struct xenbus_device, dev);
-+}
-+
-+struct xenbus_device_id
-+{
-+ /* .../device/<device_type>/<identifier> */
-+ char devicetype[32]; /* General class of device. */
-+};
-+
-+/* A xenbus driver. */
-+struct xenbus_driver {
-+ char *name;
-+ struct module *owner;
-+ const struct xenbus_device_id *ids;
-+ int (*probe)(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id);
-+ void (*otherend_changed)(struct xenbus_device *dev,
-+ enum xenbus_state backend_state);
-+ int (*remove)(struct xenbus_device *dev);
-+ int (*suspend)(struct xenbus_device *dev);
-+ int (*suspend_cancel)(struct xenbus_device *dev);
-+ int (*resume)(struct xenbus_device *dev);
-+ int (*uevent)(struct xenbus_device *, char **, int, char *, int);
-+ struct device_driver driver;
-+ int (*read_otherend_details)(struct xenbus_device *dev);
-+ int (*is_ready)(struct xenbus_device *dev);
-+};
-+
-+static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
-+{
-+ return container_of(drv, struct xenbus_driver, driver);
-+}
-+
-+int xenbus_register_frontend(struct xenbus_driver *drv);
-+int xenbus_register_backend(struct xenbus_driver *drv);
-+void xenbus_unregister_driver(struct xenbus_driver *drv);
-+
-+struct xenbus_transaction
-+{
-+ u32 id;
-+};
-+
-+/* Nil transaction ID. */
-+#define XBT_NIL ((struct xenbus_transaction) { 0 })
-+
-+char **xenbus_directory(struct xenbus_transaction t,
-+ const char *dir, const char *node, unsigned int *num);
-+void *xenbus_read(struct xenbus_transaction t,
-+ const char *dir, const char *node, unsigned int *len);
-+int xenbus_write(struct xenbus_transaction t,
-+ const char *dir, const char *node, const char *string);
-+int xenbus_mkdir(struct xenbus_transaction t,
-+ const char *dir, const char *node);
-+int xenbus_exists(struct xenbus_transaction t,
-+ const char *dir, const char *node);
-+int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node);
-+int xenbus_transaction_start(struct xenbus_transaction *t);
-+int xenbus_transaction_end(struct xenbus_transaction t, int abort);
-+
-+/* Single read and scanf: returns -errno or num scanned if > 0. */
-+int xenbus_scanf(struct xenbus_transaction t,
-+ const char *dir, const char *node, const char *fmt, ...)
-+ __attribute__((format(scanf, 4, 5)));
-+
-+/* Single printf and write: returns -errno or 0. */
-+int xenbus_printf(struct xenbus_transaction t,
-+ const char *dir, const char *node, const char *fmt, ...)
-+ __attribute__((format(printf, 4, 5)));
-+
-+/* Generic read function: NULL-terminated triples of name,
-+ * sprintf-style type string, and pointer. Returns 0 or errno.*/
-+int xenbus_gather(struct xenbus_transaction t, const char *dir, ...);
-+
-+/* notifier routines for when the xenstore comes up */
-+int register_xenstore_notifier(struct notifier_block *nb);
-+void unregister_xenstore_notifier(struct notifier_block *nb);
-+
-+int register_xenbus_watch(struct xenbus_watch *watch);
-+void unregister_xenbus_watch(struct xenbus_watch *watch);
-+void xs_suspend(void);
-+void xs_resume(void);
-+void xs_suspend_cancel(void);
-+
-+/* Used by xenbus_dev to borrow kernel's store connection. */
-+void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg);
-+
-+/* Prepare for domain suspend: then resume or cancel the suspend. */
-+void xenbus_suspend(void);
-+void xenbus_resume(void);
-+void xenbus_suspend_cancel(void);
-+
-+#define XENBUS_IS_ERR_READ(str) ({ \
-+ if (!IS_ERR(str) && strlen(str) == 0) { \
-+ kfree(str); \
-+ str = ERR_PTR(-ERANGE); \
-+ } \
-+ IS_ERR(str); \
-+})
-+
-+#define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
-+
-+
-+/**
-+ * Register a watch on the given path, using the given xenbus_watch structure
-+ * for storage, and the given callback function as the callback. Return 0 on
-+ * success, or -errno on error. On success, the given path will be saved as
-+ * watch->node, and remains the caller's to free. On error, watch->node will
-+ * be NULL, the device will switch to XenbusStateClosing, and the error will
-+ * be saved in the store.
-+ */
-+int xenbus_watch_path(struct xenbus_device *dev, const char *path,
-+ struct xenbus_watch *watch,
-+ void (*callback)(struct xenbus_watch *,
-+ const char **, unsigned int));
-+
-+
-+/**
-+ * Register a watch on the given path/path2, using the given xenbus_watch
-+ * structure for storage, and the given callback function as the callback.
-+ * Return 0 on success, or -errno on error. On success, the watched path
-+ * (path/path2) will be saved as watch->node, and becomes the caller's to
-+ * kfree(). On error, watch->node will be NULL, so the caller has nothing to
-+ * free, the device will switch to XenbusStateClosing, and the error will be
-+ * saved in the store.
-+ */
-+int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
-+ const char *path2, struct xenbus_watch *watch,
-+ void (*callback)(struct xenbus_watch *,
-+ const char **, unsigned int));
-+
-+
-+/**
-+ * Advertise in the store a change of the given driver to the given new_state.
-+ * Return 0 on success, or -errno on error. On error, the device will switch
-+ * to XenbusStateClosing, and the error will be saved in the store.
-+ */
-+int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
-+
-+
-+/**
-+ * Grant access to the given ring_mfn to the peer of the given device. Return
-+ * 0 on success, or -errno on error. On error, the device will switch to
-+ * XenbusStateClosing, and the error will be saved in the store.
-+ */
-+int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
-+
-+
-+/**
-+ * Map a page of memory into this domain from another domain's grant table.
-+ * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
-+ * page to that address, and sets *vaddr to that address.
-+ * xenbus_map_ring does not allocate the virtual address space (you must do
-+ * this yourself!). It only maps in the page to the specified address.
-+ * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
-+ * or -ENOMEM on error. If an error is returned, device will switch to
-+ * XenbusStateClosing and the error message will be saved in XenStore.
-+ */
-+struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev,
-+ int gnt_ref);
-+int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
-+ grant_handle_t *handle, void *vaddr);
-+
-+
-+/**
-+ * Unmap a page of memory in this domain that was imported from another domain.
-+ * Use xenbus_unmap_ring_vfree if you mapped in your memory with
-+ * xenbus_map_ring_valloc (it will free the virtual address space).
-+ * Returns 0 on success and returns GNTST_* on error
-+ * (see xen/include/interface/grant_table.h).
-+ */
-+int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *);
-+int xenbus_unmap_ring(struct xenbus_device *dev,
-+ grant_handle_t handle, void *vaddr);
-+
-+
-+/**
-+ * Allocate an event channel for the given xenbus_device, assigning the newly
-+ * created local port to *port. Return 0 on success, or -errno on error. On
-+ * error, the device will switch to XenbusStateClosing, and the error will be
-+ * saved in the store.
-+ */
-+int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
-+
-+
-+/**
-+ * Free an existing event channel. Returns 0 on success or -errno on error.
-+ */
-+int xenbus_free_evtchn(struct xenbus_device *dev, int port);
-+
-+
-+/**
-+ * Return the state of the driver rooted at the given store path, or
-+ * XenbusStateUnknown if no state can be read.
-+ */
-+enum xenbus_state xenbus_read_driver_state(const char *path);
-+
-+
-+/***
-+ * Report the given negative errno into the store, along with the given
-+ * formatted message.
-+ */
-+void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
-+ ...);
-+
-+
-+/***
-+ * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
-+ * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly
-+ * closedown of this driver and its peer.
-+ */
-+void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
-+ ...);
-+
-+int xenbus_dev_init(void);
-+
-+const char *xenbus_strstate(enum xenbus_state state);
-+int xenbus_dev_is_online(struct xenbus_device *dev);
-+int xenbus_frontend_closed(struct xenbus_device *dev);
-+
-+#endif /* _XEN_XENBUS_H */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/xencons.h ubuntu-gutsy-xen/include/xen/xencons.h
---- ubuntu-gutsy/include/xen/xencons.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/xencons.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,33 @@
-+#ifndef __ASM_XENCONS_H__
-+#define __ASM_XENCONS_H__
-+
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+struct dom0_vga_console_info;
-+void dom0_init_screen_info(const struct dom0_vga_console_info *info);
-+#else
-+#define dom0_init_screen_info(info) ((void)(info))
-+#endif
-+
-+#ifdef CONFIG_XEN_CONSOLE
-+
-+void xencons_force_flush(void);
-+void xencons_resume(void);
-+
-+/* Interrupt work hooks. Receive data, or kick data out. */
-+void xencons_rx(char *buf, unsigned len);
-+void xencons_tx(void);
-+
-+int xencons_ring_init(void);
-+int xencons_ring_send(const char *data, unsigned len);
-+
-+void xencons_early_setup(void);
-+
-+#else
-+
-+static inline void xencons_force_flush(void) {}
-+static inline void xencons_resume(void) {}
-+static inline void xencons_early_setup(void) {}
-+
-+#endif
-+
-+#endif /* __ASM_XENCONS_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/xenoprof.h ubuntu-gutsy-xen/include/xen/xenoprof.h
---- ubuntu-gutsy/include/xen/xenoprof.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/xenoprof.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,42 @@
-+/******************************************************************************
-+ * xen/xenoprof.h
-+ *
-+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
-+ * VA Linux Systems Japan K.K.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ *
-+ */
-+
-+#ifndef __XEN_XENOPROF_H__
-+#define __XEN_XENOPROF_H__
-+#ifdef CONFIG_XEN
-+
-+#include <asm/xenoprof.h>
-+
-+struct oprofile_operations;
-+int xenoprofile_init(struct oprofile_operations * ops);
-+void xenoprofile_exit(void);
-+
-+struct xenoprof_shared_buffer {
-+ char *buffer;
-+ struct xenoprof_arch_shared_buffer arch;
-+};
-+#else
-+#define xenoprofile_init(ops) (-ENOSYS)
-+#define xenoprofile_exit() do { } while (0)
-+
-+#endif /* CONFIG_XEN */
-+#endif /* __XEN_XENOPROF_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/include/xen/xen_proc.h ubuntu-gutsy-xen/include/xen/xen_proc.h
---- ubuntu-gutsy/include/xen/xen_proc.h 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/include/xen/xen_proc.h 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,12 @@
-+
-+#ifndef __ASM_XEN_PROC_H__
-+#define __ASM_XEN_PROC_H__
-+
-+#include <linux/proc_fs.h>
-+
-+extern struct proc_dir_entry *create_xen_proc_entry(
-+ const char *name, mode_t mode);
-+extern void remove_xen_proc_entry(
-+ const char *name);
-+
-+#endif /* __ASM_XEN_PROC_H__ */
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/kernel/irq/spurious.c ubuntu-gutsy-xen/kernel/irq/spurious.c
---- ubuntu-gutsy/kernel/irq/spurious.c 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/kernel/irq/spurious.c 2007-08-18 12:38:02.000000000 -0400
-@@ -172,7 +172,8 @@
- irqreturn_t action_ret)
- {
- if (unlikely(action_ret != IRQ_HANDLED)) {
-- desc->irqs_unhandled++;
-+ if (!irq_ignore_unhandled(irq))
-+ desc->irqs_unhandled++;
- if (unlikely(action_ret != IRQ_NONE))
- report_bad_irq(irq, desc, action_ret);
- }
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/kernel/Kconfig.preempt ubuntu-gutsy-xen/kernel/Kconfig.preempt
---- ubuntu-gutsy/kernel/Kconfig.preempt 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/kernel/Kconfig.preempt 2007-08-18 12:38:02.000000000 -0400
-@@ -35,6 +35,7 @@
-
- config PREEMPT
- bool "Preemptible Kernel (Low-Latency Desktop)"
-+ depends on !XEN
- help
- This option reduces the latency of the kernel by making
- all kernel code (that is not executing in a critical section)
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/kernel/kexec.c ubuntu-gutsy-xen/kernel/kexec.c
---- ubuntu-gutsy/kernel/kexec.c 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/kernel/kexec.c 2007-08-18 12:38:02.000000000 -0400
-@@ -331,13 +331,27 @@
- return 0;
- }
-
--static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
-+static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order, unsigned long limit)
- {
- struct page *pages;
-
- pages = alloc_pages(gfp_mask, order);
- if (pages) {
- unsigned int count, i;
-+#ifdef CONFIG_XEN
-+ int address_bits;
-+
-+ if (limit == ~0UL)
-+ address_bits = BITS_PER_LONG;
-+ else
-+ address_bits = ilog2(limit);
-+
-+ if (xen_create_contiguous_region((unsigned long)page_address(pages),
-+ order, address_bits) < 0) {
-+ __free_pages(pages, order);
-+ return NULL;
-+ }
-+#endif
- pages->mapping = NULL;
- set_page_private(pages, order);
- count = 1 << order;
-@@ -356,6 +370,9 @@
- count = 1 << order;
- for (i = 0; i < count; i++)
- ClearPageReserved(page + i);
-+#ifdef CONFIG_XEN
-+ xen_destroy_contiguous_region((unsigned long)page_address(page), order);
-+#endif
- __free_pages(page, order);
- }
-
-@@ -401,10 +418,10 @@
- do {
- unsigned long pfn, epfn, addr, eaddr;
-
-- pages = kimage_alloc_pages(GFP_KERNEL, order);
-+ pages = kimage_alloc_pages(GFP_KERNEL, order, KEXEC_CONTROL_MEMORY_LIMIT);
- if (!pages)
- break;
-- pfn = page_to_pfn(pages);
-+ pfn = kexec_page_to_pfn(pages);
- epfn = pfn + count;
- addr = pfn << PAGE_SHIFT;
- eaddr = epfn << PAGE_SHIFT;
-@@ -438,6 +455,7 @@
- return pages;
- }
-
-+#ifndef CONFIG_XEN
- static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
- unsigned int order)
- {
-@@ -491,7 +509,7 @@
- }
- /* If I don't overlap any segments I have found my hole! */
- if (i == image->nr_segments) {
-- pages = pfn_to_page(hole_start >> PAGE_SHIFT);
-+ pages = kexec_pfn_to_page(hole_start >> PAGE_SHIFT);
- break;
- }
- }
-@@ -518,6 +536,13 @@
-
- return pages;
- }
-+#else /* !CONFIG_XEN */
-+struct page *kimage_alloc_control_pages(struct kimage *image,
-+ unsigned int order)
-+{
-+ return kimage_alloc_normal_control_pages(image, order);
-+}
-+#endif
-
- static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
- {
-@@ -533,7 +558,7 @@
- return -ENOMEM;
-
- ind_page = page_address(page);
-- *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
-+ *image->entry = kexec_virt_to_phys(ind_page) | IND_INDIRECTION;
- image->entry = ind_page;
- image->last_entry = ind_page +
- ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
-@@ -594,13 +619,13 @@
- #define for_each_kimage_entry(image, ptr, entry) \
- for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
- ptr = (entry & IND_INDIRECTION)? \
-- phys_to_virt((entry & PAGE_MASK)): ptr +1)
-+ kexec_phys_to_virt((entry & PAGE_MASK)): ptr +1)
-
- static void kimage_free_entry(kimage_entry_t entry)
- {
- struct page *page;
-
-- page = pfn_to_page(entry >> PAGE_SHIFT);
-+ page = kexec_pfn_to_page(entry >> PAGE_SHIFT);
- kimage_free_pages(page);
- }
-
-@@ -612,6 +637,10 @@
- if (!image)
- return;
-
-+#ifdef CONFIG_XEN
-+ xen_machine_kexec_unload(image);
-+#endif
-+
- kimage_free_extra_pages(image);
- for_each_kimage_entry(image, ptr, entry) {
- if (entry & IND_INDIRECTION) {
-@@ -687,7 +716,7 @@
- * have a match.
- */
- list_for_each_entry(page, &image->dest_pages, lru) {
-- addr = page_to_pfn(page) << PAGE_SHIFT;
-+ addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
- if (addr == destination) {
- list_del(&page->lru);
- return page;
-@@ -698,16 +727,16 @@
- kimage_entry_t *old;
-
- /* Allocate a page, if we run out of memory give up */
-- page = kimage_alloc_pages(gfp_mask, 0);
-+ page = kimage_alloc_pages(gfp_mask, 0, KEXEC_SOURCE_MEMORY_LIMIT);
- if (!page)
- return NULL;
- /* If the page cannot be used file it away */
-- if (page_to_pfn(page) >
-+ if (kexec_page_to_pfn(page) >
- (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
- list_add(&page->lru, &image->unuseable_pages);
- continue;
- }
-- addr = page_to_pfn(page) << PAGE_SHIFT;
-+ addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
-
- /* If it is the destination page we want use it */
- if (addr == destination)
-@@ -730,7 +759,7 @@
- struct page *old_page;
-
- old_addr = *old & PAGE_MASK;
-- old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
-+ old_page = kexec_pfn_to_page(old_addr >> PAGE_SHIFT);
- copy_highpage(page, old_page);
- *old = addr | (*old & ~PAGE_MASK);
-
-@@ -780,7 +809,7 @@
- result = -ENOMEM;
- goto out;
- }
-- result = kimage_add_page(image, page_to_pfn(page)
-+ result = kimage_add_page(image, kexec_page_to_pfn(page)
- << PAGE_SHIFT);
- if (result < 0)
- goto out;
-@@ -812,6 +841,7 @@
- return result;
- }
-
-+#ifndef CONFIG_XEN
- static int kimage_load_crash_segment(struct kimage *image,
- struct kexec_segment *segment)
- {
-@@ -834,7 +864,7 @@
- char *ptr;
- size_t uchunk, mchunk;
-
-- page = pfn_to_page(maddr >> PAGE_SHIFT);
-+ page = kexec_pfn_to_page(maddr >> PAGE_SHIFT);
- if (page == 0) {
- result = -ENOMEM;
- goto out;
-@@ -883,6 +913,13 @@
-
- return result;
- }
-+#else /* CONFIG_XEN */
-+static int kimage_load_segment(struct kimage *image,
-+ struct kexec_segment *segment)
-+{
-+ return kimage_load_normal_segment(image, segment);
-+}
-+#endif
-
- /*
- * Exec Kernel system call: for obvious reasons only root may call it.
-@@ -993,6 +1030,13 @@
- if (result)
- goto out;
- }
-+#ifdef CONFIG_XEN
-+ if (image) {
-+ result = xen_machine_kexec_load(image);
-+ if (result)
-+ goto out;
-+ }
-+#endif
- /* Install the new kernel, and Uninstall the old */
- image = xchg(dest_image, image);
-
-@@ -1047,7 +1091,6 @@
- {
- int locked;
-
--
- /* Take the kexec_lock here to prevent sys_kexec_load
- * running on one cpu from replacing the crash kernel
- * we are using after a panic on a different cpu.
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/kernel/softlockup.c ubuntu-gutsy-xen/kernel/softlockup.c
---- ubuntu-gutsy/kernel/softlockup.c 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/kernel/softlockup.c 2007-08-18 12:38:02.000000000 -0400
-@@ -60,6 +60,19 @@
- }
- EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
-
-+unsigned long softlockup_get_next_event(void)
-+{
-+ int this_cpu = smp_processor_id();
-+ unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
-+
-+ if (per_cpu(print_timestamp, this_cpu) == touch_timestamp ||
-+ did_panic ||
-+ !per_cpu(watchdog_task, this_cpu))
-+ return MAX_JIFFY_OFFSET;
-+
-+ return max_t(long, 0, touch_timestamp + HZ - jiffies);
-+}
-+
- /*
- * This callback runs from the timer interrupt, and checks
- * whether the watchdog thread has hung or not:
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/kernel/sysctl.c ubuntu-gutsy-xen/kernel/sysctl.c
---- ubuntu-gutsy/kernel/sysctl.c 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/kernel/sysctl.c 2007-08-18 12:38:02.000000000 -0400
-@@ -510,6 +510,7 @@
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
-+#ifndef CONFIG_XEN
- {
- .ctl_name = KERN_NMI_WATCHDOG,
- .procname = "nmi_watchdog",
-@@ -519,6 +520,7 @@
- .proc_handler = &proc_nmi_enabled,
- },
- #endif
-+#endif
- #if defined(CONFIG_X86)
- {
- .ctl_name = KERN_PANIC_ON_NMI,
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/kernel/timer.c ubuntu-gutsy-xen/kernel/timer.c
---- ubuntu-gutsy/kernel/timer.c 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/kernel/timer.c 2007-08-18 12:38:02.000000000 -0400
-@@ -790,7 +790,17 @@
- if (time_before_eq(expires, now))
- return now;
-
-+#ifndef CONFIG_XEN
- return cmp_next_hrtimer_event(now, expires);
-+#else
-+ expires = cmp_next_hrtimer_event(now, expires);
-+ {
-+ unsigned long sl_next = softlockup_get_next_event();
-+
-+ return expires <= now || expires - now < sl_next
-+ ? expires : now + sl_next;
-+ }
-+#endif
- }
-
- #ifdef CONFIG_NO_IDLE_HZ
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/lib/Makefile ubuntu-gutsy-xen/lib/Makefile
---- ubuntu-gutsy/lib/Makefile 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/lib/Makefile 2007-08-18 12:38:02.000000000 -0400
-@@ -58,6 +58,7 @@
- obj-$(CONFIG_AUDIT_GENERIC) += audit.o
-
- obj-$(CONFIG_SWIOTLB) += swiotlb.o
-+swiotlb-$(CONFIG_XEN) := ../arch/i386/kernel/swiotlb.o
- obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
-
- lib-$(CONFIG_GENERIC_BUG) += bug.o
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/mm/memory.c ubuntu-gutsy-xen/mm/memory.c
---- ubuntu-gutsy/mm/memory.c 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/mm/memory.c 2007-08-18 12:38:02.000000000 -0400
-@@ -404,7 +404,10 @@
- * and that the resulting page looks ok.
- */
- if (unlikely(!pfn_valid(pfn))) {
-- print_bad_pte(vma, pte, addr);
-+#ifdef CONFIG_XEN
-+ if (!(vma->vm_flags & VM_RESERVED))
-+#endif
-+ print_bad_pte(vma, pte, addr);
- return NULL;
- }
-
-@@ -662,8 +665,14 @@
- page->index > details->last_index))
- continue;
- }
-- ptent = ptep_get_and_clear_full(mm, addr, pte,
-- tlb->fullmm);
-+#ifdef CONFIG_XEN
-+ if (unlikely(vma->vm_ops && vma->vm_ops->zap_pte))
-+ ptent = vma->vm_ops->zap_pte(vma, addr, pte,
-+ tlb->fullmm);
-+ else
-+#endif
-+ ptent = ptep_get_and_clear_full(mm, addr, pte,
-+ tlb->fullmm);
- tlb_remove_tlb_entry(tlb, pte, addr);
- if (unlikely(!page))
- continue;
-@@ -896,6 +905,7 @@
- tlb_finish_mmu(tlb, address, end);
- return end;
- }
-+EXPORT_SYMBOL(zap_page_range);
-
- /*
- * Do a quick page-table lookup for a single page.
-@@ -1035,6 +1045,26 @@
- continue;
- }
-
-+#ifdef CONFIG_XEN
-+ if (vma && (vma->vm_flags & VM_FOREIGN)) {
-+ struct page **map = vma->vm_private_data;
-+ int offset = (start - vma->vm_start) >> PAGE_SHIFT;
-+ if (map[offset] != NULL) {
-+ if (pages) {
-+ struct page *page = map[offset];
-+
-+ pages[i] = page;
-+ get_page(page);
-+ }
-+ if (vmas)
-+ vmas[i] = vma;
-+ i++;
-+ start += PAGE_SIZE;
-+ len--;
-+ continue;
-+ }
-+ }
-+#endif
- if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
- || !(vm_flags & vma->vm_flags))
- return i ? : -EFAULT;
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/mm/page_alloc.c ubuntu-gutsy-xen/mm/page_alloc.c
---- ubuntu-gutsy/mm/page_alloc.c 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/mm/page_alloc.c 2007-08-18 12:38:02.000000000 -0400
-@@ -206,7 +206,11 @@
- 1 << PG_slab |
- 1 << PG_swapcache |
- 1 << PG_writeback |
-- 1 << PG_buddy );
-+ 1 << PG_buddy |
-+#ifdef CONFIG_X86_XEN
-+ 1 << PG_pinned |
-+#endif
-+ 1 << PG_foreign );
- set_page_count(page, 0);
- reset_page_mapcount(page);
- page->mapping = NULL;
-@@ -442,7 +446,11 @@
- 1 << PG_swapcache |
- 1 << PG_writeback |
- 1 << PG_reserved |
-- 1 << PG_buddy ))))
-+ 1 << PG_buddy |
-+#ifdef CONFIG_X86_XEN
-+ 1 << PG_pinned |
-+#endif
-+ 1 << PG_foreign ))))
- bad_page(page);
- /*
- * PageReclaim == PageTail. It is only an error
-@@ -504,6 +512,12 @@
- int i;
- int reserved = 0;
-
-+#ifdef CONFIG_XEN
-+ if (PageForeign(page)) {
-+ PageForeignDestructor(page);
-+ return;
-+ }
-+#endif
- for (i = 0 ; i < (1 << order) ; ++i)
- reserved += free_pages_check(page + i);
- if (reserved)
-@@ -598,7 +612,11 @@
- 1 << PG_swapcache |
- 1 << PG_writeback |
- 1 << PG_reserved |
-- 1 << PG_buddy ))))
-+ 1 << PG_buddy |
-+#ifdef CONFIG_X86_XEN
-+ 1 << PG_pinned |
-+#endif
-+ 1 << PG_foreign ))))
- bad_page(page);
-
- /*
-@@ -781,6 +799,12 @@
- struct per_cpu_pages *pcp;
- unsigned long flags;
-
-+#ifdef CONFIG_XEN
-+ if (PageForeign(page)) {
-+ PageForeignDestructor(page);
-+ return;
-+ }
-+#endif
- if (PageAnon(page))
- page->mapping = NULL;
- if (free_pages_check(page))
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/net/core/dev.c ubuntu-gutsy-xen/net/core/dev.c
---- ubuntu-gutsy/net/core/dev.c 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/net/core/dev.c 2007-08-18 12:38:02.000000000 -0400
-@@ -118,6 +118,12 @@
- #include <linux/ctype.h>
- #include <linux/if_arp.h>
-
-+#ifdef CONFIG_XEN
-+#include <net/ip.h>
-+#include <linux/tcp.h>
-+#include <linux/udp.h>
-+#endif
-+
- /*
- * The list of packet types we will receive (as opposed to discard)
- * and the routines to invoke.
-@@ -1452,6 +1458,45 @@
- } \
- }
-
-+#ifdef CONFIG_XEN
-+inline int skb_checksum_setup(struct sk_buff *skb)
-+{
-+ if (skb->proto_csum_blank) {
-+ struct iphdr *iph;
-+
-+ if (skb->protocol != htons(ETH_P_IP))
-+ goto out;
-+ iph = ip_hdr(skb);
-+ skb->transport_header = skb->network_header + 4 * iph->ihl;
-+ if (skb->transport_header >= skb->tail)
-+ goto out;
-+ skb->csum_start = skb_transport_header(skb) - skb->head;
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP:
-+ skb->csum_offset = offsetof(struct tcphdr, check);
-+ break;
-+ case IPPROTO_UDP:
-+ skb->csum_offset = offsetof(struct udphdr, check);
-+ break;
-+ default:
-+ if (net_ratelimit())
-+ printk(KERN_ERR "Attempting to checksum a non-"
-+ "TCP/UDP packet, dropping a protocol"
-+ " %d packet", iph->protocol);
-+ goto out;
-+ }
-+ if ((skb->transport_header + skb->csum_offset + 2) > skb->tail)
-+ goto out;
-+ skb->ip_summed = CHECKSUM_PARTIAL;
-+ skb->proto_csum_blank = 0;
-+ }
-+ return 0;
-+out:
-+ return -EPROTO;
-+}
-+#endif
-+
-+
- /**
- * dev_queue_xmit - transmit a buffer
- * @skb: buffer to transmit
-@@ -1484,6 +1529,12 @@
- struct Qdisc *q;
- int rc = -ENOMEM;
-
-+ /* If a checksum-deferred packet is forwarded to a device that needs a
-+ * checksum, correct the pointers and force checksumming.
-+ */
-+ if (skb_checksum_setup(skb))
-+ goto out_kfree_skb;
-+
- /* GSO will handle the following emulations directly. */
- if (netif_needs_gso(dev, skb))
- goto gso;
-@@ -1870,6 +1921,19 @@
- }
- #endif
-
-+#ifdef CONFIG_XEN
-+ switch (skb->ip_summed) {
-+ case CHECKSUM_UNNECESSARY:
-+ skb->proto_data_valid = 1;
-+ break;
-+ case CHECKSUM_PARTIAL:
-+ /* XXX Implement me. */
-+ default:
-+ skb->proto_data_valid = 0;
-+ break;
-+ }
-+#endif
-+
- list_for_each_entry_rcu(ptype, &ptype_all, list) {
- if (!ptype->dev || ptype->dev == skb->dev) {
- if (pt_prev)
-@@ -3736,6 +3800,7 @@
- EXPORT_SYMBOL(net_enable_timestamp);
- EXPORT_SYMBOL(net_disable_timestamp);
- EXPORT_SYMBOL(dev_get_flags);
-+EXPORT_SYMBOL(skb_checksum_setup);
-
- #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
- EXPORT_SYMBOL(br_handle_frame_hook);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/net/core/skbuff.c ubuntu-gutsy-xen/net/core/skbuff.c
---- ubuntu-gutsy/net/core/skbuff.c 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/net/core/skbuff.c 2007-08-18 12:38:02.000000000 -0400
-@@ -416,6 +416,10 @@
- C(local_df);
- n->cloned = 1;
- n->nohdr = 0;
-+#ifdef CONFIG_XEN
-+ C(proto_data_valid);
-+ C(proto_csum_blank);
-+#endif
- C(pkt_type);
- C(ip_summed);
- C(priority);
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/net/ipv4/netfilter/nf_nat_proto_tcp.c ubuntu-gutsy-xen/net/ipv4/netfilter/nf_nat_proto_tcp.c
---- ubuntu-gutsy/net/ipv4/netfilter/nf_nat_proto_tcp.c 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/net/ipv4/netfilter/nf_nat_proto_tcp.c 2007-08-18 12:38:02.000000000 -0400
-@@ -132,6 +132,9 @@
- if (hdrsize < sizeof(*hdr))
- return 1;
-
-+ if (skb_checksum_setup(*pskb))
-+ return 0;
-+
- nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1);
- nf_proto_csum_replace2(&hdr->check, *pskb, oldport, newport, 0);
- return 1;
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/net/ipv4/netfilter/nf_nat_proto_udp.c ubuntu-gutsy-xen/net/ipv4/netfilter/nf_nat_proto_udp.c
---- ubuntu-gutsy/net/ipv4/netfilter/nf_nat_proto_udp.c 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/net/ipv4/netfilter/nf_nat_proto_udp.c 2007-08-18 12:38:02.000000000 -0400
-@@ -116,6 +116,10 @@
- newport = tuple->dst.u.udp.port;
- portptr = &hdr->dest;
- }
-+
-+ if (skb_checksum_setup(*pskb))
-+ return 0;
-+
- if (hdr->check || (*pskb)->ip_summed == CHECKSUM_PARTIAL) {
- nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1);
- nf_proto_csum_replace2(&hdr->check, *pskb, *portptr, newport,
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/net/ipv4/xfrm4_output.c ubuntu-gutsy-xen/net/ipv4/xfrm4_output.c
---- ubuntu-gutsy/net/ipv4/xfrm4_output.c 2007-08-18 09:40:34.000000000 -0400
-+++ ubuntu-gutsy-xen/net/ipv4/xfrm4_output.c 2007-08-18 12:38:02.000000000 -0400
-@@ -47,6 +47,10 @@
- struct xfrm_state *x = dst->xfrm;
- int err;
-
-+ err = skb_checksum_setup(skb);
-+ if (err)
-+ goto error_nolock;
-+
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- err = skb_checksum_help(skb);
- if (err)
-diff -Naur --exclude=.git --exclude=debian ubuntu-gutsy/scripts/Makefile.xen ubuntu-gutsy-xen/scripts/Makefile.xen
---- ubuntu-gutsy/scripts/Makefile.xen 1969-12-31 19:00:00.000000000 -0500
-+++ ubuntu-gutsy-xen/scripts/Makefile.xen 2007-08-18 12:38:02.000000000 -0400
-@@ -0,0 +1,14 @@
-+
-+# cherrypickxen($1 = allobj)
-+cherrypickxen = $(foreach var, $(1), \
-+ $(shell o=$(var); \
-+ c=$${o%.o}-xen.c; \
-+ s=$${o%.o}-xen.S; \
-+ oxen=$${o%.o}-xen.o; \
-+ [ -f $(srctree)/$(src)/$${c} ] || \
-+ [ -f $(srctree)/$(src)/$${s} ] \
-+ && echo $$oxen \
-+ || echo $(var) ) \
-+ )
-+# filterxen($1 = allobj, $2 = noobjs)
-+filterxen = $(filter-out $(2), $(1))
diff --git a/trunk/2.6.22/20002_add-console-use-vt.patch1 b/trunk/2.6.22/20002_add-console-use-vt.patch1
new file mode 100644
index 0000000..a63a027
--- /dev/null
+++ b/trunk/2.6.22/20002_add-console-use-vt.patch1
@@ -0,0 +1,58 @@
+Subject: add console_use_vt
+From: kraxel@suse.de
+Patch-mainline: no
+
+$subject says all
+
+---
+ drivers/char/tty_io.c | 7 ++++++-
+ include/linux/console.h | 1 +
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/char/tty_io.c 2007-08-27 14:01:21.000000000 -0400
++++ b/drivers/char/tty_io.c 2007-08-27 14:01:24.000000000 -0400
+@@ -133,6 +133,8 @@ LIST_HEAD(tty_drivers); /* linked list
+ DEFINE_MUTEX(tty_mutex);
+ EXPORT_SYMBOL(tty_mutex);
+
++int console_use_vt = 1;
++
+ #ifdef CONFIG_UNIX98_PTYS
+ extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */
+ extern int pty_limit; /* Config limit on Unix98 ptys */
+@@ -2581,7 +2583,7 @@ retry_open:
+ goto got_driver;
+ }
+ #ifdef CONFIG_VT
+- if (device == MKDEV(TTY_MAJOR,0)) {
++ if (console_use_vt && device == MKDEV(TTY_MAJOR,0)) {
+ extern struct tty_driver *console_driver;
+ driver = console_driver;
+ index = fg_console;
+@@ -4048,6 +4050,8 @@ static int __init tty_init(void)
+ #endif
+
+ #ifdef CONFIG_VT
++ if (!console_use_vt)
++ goto out_vt;
+ cdev_init(&vc0_cdev, &console_fops);
+ if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
+ register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
+@@ -4055,6 +4059,7 @@ static int __init tty_init(void)
+ device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), "tty0");
+
+ vty_init();
++ out_vt:
+ #endif
+ return 0;
+ }
+--- a/include/linux/console.h 2007-08-27 14:01:24.000000000 -0400
++++ b/include/linux/console.h 2007-08-27 14:01:24.000000000 -0400
+@@ -63,6 +63,7 @@ extern const struct consw dummy_con; /*
+ extern const struct consw vga_con; /* VGA text console */
+ extern const struct consw newport_con; /* SGI Newport console */
+ extern const struct consw prom_con; /* SPARC PROM console */
++extern int console_use_vt;
+
+ int con_is_bound(const struct consw *csw);
+ int register_con_driver(const struct consw *csw, int first, int last);
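
The hunks above only introduce and test the console_use_vt flag; nothing in this patch clears it. For illustration only, a minimal sketch of how a console driver could consume the flag, assuming it runs from kernel command-line parsing (which happens well before tty_init()); the option name and function below are hypothetical, not part of this patch:

#include <linux/init.h>

extern int console_use_vt;	/* introduced by the patch above, defaults to 1 */

/* Hypothetical boot option: keep the VT layer away from /dev/tty0 so an
 * alternative (e.g. paravirtual) console driver can register it instead.
 * Command-line options are parsed before tty_init() runs, so the
 * "goto out_vt" shortcut added above sees the cleared value. */
static int __init example_novtcons_setup(char *str)
{
	console_use_vt = 0;
	return 1;
}
__setup("example_novtcons", example_novtcons_setup);
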
diff --git a/trunk/2.6.22/20003_linux-2.6.19-rc1-kexec-move_segment_code-i386.patch1 b/trunk/2.6.22/20003_linux-2.6.19-rc1-kexec-move_segment_code-i386.patch1
new file mode 100644
index 0000000..0e266e1
--- /dev/null
+++ b/trunk/2.6.22/20003_linux-2.6.19-rc1-kexec-move_segment_code-i386.patch1
@@ -0,0 +1,172 @@
+Subject: kexec: Move asm segment handling code to the assembly file (i386)
+From: http://xenbits.xensource.com/xen-unstable.hg (tip 13816)
+Patch-mainline: obsolete
+
+This patch moves the idt, gdt, and segment handling code from machine_kexec.c
+to relocate_kernel.S. The main reason behind this move is to avoid code
+duplication in the Xen hypervisor. With this patch all code required to kexec
+is put on the control page.
+
+On top of that this patch also counts as a cleanup - I think it is much
+nicer to write assembly directly in assembly files than wrap inline assembly
+in C functions for no apparent reason.
+
+Signed-off-by: Magnus Damm <magnus@valinux.co.jp>
+Acked-by: jbeulich@novell.com
+---
+
+ Applies to 2.6.19-rc1.
+
+ arch/i386/kernel/machine_kexec.c | 59 -------------------------------------
+ arch/i386/kernel/relocate_kernel.S | 58 +++++++++++++++++++++++++++++++++---
+ 2 files changed, 53 insertions(+), 64 deletions(-)
+
+--- a/arch/i386/kernel/machine_kexec.c 2007-08-27 12:09:26.000000000 -0400
++++ b/arch/i386/kernel/machine_kexec.c 2007-08-27 14:02:11.000000000 -0400
+@@ -29,48 +29,6 @@ static u32 kexec_pmd1[1024] PAGE_ALIGNED
+ static u32 kexec_pte0[1024] PAGE_ALIGNED;
+ static u32 kexec_pte1[1024] PAGE_ALIGNED;
+
+-static void set_idt(void *newidt, __u16 limit)
+-{
+- struct Xgt_desc_struct curidt;
+-
+- /* ia32 supports unaliged loads & stores */
+- curidt.size = limit;
+- curidt.address = (unsigned long)newidt;
+-
+- load_idt(&curidt);
+-};
+-
+-
+-static void set_gdt(void *newgdt, __u16 limit)
+-{
+- struct Xgt_desc_struct curgdt;
+-
+- /* ia32 supports unaligned loads & stores */
+- curgdt.size = limit;
+- curgdt.address = (unsigned long)newgdt;
+-
+- load_gdt(&curgdt);
+-};
+-
+-static void load_segments(void)
+-{
+-#define __STR(X) #X
+-#define STR(X) __STR(X)
+-
+- __asm__ __volatile__ (
+- "\tljmp $"STR(__KERNEL_CS)",$1f\n"
+- "\t1:\n"
+- "\tmovl $"STR(__KERNEL_DS)",%%eax\n"
+- "\tmovl %%eax,%%ds\n"
+- "\tmovl %%eax,%%es\n"
+- "\tmovl %%eax,%%fs\n"
+- "\tmovl %%eax,%%gs\n"
+- "\tmovl %%eax,%%ss\n"
+- ::: "eax", "memory");
+-#undef STR
+-#undef __STR
+-}
+-
+ /*
+ * A architecture hook called to validate the
+ * proposed image and prepare the control pages
+@@ -127,23 +85,6 @@ NORET_TYPE void machine_kexec(struct kim
+ page_list[PA_PTE_1] = __pa(kexec_pte1);
+ page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
+
+- /* The segment registers are funny things, they have both a
+- * visible and an invisible part. Whenever the visible part is
+- * set to a specific selector, the invisible part is loaded
+- * with from a table in memory. At no other time is the
+- * descriptor table in memory accessed.
+- *
+- * I take advantage of this here by force loading the
+- * segments, before I zap the gdt with an invalid value.
+- */
+- load_segments();
+- /* The gdt & idt are now invalid.
+- * If you want to load them you must set up your own idt & gdt.
+- */
+- set_gdt(phys_to_virt(0),0);
+- set_idt(phys_to_virt(0),0);
+-
+- /* now call it */
+ relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
+ image->start, cpu_has_pae);
+ }
+--- a/arch/i386/kernel/relocate_kernel.S 2007-08-27 12:09:26.000000000 -0400
++++ b/arch/i386/kernel/relocate_kernel.S 2007-08-27 14:01:24.000000000 -0400
+@@ -154,14 +154,45 @@ relocate_new_kernel:
+ movl PTR(PA_PGD)(%ebp), %eax
+ movl %eax, %cr3
+
++ /* setup idt */
++ movl %edi, %eax
++ addl $(idt_48 - relocate_kernel), %eax
++ lidtl (%eax)
++
++ /* setup gdt */
++ movl %edi, %eax
++ addl $(gdt - relocate_kernel), %eax
++ movl %edi, %esi
++ addl $((gdt_48 - relocate_kernel) + 2), %esi
++ movl %eax, (%esi)
++
++ movl %edi, %eax
++ addl $(gdt_48 - relocate_kernel), %eax
++ lgdtl (%eax)
++
++ /* setup data segment registers */
++ mov $(gdt_ds - gdt), %eax
++ mov %eax, %ds
++ mov %eax, %es
++ mov %eax, %fs
++ mov %eax, %gs
++ mov %eax, %ss
++
+ /* setup a new stack at the end of the physical control page */
+ lea 4096(%edi), %esp
+
+- /* jump to identity mapped page */
+- movl %edi, %eax
+- addl $(identity_mapped - relocate_kernel), %eax
+- pushl %eax
+- ret
++ /* load new code segment and jump to identity mapped page */
++ movl %edi, %esi
++ xorl %eax, %eax
++ pushl %eax
++ pushl %esi
++ pushl %eax
++ movl $(gdt_cs - gdt), %eax
++ pushl %eax
++ movl %edi, %eax
++ addl $(identity_mapped - relocate_kernel),%eax
++ pushl %eax
++ iretl
+
+ identity_mapped:
+ /* store the start address on the stack */
+@@ -250,3 +281,20 @@ identity_mapped:
+ xorl %edi, %edi
+ xorl %ebp, %ebp
+ ret
++
++ .align 16
++gdt:
++ .quad 0x0000000000000000 /* NULL descriptor */
++gdt_cs:
++ .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
++gdt_ds:
++ .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
++gdt_end:
++
++gdt_48:
++ .word gdt_end - gdt - 1 /* limit */
++ .long 0 /* base - filled in by code above */
++
++idt_48:
++ .word 0 /* limit */
++ .long 0 /* base */
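
The same approach is repeated for x86_64 in the next patch. The raw descriptors added at the gdt: label are easier to audit once decoded: the data selector loaded into %ds..%ss is the byte offset gdt_ds - gdt (0x10), and gdt_cs - gdt (0x08) is the code selector pushed for the far return. A small stand-alone helper, for illustration only and not part of the patch, that unpacks the two magic constants:

#include <stdio.h>
#include <stdint.h>

/* Unpack an x86 segment descriptor in the layout used above:
 * limit[15:0], base[23:0], access byte, limit[19:16], flags, base[31:24]. */
static void decode(const char *name, uint64_t d)
{
	uint32_t base   = (uint32_t)(((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24));
	uint32_t limit  = (uint32_t)((d & 0xffff) | (((d >> 48) & 0xf) << 16));
	unsigned access = (unsigned)((d >> 40) & 0xff);	/* P, DPL, S, type */
	unsigned flags  = (unsigned)((d >> 52) & 0xf);	/* G, D/B, L, AVL */

	printf("%-6s base=%#x limit=%#x access=%#x flags=%#x\n",
	       name, base, limit, access, flags);
}

int main(void)
{
	decode("gdt_cs", 0x00cf9a000000ffffULL);	/* flat 4GiB ring-0 code, G=1 */
	decode("gdt_ds", 0x00cf92000000ffffULL);	/* flat 4GiB ring-0 data, G=1 */
	return 0;
}
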
diff --git a/trunk/2.6.22/20004_linux-2.6.19-rc1-kexec-move_segment_code-x86_64.patch1 b/trunk/2.6.22/20004_linux-2.6.19-rc1-kexec-move_segment_code-x86_64.patch1
new file mode 100644
index 0000000..d3abb10
--- /dev/null
+++ b/trunk/2.6.22/20004_linux-2.6.19-rc1-kexec-move_segment_code-x86_64.patch1
@@ -0,0 +1,164 @@
+Subject: kexec: Move asm segment handling code to the assembly file (x86_64)
+From: http://xenbits.xensource.com/xen-unstable.hg (tip 13816)
+Patch-mainline: obsolete
+
+This patch moves the idt, gdt, and segment handling code from machine_kexec.c
+to relocate_kernel.S. The main reason behind this move is to avoid code
+duplication in the Xen hypervisor. With this patch all code required to kexec
+is put on the control page.
+
+On top of that this patch also counts as a cleanup - I think it is much
+nicer to write assembly directly in assembly files than wrap inline assembly
+in C functions for no apparent reason.
+
+Signed-off-by: Magnus Damm <magnus@valinux.co.jp>
+Acked-by: jbeulich@novell.com
+---
+
+ Applies to 2.6.19-rc1.
+
+ arch/x86_64/kernel/machine_kexec.c | 58 -----------------------------------
+ arch/x86_64/kernel/relocate_kernel.S | 50 +++++++++++++++++++++++++++---
+ 2 files changed, 45 insertions(+), 63 deletions(-)
+
+--- a/arch/x86_64/kernel/machine_kexec.c 2007-08-27 12:09:26.000000000 -0400
++++ b/arch/x86_64/kernel/machine_kexec.c 2007-08-27 14:02:11.000000000 -0400
+@@ -112,47 +112,6 @@ static int init_pgtable(struct kimage *i
+ return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
+ }
+
+-static void set_idt(void *newidt, u16 limit)
+-{
+- struct desc_ptr curidt;
+-
+- /* x86-64 supports unaliged loads & stores */
+- curidt.size = limit;
+- curidt.address = (unsigned long)newidt;
+-
+- __asm__ __volatile__ (
+- "lidtq %0\n"
+- : : "m" (curidt)
+- );
+-};
+-
+-
+-static void set_gdt(void *newgdt, u16 limit)
+-{
+- struct desc_ptr curgdt;
+-
+- /* x86-64 supports unaligned loads & stores */
+- curgdt.size = limit;
+- curgdt.address = (unsigned long)newgdt;
+-
+- __asm__ __volatile__ (
+- "lgdtq %0\n"
+- : : "m" (curgdt)
+- );
+-};
+-
+-static void load_segments(void)
+-{
+- __asm__ __volatile__ (
+- "\tmovl %0,%%ds\n"
+- "\tmovl %0,%%es\n"
+- "\tmovl %0,%%ss\n"
+- "\tmovl %0,%%fs\n"
+- "\tmovl %0,%%gs\n"
+- : : "a" (__KERNEL_DS) : "memory"
+- );
+-}
+-
+ int machine_kexec_prepare(struct kimage *image)
+ {
+ unsigned long start_pgtable;
+@@ -209,23 +168,6 @@ NORET_TYPE void machine_kexec(struct kim
+ page_list[PA_TABLE_PAGE] =
+ (unsigned long)__pa(page_address(image->control_code_page));
+
+- /* The segment registers are funny things, they have both a
+- * visible and an invisible part. Whenever the visible part is
+- * set to a specific selector, the invisible part is loaded
+- * with from a table in memory. At no other time is the
+- * descriptor table in memory accessed.
+- *
+- * I take advantage of this here by force loading the
+- * segments, before I zap the gdt with an invalid value.
+- */
+- load_segments();
+- /* The gdt & idt are now invalid.
+- * If you want to load them you must set up your own idt & gdt.
+- */
+- set_gdt(phys_to_virt(0),0);
+- set_idt(phys_to_virt(0),0);
+-
+- /* now call it */
+ relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
+ image->start);
+ }
+--- a/arch/x86_64/kernel/relocate_kernel.S 2007-08-27 12:09:26.000000000 -0400
++++ b/arch/x86_64/kernel/relocate_kernel.S 2007-08-27 14:01:24.000000000 -0400
+@@ -159,13 +159,39 @@ relocate_new_kernel:
+ movq PTR(PA_PGD)(%rsi), %r9
+ movq %r9, %cr3
+
++ /* setup idt */
++ movq %r8, %rax
++ addq $(idt_80 - relocate_kernel), %rax
++ lidtq (%rax)
++
++ /* setup gdt */
++ movq %r8, %rax
++ addq $(gdt - relocate_kernel), %rax
++ movq %r8, %r9
++ addq $((gdt_80 - relocate_kernel) + 2), %r9
++ movq %rax, (%r9)
++
++ movq %r8, %rax
++ addq $(gdt_80 - relocate_kernel), %rax
++ lgdtq (%rax)
++
++ /* setup data segment registers */
++ xorl %eax, %eax
++ movl %eax, %ds
++ movl %eax, %es
++ movl %eax, %fs
++ movl %eax, %gs
++ movl %eax, %ss
++
+ /* setup a new stack at the end of the physical control page */
+ lea 4096(%r8), %rsp
+
+- /* jump to identity mapped page */
+- addq $(identity_mapped - relocate_kernel), %r8
+- pushq %r8
+- ret
++ /* load new code segment and jump to identity mapped page */
++ movq %r8, %rax
++ addq $(identity_mapped - relocate_kernel), %rax
++ pushq $(gdt_cs - gdt)
++ pushq %rax
++ lretq
+
+ identity_mapped:
+ /* store the start address on the stack */
+@@ -272,5 +298,19 @@ identity_mapped:
+ xorq %r13, %r13
+ xorq %r14, %r14
+ xorq %r15, %r15
+-
+ ret
++
++ .align 16
++gdt:
++ .quad 0x0000000000000000 /* NULL descriptor */
++gdt_cs:
++ .quad 0x00af9a000000ffff
++gdt_end:
++
++gdt_80:
++ .word gdt_end - gdt - 1 /* limit */
++ .quad 0 /* base - filled in by code above */
++
++idt_80:
++ .word 0 /* limit */
++ .quad 0 /* base */
diff --git a/trunk/2.6.22/20005_blktap-aio-16_03_06.patch1 b/trunk/2.6.22/20005_blktap-aio-16_03_06.patch1
new file mode 100644
index 0000000..9331e9f
--- /dev/null
+++ b/trunk/2.6.22/20005_blktap-aio-16_03_06.patch1
@@ -0,0 +1,209 @@
+Subject: AIO/POLL interaction
+From: http://xenbits.xensource.com/xen-3.1-testing.hg (tip 15042)
+Acked-by: jbeulich@novell.com
+
+---
+ fs/aio.c | 120 ++++++++++++++++++++++++++++++++++++++++++++++++----
+ include/linux/aio.h | 5 ++
+ 2 files changed, 116 insertions(+), 9 deletions(-)
+
+--- a/fs/aio.c 2007-08-27 12:09:26.000000000 -0400
++++ b/fs/aio.c 2007-08-27 14:01:24.000000000 -0400
+@@ -36,6 +36,11 @@
+ #include <asm/uaccess.h>
+ #include <asm/mmu_context.h>
+
++#ifdef CONFIG_EPOLL
++#include <linux/poll.h>
++#include <linux/anon_inodes.h>
++#endif
++
+ #if DEBUG > 1
+ #define dprintk printk
+ #else
+@@ -1009,6 +1014,11 @@ put_rq:
+ if (waitqueue_active(&ctx->wait))
+ wake_up(&ctx->wait);
+
++#ifdef CONFIG_EPOLL
++ if (ctx->file && waitqueue_active(&ctx->poll_wait))
++ wake_up(&ctx->poll_wait);
++#endif
++
+ spin_unlock_irqrestore(&ctx->ctx_lock, flags);
+ return ret;
+ }
+@@ -1016,6 +1026,8 @@ put_rq:
+ /* aio_read_evt
+ * Pull an event off of the ioctx's event ring. Returns the number of
+ * events fetched (0 or 1 ;-)
++ * If the ent parameter is 0, just returns the number of events that would
++ * be fetched.
+ * FIXME: make this use cmpxchg.
+ * TODO: make the ringbuffer user mmap()able (requires FIXME).
+ */
+@@ -1038,13 +1050,18 @@ static int aio_read_evt(struct kioctx *i
+
+ head = ring->head % info->nr;
+ if (head != ring->tail) {
+- struct io_event *evp = aio_ring_event(info, head, KM_USER1);
+- *ent = *evp;
+- head = (head + 1) % info->nr;
+- smp_mb(); /* finish reading the event before updatng the head */
+- ring->head = head;
+- ret = 1;
+- put_aio_ring_event(evp, KM_USER1);
++ if (ent) { /* event requested */
++ struct io_event *evp =
++ aio_ring_event(info, head, KM_USER1);
++ *ent = *evp;
++ head = (head + 1) % info->nr;
++			/* finish reading the event before updating the head */
++ smp_mb();
++ ring->head = head;
++ ret = 1;
++ put_aio_ring_event(evp, KM_USER1);
++ } else /* only need to know availability */
++ ret = 1;
+ }
+ spin_unlock(&info->ring_lock);
+
+@@ -1227,9 +1244,78 @@ static void io_destroy(struct kioctx *io
+
+ aio_cancel_all(ioctx);
+ wait_for_all_aios(ioctx);
++#ifdef CONFIG_EPOLL
++ /* forget the poll file, but it's up to the user to close it */
++ if (ioctx->file) {
++ ioctx->file->private_data = 0;
++ ioctx->file = 0;
++ }
++#endif
+ put_ioctx(ioctx); /* once for the lookup */
+ }
+
++#ifdef CONFIG_EPOLL
++
++static int aio_queue_fd_close(struct inode *inode, struct file *file)
++{
++ struct kioctx *ioctx = file->private_data;
++ if (ioctx) {
++ file->private_data = 0;
++ spin_lock_irq(&ioctx->ctx_lock);
++ ioctx->file = 0;
++ spin_unlock_irq(&ioctx->ctx_lock);
++ }
++ return 0;
++}
++
++static unsigned int aio_queue_fd_poll(struct file *file, poll_table *wait)
++{ unsigned int pollflags = 0;
++ struct kioctx *ioctx = file->private_data;
++
++ if (ioctx) {
++
++ spin_lock_irq(&ioctx->ctx_lock);
++ /* Insert inside our poll wait queue */
++ poll_wait(file, &ioctx->poll_wait, wait);
++
++ /* Check our condition */
++ if (aio_read_evt(ioctx, 0))
++ pollflags = POLLIN | POLLRDNORM;
++ spin_unlock_irq(&ioctx->ctx_lock);
++ }
++
++ return pollflags;
++}
++
++static const struct file_operations aioq_fops = {
++ .release = aio_queue_fd_close,
++ .poll = aio_queue_fd_poll
++};
++
++/* make_aio_fd:
++ * Create a file descriptor that can be used to poll the event queue.
++ * Based on the excellent epoll code.
++ */
++
++static int make_aio_fd(struct kioctx *ioctx)
++{
++ int error, fd;
++ struct inode *inode;
++ struct file *file;
++
++ error = anon_inode_getfd(&fd, &inode, &file, "[aioq]",
++ &aioq_fops, ioctx);
++ if (error)
++ return error;
++
++ /* associate the file with the IO context */
++ ioctx->file = file;
++ init_waitqueue_head(&ioctx->poll_wait);
++ return fd;
++}
++#endif
++
++
+ /* sys_io_setup:
+ * Create an aio_context capable of receiving at least nr_events.
+ * ctxp must not point to an aio_context that already exists, and
+@@ -1242,18 +1328,30 @@ static void io_destroy(struct kioctx *io
+ * resources are available. May fail with -EFAULT if an invalid
+ * pointer is passed for ctxp. Will fail with -ENOSYS if not
+ * implemented.
++ *
++ * To request a selectable fd, the user context has to be initialized
++ * to 1, instead of 0, and the return value is the fd.
++ * This keeps the system call compatible, since a non-zero value
++ * was not allowed so far.
+ */
+ asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
+ {
+ struct kioctx *ioctx = NULL;
+ unsigned long ctx;
+ long ret;
++ int make_fd = 0;
+
+ ret = get_user(ctx, ctxp);
+ if (unlikely(ret))
+ goto out;
+
+ ret = -EINVAL;
++#ifdef CONFIG_EPOLL
++ if (ctx == 1) {
++ make_fd = 1;
++ ctx = 0;
++ }
++#endif
+ if (unlikely(ctx || nr_events == 0)) {
+ pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
+ ctx, nr_events);
+@@ -1264,8 +1362,12 @@ asmlinkage long sys_io_setup(unsigned nr
+ ret = PTR_ERR(ioctx);
+ if (!IS_ERR(ioctx)) {
+ ret = put_user(ioctx->user_id, ctxp);
+- if (!ret)
+- return 0;
++#ifdef CONFIG_EPOLL
++ if (make_fd && ret >= 0)
++ ret = make_aio_fd(ioctx);
++#endif
++ if (ret >= 0)
++ return ret;
+
+ get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
+ io_destroy(ioctx);
+--- a/include/linux/aio.h 2007-08-27 12:09:26.000000000 -0400
++++ b/include/linux/aio.h 2007-08-27 14:01:24.000000000 -0400
+@@ -201,6 +201,11 @@ struct kioctx {
+ struct aio_ring_info ring_info;
+
+ struct delayed_work wq;
++#ifdef CONFIG_EPOLL
++ // poll integration
++ wait_queue_head_t poll_wait;
++ struct file *file;
++#endif
+ };
+
+ /* prototypes */
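
The extension above is driven entirely from user space (blktap's tapdisk is the intended consumer). A hedged sketch of the calling convention spelled out in the sys_io_setup() comment: pre-initialise the context to 1 and treat a non-negative return value as a pollable file descriptor. The raw syscall is used so no assumption is made about the libaio wrapper; the function names here are illustrative only.

#include <poll.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>	/* aio_context_t */

/* On a kernel carrying this patch, returns a pollable fd (>= 0) and fills
 * *ctx with the AIO context for io_submit()/io_getevents(); returns -1 with
 * errno set on failure or on an unpatched kernel (where a non-zero *ctx is
 * rejected with EINVAL). */
static long setup_pollable_aio(unsigned nr_events, aio_context_t *ctx)
{
	*ctx = 1;	/* magic value requesting the selectable fd */
	return syscall(SYS_io_setup, nr_events, ctx);
}

/* Block until at least one completion event is queued, then harvest it
 * with io_getevents() as usual. */
static void wait_for_completions(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	(void)poll(&pfd, 1, -1);	/* aio_queue_fd_poll() reports POLLIN */
}
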
diff --git a/trunk/2.6.22/20006_fix-ide-cd-pio-mode.patch1 b/trunk/2.6.22/20006_fix-ide-cd-pio-mode.patch1
new file mode 100644
index 0000000..1ca415c
--- /dev/null
+++ b/trunk/2.6.22/20006_fix-ide-cd-pio-mode.patch1
@@ -0,0 +1,36 @@
+Subject: Fix IDE CD-drive PIO mode.
+From: http://xenbits.xensource.com/xen-3.1-testing.hg (tip 15042)
+Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
+
+CD drives in PIO mode don't work under Xen because of a change in Linux
+between 2.6.12 and 2.6.16, as a result of the following thread:
+http://lists.parisc-linux.org/pipermail/parisc-linux/2005-August/027197.html
+
+The change breaks systems that have both highmem and a swiotlb, because the
+ide-cd driver does not use the swiotlb: in PIO mode, reads and writes that
+touch highmem pages no longer work. Xen kernels usually have both highmem
+and a swiotlb.
+
+Acked-by: Jan Beulich <jbeulich@novell.com>
+
+---
+ drivers/ide/ide-lib.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/ide/ide-lib.c 2007-08-27 12:09:26.000000000 -0400
++++ b/drivers/ide/ide-lib.c 2007-08-27 14:02:05.000000000 -0400
+@@ -341,10 +341,10 @@ void ide_toggle_bounce(ide_drive_t *driv
+ {
+ u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */
+
+- if (!PCI_DMA_BUS_IS_PHYS) {
+- addr = BLK_BOUNCE_ANY;
+- } else if (on && drive->media == ide_disk) {
+- if (HWIF(drive)->pci_dev)
++ if (on && drive->media == ide_disk) {
++ if (!PCI_DMA_BUS_IS_PHYS)
++ addr = BLK_BOUNCE_ANY;
++ else if (HWIF(drive)->pci_dev)
+ addr = HWIF(drive)->pci_dev->dma_mask;
+ }
+
diff --git a/trunk/2.6.22/20007_i386-mach-io-check-nmi.patch1 b/trunk/2.6.22/20007_i386-mach-io-check-nmi.patch1
new file mode 100644
index 0000000..c6fccf6
--- /dev/null
+++ b/trunk/2.6.22/20007_i386-mach-io-check-nmi.patch1
@@ -0,0 +1,53 @@
+Subject: Separate out clearing of NMI I/O check error
+From: http://xenbits.xensource.com/xen-3.1-testing.hg (tip 15042)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+---
+ arch/i386/kernel/traps.c | 9 +--------
+ include/asm-i386/mach-default/mach_traps.h | 12 ++++++++++++
+ 2 files changed, 13 insertions(+), 8 deletions(-)
+
+--- a/arch/i386/kernel/traps.c 2007-08-27 14:01:24.000000000 -0400
++++ b/arch/i386/kernel/traps.c 2007-08-27 14:01:24.000000000 -0400
+@@ -656,18 +656,11 @@ mem_parity_error(unsigned char reason, s
+ static __kprobes void
+ io_check_error(unsigned char reason, struct pt_regs * regs)
+ {
+- unsigned long i;
+-
+ printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
+ show_registers(regs);
+
+ /* Re-enable the IOCK line, wait for a few seconds */
+- reason = (reason & 0xf) | 8;
+- outb(reason, 0x61);
+- i = 2000;
+- while (--i) udelay(1000);
+- reason &= ~8;
+- outb(reason, 0x61);
++ clear_io_check_error(reason);
+ }
+
+ static __kprobes void
+--- a/include/asm-i386/mach-default/mach_traps.h 2007-08-27 12:09:26.000000000 -0400
++++ b/include/asm-i386/mach-default/mach_traps.h 2007-08-27 14:01:24.000000000 -0400
+@@ -15,6 +15,18 @@ static inline void clear_mem_error(unsig
+ outb(reason, 0x61);
+ }
+
++static inline void clear_io_check_error(unsigned char reason)
++{
++ unsigned long i;
++
++ reason = (reason & 0xf) | 8;
++ outb(reason, 0x61);
++ i = 2000;
++ while (--i) udelay(1000);
++ reason &= ~8;
++ outb(reason, 0x61);
++}
++
+ static inline unsigned char get_nmi_reason(void)
+ {
+ return inb(0x61);
diff --git a/trunk/2.6.22/20008_net-csum.patch1 b/trunk/2.6.22/20008_net-csum.patch1
new file mode 100644
index 0000000..f52f99b
--- /dev/null
+++ b/trunk/2.6.22/20008_net-csum.patch1
@@ -0,0 +1,50 @@
+Subject: xen3 TCP/UDP checksumming
+From: http://xenbits.xensource.com/xen-3.1-testing.hg (tip 15042)
+Acked-by: jbeulich@novell.com
+
+This is only a guess, based on suggestions from Keir Fraser.
+
+---
+ net/ipv4/netfilter/nf_nat_proto_tcp.c | 3 +++
+ net/ipv4/netfilter/nf_nat_proto_udp.c | 4 ++++
+ net/ipv4/xfrm4_output.c | 4 ++++
+ 3 files changed, 11 insertions(+)
+
+--- a/net/ipv4/netfilter/nf_nat_proto_tcp.c 2007-08-27 12:09:26.000000000 -0400
++++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c 2007-08-27 14:01:24.000000000 -0400
+@@ -132,6 +132,9 @@ tcp_manip_pkt(struct sk_buff **pskb,
+ if (hdrsize < sizeof(*hdr))
+ return 1;
+
++ if (skb_checksum_setup(*pskb))
++ return 0;
++
+ nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1);
+ nf_proto_csum_replace2(&hdr->check, *pskb, oldport, newport, 0);
+ return 1;
+--- a/net/ipv4/netfilter/nf_nat_proto_udp.c 2007-08-27 12:09:26.000000000 -0400
++++ b/net/ipv4/netfilter/nf_nat_proto_udp.c 2007-08-27 14:01:24.000000000 -0400
+@@ -116,6 +116,10 @@ udp_manip_pkt(struct sk_buff **pskb,
+ newport = tuple->dst.u.udp.port;
+ portptr = &hdr->dest;
+ }
++
++ if (skb_checksum_setup(*pskb))
++ return 0;
++
+ if (hdr->check || (*pskb)->ip_summed == CHECKSUM_PARTIAL) {
+ nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1);
+ nf_proto_csum_replace2(&hdr->check, *pskb, *portptr, newport,
+--- a/net/ipv4/xfrm4_output.c 2007-08-27 12:09:26.000000000 -0400
++++ b/net/ipv4/xfrm4_output.c 2007-08-27 14:01:24.000000000 -0400
+@@ -47,6 +47,10 @@ static int xfrm4_output_one(struct sk_bu
+ struct xfrm_state *x = dst->xfrm;
+ int err;
+
++ err = skb_checksum_setup(skb);
++ if (err)
++ goto error_nolock;
++
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ err = skb_checksum_help(skb);
+ if (err)
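
skb_checksum_setup() itself is not defined by this patch; it comes from the Xen networking patches elsewhere in this series (an equivalent definition is visible in the net/core/dev.c hunk of the patch being removed near the top of this diff). It converts a guest skb whose checksum was deliberately left blank into an ordinary CHECKSUM_PARTIAL one, returning -EPROTO for anything it cannot parse. All three hunks follow the same pattern, sketched once here for any other path that rewrites TCP/UDP headers of such packets; the function name is illustrative, not part of the patch.

#include <linux/skbuff.h>

/* Sketch of the calling convention used in the hunks above (assumes a
 * Xen-patched tree where skb_checksum_setup() exists). */
static int fixup_before_rewriting_l4_header(struct sk_buff *skb)
{
	int err;

	/* Fill in csum_start/csum_offset for a checksum-deferred guest
	 * skb before anything edits the TCP/UDP header. */
	err = skb_checksum_setup(skb);
	if (err)
		return err;	/* unsupported packet: caller drops it */

	/* ...now safe to call nf_proto_csum_replace*()/skb_checksum_help()... */
	return 0;
}
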
diff --git a/trunk/2.6.22/20009_xenoprof-generic.patch1 b/trunk/2.6.22/20009_xenoprof-generic.patch1
new file mode 100644
index 0000000..5b73392
--- /dev/null
+++ b/trunk/2.6.22/20009_xenoprof-generic.patch1
@@ -0,0 +1,669 @@
+Subject: Xen oprofile
+From: http://xenbits.xensource.com/xen-3.1-testing.hg (tip 15042)
+Acked-by: jbeulich@novell.com
+
+---
+ drivers/oprofile/buffer_sync.c | 87 ++++++++++++----
+ drivers/oprofile/cpu_buffer.c | 51 +++++++--
+ drivers/oprofile/cpu_buffer.h | 9 +
+ drivers/oprofile/event_buffer.h | 7 +
+ drivers/oprofile/oprof.c | 32 +++++-
+ drivers/oprofile/oprof.h | 3
+ drivers/oprofile/oprofile_files.c | 201 +++++++++++++++++++++++++++++++++++++-
+ include/linux/oprofile.h | 9 +
+ 8 files changed, 360 insertions(+), 39 deletions(-)
+
+--- a/drivers/oprofile/buffer_sync.c 2007-08-27 12:09:26.000000000 -0400
++++ b/drivers/oprofile/buffer_sync.c 2007-08-27 14:02:05.000000000 -0400
+@@ -6,6 +6,10 @@
+ *
+ * @author John Levon <levon@movementarian.org>
+ *
++ * Modified by Aravind Menon for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ *
+ * This is the core of the buffer management. Each
+ * CPU buffer is processed and entered into the
+ * global event buffer. Such processing is necessary
+@@ -39,6 +43,7 @@ static cpumask_t marked_cpus = CPU_MASK_
+ static DEFINE_SPINLOCK(task_mortuary);
+ static void process_task_mortuary(void);
+
++static int cpu_current_domain[NR_CPUS];
+
+ /* Take ownership of the task struct and place it on the
+ * list for processing. Only after two full buffer syncs
+@@ -147,6 +152,11 @@ static void end_sync(void)
+ int sync_start(void)
+ {
+ int err;
++ int i;
++
++ for (i = 0; i < NR_CPUS; i++) {
++ cpu_current_domain[i] = COORDINATOR_DOMAIN;
++ }
+
+ start_cpu_work();
+
+@@ -276,15 +286,31 @@ static void add_cpu_switch(int i)
+ last_cookie = INVALID_COOKIE;
+ }
+
+-static void add_kernel_ctx_switch(unsigned int in_kernel)
++static void add_cpu_mode_switch(unsigned int cpu_mode)
+ {
+ add_event_entry(ESCAPE_CODE);
+- if (in_kernel)
+- add_event_entry(KERNEL_ENTER_SWITCH_CODE);
+- else
+- add_event_entry(KERNEL_EXIT_SWITCH_CODE);
++ switch (cpu_mode) {
++ case CPU_MODE_USER:
++ add_event_entry(USER_ENTER_SWITCH_CODE);
++ break;
++ case CPU_MODE_KERNEL:
++ add_event_entry(KERNEL_ENTER_SWITCH_CODE);
++ break;
++ case CPU_MODE_XEN:
++ add_event_entry(XEN_ENTER_SWITCH_CODE);
++ break;
++ default:
++ break;
++ }
+ }
+-
++
++static void add_domain_switch(unsigned long domain_id)
++{
++ add_event_entry(ESCAPE_CODE);
++ add_event_entry(DOMAIN_SWITCH_CODE);
++ add_event_entry(domain_id);
++}
++
+ static void
+ add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
+ {
+@@ -349,9 +375,9 @@ static int add_us_sample(struct mm_struc
+ * for later lookup from userspace.
+ */
+ static int
+-add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
++add_sample(struct mm_struct * mm, struct op_sample * s, int cpu_mode)
+ {
+- if (in_kernel) {
++ if (cpu_mode >= CPU_MODE_KERNEL) {
+ add_sample_entry(s->eip, s->event);
+ return 1;
+ } else if (mm) {
+@@ -497,15 +523,21 @@ void sync_buffer(int cpu)
+ struct mm_struct *mm = NULL;
+ struct task_struct * new;
+ unsigned long cookie = 0;
+- int in_kernel = 1;
++ int cpu_mode = 1;
+ unsigned int i;
+ sync_buffer_state state = sb_buffer_start;
+ unsigned long available;
++ int domain_switch = 0;
+
+ mutex_lock(&buffer_mutex);
+
+ add_cpu_switch(cpu);
+
++ /* We need to assign the first samples in this CPU buffer to the
++ same domain that we were processing at the last sync_buffer */
++ if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
++ add_domain_switch(cpu_current_domain[cpu]);
++ }
+ /* Remember, only we can modify tail_pos */
+
+ available = get_slots(cpu_buf);
+@@ -513,16 +545,18 @@ void sync_buffer(int cpu)
+ for (i = 0; i < available; ++i) {
+ struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
+
+- if (is_code(s->eip)) {
+- if (s->event <= CPU_IS_KERNEL) {
+- /* kernel/userspace switch */
+- in_kernel = s->event;
++ if (is_code(s->eip) && !domain_switch) {
++ if (s->event <= CPU_MODE_XEN) {
++ /* xen/kernel/userspace switch */
++ cpu_mode = s->event;
+ if (state == sb_buffer_start)
+ state = sb_sample_start;
+- add_kernel_ctx_switch(s->event);
++ add_cpu_mode_switch(s->event);
+ } else if (s->event == CPU_TRACE_BEGIN) {
+ state = sb_bt_start;
+ add_trace_begin();
++ } else if (s->event == CPU_DOMAIN_SWITCH) {
++ domain_switch = 1;
+ } else {
+ struct mm_struct * oldmm = mm;
+
+@@ -536,11 +570,21 @@ void sync_buffer(int cpu)
+ add_user_ctx_switch(new, cookie);
+ }
+ } else {
+- if (state >= sb_bt_start &&
+- !add_sample(mm, s, in_kernel)) {
+- if (state == sb_bt_start) {
+- state = sb_bt_ignore;
+- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
++ if (domain_switch) {
++ cpu_current_domain[cpu] = s->eip;
++ add_domain_switch(s->eip);
++ domain_switch = 0;
++ } else {
++ if (cpu_current_domain[cpu] !=
++ COORDINATOR_DOMAIN) {
++ add_sample_entry(s->eip, s->event);
++ }
++ else if (state >= sb_bt_start &&
++ !add_sample(mm, s, cpu_mode)) {
++ if (state == sb_bt_start) {
++ state = sb_bt_ignore;
++ atomic_inc(&oprofile_stats.bt_lost_no_mapping);
++ }
+ }
+ }
+ }
+@@ -549,6 +593,11 @@ void sync_buffer(int cpu)
+ }
+ release_mm(mm);
+
++ /* We reset domain to COORDINATOR at each CPU switch */
++ if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
++ add_domain_switch(COORDINATOR_DOMAIN);
++ }
++
+ mark_done(cpu);
+
+ mutex_unlock(&buffer_mutex);
+--- a/drivers/oprofile/cpu_buffer.c 2007-08-27 12:09:26.000000000 -0400
++++ b/drivers/oprofile/cpu_buffer.c 2007-08-27 14:02:05.000000000 -0400
+@@ -6,6 +6,10 @@
+ *
+ * @author John Levon <levon@movementarian.org>
+ *
++ * Modified by Aravind Menon for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ *
+ * Each CPU has a local buffer that stores PC value/event
+ * pairs. We also log context switches when we notice them.
+ * Eventually each CPU's buffer is processed into the global
+@@ -34,6 +38,8 @@ static void wq_sync_buffer(struct work_s
+ #define DEFAULT_TIMER_EXPIRE (HZ / 10)
+ static int work_enabled;
+
++static int32_t current_domain = COORDINATOR_DOMAIN;
++
+ void free_cpu_buffers(void)
+ {
+ int i;
+@@ -57,7 +63,7 @@ int alloc_cpu_buffers(void)
+ goto fail;
+
+ b->last_task = NULL;
+- b->last_is_kernel = -1;
++ b->last_cpu_mode = -1;
+ b->tracing = 0;
+ b->buffer_size = buffer_size;
+ b->tail_pos = 0;
+@@ -113,7 +119,7 @@ void cpu_buffer_reset(struct oprofile_cp
+ * collected will populate the buffer with proper
+ * values to initialize the buffer
+ */
+- cpu_buf->last_is_kernel = -1;
++ cpu_buf->last_cpu_mode = -1;
+ cpu_buf->last_task = NULL;
+ }
+
+@@ -163,13 +169,13 @@ add_code(struct oprofile_cpu_buffer * bu
+ * because of the head/tail separation of the writer and reader
+ * of the CPU buffer.
+ *
+- * is_kernel is needed because on some architectures you cannot
++ * cpu_mode is needed because on some architectures you cannot
+ * tell if you are in kernel or user space simply by looking at
+- * pc. We tag this in the buffer by generating kernel enter/exit
+- * events whenever is_kernel changes
++ * pc. We tag this in the buffer by generating kernel/user (and xen)
++ * enter events whenever cpu_mode changes
+ */
+ static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
+- int is_kernel, unsigned long event)
++ int cpu_mode, unsigned long event)
+ {
+ struct task_struct * task;
+
+@@ -180,18 +186,18 @@ static int log_sample(struct oprofile_cp
+ return 0;
+ }
+
+- is_kernel = !!is_kernel;
+-
+ task = current;
+
+ /* notice a switch from user->kernel or vice versa */
+- if (cpu_buf->last_is_kernel != is_kernel) {
+- cpu_buf->last_is_kernel = is_kernel;
+- add_code(cpu_buf, is_kernel);
++ if (cpu_buf->last_cpu_mode != cpu_mode) {
++ cpu_buf->last_cpu_mode = cpu_mode;
++ add_code(cpu_buf, cpu_mode);
+ }
+-
++
+ /* notice a task switch */
+- if (cpu_buf->last_task != task) {
++ /* if not processing other domain samples */
++ if ((cpu_buf->last_task != task) &&
++ (current_domain == COORDINATOR_DOMAIN)) {
+ cpu_buf->last_task = task;
+ add_code(cpu_buf, (unsigned long)task);
+ }
+@@ -275,6 +281,25 @@ void oprofile_add_trace(unsigned long pc
+ add_sample(cpu_buf, pc, 0);
+ }
+
++int oprofile_add_domain_switch(int32_t domain_id)
++{
++ struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
++
++ /* should have space for switching into and out of domain
++ (2 slots each) plus one sample and one cpu mode switch */
++ if (((nr_available_slots(cpu_buf) < 6) &&
++ (domain_id != COORDINATOR_DOMAIN)) ||
++ (nr_available_slots(cpu_buf) < 2))
++ return 0;
++
++ add_code(cpu_buf, CPU_DOMAIN_SWITCH);
++ add_sample(cpu_buf, domain_id, 0);
++
++ current_domain = domain_id;
++
++ return 1;
++}
++
+ /*
+ * This serves to avoid cpu buffer overflow, and makes sure
+ * the task mortuary progresses
+--- a/drivers/oprofile/cpu_buffer.h 2007-08-27 12:09:26.000000000 -0400
++++ b/drivers/oprofile/cpu_buffer.h 2007-08-27 14:01:24.000000000 -0400
+@@ -36,7 +36,7 @@ struct oprofile_cpu_buffer {
+ volatile unsigned long tail_pos;
+ unsigned long buffer_size;
+ struct task_struct * last_task;
+- int last_is_kernel;
++ int last_cpu_mode;
+ int tracing;
+ struct op_sample * buffer;
+ unsigned long sample_received;
+@@ -51,7 +51,10 @@ extern struct oprofile_cpu_buffer cpu_bu
+ void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
+
+ /* transient events for the CPU buffer -> event buffer */
+-#define CPU_IS_KERNEL 1
+-#define CPU_TRACE_BEGIN 2
++#define CPU_MODE_USER 0
++#define CPU_MODE_KERNEL 1
++#define CPU_MODE_XEN 2
++#define CPU_TRACE_BEGIN 3
++#define CPU_DOMAIN_SWITCH 4
+
+ #endif /* OPROFILE_CPU_BUFFER_H */
+--- a/drivers/oprofile/event_buffer.h 2007-08-27 12:09:26.000000000 -0400
++++ b/drivers/oprofile/event_buffer.h 2007-08-27 14:01:24.000000000 -0400
+@@ -29,15 +29,20 @@ void wake_up_buffer_waiter(void);
+ #define CPU_SWITCH_CODE 2
+ #define COOKIE_SWITCH_CODE 3
+ #define KERNEL_ENTER_SWITCH_CODE 4
+-#define KERNEL_EXIT_SWITCH_CODE 5
++#define USER_ENTER_SWITCH_CODE 5
+ #define MODULE_LOADED_CODE 6
+ #define CTX_TGID_CODE 7
+ #define TRACE_BEGIN_CODE 8
+ #define TRACE_END_CODE 9
++#define XEN_ENTER_SWITCH_CODE 10
++#define DOMAIN_SWITCH_CODE 11
+
+ #define INVALID_COOKIE ~0UL
+ #define NO_COOKIE 0UL
+
++/* Constant used to refer to coordinator domain (Xen) */
++#define COORDINATOR_DOMAIN -1
++
+ /* add data to the event buffer */
+ void add_event_entry(unsigned long data);
+
+--- a/drivers/oprofile/oprof.c 2007-08-27 12:09:26.000000000 -0400
++++ b/drivers/oprofile/oprof.c 2007-08-27 14:02:05.000000000 -0400
+@@ -5,6 +5,10 @@
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
++ *
++ * Modified by Aravind Menon for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
+ */
+
+ #include <linux/kernel.h>
+@@ -19,7 +23,7 @@
+ #include "cpu_buffer.h"
+ #include "buffer_sync.h"
+ #include "oprofile_stats.h"
+-
++
+ struct oprofile_operations oprofile_ops;
+
+ unsigned long oprofile_started;
+@@ -33,6 +37,32 @@ static DEFINE_MUTEX(start_mutex);
+ */
+ static int timer = 0;
+
++int oprofile_set_active(int active_domains[], unsigned int adomains)
++{
++ int err;
++
++ if (!oprofile_ops.set_active)
++ return -EINVAL;
++
++ mutex_lock(&start_mutex);
++ err = oprofile_ops.set_active(active_domains, adomains);
++ mutex_unlock(&start_mutex);
++ return err;
++}
++
++int oprofile_set_passive(int passive_domains[], unsigned int pdomains)
++{
++ int err;
++
++ if (!oprofile_ops.set_passive)
++ return -EINVAL;
++
++ mutex_lock(&start_mutex);
++ err = oprofile_ops.set_passive(passive_domains, pdomains);
++ mutex_unlock(&start_mutex);
++ return err;
++}
++
+ int oprofile_setup(void)
+ {
+ int err;
+--- a/drivers/oprofile/oprof.h 2007-08-27 12:09:26.000000000 -0400
++++ b/drivers/oprofile/oprof.h 2007-08-27 14:01:24.000000000 -0400
+@@ -35,5 +35,8 @@ void oprofile_create_files(struct super_
+ void oprofile_timer_init(struct oprofile_operations * ops);
+
+ int oprofile_set_backtrace(unsigned long depth);
++
++int oprofile_set_active(int active_domains[], unsigned int adomains);
++int oprofile_set_passive(int passive_domains[], unsigned int pdomains);
+
+ #endif /* OPROF_H */
+--- a/drivers/oprofile/oprofile_files.c 2007-08-27 12:09:26.000000000 -0400
++++ b/drivers/oprofile/oprofile_files.c 2007-08-27 14:02:05.000000000 -0400
+@@ -5,15 +5,21 @@
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
++ *
++ * Modified by Aravind Menon for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
+ */
+
+ #include <linux/fs.h>
+ #include <linux/oprofile.h>
++#include <asm/uaccess.h>
++#include <linux/ctype.h>
+
+ #include "event_buffer.h"
+ #include "oprofile_stats.h"
+ #include "oprof.h"
+-
++
+ unsigned long fs_buffer_size = 131072;
+ unsigned long fs_cpu_buffer_size = 8192;
+ unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
+@@ -117,11 +123,202 @@ static ssize_t dump_write(struct file *
+ static const struct file_operations dump_fops = {
+ .write = dump_write,
+ };
+-
++
++#define TMPBUFSIZE 512
++
++static unsigned int adomains = 0;
++static int active_domains[MAX_OPROF_DOMAINS + 1];
++static DEFINE_MUTEX(adom_mutex);
++
++static ssize_t adomain_write(struct file * file, char const __user * buf,
++ size_t count, loff_t * offset)
++{
++ char *tmpbuf;
++ char *startp, *endp;
++ int i;
++ unsigned long val;
++ ssize_t retval = count;
++
++ if (*offset)
++ return -EINVAL;
++ if (count > TMPBUFSIZE - 1)
++ return -EINVAL;
++
++ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
++ return -ENOMEM;
++
++ if (copy_from_user(tmpbuf, buf, count)) {
++ kfree(tmpbuf);
++ return -EFAULT;
++ }
++ tmpbuf[count] = 0;
++
++ mutex_lock(&adom_mutex);
++
++ startp = tmpbuf;
++ /* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
++ for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
++ val = simple_strtoul(startp, &endp, 0);
++ if (endp == startp)
++ break;
++ while (ispunct(*endp) || isspace(*endp))
++ endp++;
++ active_domains[i] = val;
++ if (active_domains[i] != val)
++ /* Overflow, force error below */
++ i = MAX_OPROF_DOMAINS + 1;
++ startp = endp;
++ }
++ /* Force error on trailing junk */
++ adomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
++
++ kfree(tmpbuf);
++
++ if (adomains > MAX_OPROF_DOMAINS
++ || oprofile_set_active(active_domains, adomains)) {
++ adomains = 0;
++ retval = -EINVAL;
++ }
++
++ mutex_unlock(&adom_mutex);
++ return retval;
++}
++
++static ssize_t adomain_read(struct file * file, char __user * buf,
++ size_t count, loff_t * offset)
++{
++ char * tmpbuf;
++ size_t len;
++ int i;
++ ssize_t retval;
++
++ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
++ return -ENOMEM;
++
++ mutex_lock(&adom_mutex);
++
++ len = 0;
++ for (i = 0; i < adomains; i++)
++ len += snprintf(tmpbuf + len,
++ len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
++ "%u ", active_domains[i]);
++ WARN_ON(len > TMPBUFSIZE);
++ if (len != 0 && len <= TMPBUFSIZE)
++ tmpbuf[len-1] = '\n';
++
++ mutex_unlock(&adom_mutex);
++
++ retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
++
++ kfree(tmpbuf);
++ return retval;
++}
++
++
++static struct file_operations active_domain_ops = {
++ .read = adomain_read,
++ .write = adomain_write,
++};
++
++static unsigned int pdomains = 0;
++static int passive_domains[MAX_OPROF_DOMAINS];
++static DEFINE_MUTEX(pdom_mutex);
++
++static ssize_t pdomain_write(struct file * file, char const __user * buf,
++ size_t count, loff_t * offset)
++{
++ char *tmpbuf;
++ char *startp, *endp;
++ int i;
++ unsigned long val;
++ ssize_t retval = count;
++
++ if (*offset)
++ return -EINVAL;
++ if (count > TMPBUFSIZE - 1)
++ return -EINVAL;
++
++ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
++ return -ENOMEM;
++
++ if (copy_from_user(tmpbuf, buf, count)) {
++ kfree(tmpbuf);
++ return -EFAULT;
++ }
++ tmpbuf[count] = 0;
++
++ mutex_lock(&pdom_mutex);
++
++ startp = tmpbuf;
++ /* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
++ for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
++ val = simple_strtoul(startp, &endp, 0);
++ if (endp == startp)
++ break;
++ while (ispunct(*endp) || isspace(*endp))
++ endp++;
++ passive_domains[i] = val;
++ if (passive_domains[i] != val)
++ /* Overflow, force error below */
++ i = MAX_OPROF_DOMAINS + 1;
++ startp = endp;
++ }
++ /* Force error on trailing junk */
++ pdomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
++
++ kfree(tmpbuf);
++
++ if (pdomains > MAX_OPROF_DOMAINS
++ || oprofile_set_passive(passive_domains, pdomains)) {
++ pdomains = 0;
++ retval = -EINVAL;
++ }
++
++ mutex_unlock(&pdom_mutex);
++ return retval;
++}
++
++static ssize_t pdomain_read(struct file * file, char __user * buf,
++ size_t count, loff_t * offset)
++{
++ char * tmpbuf;
++ size_t len;
++ int i;
++ ssize_t retval;
++
++ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
++ return -ENOMEM;
++
++ mutex_lock(&pdom_mutex);
++
++ len = 0;
++ for (i = 0; i < pdomains; i++)
++ len += snprintf(tmpbuf + len,
++ len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
++ "%u ", passive_domains[i]);
++ WARN_ON(len > TMPBUFSIZE);
++ if (len != 0 && len <= TMPBUFSIZE)
++ tmpbuf[len-1] = '\n';
++
++ mutex_unlock(&pdom_mutex);
++
++ retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
++
++ kfree(tmpbuf);
++ return retval;
++}
++
++static struct file_operations passive_domain_ops = {
++ .read = pdomain_read,
++ .write = pdomain_write,
++};
++
+ void oprofile_create_files(struct super_block * sb, struct dentry * root)
+ {
+ oprofilefs_create_file(sb, root, "enable", &enable_fops);
+ oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
++ oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
++ oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
+ oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
+ oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
+ oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
+--- a/include/linux/oprofile.h 2007-08-27 12:09:26.000000000 -0400
++++ b/include/linux/oprofile.h 2007-08-27 14:02:05.000000000 -0400
+@@ -16,6 +16,8 @@
+ #include <linux/types.h>
+ #include <linux/spinlock.h>
+ #include <asm/atomic.h>
++
++#include <xen/interface/xenoprof.h>
+
+ struct super_block;
+ struct dentry;
+@@ -27,6 +29,11 @@ struct oprofile_operations {
+ /* create any necessary configuration files in the oprofile fs.
+ * Optional. */
+ int (*create_files)(struct super_block * sb, struct dentry * root);
++ /* setup active domains with Xen */
++ int (*set_active)(int *active_domains, unsigned int adomains);
++ /* setup passive domains with Xen */
++ int (*set_passive)(int *passive_domains, unsigned int pdomains);
++
+ /* Do any necessary interrupt setup. Optional. */
+ int (*setup)(void);
+ /* Do any necessary interrupt shutdown. Optional. */
+@@ -78,6 +85,8 @@ void oprofile_add_pc(unsigned long pc, i
+ /* add a backtrace entry, to be called from the ->backtrace callback */
+ void oprofile_add_trace(unsigned long eip);
+
++/* add a domain switch entry */
++int oprofile_add_domain_switch(int32_t domain_id);
+
+ /**
+ * Create a file of the given name as a child of the given root, with
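The xenoprof hunks above widen oprofile's kernel/user flag into a three-way cpu_mode (user, kernel, Xen), log CPU_DOMAIN_SWITCH records so samples from other domains can be attributed, and expose two new oprofilefs files, active_domains and passive_domains, whose write handlers accept a list of domain IDs separated by whitespace or punctuation. The minimal user-space sketch below is not part of this patch; the /dev/oprofile mount point and the domain IDs "0 1" are assumptions used only for illustration.

/*
 * Sketch only (not from the patch): configure active-domain profiling
 * through the oprofilefs file created by oprofile_create_files() above.
 * Assumes oprofilefs is mounted at /dev/oprofile.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Domain IDs to profile; parsed by adomain_write() in the patch. */
	const char *domains = "0 1\n";
	int fd = open("/dev/oprofile/active_domains", O_WRONLY);

	if (fd < 0) {
		perror("open active_domains");
		return 1;
	}
	if (write(fd, domains, strlen(domains)) < 0) {
		perror("write active_domains");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}

Reading the same file back returns the currently accepted list, space-separated with a trailing newline, as formatted by adomain_read().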
diff --git a/trunk/2.6.22/20010_softlockup-no-idle-hz.patch1 b/trunk/2.6.22/20010_softlockup-no-idle-hz.patch1
new file mode 100644
index 0000000..e67c89c
--- /dev/null
+++ b/trunk/2.6.22/20010_softlockup-no-idle-hz.patch1
@@ -0,0 +1,75 @@
+Subject: xen3 softlockup - no-idle-hz interaction fix
+From: http://xenbits.xensource.com/xen-3.1-testing.hg (tip 15042)
+Acked-by: jbeulich@novell.com
+
+Index: head-2007-09-25/include/linux/sched.h
+===================================================================
+--- head-2007-09-25.orig/include/linux/sched.h 2007-09-25 14:22:37.000000000 +0200
++++ head-2007-09-25/include/linux/sched.h 2007-09-25 14:32:18.000000000 +0200
+@@ -236,11 +236,16 @@ extern void update_process_times(int use
+ extern void scheduler_tick(void);
+
+ #ifdef CONFIG_DETECT_SOFTLOCKUP
++extern unsigned long softlockup_get_next_event(void);
+ extern void softlockup_tick(void);
+ extern void spawn_softlockup_task(void);
+ extern void touch_softlockup_watchdog(void);
+ extern void touch_all_softlockup_watchdogs(void);
+ #else
++static inline unsigned long softlockup_get_next_event(void)
++{
++ return MAX_JIFFY_OFFSET;
++}
+ static inline void softlockup_tick(void)
+ {
+ }
+Index: head-2007-09-25/kernel/softlockup.c
+===================================================================
+--- head-2007-09-25.orig/kernel/softlockup.c 2007-09-25 14:22:37.000000000 +0200
++++ head-2007-09-25/kernel/softlockup.c 2007-09-25 14:32:18.000000000 +0200
+@@ -60,6 +60,19 @@ void touch_all_softlockup_watchdogs(void
+ }
+ EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
+
++unsigned long softlockup_get_next_event(void)
++{
++ int this_cpu = smp_processor_id();
++ unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
++
++ if (per_cpu(print_timestamp, this_cpu) == touch_timestamp ||
++ did_panic ||
++ !per_cpu(watchdog_task, this_cpu))
++ return MAX_JIFFY_OFFSET;
++
++ return max_t(long, 0, touch_timestamp + HZ - jiffies);
++}
++
+ /*
+ * This callback runs from the timer interrupt, and checks
+ * whether the watchdog thread has hung or not:
+Index: head-2007-09-25/kernel/timer.c
+===================================================================
+--- head-2007-09-25.orig/kernel/timer.c 2007-09-25 14:22:37.000000000 +0200
++++ head-2007-09-25/kernel/timer.c 2007-09-25 14:32:18.000000000 +0200
+@@ -781,7 +781,7 @@ static unsigned long cmp_next_hrtimer_ev
+ unsigned long get_next_timer_interrupt(unsigned long now)
+ {
+ tvec_base_t *base = __get_cpu_var(tvec_bases);
+- unsigned long expires;
++ unsigned long expires, sl_next;
+
+ spin_lock(&base->lock);
+ expires = __next_timer_interrupt(base);
+@@ -790,7 +790,11 @@ unsigned long get_next_timer_interrupt(u
+ if (time_before_eq(expires, now))
+ return now;
+
+- return cmp_next_hrtimer_event(now, expires);
++ expires = cmp_next_hrtimer_event(now, expires);
++ sl_next = softlockup_get_next_event();
++
++ return expires <= now || expires - now < sl_next
++ ? expires : now + sl_next;
+ }
+
+ #ifdef CONFIG_NO_IDLE_HZ
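The softlockup patch above caps the tickless sleep length computed by get_next_timer_interrupt() with softlockup_get_next_event(), so a CPU that goes idle under CONFIG_NO_IDLE_HZ still wakes in time to feed its watchdog. The stand-alone sketch below is not from the patch; it restates the final ternary with invented jiffy values to show the clamp.

/*
 * Illustrative only: the clamp applied at the end of
 * get_next_timer_interrupt() in the hunk above, with made-up values.
 */
#include <stdio.h>

static unsigned long clamp_next_event(unsigned long now, unsigned long expires,
				      unsigned long sl_next)
{
	/* Wake earlier if the softlockup watchdog is due before the next timer. */
	return (expires <= now || expires - now < sl_next) ? expires
							   : now + sl_next;
}

int main(void)
{
	/* Next timer 500 jiffies out, watchdog wants to run in 90 jiffies. */
	printf("%lu\n", clamp_next_event(1000, 1500, 90));	/* prints 1090 */
	/* Next timer already due; keep it. */
	printf("%lu\n", clamp_next_event(1000, 900, 90));	/* prints 900 */
	return 0;
}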
diff --git a/trunk/2.6.22/20011_xen3-auto-xen-arch.patch1 b/trunk/2.6.22/20011_xen3-auto-xen-arch.patch1
new file mode 100644
index 0000000..0252943
--- /dev/null
+++ b/trunk/2.6.22/20011_xen3-auto-xen-arch.patch1
@@ -0,0 +1,49825 @@
+Subject: xen3 xen-arch
+From: http://xenbits.xensource.com/xen-3.1-testing.hg (tip 15042)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+List of files having Xen derivates (perhaps created during the merging
+of newer kernel versions), for xen-port-patches.py to pick up (i.e. this
+must be retained here until the XenSource tree has picked up the
+respective kernel.org version):
+
++++ linux/arch/i386/kernel/e820-xen.c
++++ linux/drivers/char/mem-xen.c
++++ linux/lib/swiotlb-xen.c
+
+---
+ arch/i386/boot-xen/Makefile | 21
+ arch/i386/kernel/acpi/boot-xen.c | 1168 ++++++++
+ arch/i386/kernel/apic-xen.c | 155 +
+ arch/i386/kernel/cpu/common-xen.c | 743 +++++
+ arch/i386/kernel/cpu/mtrr/main-xen.c | 197 +
+ arch/i386/kernel/early_printk-xen.c | 2
+ arch/i386/kernel/entry-xen.S | 1216 ++++++++
+ arch/i386/kernel/fixup.c | 88
+ arch/i386/kernel/head-xen.S | 207 +
+ arch/i386/kernel/init_task-xen.c | 51
+ arch/i386/kernel/io_apic-xen.c | 2777 ++++++++++++++++++++
+ arch/i386/kernel/ioport-xen.c | 122
+ arch/i386/kernel/irq-xen.c | 324 ++
+ arch/i386/kernel/ldt-xen.c | 270 +
+ arch/i386/kernel/microcode-xen.c | 144 +
+ arch/i386/kernel/mpparse-xen.c | 1185 ++++++++
+ arch/i386/kernel/pci-dma-xen.c | 366 ++
+ arch/i386/kernel/process-xen.c | 853 ++++++
+ arch/i386/kernel/quirks-xen.c | 47
+ arch/i386/kernel/setup-xen.c | 1871 +++++++++++++
+ arch/i386/kernel/smp-xen.c | 624 ++++
+ arch/i386/kernel/swiotlb.c | 716 +++++
+ arch/i386/kernel/time-xen.c | 1141 ++++++++
+ arch/i386/kernel/traps-xen.c | 1186 ++++++++
+ arch/i386/kernel/vsyscall-note-xen.S | 32
+ arch/i386/mach-xen/Makefile | 5
+ arch/i386/mach-xen/setup.c | 147 +
+ arch/i386/mm/fault-xen.c | 769 +++++
+ arch/i386/mm/highmem-xen.c | 136
+ arch/i386/mm/hypervisor.c | 451 +++
+ arch/i386/mm/init-xen.c | 850 ++++++
+ arch/i386/mm/ioremap-xen.c | 443 +++
+ arch/i386/mm/pgtable-xen.c | 727 +++++
+ arch/i386/oprofile/xenoprof.c | 179 +
+ arch/i386/pci/irq-xen.c | 1205 ++++++++
+ arch/i386/pci/pcifront.c | 55
+ arch/x86_64/ia32/ia32entry-xen.S | 743 +++++
+ arch/x86_64/ia32/syscall32-xen.c | 128
+ arch/x86_64/ia32/syscall32_syscall-xen.S | 28
+ arch/x86_64/ia32/vsyscall-int80.S | 58
+ arch/x86_64/kernel/apic-xen.c | 197 +
+ arch/x86_64/kernel/e820-xen.c | 774 +++++
+ arch/x86_64/kernel/early_printk-xen.c | 302 ++
+ arch/x86_64/kernel/entry-xen.S | 1325 +++++++++
+ arch/x86_64/kernel/genapic-xen.c | 143 +
+ arch/x86_64/kernel/genapic_xen.c | 161 +
+ arch/x86_64/kernel/head-xen.S | 203 +
+ arch/x86_64/kernel/head64-xen.c | 162 +
+ arch/x86_64/kernel/io_apic-xen.c | 2269 ++++++++++++++++
+ arch/x86_64/kernel/ioport-xen.c | 99
+ arch/x86_64/kernel/irq-xen.c | 197 +
+ arch/x86_64/kernel/ldt-xen.c | 282 ++
+ arch/x86_64/kernel/mpparse-xen.c | 1011 +++++++
+ arch/x86_64/kernel/pci-swiotlb-xen.c | 55
+ arch/x86_64/kernel/process-xen.c | 829 +++++
+ arch/x86_64/kernel/setup-xen.c | 1650 +++++++++++
+ arch/x86_64/kernel/setup64-xen.c | 361 ++
+ arch/x86_64/kernel/smp-xen.c | 600 ++++
+ arch/x86_64/kernel/traps-xen.c | 1175 ++++++++
+ arch/x86_64/kernel/vsyscall-xen.c | 239 +
+ arch/x86_64/kernel/xen_entry.S | 40
+ arch/x86_64/mm/fault-xen.c | 724 +++++
+ arch/x86_64/mm/init-xen.c | 1241 ++++++++
+ arch/x86_64/mm/pageattr-xen.c | 433 +++
+ include/asm-i386/mach-xen/asm/agp.h | 37
+ include/asm-i386/mach-xen/asm/desc.h | 164 +
+ include/asm-i386/mach-xen/asm/dma-mapping.h | 157 +
+ include/asm-i386/mach-xen/asm/fixmap.h | 155 +
+ include/asm-i386/mach-xen/asm/floppy.h | 147 +
+ include/asm-i386/mach-xen/asm/highmem.h | 80
+ include/asm-i386/mach-xen/asm/hw_irq.h | 72
+ include/asm-i386/mach-xen/asm/hypercall.h | 407 ++
+ include/asm-i386/mach-xen/asm/hypervisor.h | 258 +
+ include/asm-i386/mach-xen/asm/io.h | 390 ++
+ include/asm-i386/mach-xen/asm/irqflags.h | 127
+ include/asm-i386/mach-xen/asm/maddr.h | 193 +
+ include/asm-i386/mach-xen/asm/mmu.h | 29
+ include/asm-i386/mach-xen/asm/mmu_context.h | 108
+ include/asm-i386/mach-xen/asm/page.h | 229 +
+ include/asm-i386/mach-xen/asm/param.h | 23
+ include/asm-i386/mach-xen/asm/pci.h | 146 +
+ include/asm-i386/mach-xen/asm/pgalloc.h | 59
+ include/asm-i386/mach-xen/asm/pgtable-2level-defs.h | 20
+ include/asm-i386/mach-xen/asm/pgtable-2level.h | 118
+ include/asm-i386/mach-xen/asm/pgtable-3level-defs.h | 24
+ include/asm-i386/mach-xen/asm/pgtable-3level.h | 206 +
+ include/asm-i386/mach-xen/asm/pgtable.h | 530 +++
+ include/asm-i386/mach-xen/asm/processor.h | 741 +++++
+ include/asm-i386/mach-xen/asm/ptrace.h | 90
+ include/asm-i386/mach-xen/asm/scatterlist.h | 22
+ include/asm-i386/mach-xen/asm/segment.h | 117
+ include/asm-i386/mach-xen/asm/setup.h | 81
+ include/asm-i386/mach-xen/asm/smp.h | 103
+ include/asm-i386/mach-xen/asm/spinlock.h | 202 +
+ include/asm-i386/mach-xen/asm/swiotlb.h | 43
+ include/asm-i386/mach-xen/asm/synch_bitops.h | 145 +
+ include/asm-i386/mach-xen/asm/system.h | 488 +++
+ include/asm-i386/mach-xen/asm/tlbflush.h | 101
+ include/asm-i386/mach-xen/asm/vga.h | 20
+ include/asm-i386/mach-xen/asm/xenoprof.h | 48
+ include/asm-i386/mach-xen/irq_vectors.h | 125
+ include/asm-i386/mach-xen/mach_traps.h | 33
+ include/asm-i386/mach-xen/setup_arch.h | 5
+ include/asm-x86_64/mach-xen/asm/agp.h | 35
+ include/asm-x86_64/mach-xen/asm/arch_hooks.h | 27
+ include/asm-x86_64/mach-xen/asm/bootsetup.h | 42
+ include/asm-x86_64/mach-xen/asm/desc.h | 263 +
+ include/asm-x86_64/mach-xen/asm/dma-mapping.h | 207 +
+ include/asm-x86_64/mach-xen/asm/e820.h | 66
+ include/asm-x86_64/mach-xen/asm/fixmap.h | 112
+ include/asm-x86_64/mach-xen/asm/floppy.h | 206 +
+ include/asm-x86_64/mach-xen/asm/hw_irq.h | 136
+ include/asm-x86_64/mach-xen/asm/hypercall.h | 406 ++
+ include/asm-x86_64/mach-xen/asm/hypervisor.h | 2
+ include/asm-x86_64/mach-xen/asm/io.h | 330 ++
+ include/asm-x86_64/mach-xen/asm/irq.h | 38
+ include/asm-x86_64/mach-xen/asm/irqflags.h | 139 +
+ include/asm-x86_64/mach-xen/asm/maddr.h | 161 +
+ include/asm-x86_64/mach-xen/asm/mmu.h | 38
+ include/asm-x86_64/mach-xen/asm/mmu_context.h | 136
+ include/asm-x86_64/mach-xen/asm/msr.h | 399 ++
+ include/asm-x86_64/mach-xen/asm/nmi.h | 93
+ include/asm-x86_64/mach-xen/asm/page.h | 214 +
+ include/asm-x86_64/mach-xen/asm/pci.h | 166 +
+ include/asm-x86_64/mach-xen/asm/pgalloc.h | 204 +
+ include/asm-x86_64/mach-xen/asm/pgtable.h | 573 ++++
+ include/asm-x86_64/mach-xen/asm/processor.h | 506 +++
+ include/asm-x86_64/mach-xen/asm/ptrace.h | 127
+ include/asm-x86_64/mach-xen/asm/smp.h | 150 +
+ include/asm-x86_64/mach-xen/asm/synch_bitops.h | 2
+ include/asm-x86_64/mach-xen/asm/system.h | 262 +
+ include/asm-x86_64/mach-xen/asm/timer.h | 67
+ include/asm-x86_64/mach-xen/asm/tlbflush.h | 103
+ include/asm-x86_64/mach-xen/asm/vga.h | 20
+ include/asm-x86_64/mach-xen/asm/xenoprof.h | 1
+ include/asm-x86_64/mach-xen/asm/xor.h | 328 ++
+ include/asm-x86_64/mach-xen/irq_vectors.h | 123
+ include/asm-x86_64/mach-xen/mach_time.h | 111
+ include/asm-x86_64/mach-xen/mach_timer.h | 50
+ include/asm-x86_64/mach-xen/setup_arch_post.h | 63
+ include/asm-x86_64/mach-xen/setup_arch_pre.h | 5
+ include/xen/balloon.h | 57
+ include/xen/blkif.h | 97
+ include/xen/cpu_hotplug.h | 44
+ include/xen/driver_util.h | 14
+ include/xen/evtchn.h | 126
+ include/xen/features.h | 20
+ include/xen/gnttab.h | 138
+ include/xen/hvm.h | 23
+ include/xen/hypercall.h | 24
+ include/xen/hypervisor_sysfs.h | 32
+ include/xen/pcifront.h | 76
+ include/xen/public/evtchn.h | 88
+ include/xen/public/gntdev.h | 105
+ include/xen/public/privcmd.h | 79
+ include/xen/xen_proc.h | 12
+ include/xen/xenbus.h | 302 ++
+ include/xen/xencons.h | 19
+ include/xen/xenoprof.h | 42
+ scripts/Makefile.xen | 14
+ 160 files changed, 49168 insertions(+)
+
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/boot-xen/Makefile 2007-08-27 14:01:24.000000000 -0400
+@@ -0,0 +1,21 @@
++
++OBJCOPYFLAGS := -g --strip-unneeded
++
++vmlinuz: vmlinux-stripped FORCE
++ $(call if_changed,gzip)
++
++vmlinux-stripped: vmlinux FORCE
++ $(call if_changed,objcopy)
++
++INSTALL_ROOT := $(patsubst %/boot,%,$(INSTALL_PATH))
++
++XINSTALL_NAME ?= $(KERNELRELEASE)
++install:
++ mkdir -p $(INSTALL_ROOT)/boot
++ ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_ROOT)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
++ rm -f $(INSTALL_ROOT)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
++ install -m0644 vmlinuz $(INSTALL_ROOT)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
++ install -m0644 vmlinux $(INSTALL_ROOT)/boot/vmlinux-syms-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
++ install -m0664 .config $(INSTALL_ROOT)/boot/config-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
++ install -m0664 System.map $(INSTALL_ROOT)/boot/System.map-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
++ ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_ROOT)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/acpi/boot-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,1168 @@
++/*
++ * boot.c - Architecture-Specific Low-Level ACPI Boot Support
++ *
++ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
++ * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ */
++
++#include <linux/init.h>
++#include <linux/acpi.h>
++#include <linux/efi.h>
++#include <linux/module.h>
++#include <linux/dmi.h>
++#include <linux/irq.h>
++
++#include <asm/pgtable.h>
++#include <asm/io_apic.h>
++#include <asm/apic.h>
++#include <asm/io.h>
++#include <asm/mpspec.h>
++
++#ifdef CONFIG_X86_64
++
++extern void __init clustered_apic_check(void);
++
++extern int gsi_irq_sharing(int gsi);
++#include <asm/proto.h>
++
++static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
++
++
++#else /* X86 */
++
++#ifdef CONFIG_X86_LOCAL_APIC
++#include <mach_apic.h>
++#include <mach_mpparse.h>
++#endif /* CONFIG_X86_LOCAL_APIC */
++
++static inline int gsi_irq_sharing(int gsi) { return gsi; }
++
++#endif /* X86 */
++
++#define BAD_MADT_ENTRY(entry, end) ( \
++ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
++ ((acpi_table_entry_header *)entry)->length < sizeof(*entry))
++
++#define PREFIX "ACPI: "
++
++int acpi_noirq __initdata; /* skip ACPI IRQ initialization */
++int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
++int acpi_ht __initdata = 1; /* enable HT */
++
++int acpi_lapic;
++int acpi_ioapic;
++int acpi_strict;
++EXPORT_SYMBOL(acpi_strict);
++
++acpi_interrupt_flags acpi_sci_flags __initdata;
++int acpi_sci_override_gsi __initdata;
++int acpi_skip_timer_override __initdata;
++
++#ifdef CONFIG_X86_LOCAL_APIC
++static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
++#endif
++
++#ifndef __HAVE_ARCH_CMPXCHG
++#warning ACPI uses CMPXCHG, i486 and later hardware
++#endif
++
++#define MAX_MADT_ENTRIES 256
++u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
++ {[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
++EXPORT_SYMBOL(x86_acpiid_to_apicid);
++
++/* --------------------------------------------------------------------------
++ Boot-time Configuration
++ -------------------------------------------------------------------------- */
++
++/*
++ * The default interrupt routing model is PIC (8259). This gets
++ * overriden if IOAPICs are enumerated (below).
++ */
++enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
++
++#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN)
++
++/* rely on all ACPI tables being in the direct mapping */
++char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
++{
++ if (!phys_addr || !size)
++ return NULL;
++
++ if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE)
++ return __va(phys_addr);
++
++ return NULL;
++}
++
++#else
++
++/*
++ * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
++ * to map the target physical address. The problem is that set_fixmap()
++ * provides a single page, and it is possible that the page is not
++ * sufficient.
++ * By using this area, we can map up to MAX_IO_APICS pages temporarily,
++ * i.e. until the next __va_range() call.
++ *
++ * Important Safety Note: The fixed I/O APIC page numbers are *subtracted*
++ * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and
++ * count idx down while incrementing the phys address.
++ */
++char *__acpi_map_table(unsigned long phys, unsigned long size)
++{
++ unsigned long base, offset, mapped_size;
++ int idx;
++
++#ifndef CONFIG_XEN
++ if (phys + size < 8 * 1024 * 1024)
++ return __va(phys);
++#endif
++
++ offset = phys & (PAGE_SIZE - 1);
++ mapped_size = PAGE_SIZE - offset;
++ set_fixmap(FIX_ACPI_END, phys);
++ base = fix_to_virt(FIX_ACPI_END);
++
++ /*
++ * Most cases can be covered by the below.
++ */
++ idx = FIX_ACPI_END;
++ while (mapped_size < size) {
++ if (--idx < FIX_ACPI_BEGIN)
++ return NULL; /* cannot handle this */
++ phys += PAGE_SIZE;
++ set_fixmap(idx, phys);
++ mapped_size += PAGE_SIZE;
++ }
++
++ return ((unsigned char *)base + offset);
++}
++#endif
++
++#ifdef CONFIG_PCI_MMCONFIG
++/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
++struct acpi_table_mcfg_config *pci_mmcfg_config;
++int pci_mmcfg_config_num;
++
++int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
++{
++ struct acpi_table_mcfg *mcfg;
++ unsigned long i;
++ int config_size;
++
++ if (!phys_addr || !size)
++ return -EINVAL;
++
++ mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
++ if (!mcfg) {
++ printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
++ return -ENODEV;
++ }
++
++ /* how many config structures do we have */
++ pci_mmcfg_config_num = 0;
++ i = size - sizeof(struct acpi_table_mcfg);
++ while (i >= sizeof(struct acpi_table_mcfg_config)) {
++ ++pci_mmcfg_config_num;
++ i -= sizeof(struct acpi_table_mcfg_config);
++ };
++ if (pci_mmcfg_config_num == 0) {
++ printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
++ return -ENODEV;
++ }
++
++ config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
++ pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
++ if (!pci_mmcfg_config) {
++ printk(KERN_WARNING PREFIX
++ "No memory for MCFG config tables\n");
++ return -ENOMEM;
++ }
++
++ memcpy(pci_mmcfg_config, &mcfg->config, config_size);
++ for (i = 0; i < pci_mmcfg_config_num; ++i) {
++ if (mcfg->config[i].base_reserved) {
++ printk(KERN_ERR PREFIX
++ "MMCONFIG not in low 4GB of memory\n");
++ kfree(pci_mmcfg_config);
++ pci_mmcfg_config_num = 0;
++ return -ENODEV;
++ }
++ }
++
++ return 0;
++}
++#endif /* CONFIG_PCI_MMCONFIG */
++
++#ifdef CONFIG_X86_LOCAL_APIC
++static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
++{
++ struct acpi_table_madt *madt = NULL;
++
++ if (!phys_addr || !size || !cpu_has_apic)
++ return -EINVAL;
++
++ madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
++ if (!madt) {
++ printk(KERN_WARNING PREFIX "Unable to map MADT\n");
++ return -ENODEV;
++ }
++
++ if (madt->lapic_address) {
++ acpi_lapic_addr = (u64) madt->lapic_address;
++
++ printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
++ madt->lapic_address);
++ }
++
++ acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
++
++ return 0;
++}
++
++static int __init
++acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
++{
++ struct acpi_table_lapic *processor = NULL;
++
++ processor = (struct acpi_table_lapic *)header;
++
++ if (BAD_MADT_ENTRY(processor, end))
++ return -EINVAL;
++
++ acpi_table_print_madt_entry(header);
++
++ /* Record local apic id only when enabled */
++ if (processor->flags.enabled)
++ x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
++
++ /*
++ * We need to register disabled CPU as well to permit
++ * counting disabled CPUs. This allows us to size
++ * cpus_possible_map more accurately, to permit
++ * to not preallocating memory for all NR_CPUS
++ * when we use CPU hotplug.
++ */
++ mp_register_lapic(processor->id, /* APIC ID */
++ processor->flags.enabled); /* Enabled? */
++
++ return 0;
++}
++
++static int __init
++acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
++ const unsigned long end)
++{
++ struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
++
++ lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
++
++ if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
++ return -EINVAL;
++
++ acpi_lapic_addr = lapic_addr_ovr->address;
++
++ return 0;
++}
++
++static int __init
++acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
++{
++ struct acpi_table_lapic_nmi *lapic_nmi = NULL;
++
++ lapic_nmi = (struct acpi_table_lapic_nmi *)header;
++
++ if (BAD_MADT_ENTRY(lapic_nmi, end))
++ return -EINVAL;
++
++ acpi_table_print_madt_entry(header);
++
++ if (lapic_nmi->lint != 1)
++ printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
++
++ return 0;
++}
++
++#endif /*CONFIG_X86_LOCAL_APIC */
++
++#ifdef CONFIG_X86_IO_APIC
++
++static int __init
++acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
++{
++ struct acpi_table_ioapic *ioapic = NULL;
++
++ ioapic = (struct acpi_table_ioapic *)header;
++
++ if (BAD_MADT_ENTRY(ioapic, end))
++ return -EINVAL;
++
++ acpi_table_print_madt_entry(header);
++
++ mp_register_ioapic(ioapic->id,
++ ioapic->address, ioapic->global_irq_base);
++
++ return 0;
++}
++
++/*
++ * Parse Interrupt Source Override for the ACPI SCI
++ */
++static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
++{
++ if (trigger == 0) /* compatible SCI trigger is level */
++ trigger = 3;
++
++ if (polarity == 0) /* compatible SCI polarity is low */
++ polarity = 3;
++
++ /* Command-line over-ride via acpi_sci= */
++ if (acpi_sci_flags.trigger)
++ trigger = acpi_sci_flags.trigger;
++
++ if (acpi_sci_flags.polarity)
++ polarity = acpi_sci_flags.polarity;
++
++ /*
++ * mp_config_acpi_legacy_irqs() already setup IRQs < 16
++ * If GSI is < 16, this will update its flags,
++ * else it will create a new mp_irqs[] entry.
++ */
++ mp_override_legacy_irq(gsi, polarity, trigger, gsi);
++
++ /*
++ * stash over-ride to indicate we've been here
++ * and for later update of acpi_fadt
++ */
++ acpi_sci_override_gsi = gsi;
++ return;
++}
++
++static int __init
++acpi_parse_int_src_ovr(acpi_table_entry_header * header,
++ const unsigned long end)
++{
++ struct acpi_table_int_src_ovr *intsrc = NULL;
++
++ intsrc = (struct acpi_table_int_src_ovr *)header;
++
++ if (BAD_MADT_ENTRY(intsrc, end))
++ return -EINVAL;
++
++ acpi_table_print_madt_entry(header);
++
++ if (intsrc->bus_irq == acpi_fadt.sci_int) {
++ acpi_sci_ioapic_setup(intsrc->global_irq,
++ intsrc->flags.polarity,
++ intsrc->flags.trigger);
++ return 0;
++ }
++
++ if (acpi_skip_timer_override &&
++ intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
++ printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
++ return 0;
++ }
++
++ mp_override_legacy_irq(intsrc->bus_irq,
++ intsrc->flags.polarity,
++ intsrc->flags.trigger, intsrc->global_irq);
++
++ return 0;
++}
++
++static int __init
++acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
++{
++ struct acpi_table_nmi_src *nmi_src = NULL;
++
++ nmi_src = (struct acpi_table_nmi_src *)header;
++
++ if (BAD_MADT_ENTRY(nmi_src, end))
++ return -EINVAL;
++
++ acpi_table_print_madt_entry(header);
++
++ /* TBD: Support nimsrc entries? */
++
++ return 0;
++}
++
++#endif /* CONFIG_X86_IO_APIC */
++
++/*
++ * acpi_pic_sci_set_trigger()
++ *
++ * use ELCR to set PIC-mode trigger type for SCI
++ *
++ * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
++ * it may require Edge Trigger -- use "acpi_sci=edge"
++ *
++ * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
++ * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
++ * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
++ * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
++ */
++
++void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
++{
++ unsigned int mask = 1 << irq;
++ unsigned int old, new;
++
++ /* Real old ELCR mask */
++ old = inb(0x4d0) | (inb(0x4d1) << 8);
++
++ /*
++ * If we use ACPI to set PCI irq's, then we should clear ELCR
++ * since we will set it correctly as we enable the PCI irq
++ * routing.
++ */
++ new = acpi_noirq ? old : 0;
++
++ /*
++ * Update SCI information in the ELCR, it isn't in the PCI
++ * routing tables..
++ */
++ switch (trigger) {
++ case 1: /* Edge - clear */
++ new &= ~mask;
++ break;
++ case 3: /* Level - set */
++ new |= mask;
++ break;
++ }
++
++ if (old == new)
++ return;
++
++ printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
++ outb(new, 0x4d0);
++ outb(new >> 8, 0x4d1);
++}
++
++int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
++{
++#ifdef CONFIG_X86_IO_APIC
++ if (use_pci_vector() && !platform_legacy_irq(gsi))
++ *irq = IO_APIC_VECTOR(gsi);
++ else
++#endif
++ *irq = gsi_irq_sharing(gsi);
++ return 0;
++}
++
++/*
++ * success: return IRQ number (>=0)
++ * failure: return < 0
++ */
++int acpi_register_gsi(u32 gsi, int triggering, int polarity)
++{
++ unsigned int irq;
++ unsigned int plat_gsi = gsi;
++
++#ifdef CONFIG_PCI
++ /*
++ * Make sure all (legacy) PCI IRQs are set as level-triggered.
++ */
++ if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
++ extern void eisa_set_level_irq(unsigned int irq);
++
++ if (triggering == ACPI_LEVEL_SENSITIVE)
++ eisa_set_level_irq(gsi);
++ }
++#endif
++
++#ifdef CONFIG_X86_IO_APIC
++ if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
++ plat_gsi = mp_register_gsi(gsi, triggering, polarity);
++ }
++#endif
++ acpi_gsi_to_irq(plat_gsi, &irq);
++ return irq;
++}
++
++EXPORT_SYMBOL(acpi_register_gsi);
++
++/*
++ * ACPI based hotplug support for CPU
++ */
++#ifdef CONFIG_ACPI_HOTPLUG_CPU
++int acpi_map_lsapic(acpi_handle handle, int *pcpu)
++{
++ /* TBD */
++ return -EINVAL;
++}
++
++EXPORT_SYMBOL(acpi_map_lsapic);
++
++int acpi_unmap_lsapic(int cpu)
++{
++ /* TBD */
++ return -EINVAL;
++}
++
++EXPORT_SYMBOL(acpi_unmap_lsapic);
++#endif /* CONFIG_ACPI_HOTPLUG_CPU */
++
++int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
++{
++ /* TBD */
++ return -EINVAL;
++}
++
++EXPORT_SYMBOL(acpi_register_ioapic);
++
++int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
++{
++ /* TBD */
++ return -EINVAL;
++}
++
++EXPORT_SYMBOL(acpi_unregister_ioapic);
++
++static unsigned long __init
++acpi_scan_rsdp(unsigned long start, unsigned long length)
++{
++ unsigned long offset = 0;
++ unsigned long sig_len = sizeof("RSD PTR ") - 1;
++ unsigned long vstart = (unsigned long)isa_bus_to_virt(start);
++
++ /*
++ * Scan all 16-byte boundaries of the physical memory region for the
++ * RSDP signature.
++ */
++ for (offset = 0; offset < length; offset += 16) {
++ if (strncmp((char *)(vstart + offset), "RSD PTR ", sig_len))
++ continue;
++ return (start + offset);
++ }
++
++ return 0;
++}
++
++static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
++{
++ struct acpi_table_sbf *sb;
++
++ if (!phys_addr || !size)
++ return -EINVAL;
++
++ sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
++ if (!sb) {
++ printk(KERN_WARNING PREFIX "Unable to map SBF\n");
++ return -ENODEV;
++ }
++
++ sbf_port = sb->sbf_cmos; /* Save CMOS port */
++
++ return 0;
++}
++
++#ifdef CONFIG_HPET_TIMER
++
++static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
++{
++ struct acpi_table_hpet *hpet_tbl;
++
++ if (!phys || !size)
++ return -EINVAL;
++
++ hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
++ if (!hpet_tbl) {
++ printk(KERN_WARNING PREFIX "Unable to map HPET\n");
++ return -ENODEV;
++ }
++
++ if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
++ printk(KERN_WARNING PREFIX "HPET timers must be located in "
++ "memory.\n");
++ return -1;
++ }
++#ifdef CONFIG_X86_64
++ vxtime.hpet_address = hpet_tbl->addr.addrl |
++ ((long)hpet_tbl->addr.addrh << 32);
++
++ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
++ hpet_tbl->id, vxtime.hpet_address);
++#else /* X86 */
++ {
++ extern unsigned long hpet_address;
++
++ hpet_address = hpet_tbl->addr.addrl;
++ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
++ hpet_tbl->id, hpet_address);
++ }
++#endif /* X86 */
++
++ return 0;
++}
++#else
++#define acpi_parse_hpet NULL
++#endif
++
++#ifdef CONFIG_X86_PM_TIMER
++extern u32 pmtmr_ioport;
++#endif
++
++static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
++{
++ struct fadt_descriptor *fadt = NULL;
++
++ fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
++ if (!fadt) {
++ printk(KERN_WARNING PREFIX "Unable to map FADT\n");
++ return 0;
++ }
++ /* initialize sci_int early for INT_SRC_OVR MADT parsing */
++ acpi_fadt.sci_int = fadt->sci_int;
++
++ /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
++ acpi_fadt.revision = fadt->revision;
++ acpi_fadt.force_apic_physical_destination_mode =
++ fadt->force_apic_physical_destination_mode;
++
++#if defined(CONFIG_X86_PM_TIMER) && !defined(CONFIG_XEN)
++ /* detect the location of the ACPI PM Timer */
++ if (fadt->revision >= FADT2_REVISION_ID) {
++ /* FADT rev. 2 */
++ if (fadt->xpm_tmr_blk.address_space_id !=
++ ACPI_ADR_SPACE_SYSTEM_IO)
++ return 0;
++
++ pmtmr_ioport = fadt->xpm_tmr_blk.address;
++ /*
++ * "X" fields are optional extensions to the original V1.0
++ * fields, so we must selectively expand V1.0 fields if the
++ * corresponding X field is zero.
++ */
++ if (!pmtmr_ioport)
++ pmtmr_ioport = fadt->V1_pm_tmr_blk;
++ } else {
++ /* FADT rev. 1 */
++ pmtmr_ioport = fadt->V1_pm_tmr_blk;
++ }
++ if (pmtmr_ioport)
++ printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
++ pmtmr_ioport);
++#endif
++ return 0;
++}
++
++unsigned long __init acpi_find_rsdp(void)
++{
++ unsigned long rsdp_phys = 0;
++
++ if (efi_enabled) {
++ if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
++ return efi.acpi20;
++ else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
++ return efi.acpi;
++ }
++ /*
++ * Scan memory looking for the RSDP signature. First search EBDA (low
++ * memory) paragraphs and then search upper memory (E0000-FFFFF).
++ */
++ rsdp_phys = acpi_scan_rsdp(0, 0x400);
++ if (!rsdp_phys)
++ rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);
++
++ return rsdp_phys;
++}
++
++#ifdef CONFIG_X86_LOCAL_APIC
++/*
++ * Parse LAPIC entries in MADT
++ * returns 0 on success, < 0 on error
++ */
++static int __init acpi_parse_madt_lapic_entries(void)
++{
++ int count;
++
++ if (!cpu_has_apic)
++ return -ENODEV;
++
++ /*
++ * Note that the LAPIC address is obtained from the MADT (32-bit value)
++ * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
++ */
++
++ count =
++ acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
++ acpi_parse_lapic_addr_ovr, 0);
++ if (count < 0) {
++ printk(KERN_ERR PREFIX
++ "Error parsing LAPIC address override entry\n");
++ return count;
++ }
++
++ mp_register_lapic_address(acpi_lapic_addr);
++
++ count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
++ MAX_APICS);
++ if (!count) {
++ printk(KERN_ERR PREFIX "No LAPIC entries present\n");
++ /* TBD: Cleanup to allow fallback to MPS */
++ return -ENODEV;
++ } else if (count < 0) {
++ printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
++ /* TBD: Cleanup to allow fallback to MPS */
++ return count;
++ }
++
++ count =
++ acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
++ if (count < 0) {
++ printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
++ /* TBD: Cleanup to allow fallback to MPS */
++ return count;
++ }
++ return 0;
++}
++#endif /* CONFIG_X86_LOCAL_APIC */
++
++#ifdef CONFIG_X86_IO_APIC
++/*
++ * Parse IOAPIC related entries in MADT
++ * returns 0 on success, < 0 on error
++ */
++static int __init acpi_parse_madt_ioapic_entries(void)
++{
++ int count;
++
++ /*
++ * ACPI interpreter is required to complete interrupt setup,
++ * so if it is off, don't enumerate the io-apics with ACPI.
++ * If MPS is present, it will handle them,
++ * otherwise the system will stay in PIC mode
++ */
++ if (acpi_disabled || acpi_noirq) {
++ return -ENODEV;
++ }
++
++ if (!cpu_has_apic)
++ return -ENODEV;
++
++ /*
++ * if "noapic" boot option, don't look for IO-APICs
++ */
++ if (skip_ioapic_setup) {
++ printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
++ "due to 'noapic' option.\n");
++ return -ENODEV;
++ }
++
++ count =
++ acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic,
++ MAX_IO_APICS);
++ if (!count) {
++ printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
++ return -ENODEV;
++ } else if (count < 0) {
++ printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
++ return count;
++ }
++
++ count =
++ acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr,
++ NR_IRQ_VECTORS);
++ if (count < 0) {
++ printk(KERN_ERR PREFIX
++ "Error parsing interrupt source overrides entry\n");
++ /* TBD: Cleanup to allow fallback to MPS */
++ return count;
++ }
++
++ /*
++ * If BIOS did not supply an INT_SRC_OVR for the SCI
++ * pretend we got one so we can set the SCI flags.
++ */
++ if (!acpi_sci_override_gsi)
++ acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
++
++ /* Fill in identity legacy mapings where no override */
++ mp_config_acpi_legacy_irqs();
++
++ count =
++ acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src,
++ NR_IRQ_VECTORS);
++ if (count < 0) {
++ printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
++ /* TBD: Cleanup to allow fallback to MPS */
++ return count;
++ }
++
++ return 0;
++}
++#else
++static inline int acpi_parse_madt_ioapic_entries(void)
++{
++ return -1;
++}
++#endif /* !CONFIG_X86_IO_APIC */
++
++static void __init acpi_process_madt(void)
++{
++#ifdef CONFIG_X86_LOCAL_APIC
++ int count, error;
++
++ count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
++ if (count >= 1) {
++
++ /*
++ * Parse MADT LAPIC entries
++ */
++ error = acpi_parse_madt_lapic_entries();
++ if (!error) {
++ acpi_lapic = 1;
++
++#ifdef CONFIG_X86_GENERICARCH
++ generic_bigsmp_probe();
++#endif
++ /*
++ * Parse MADT IO-APIC entries
++ */
++ error = acpi_parse_madt_ioapic_entries();
++ if (!error) {
++ acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
++ acpi_irq_balance_set(NULL);
++ acpi_ioapic = 1;
++
++ smp_found_config = 1;
++ clustered_apic_check();
++ }
++ }
++ if (error == -EINVAL) {
++ /*
++ * Dell Precision Workstation 410, 610 come here.
++ */
++ printk(KERN_ERR PREFIX
++ "Invalid BIOS MADT, disabling ACPI\n");
++ disable_acpi();
++ }
++ }
++#endif
++ return;
++}
++
++extern int acpi_force;
++
++#ifdef __i386__
++
++static int __init disable_acpi_irq(struct dmi_system_id *d)
++{
++ if (!acpi_force) {
++ printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
++ d->ident);
++ acpi_noirq_set();
++ }
++ return 0;
++}
++
++static int __init disable_acpi_pci(struct dmi_system_id *d)
++{
++ if (!acpi_force) {
++ printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
++ d->ident);
++ acpi_disable_pci();
++ }
++ return 0;
++}
++
++static int __init dmi_disable_acpi(struct dmi_system_id *d)
++{
++ if (!acpi_force) {
++ printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
++ disable_acpi();
++ } else {
++ printk(KERN_NOTICE
++ "Warning: DMI blacklist says broken, but acpi forced\n");
++ }
++ return 0;
++}
++
++/*
++ * Limit ACPI to CPU enumeration for HT
++ */
++static int __init force_acpi_ht(struct dmi_system_id *d)
++{
++ if (!acpi_force) {
++ printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
++ d->ident);
++ disable_acpi();
++ acpi_ht = 1;
++ } else {
++ printk(KERN_NOTICE
++ "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
++ }
++ return 0;
++}
++
++/*
++ * If your system is blacklisted here, but you find that acpi=force
++ * works for you, please contact acpi-devel@sourceforge.net
++ */
++static struct dmi_system_id __initdata acpi_dmi_table[] = {
++ /*
++ * Boxes that need ACPI disabled
++ */
++ {
++ .callback = dmi_disable_acpi,
++ .ident = "IBM Thinkpad",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
++ },
++ },
++
++ /*
++ * Boxes that need acpi=ht
++ */
++ {
++ .callback = force_acpi_ht,
++ .ident = "FSC Primergy T850",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "DELL GX240",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
++ DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "HP VISUALIZE NT Workstation",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "Compaq Workstation W8000",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "ASUS P4B266",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "ASUS P2B-DS",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "ASUS CUR-DLS",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "ABIT i440BX-W83977",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
++ DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "IBM Bladecenter",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "IBM eServer xSeries 360",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "IBM eserver xSeries 330",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "IBM eserver xSeries 440",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
++ },
++ },
++
++ /*
++ * Boxes that need ACPI PCI IRQ routing disabled
++ */
++ {
++ .callback = disable_acpi_irq,
++ .ident = "ASUS A7V",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
++ DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
++ /* newer BIOS, Revision 1011, does work */
++ DMI_MATCH(DMI_BIOS_VERSION,
++ "ASUS A7V ACPI BIOS Revision 1007"),
++ },
++ },
++
++ /*
++ * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
++ */
++ { /* _BBN 0 bug */
++ .callback = disable_acpi_pci,
++ .ident = "ASUS PR-DLS",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
++ DMI_MATCH(DMI_BIOS_VERSION,
++ "ASUS PR-DLS ACPI BIOS Revision 1010"),
++ DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
++ },
++ },
++ {
++ .callback = disable_acpi_pci,
++ .ident = "Acer TravelMate 36x Laptop",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
++ },
++ },
++ {}
++};
++
++#endif /* __i386__ */
++
++/*
++ * acpi_boot_table_init() and acpi_boot_init()
++ * called from setup_arch(), always.
++ * 1. checksums all tables
++ * 2. enumerates lapics
++ * 3. enumerates io-apics
++ *
++ * acpi_table_init() is separate to allow reading SRAT without
++ * other side effects.
++ *
++ * side effects of acpi_boot_init:
++ * acpi_lapic = 1 if LAPIC found
++ * acpi_ioapic = 1 if IOAPIC found
++ * if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
++ * if acpi_blacklisted() acpi_disabled = 1;
++ * acpi_irq_model=...
++ * ...
++ *
++ * return value: (currently ignored)
++ * 0: success
++ * !0: failure
++ */
++
++int __init acpi_boot_table_init(void)
++{
++ int error;
++
++#ifdef __i386__
++ dmi_check_system(acpi_dmi_table);
++#endif
++
++ /*
++ * If acpi_disabled, bail out
++ * One exception: acpi=ht continues far enough to enumerate LAPICs
++ */
++ if (acpi_disabled && !acpi_ht)
++ return 1;
++
++ /*
++ * Initialize the ACPI boot-time table parser.
++ */
++ error = acpi_table_init();
++ if (error) {
++ disable_acpi();
++ return error;
++ }
++
++ acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
++
++ /*
++ * blacklist may disable ACPI entirely
++ */
++ error = acpi_blacklisted();
++ if (error) {
++ if (acpi_force) {
++ printk(KERN_WARNING PREFIX "acpi=force override\n");
++ } else {
++ printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
++ disable_acpi();
++ return error;
++ }
++ }
++
++ return 0;
++}
++
++int __init acpi_boot_init(void)
++{
++ /*
++ * If acpi_disabled, bail out
++ * One exception: acpi=ht continues far enough to enumerate LAPICs
++ */
++ if (acpi_disabled && !acpi_ht)
++ return 1;
++
++ acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
++
++ /*
++ * set sci_int and PM timer address
++ */
++ acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
++
++ /*
++ * Process the Multiple APIC Description Table (MADT), if present
++ */
++ acpi_process_madt();
++
++ acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
++
++ return 0;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/apic-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,155 @@
++/*
++ * Local APIC handling, local APIC timers
++ *
++ * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
++ * thanks to Eric Gilmore
++ * and Rolf G. Tews
++ * for testing these extensively.
++ * Maciej W. Rozycki : Various updates and fixes.
++ * Mikael Pettersson : Power Management for UP-APIC.
++ * Pavel Machek and
++ * Mikael Pettersson : PM converted to driver model.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/mc146818rtc.h>
++#include <linux/kernel_stat.h>
++#include <linux/sysdev.h>
++#include <linux/cpu.h>
++#include <linux/module.h>
++
++#include <asm/atomic.h>
++#include <asm/smp.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/hpet.h>
++#include <asm/i8253.h>
++#include <asm/nmi.h>
++
++#include <mach_apic.h>
++#include <mach_apicdef.h>
++#include <mach_ipi.h>
++
++#include "io_ports.h"
++
++#ifndef CONFIG_XEN
++/*
++ * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
++ * IPIs in place of local APIC timers
++ */
++static cpumask_t timer_bcast_ipi;
++#endif
++
++/*
++ * Knob to control our willingness to enable the local APIC.
++ */
++int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */
++
++/*
++ * Debug level
++ */
++int apic_verbosity;
++
++#ifndef CONFIG_XEN
++static int modern_apic(void)
++{
++ unsigned int lvr, version;
++ /* AMD systems use old APIC versions, so check the CPU */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++ boot_cpu_data.x86 >= 0xf)
++ return 1;
++ lvr = apic_read(APIC_LVR);
++ version = GET_APIC_VERSION(lvr);
++ return version >= 0x14;
++}
++#endif /* !CONFIG_XEN */
++
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * each architecture has to answer this themselves.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++ printk("unexpected IRQ trap at vector %02x\n", irq);
++ /*
++ * Currently unexpected vectors happen only on SMP and APIC.
++ * We _must_ ack these because every local APIC has only N
++ * irq slots per priority level, and a 'hanging, unacked' IRQ
++ * holds up an irq slot - in excessive cases (when multiple
++ * unexpected vectors occur) that might lock up the APIC
++ * completely.
++ * But only ack when the APIC is enabled -AK
++ */
++ if (cpu_has_apic)
++ ack_APIC_irq();
++}
++
++int get_physical_broadcast(void)
++{
++ return 0xff;
++}
++
++#ifndef CONFIG_XEN
++#ifndef CONFIG_SMP
++static void up_apic_timer_interrupt_call(struct pt_regs *regs)
++{
++ int cpu = smp_processor_id();
++
++ /*
++ * the NMI deadlock-detector uses this.
++ */
++ per_cpu(irq_stat, cpu).apic_timer_irqs++;
++
++ smp_local_timer_interrupt(regs);
++}
++#endif
++
++void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
++{
++ cpumask_t mask;
++
++ cpus_and(mask, cpu_online_map, timer_bcast_ipi);
++ if (!cpus_empty(mask)) {
++#ifdef CONFIG_SMP
++ send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
++#else
++ /*
++ * We can directly call the apic timer interrupt handler
++ * in UP case. Minus all irq related functions
++ */
++ up_apic_timer_interrupt_call(regs);
++#endif
++ }
++}
++#endif
++
++int setup_profiling_timer(unsigned int multiplier)
++{
++ return -EINVAL;
++}
++
++/*
++ * This initializes the IO-APIC and APIC hardware if this is
++ * a UP kernel.
++ */
++int __init APIC_init_uniprocessor (void)
++{
++#ifdef CONFIG_X86_IO_APIC
++ if (smp_found_config)
++ if (!skip_ioapic_setup && nr_ioapics)
++ setup_IO_APIC();
++#endif
++
++ return 0;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/cpu/common-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,743 @@
++#include <linux/init.h>
++#include <linux/string.h>
++#include <linux/delay.h>
++#include <linux/smp.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++#include <linux/bootmem.h>
++#include <asm/semaphore.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/msr.h>
++#include <asm/io.h>
++#include <asm/mmu_context.h>
++#include <asm/mtrr.h>
++#include <asm/mce.h>
++#ifdef CONFIG_X86_LOCAL_APIC
++#include <asm/mpspec.h>
++#include <asm/apic.h>
++#include <mach_apic.h>
++#else
++#ifdef CONFIG_XEN
++#define phys_pkg_id(a,b) a
++#endif
++#endif
++#include <asm/hypervisor.h>
++
++#include "cpu.h"
++
++DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
++EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
++
++#ifndef CONFIG_XEN
++DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
++EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
++#endif
++
++static int cachesize_override __cpuinitdata = -1;
++static int disable_x86_fxsr __cpuinitdata;
++static int disable_x86_serial_nr __cpuinitdata = 1;
++static int disable_x86_sep __cpuinitdata;
++
++struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
++
++extern int disable_pse;
++
++static void default_init(struct cpuinfo_x86 * c)
++{
++ /* Not much we can do here... */
++ /* Check if at least it has cpuid */
++ if (c->cpuid_level == -1) {
++ /* No cpuid. It must be an ancient CPU */
++ if (c->x86 == 4)
++ strcpy(c->x86_model_id, "486");
++ else if (c->x86 == 3)
++ strcpy(c->x86_model_id, "386");
++ }
++}
++
++static struct cpu_dev default_cpu = {
++ .c_init = default_init,
++ .c_vendor = "Unknown",
++};
++static struct cpu_dev * this_cpu = &default_cpu;
++
++static int __init cachesize_setup(char *str)
++{
++ get_option (&str, &cachesize_override);
++ return 1;
++}
++__setup("cachesize=", cachesize_setup);
++
++int __cpuinit get_model_name(struct cpuinfo_x86 *c)
++{
++ unsigned int *v;
++ char *p, *q;
++
++ if (cpuid_eax(0x80000000) < 0x80000004)
++ return 0;
++
++ v = (unsigned int *) c->x86_model_id;
++ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
++ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
++ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
++ c->x86_model_id[48] = 0;
++
++ /* Intel chips right-justify this string for some dumb reason;
++ undo that brain damage */
++ p = q = &c->x86_model_id[0];
++ while ( *p == ' ' )
++ p++;
++ if ( p != q ) {
++ while ( *p )
++ *q++ = *p++;
++ while ( q <= &c->x86_model_id[48] )
++ *q++ = '\0'; /* Zero-pad the rest */
++ }
++
++ return 1;
++}
++
++
++void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
++{
++ unsigned int n, dummy, ecx, edx, l2size;
++
++ n = cpuid_eax(0x80000000);
++
++ if (n >= 0x80000005) {
++ cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
++ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
++ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
++ c->x86_cache_size=(ecx>>24)+(edx>>24);
++ }
++
++ if (n < 0x80000006) /* Some chips just has a large L1. */
++ return;
++
++ ecx = cpuid_ecx(0x80000006);
++ l2size = ecx >> 16;
++
++ /* do processor-specific cache resizing */
++ if (this_cpu->c_size_cache)
++ l2size = this_cpu->c_size_cache(c,l2size);
++
++ /* Allow user to override all this if necessary. */
++ if (cachesize_override != -1)
++ l2size = cachesize_override;
++
++ if ( l2size == 0 )
++ return; /* Again, no L2 cache is possible */
++
++ c->x86_cache_size = l2size;
++
++ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
++ l2size, ecx & 0xFF);
++}
++
++/* Naming convention should be: <Name> [(<Codename>)] */
++/* This table only is used unless init_<vendor>() below doesn't set it; */
++/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
++
++/* Look up CPU names by table lookup. */
++static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
++{
++ struct cpu_model_info *info;
++
++ if ( c->x86_model >= 16 )
++ return NULL; /* Range check */
++
++ if (!this_cpu)
++ return NULL;
++
++ info = this_cpu->c_models;
++
++ while (info && info->family) {
++ if (info->family == c->x86)
++ return info->model_names[c->x86_model];
++ info++;
++ }
++ return NULL; /* Not found */
++}
++
++
++static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
++{
++ char *v = c->x86_vendor_id;
++ int i;
++ static int printed;
++
++ for (i = 0; i < X86_VENDOR_NUM; i++) {
++ if (cpu_devs[i]) {
++ if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
++ (cpu_devs[i]->c_ident[1] &&
++ !strcmp(v,cpu_devs[i]->c_ident[1]))) {
++ c->x86_vendor = i;
++ if (!early)
++ this_cpu = cpu_devs[i];
++ return;
++ }
++ }
++ }
++ if (!printed) {
++ printed++;
++ printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
++ printk(KERN_ERR "CPU: Your system may be unstable.\n");
++ }
++ c->x86_vendor = X86_VENDOR_UNKNOWN;
++ this_cpu = &default_cpu;
++}
++
++
++static int __init x86_fxsr_setup(char * s)
++{
++ disable_x86_fxsr = 1;
++ return 1;
++}
++__setup("nofxsr", x86_fxsr_setup);
++
++
++static int __init x86_sep_setup(char * s)
++{
++ disable_x86_sep = 1;
++ return 1;
++}
++__setup("nosep", x86_sep_setup);
++
++
++/* Standard macro to see if a specific flag is changeable */
++static inline int flag_is_changeable_p(u32 flag)
++{
++ u32 f1, f2;
++
++ asm("pushfl\n\t"
++ "pushfl\n\t"
++ "popl %0\n\t"
++ "movl %0,%1\n\t"
++ "xorl %2,%0\n\t"
++ "pushl %0\n\t"
++ "popfl\n\t"
++ "pushfl\n\t"
++ "popl %0\n\t"
++ "popfl\n\t"
++ : "=&r" (f1), "=&r" (f2)
++ : "ir" (flag));
++
++ return ((f1^f2) & flag) != 0;
++}
++
++
++/* Probe for the CPUID instruction */
++static int __cpuinit have_cpuid_p(void)
++{
++ return flag_is_changeable_p(X86_EFLAGS_ID);
++}
++
++/* Do minimum CPU detection early.
++ Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
++ The others are not touched to avoid unwanted side effects.
++
++ WARNING: this function is only called on the BP. Don't add code here
++ that is supposed to run on all CPUs. */
++static void __init early_cpu_detect(void)
++{
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++
++ c->x86_cache_alignment = 32;
++
++ if (!have_cpuid_p())
++ return;
++
++ /* Get vendor name */
++ cpuid(0x00000000, &c->cpuid_level,
++ (int *)&c->x86_vendor_id[0],
++ (int *)&c->x86_vendor_id[8],
++ (int *)&c->x86_vendor_id[4]);
++
++ get_cpu_vendor(c, 1);
++
++ c->x86 = 4;
++ if (c->cpuid_level >= 0x00000001) {
++ u32 junk, tfms, cap0, misc;
++ cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
++ c->x86 = (tfms >> 8) & 15;
++ c->x86_model = (tfms >> 4) & 15;
++ if (c->x86 == 0xf)
++ c->x86 += (tfms >> 20) & 0xff;
++ if (c->x86 >= 0x6)
++ c->x86_model += ((tfms >> 16) & 0xF) << 4;
++ c->x86_mask = tfms & 15;
++ if (cap0 & (1<<19))
++ c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
++ }
++}
++
++void __cpuinit generic_identify(struct cpuinfo_x86 * c)
++{
++ u32 tfms, xlvl;
++ int ebx;
++
++ if (have_cpuid_p()) {
++ /* Get vendor name */
++ cpuid(0x00000000, &c->cpuid_level,
++ (int *)&c->x86_vendor_id[0],
++ (int *)&c->x86_vendor_id[8],
++ (int *)&c->x86_vendor_id[4]);
++
++ get_cpu_vendor(c, 0);
++ /* Initialize the standard set of capabilities */
++ /* Note that the vendor-specific code below might override */
++
++ /* Intel-defined flags: level 0x00000001 */
++ if ( c->cpuid_level >= 0x00000001 ) {
++ u32 capability, excap;
++ cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
++ c->x86_capability[0] = capability;
++ c->x86_capability[4] = excap;
++ c->x86 = (tfms >> 8) & 15;
++ c->x86_model = (tfms >> 4) & 15;
++ if (c->x86 == 0xf)
++ c->x86 += (tfms >> 20) & 0xff;
++ if (c->x86 >= 0x6)
++ c->x86_model += ((tfms >> 16) & 0xF) << 4;
++ c->x86_mask = tfms & 15;
++#ifdef CONFIG_X86_HT
++ c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
++#else
++ c->apicid = (ebx >> 24) & 0xFF;
++#endif
++ } else {
++ /* Have CPUID level 0 only - unheard of */
++ c->x86 = 4;
++ }
++
++ /* AMD-defined flags: level 0x80000001 */
++ xlvl = cpuid_eax(0x80000000);
++ if ( (xlvl & 0xffff0000) == 0x80000000 ) {
++ if ( xlvl >= 0x80000001 ) {
++ c->x86_capability[1] = cpuid_edx(0x80000001);
++ c->x86_capability[6] = cpuid_ecx(0x80000001);
++ }
++ if ( xlvl >= 0x80000004 )
++ get_model_name(c); /* Default name */
++ }
++ }
++
++ early_intel_workaround(c);
++
++#ifdef CONFIG_X86_HT
++ c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
++#endif
++}
++
++static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
++{
++ if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
++ /* Disable processor serial number */
++ unsigned long lo,hi;
++ rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
++ lo |= 0x200000;
++ wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
++ printk(KERN_NOTICE "CPU serial number disabled.\n");
++ clear_bit(X86_FEATURE_PN, c->x86_capability);
++
++ /* Disabling the serial number may affect the cpuid level */
++ c->cpuid_level = cpuid_eax(0);
++ }
++}
++
++static int __init x86_serial_nr_setup(char *s)
++{
++ disable_x86_serial_nr = 0;
++ return 1;
++}
++__setup("serialnumber", x86_serial_nr_setup);
++
++
++
++/*
++ * This does the hard work of actually picking apart the CPU stuff...
++ */
++void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
++{
++ int i;
++
++ c->loops_per_jiffy = loops_per_jiffy;
++ c->x86_cache_size = -1;
++ c->x86_vendor = X86_VENDOR_UNKNOWN;
++ c->cpuid_level = -1; /* CPUID not detected */
++ c->x86_model = c->x86_mask = 0; /* So far unknown... */
++ c->x86_vendor_id[0] = '\0'; /* Unset */
++ c->x86_model_id[0] = '\0'; /* Unset */
++ c->x86_max_cores = 1;
++ memset(&c->x86_capability, 0, sizeof c->x86_capability);
++
++ if (!have_cpuid_p()) {
++ /* First of all, decide if this is a 486 or higher */
++ /* It's a 486 if we can modify the AC flag */
++ if ( flag_is_changeable_p(X86_EFLAGS_AC) )
++ c->x86 = 4;
++ else
++ c->x86 = 3;
++ }
++
++ generic_identify(c);
++
++ printk(KERN_DEBUG "CPU: After generic identify, caps:");
++ for (i = 0; i < NCAPINTS; i++)
++ printk(" %08lx", c->x86_capability[i]);
++ printk("\n");
++
++ if (this_cpu->c_identify) {
++ this_cpu->c_identify(c);
++
++ printk(KERN_DEBUG "CPU: After vendor identify, caps:");
++ for (i = 0; i < NCAPINTS; i++)
++ printk(" %08lx", c->x86_capability[i]);
++ printk("\n");
++ }
++
++ /*
++ * Vendor-specific initialization. In this section we
++ * canonicalize the feature flags, meaning if there are
++ * features a certain CPU supports which CPUID doesn't
++ * tell us, CPUID claiming incorrect flags, or other bugs,
++ * we handle them here.
++ *
++ * At the end of this section, c->x86_capability better
++ * indicate the features this CPU genuinely supports!
++ */
++ if (this_cpu->c_init)
++ this_cpu->c_init(c);
++
++ /* Disable the PN if appropriate */
++ squash_the_stupid_serial_number(c);
++
++ /*
++ * The vendor-specific functions might have changed features. Now
++ * we do "generic changes."
++ */
++
++ /* TSC disabled? */
++ if ( tsc_disable )
++ clear_bit(X86_FEATURE_TSC, c->x86_capability);
++
++ /* FXSR disabled? */
++ if (disable_x86_fxsr) {
++ clear_bit(X86_FEATURE_FXSR, c->x86_capability);
++ clear_bit(X86_FEATURE_XMM, c->x86_capability);
++ }
++
++ /* SEP disabled? */
++ if (disable_x86_sep)
++ clear_bit(X86_FEATURE_SEP, c->x86_capability);
++
++ if (disable_pse)
++ clear_bit(X86_FEATURE_PSE, c->x86_capability);
++
++ /* If the model name is still unset, do table lookup. */
++ if ( !c->x86_model_id[0] ) {
++ char *p;
++ p = table_lookup_model(c);
++ if ( p )
++ strcpy(c->x86_model_id, p);
++ else
++ /* Last resort... */
++ sprintf(c->x86_model_id, "%02x/%02x",
++ c->x86, c->x86_model);
++ }
++
++ /* Now the feature flags better reflect actual CPU features! */
++
++ printk(KERN_DEBUG "CPU: After all inits, caps:");
++ for (i = 0; i < NCAPINTS; i++)
++ printk(" %08lx", c->x86_capability[i]);
++ printk("\n");
++
++ /*
++ * On SMP, boot_cpu_data holds the common feature set between
++ * all CPUs; so make sure that we indicate which features are
++ * common between the CPUs. The first time this routine gets
++ * executed, c == &boot_cpu_data.
++ */
++ if ( c != &boot_cpu_data ) {
++ /* AND the already accumulated flags with these */
++ for ( i = 0 ; i < NCAPINTS ; i++ )
++ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
++ }
++
++ /* Init Machine Check Exception if available. */
++ mcheck_init(c);
++
++ if (c == &boot_cpu_data)
++ sysenter_setup();
++ enable_sep_cpu();
++
++ if (c == &boot_cpu_data)
++ mtrr_bp_init();
++ else
++ mtrr_ap_init();
++}
++
++#ifdef CONFIG_X86_HT
++void __cpuinit detect_ht(struct cpuinfo_x86 *c)
++{
++ u32 eax, ebx, ecx, edx;
++ int index_msb, core_bits;
++
++ cpuid(1, &eax, &ebx, &ecx, &edx);
++
++ if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
++ return;
++
++ smp_num_siblings = (ebx & 0xff0000) >> 16;
++
++ if (smp_num_siblings == 1) {
++ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
++ } else if (smp_num_siblings > 1 ) {
++
++ if (smp_num_siblings > NR_CPUS) {
++ printk(KERN_WARNING "CPU: Unsupported number of the "
++ "siblings %d", smp_num_siblings);
++ smp_num_siblings = 1;
++ return;
++ }
++
++ index_msb = get_count_order(smp_num_siblings);
++ c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
++
++ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
++ c->phys_proc_id);
++
++ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
++
++ index_msb = get_count_order(smp_num_siblings) ;
++
++ core_bits = get_count_order(c->x86_max_cores);
++
++ c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
++ ((1 << core_bits) - 1);
++
++ if (c->x86_max_cores > 1)
++ printk(KERN_INFO "CPU: Processor Core ID: %d\n",
++ c->cpu_core_id);
++ }
++}
++#endif
++
++void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
++{
++ char *vendor = NULL;
++
++ if (c->x86_vendor < X86_VENDOR_NUM)
++ vendor = this_cpu->c_vendor;
++ else if (c->cpuid_level >= 0)
++ vendor = c->x86_vendor_id;
++
++ if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
++ printk("%s ", vendor);
++
++ if (!c->x86_model_id[0])
++ printk("%d86", c->x86);
++ else
++ printk("%s", c->x86_model_id);
++
++ if (c->x86_mask || c->cpuid_level >= 0)
++ printk(" stepping %02x\n", c->x86_mask);
++ else
++ printk("\n");
++}
++
++cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
++
++/* This is hacky. :)
++ * We're emulating future behavior.
++ * In the future, the cpu-specific init functions will be called implicitly
++ * via the magic of initcalls.
++ * They will insert themselves into the cpu_devs structure.
++ * Then, when cpu_init() is called, we can just iterate over that array.
++ */
++
++extern int intel_cpu_init(void);
++extern int cyrix_init_cpu(void);
++extern int nsc_init_cpu(void);
++extern int amd_init_cpu(void);
++extern int centaur_init_cpu(void);
++extern int transmeta_init_cpu(void);
++extern int rise_init_cpu(void);
++extern int nexgen_init_cpu(void);
++extern int umc_init_cpu(void);
++
++void __init early_cpu_init(void)
++{
++ intel_cpu_init();
++ cyrix_init_cpu();
++ nsc_init_cpu();
++ amd_init_cpu();
++ centaur_init_cpu();
++ transmeta_init_cpu();
++ rise_init_cpu();
++ nexgen_init_cpu();
++ umc_init_cpu();
++ early_cpu_detect();
++
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ /* pse is not compatible with on-the-fly unmapping,
++ * disable it even if the cpus claim to support it.
++ */
++ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
++ disable_pse = 1;
++#endif
++}
++
++void __cpuinit cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
++{
++ unsigned long frames[16];
++ unsigned long va;
++ int f;
++
++ for (va = gdt_descr->address, f = 0;
++ va < gdt_descr->address + gdt_descr->size;
++ va += PAGE_SIZE, f++) {
++ frames[f] = virt_to_mfn(va);
++ make_lowmem_page_readonly(
++ (void *)va, XENFEAT_writable_descriptor_tables);
++ }
++ if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
++ BUG();
++}
++
++/*
++ * cpu_init() initializes state that is per-CPU. Some data is already
++ * initialized (naturally) in the bootstrap process, such as the GDT
++ * and IDT. We reload them nevertheless, this function acts as a
++ * 'CPU state barrier', nothing should get across.
++ */
++void __cpuinit cpu_init(void)
++{
++ int cpu = smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct * t = &per_cpu(init_tss, cpu);
++#endif
++ struct thread_struct *thread = &current->thread;
++ struct desc_struct *gdt;
++ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++
++ if (cpu_test_and_set(cpu, cpu_initialized)) {
++ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
++ for (;;) local_irq_enable();
++ }
++ printk(KERN_INFO "Initializing CPU#%d\n", cpu);
++
++ if (cpu_has_vme || cpu_has_de)
++ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
++ if (tsc_disable && cpu_has_tsc) {
++ printk(KERN_NOTICE "Disabling TSC...\n");
++ /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
++ clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
++ set_in_cr4(X86_CR4_TSD);
++ }
++
++#ifndef CONFIG_XEN
++ /* The CPU hotplug case */
++ if (cpu_gdt_descr->address) {
++ gdt = (struct desc_struct *)cpu_gdt_descr->address;
++ memset(gdt, 0, PAGE_SIZE);
++ goto old_gdt;
++ }
++ /*
++ * This is a horrible hack to allocate the GDT. The problem
++ * is that cpu_init() is called really early for the boot CPU
++ * (and hence needs bootmem) but much later for the secondary
++ * CPUs, when bootmem will have gone away
++ */
++ if (NODE_DATA(0)->bdata->node_bootmem_map) {
++ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
++ /* alloc_bootmem_pages panics on failure, so no check */
++ memset(gdt, 0, PAGE_SIZE);
++ } else {
++ gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
++ if (unlikely(!gdt)) {
++ printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
++ for (;;)
++ local_irq_enable();
++ }
++ }
++old_gdt:
++ /*
++ * Initialize the per-CPU GDT with the boot GDT,
++ * and set up the GDT descriptor:
++ */
++ memcpy(gdt, cpu_gdt_table, GDT_SIZE);
++
++ /* Set up GDT entry for 16bit stack */
++ *(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
++ ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
++ ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
++ (CPU_16BIT_STACK_SIZE - 1);
++
++ cpu_gdt_descr->size = GDT_SIZE - 1;
++ cpu_gdt_descr->address = (unsigned long)gdt;
++#else
++ if (cpu == 0 && cpu_gdt_descr->address == 0) {
++ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
++ /* alloc_bootmem_pages panics on failure, so no check */
++ memset(gdt, 0, PAGE_SIZE);
++
++ memcpy(gdt, cpu_gdt_table, GDT_SIZE);
++
++ cpu_gdt_descr->size = GDT_SIZE;
++ cpu_gdt_descr->address = (unsigned long)gdt;
++ }
++#endif
++
++ cpu_gdt_init(cpu_gdt_descr);
++
++ /*
++ * Set up and load the per-CPU TSS and LDT
++ */
++ atomic_inc(&init_mm.mm_count);
++ current->active_mm = &init_mm;
++ if (current->mm)
++ BUG();
++ enter_lazy_tlb(&init_mm, current);
++
++ load_esp0(t, thread);
++
++ load_LDT(&init_mm.context);
++
++#ifdef CONFIG_DOUBLEFAULT
++ /* Set up doublefault TSS pointer in the GDT */
++ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
++#endif
++
++ /* Clear %fs and %gs. */
++ asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
++
++ /* Clear all 6 debug registers: */
++ set_debugreg(0, 0);
++ set_debugreg(0, 1);
++ set_debugreg(0, 2);
++ set_debugreg(0, 3);
++ set_debugreg(0, 6);
++ set_debugreg(0, 7);
++
++ /*
++ * Force FPU initialization:
++ */
++ current_thread_info()->status = 0;
++ clear_used_math();
++ mxcsr_feature_mask_init();
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++void __cpuinit cpu_uninit(void)
++{
++ int cpu = raw_smp_processor_id();
++ cpu_clear(cpu, cpu_initialized);
++
++ /* lazy TLB state */
++ per_cpu(cpu_tlbstate, cpu).state = 0;
++ per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
++}
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/cpu/mtrr/main-xen.c 2007-08-27 14:02:01.000000000 -0400
+@@ -0,0 +1,197 @@
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/ctype.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <asm/uaccess.h>
++#include <linux/mutex.h>
++
++#include <asm/mtrr.h>
++#include "mtrr.h"
++
++static DEFINE_MUTEX(mtrr_mutex);
++
++void generic_get_mtrr(unsigned int reg, unsigned long *base,
++ unsigned int *size, mtrr_type * type)
++{
++ struct xen_platform_op op;
++
++ op.cmd = XENPF_read_memtype;
++ op.u.read_memtype.reg = reg;
++ (void)HYPERVISOR_platform_op(&op);
++
++ *size = op.u.read_memtype.nr_mfns;
++ *base = op.u.read_memtype.mfn;
++ *type = op.u.read_memtype.type;
++}
++
++struct mtrr_ops generic_mtrr_ops = {
++ .use_intel_if = 1,
++ .get = generic_get_mtrr,
++};
++
++struct mtrr_ops *mtrr_if = &generic_mtrr_ops;
++unsigned int num_var_ranges;
++unsigned int *usage_table;
++
++static void __init set_num_var_ranges(void)
++{
++ struct xen_platform_op op;
++
++ for (num_var_ranges = 0; ; num_var_ranges++) {
++ op.cmd = XENPF_read_memtype;
++ op.u.read_memtype.reg = num_var_ranges;
++ if (HYPERVISOR_platform_op(&op) != 0)
++ break;
++ }
++}
++
++static void __init init_table(void)
++{
++ int i, max;
++
++ max = num_var_ranges;
++ if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
++ == NULL) {
++ printk(KERN_ERR "mtrr: could not allocate\n");
++ return;
++ }
++ for (i = 0; i < max; i++)
++ usage_table[i] = 0;
++}
++
++int mtrr_add_page(unsigned long base, unsigned long size,
++ unsigned int type, char increment)
++{
++ int error;
++ struct xen_platform_op op;
++
++ mutex_lock(&mtrr_mutex);
++
++ op.cmd = XENPF_add_memtype;
++ op.u.add_memtype.mfn = base;
++ op.u.add_memtype.nr_mfns = size;
++ op.u.add_memtype.type = type;
++ error = HYPERVISOR_platform_op(&op);
++ if (error) {
++ mutex_unlock(&mtrr_mutex);
++ BUG_ON(error > 0);
++ return error;
++ }
++
++ if (increment)
++ ++usage_table[op.u.add_memtype.reg];
++
++ mutex_unlock(&mtrr_mutex);
++
++ return op.u.add_memtype.reg;
++}
++
++static int mtrr_check(unsigned long base, unsigned long size)
++{
++ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
++ printk(KERN_WARNING
++ "mtrr: size and base must be multiples of 4 kiB\n");
++ printk(KERN_DEBUG
++ "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
++ dump_stack();
++ return -1;
++ }
++ return 0;
++}
++
++int
++mtrr_add(unsigned long base, unsigned long size, unsigned int type,
++ char increment)
++{
++ if (mtrr_check(base, size))
++ return -EINVAL;
++ return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
++ increment);
++}
++
++int mtrr_del_page(int reg, unsigned long base, unsigned long size)
++{
++ unsigned i;
++ mtrr_type ltype;
++ unsigned long lbase;
++ unsigned int lsize;
++ int error = -EINVAL;
++ struct xen_platform_op op;
++
++ mutex_lock(&mtrr_mutex);
++
++ if (reg < 0) {
++ /* Search for existing MTRR */
++ for (i = 0; i < num_var_ranges; ++i) {
++ mtrr_if->get(i, &lbase, &lsize, &ltype);
++ if (lbase == base && lsize == size) {
++ reg = i;
++ break;
++ }
++ }
++ if (reg < 0) {
++ printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
++ size);
++ goto out;
++ }
++ }
++ if (usage_table[reg] < 1) {
++ printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
++ goto out;
++ }
++ if (--usage_table[reg] < 1) {
++ op.cmd = XENPF_del_memtype;
++ op.u.del_memtype.handle = 0;
++ op.u.del_memtype.reg = reg;
++ error = HYPERVISOR_platform_op(&op);
++ if (error) {
++ BUG_ON(error > 0);
++ goto out;
++ }
++ }
++ error = reg;
++ out:
++ mutex_unlock(&mtrr_mutex);
++ return error;
++}
++
++int
++mtrr_del(int reg, unsigned long base, unsigned long size)
++{
++ if (mtrr_check(base, size))
++ return -EINVAL;
++ return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
++}
++
++EXPORT_SYMBOL(mtrr_add);
++EXPORT_SYMBOL(mtrr_del);
++
++void __init mtrr_bp_init(void)
++{
++}
++
++void mtrr_ap_init(void)
++{
++}
++
++static int __init mtrr_init(void)
++{
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++
++ if (!is_initial_xendomain())
++ return -ENODEV;
++
++ if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
++ (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
++ (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
++ (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
++ return -ENODEV;
++
++ set_num_var_ranges();
++ init_table();
++
++ return 0;
++}
++
++subsys_initcall(mtrr_init);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/early_printk-xen.c 2007-08-27 14:01:24.000000000 -0400
+@@ -0,0 +1,2 @@
++
++#include "../../x86_64/kernel/early_printk-xen.c"
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/entry-xen.S 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,1216 @@
++/*
++ * linux/arch/i386/entry.S
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ */
++
++/*
++ * entry.S contains the system-call and fault low-level handling routines.
++ * This also contains the timer-interrupt handler, as well as all interrupts
++ * and faults that can result in a task-switch.
++ *
++ * NOTE: This code handles signal-recognition, which happens every time
++ * after a timer-interrupt and after each system call.
++ *
++ * I changed all the .align's to 4 (16 byte alignment), as that's faster
++ * on a 486.
++ *
++ * Stack layout in 'ret_from_system_call':
++ * ptrace needs to have all regs on the stack.
++ * if the order here is changed, it needs to be
++ * updated in fork.c:copy_process, signal.c:do_signal,
++ * ptrace.c and ptrace.h
++ *
++ * 0(%esp) - %ebx
++ * 4(%esp) - %ecx
++ * 8(%esp) - %edx
++ * C(%esp) - %esi
++ * 10(%esp) - %edi
++ * 14(%esp) - %ebp
++ * 18(%esp) - %eax
++ * 1C(%esp) - %ds
++ * 20(%esp) - %es
++ * 24(%esp) - orig_eax
++ * 28(%esp) - %eip
++ * 2C(%esp) - %cs
++ * 30(%esp) - %eflags
++ * 34(%esp) - %oldesp
++ * 38(%esp) - %oldss
++ *
++ * "current" is in register %ebx during any slow entries.
++ */
++
++#include <linux/linkage.h>
++#include <asm/thread_info.h>
++#include <asm/irqflags.h>
++#include <asm/errno.h>
++#include <asm/segment.h>
++#include <asm/smp.h>
++#include <asm/page.h>
++#include <asm/desc.h>
++#include <asm/dwarf2.h>
++#include "irq_vectors.h"
++#include <xen/interface/xen.h>
++
++#define nr_syscalls ((syscall_table_size)/4)
++
++EBX = 0x00
++ECX = 0x04
++EDX = 0x08
++ESI = 0x0C
++EDI = 0x10
++EBP = 0x14
++EAX = 0x18
++DS = 0x1C
++ES = 0x20
++ORIG_EAX = 0x24
++EIP = 0x28
++CS = 0x2C
++EFLAGS = 0x30
++OLDESP = 0x34
++OLDSS = 0x38
++
++CF_MASK = 0x00000001
++TF_MASK = 0x00000100
++IF_MASK = 0x00000200
++DF_MASK = 0x00000400
++NT_MASK = 0x00004000
++VM_MASK = 0x00020000
++/* Pseudo-eflags. */
++NMI_MASK = 0x80000000
++
++#ifndef CONFIG_XEN
++#define DISABLE_INTERRUPTS cli
++#define ENABLE_INTERRUPTS sti
++#else
++/* Offsets into shared_info_t. */
++#define evtchn_upcall_pending /* 0 */
++#define evtchn_upcall_mask 1
++
++#define sizeof_vcpu_shift 6
++
++#ifdef CONFIG_SMP
++#define GET_VCPU_INFO movl TI_cpu(%ebp),%esi ; \
++ shl $sizeof_vcpu_shift,%esi ; \
++ addl HYPERVISOR_shared_info,%esi
++#else
++#define GET_VCPU_INFO movl HYPERVISOR_shared_info,%esi
++#endif
++
++#define __DISABLE_INTERRUPTS movb $1,evtchn_upcall_mask(%esi)
++#define __ENABLE_INTERRUPTS movb $0,evtchn_upcall_mask(%esi)
++#define DISABLE_INTERRUPTS GET_VCPU_INFO ; \
++ __DISABLE_INTERRUPTS
++#define ENABLE_INTERRUPTS GET_VCPU_INFO ; \
++ __ENABLE_INTERRUPTS
++#define __TEST_PENDING testb $0xFF,evtchn_upcall_pending(%esi)
++#endif
++
++#ifdef CONFIG_PREEMPT
++#define preempt_stop cli; TRACE_IRQS_OFF
++#else
++#define preempt_stop
++#define resume_kernel restore_nocheck
++#endif
++
++.macro TRACE_IRQS_IRET
++#ifdef CONFIG_TRACE_IRQFLAGS
++ testl $IF_MASK,EFLAGS(%esp) # interrupts off?
++ jz 1f
++ TRACE_IRQS_ON
++1:
++#endif
++.endm
++
++#ifdef CONFIG_VM86
++#define resume_userspace_sig check_userspace
++#else
++#define resume_userspace_sig resume_userspace
++#endif
++
++#define SAVE_ALL \
++ cld; \
++ pushl %es; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET es, 0;*/\
++ pushl %ds; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET ds, 0;*/\
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET eax, 0;\
++ pushl %ebp; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebp, 0;\
++ pushl %edi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edi, 0;\
++ pushl %esi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET esi, 0;\
++ pushl %edx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edx, 0;\
++ pushl %ecx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ecx, 0;\
++ pushl %ebx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebx, 0;\
++ movl $(__USER_DS), %edx; \
++ movl %edx, %ds; \
++ movl %edx, %es;
++
++#define RESTORE_INT_REGS \
++ popl %ebx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebx;\
++ popl %ecx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ecx;\
++ popl %edx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edx;\
++ popl %esi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE esi;\
++ popl %edi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edi;\
++ popl %ebp; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebp;\
++ popl %eax; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE eax
++
++#define RESTORE_REGS \
++ RESTORE_INT_REGS; \
++1: popl %ds; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE ds;*/\
++2: popl %es; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE es;*/\
++.section .fixup,"ax"; \
++3: movl $0,(%esp); \
++ jmp 1b; \
++4: movl $0,(%esp); \
++ jmp 2b; \
++.previous; \
++.section __ex_table,"a";\
++ .align 4; \
++ .long 1b,3b; \
++ .long 2b,4b; \
++.previous
++
++#define RING0_INT_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_DEF_CFA esp, 3*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_EC_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_DEF_CFA esp, 4*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_PTREGS_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_DEF_CFA esp, OLDESP-EBX;\
++ /*CFI_OFFSET cs, CS-OLDESP;*/\
++ CFI_OFFSET eip, EIP-OLDESP;\
++ /*CFI_OFFSET es, ES-OLDESP;*/\
++ /*CFI_OFFSET ds, DS-OLDESP;*/\
++ CFI_OFFSET eax, EAX-OLDESP;\
++ CFI_OFFSET ebp, EBP-OLDESP;\
++ CFI_OFFSET edi, EDI-OLDESP;\
++ CFI_OFFSET esi, ESI-OLDESP;\
++ CFI_OFFSET edx, EDX-OLDESP;\
++ CFI_OFFSET ecx, ECX-OLDESP;\
++ CFI_OFFSET ebx, EBX-OLDESP
++
++ENTRY(ret_from_fork)
++ CFI_STARTPROC
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call schedule_tail
++ GET_THREAD_INFO(%ebp)
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ pushl $0x0202 # Reset kernel eflags
++ CFI_ADJUST_CFA_OFFSET 4
++ popfl
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp syscall_exit
++ CFI_ENDPROC
++
++/*
++ * Return to user mode is not as complex as all this looks,
++ * but we want the default path for a system call return to
++ * go as quickly as possible which is why some of this is
++ * less clear than it otherwise should be.
++ */
++
++ # userspace resumption stub bypassing syscall exit tracing
++ ALIGN
++ RING0_PTREGS_FRAME
++ret_from_exception:
++ preempt_stop
++ret_from_intr:
++ GET_THREAD_INFO(%ebp)
++check_userspace:
++ movl EFLAGS(%esp), %eax # mix EFLAGS and CS
++ movb CS(%esp), %al
++ testl $(VM_MASK | 2), %eax
++ jz resume_kernel
++ENTRY(resume_userspace)
++ DISABLE_INTERRUPTS # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
++ # int/exception return?
++ jne work_pending
++ jmp restore_all
++
++#ifdef CONFIG_PREEMPT
++ENTRY(resume_kernel)
++ cli
++ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
++ jnz restore_nocheck
++need_resched:
++ movl TI_flags(%ebp), %ecx # need_resched set ?
++ testb $_TIF_NEED_RESCHED, %cl
++ jz restore_all
++ testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ?
++ jz restore_all
++ call preempt_schedule_irq
++ jmp need_resched
++#endif
++ CFI_ENDPROC
++
++/* SYSENTER_RETURN points to after the "sysenter" instruction in
++ the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
++
++ # sysenter call handler stub
++ENTRY(sysenter_entry)
++ CFI_STARTPROC simple
++ CFI_DEF_CFA esp, 0
++ CFI_REGISTER esp, ebp
++ movl SYSENTER_stack_esp0(%esp),%esp
++sysenter_past_esp:
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs and here we enable it straight after entry:
++ */
++ sti
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ss, 0*/
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esp, 0
++ pushfl
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $(__USER_CS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET cs, 0*/
++ /*
++ * Push current_thread_info()->sysenter_return to the stack.
++ * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
++ * pushed above; +8 corresponds to copy_thread's esp0 setting.
++ */
++ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eip, 0
++
++/*
++ * Load the potential sixth argument from user stack.
++ * Careful about security.
++ */
++ cmpl $__PAGE_OFFSET-3,%ebp
++ jae syscall_fault
++1: movl (%ebp),%ebp
++.section __ex_table,"a"
++ .align 4
++ .long 1b,syscall_fault
++.previous
++
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++ call *sys_call_table(,%eax,4)
++ movl %eax,EAX(%esp)
++ DISABLE_INTERRUPTS
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx
++ jne syscall_exit_work
++/* if something modifies registers it must also disable sysexit */
++ movl EIP(%esp), %edx
++ movl OLDESP(%esp), %ecx
++ xorl %ebp,%ebp
++#ifdef CONFIG_XEN
++ TRACE_IRQS_ON
++ __ENABLE_INTERRUPTS
++sysexit_scrit: /**** START OF SYSEXIT CRITICAL REGION ****/
++ __TEST_PENDING
++ jnz 14f # process more events if necessary...
++ movl ESI(%esp), %esi
++ sysexit
++14: __DISABLE_INTERRUPTS
++ TRACE_IRQS_OFF
++sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/
++ push %esp
++ call evtchn_do_upcall
++ add $4,%esp
++ jmp ret_from_intr
++#else
++ TRACE_IRQS_ON
++ sti
++ sysexit
++#endif /* !CONFIG_XEN */
++ CFI_ENDPROC
++
++
++ # system call handler stub
++ENTRY(system_call)
++ RING0_INT_FRAME # can't unwind into user space anyway
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ testl $TF_MASK,EFLAGS(%esp)
++ jz no_singlestep
++ orl $_TIF_SINGLESTEP,TI_flags(%ebp)
++no_singlestep:
++ # system call tracing in operation / emulation
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++syscall_call:
++ call *sys_call_table(,%eax,4)
++ movl %eax,EAX(%esp) # store the return value
++syscall_exit:
++ DISABLE_INTERRUPTS # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx # current->work
++ jne syscall_exit_work
++
++restore_all:
++#ifndef CONFIG_XEN
++ movl EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
++ # Warning: OLDSS(%esp) contains the wrong/random values if we
++ # are returning to the kernel.
++ # See comments in process.c:copy_thread() for details.
++ movb OLDSS(%esp), %ah
++ movb CS(%esp), %al
++ andl $(VM_MASK | (4 << 8) | 3), %eax
++ cmpl $((4 << 8) | 3), %eax
++ CFI_REMEMBER_STATE
++ je ldt_ss # returning to user-space with LDT SS
++restore_nocheck:
++#else
++restore_nocheck:
++ movl EFLAGS(%esp), %eax
++ testl $(VM_MASK|NMI_MASK), %eax
++ CFI_REMEMBER_STATE
++ jnz hypervisor_iret
++ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
++ GET_VCPU_INFO
++ andb evtchn_upcall_mask(%esi),%al
++ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
++ CFI_REMEMBER_STATE
++ jnz restore_all_enable_events # != 0 => enable event delivery
++#endif
++ TRACE_IRQS_IRET
++restore_nocheck_notrace:
++ RESTORE_REGS
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++1: iret
++.section .fixup,"ax"
++iret_exc:
++#ifndef CONFIG_XEN
++ TRACE_IRQS_ON
++ sti
++#endif
++ pushl $0 # no error code
++ pushl $do_iret_error
++ jmp error_code
++.previous
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++
++ CFI_RESTORE_STATE
++#ifndef CONFIG_XEN
++ldt_ss:
++ larl OLDSS(%esp), %eax
++ jnz restore_nocheck
++ testl $0x00400000, %eax # returning to 32bit stack?
++ jnz restore_nocheck # allright, normal return
++ /* If returning to userspace with 16bit stack,
++ * try to fix the higher word of ESP, as the CPU
++ * won't restore it.
++ * This is an "official" bug of all the x86-compatible
++ * CPUs, which we can try to work around to make
++ * dosemu and wine happy. */
++ subl $8, %esp # reserve space for switch16 pointer
++ CFI_ADJUST_CFA_OFFSET 8
++ cli
++ TRACE_IRQS_OFF
++ movl %esp, %eax
++ /* Set up the 16bit stack frame with switch32 pointer on top,
++ * and a switch16 pointer on top of the current frame. */
++ call setup_x86_bogus_stack
++ CFI_ADJUST_CFA_OFFSET -8 # frame has moved
++ TRACE_IRQS_IRET
++ RESTORE_REGS
++ lss 20+4(%esp), %esp # switch to 16bit stack
++1: iret
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++#else
++ ALIGN
++restore_all_enable_events:
++ TRACE_IRQS_ON
++ __ENABLE_INTERRUPTS
++scrit: /**** START OF CRITICAL REGION ****/
++ __TEST_PENDING
++ jnz 14f # process more events if necessary...
++ RESTORE_REGS
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++1: iret
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++14: __DISABLE_INTERRUPTS
++ TRACE_IRQS_OFF
++ jmp 11f
++ecrit: /**** END OF CRITICAL REGION ****/
++
++ CFI_RESTORE_STATE
++hypervisor_iret:
++ andl $~NMI_MASK, EFLAGS(%esp)
++ RESTORE_REGS
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp hypercall_page + (__HYPERVISOR_iret * 32)
++#endif
++ CFI_ENDPROC
++
++ # perform work that needs to be done immediately before resumption
++ ALIGN
++ RING0_PTREGS_FRAME # can't unwind into user space anyway
++work_pending:
++ testb $_TIF_NEED_RESCHED, %cl
++ jz work_notifysig
++work_resched:
++ call schedule
++ DISABLE_INTERRUPTS # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
++ # than syscall tracing?
++ jz restore_all
++ testb $_TIF_NEED_RESCHED, %cl
++ jnz work_resched
++
++work_notifysig: # deal with pending signals and
++ # notify-resume requests
++ testl $VM_MASK, EFLAGS(%esp)
++ movl %esp, %eax
++ jne work_notifysig_v86 # returning to kernel-space or
++ # vm86-space
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++
++ ALIGN
++work_notifysig_v86:
++#ifdef CONFIG_VM86
++ pushl %ecx # save ti_flags for do_notify_resume
++ CFI_ADJUST_CFA_OFFSET 4
++ call save_v86_state # %eax contains pt_regs pointer
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ movl %eax, %esp
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++#endif
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_trace_entry:
++ movl $-ENOSYS,EAX(%esp)
++ movl %esp, %eax
++ xorl %edx,%edx
++ call do_syscall_trace
++ cmpl $0, %eax
++ jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU,
++ # so must skip actual syscall
++ movl ORIG_EAX(%esp), %eax
++ cmpl $(nr_syscalls), %eax
++ jnae syscall_call
++ jmp syscall_exit
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_exit_work:
++ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
++ jz work_pending
++ TRACE_IRQS_ON
++ ENABLE_INTERRUPTS # could let do_syscall_trace() call
++ # schedule() instead
++ movl %esp, %eax
++ movl $1, %edx
++ call do_syscall_trace
++ jmp resume_userspace
++ CFI_ENDPROC
++
++ RING0_INT_FRAME # can't unwind into user space anyway
++syscall_fault:
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ movl $-EFAULT,EAX(%esp)
++ jmp resume_userspace
++
++syscall_badsys:
++ movl $-ENOSYS,EAX(%esp)
++ jmp resume_userspace
++ CFI_ENDPROC
++
++#ifndef CONFIG_XEN
++#define FIXUP_ESPFIX_STACK \
++ movl %esp, %eax; \
++ /* switch to 32bit stack using the pointer on top of 16bit stack */ \
++ lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
++ /* copy data from 16bit stack to 32bit stack */ \
++ call fixup_x86_bogus_stack; \
++ /* put ESP to the proper location */ \
++ movl %eax, %esp;
++#define UNWIND_ESPFIX_STACK \
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ movl %ss, %eax; \
++ /* see if on 16bit stack */ \
++ cmpw $__ESPFIX_SS, %ax; \
++ je 28f; \
++27: popl %eax; \
++ CFI_ADJUST_CFA_OFFSET -4; \
++.section .fixup,"ax"; \
++28: movl $__KERNEL_DS, %eax; \
++ movl %eax, %ds; \
++ movl %eax, %es; \
++ /* switch to 32bit stack */ \
++ FIXUP_ESPFIX_STACK; \
++ jmp 27b; \
++.previous
++
++/*
++ * Build the entry stubs and pointer table with
++ * some assembler magic.
++ */
++.data
++ENTRY(interrupt)
++.text
++
++vector=0
++ENTRY(irq_entries_start)
++ RING0_INT_FRAME
++.rept NR_IRQS
++ ALIGN
++ .if vector
++ CFI_ADJUST_CFA_OFFSET -4
++ .endif
++1: pushl $~(vector)
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp common_interrupt
++.data
++ .long 1b
++.text
++vector=vector+1
++.endr
++
++/*
++ * the CPU automatically disables interrupts when executing an IRQ vector,
++ * so IRQ-flags tracing has to follow that:
++ */
++ ALIGN
++common_interrupt:
++ SAVE_ALL
++ TRACE_IRQS_OFF
++ movl %esp,%eax
++ call do_IRQ
++ jmp ret_from_intr
++ CFI_ENDPROC
++
++#define BUILD_INTERRUPT(name, nr) \
++ENTRY(name) \
++ RING0_INT_FRAME; \
++ pushl $~(nr); \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ SAVE_ALL; \
++ TRACE_IRQS_OFF \
++ movl %esp,%eax; \
++ call smp_/**/name; \
++ jmp ret_from_intr; \
++ CFI_ENDPROC
++
++/* The include is where all of the SMP etc. interrupts come from */
++#include "entry_arch.h"
++#else
++#define UNWIND_ESPFIX_STACK
++#endif
++
++ENTRY(divide_error)
++ RING0_INT_FRAME
++ pushl $0 # no error code
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_divide_error
++ CFI_ADJUST_CFA_OFFSET 4
++ ALIGN
++error_code:
++ pushl %ds
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ds, 0*/
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eax, 0
++ xorl %eax, %eax
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebp, 0
++ pushl %edi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edi, 0
++ pushl %esi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esi, 0
++ pushl %edx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edx, 0
++ decl %eax # eax = -1
++ pushl %ecx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ecx, 0
++ pushl %ebx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebx, 0
++ cld
++ pushl %es
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET es, 0*/
++ UNWIND_ESPFIX_STACK
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ /*CFI_REGISTER es, ecx*/
++ movl ES(%esp), %edi # get the function address
++ movl ORIG_EAX(%esp), %edx # get the error code
++ movl %eax, ORIG_EAX(%esp)
++ movl %ecx, ES(%esp)
++ /*CFI_REL_OFFSET es, ES*/
++ movl $(__USER_DS), %ecx
++ movl %ecx, %ds
++ movl %ecx, %es
++ movl %esp,%eax # pt_regs pointer
++ call *%edi
++ jmp ret_from_exception
++ CFI_ENDPROC
++
++#ifdef CONFIG_XEN
++# A note on the "critical region" in our callback handler.
++# We want to avoid stacking callback handlers due to events occurring
++# during handling of the last event. To do this, we keep events disabled
++# until we've done all processing. HOWEVER, we must enable events before
++# popping the stack frame (can't be done atomically) and so it would still
++# be possible to get enough handler activations to overflow the stack.
++# Although unlikely, bugs of that kind are hard to track down, so we'd
++# like to avoid the possibility.
++# So, on entry to the handler we detect whether we interrupted an
++# existing activation in its critical region -- if so, we pop the current
++# activation and restart the handler using the previous one.
++#
++# The sysexit critical region is slightly different. sysexit
++# atomically removes the entire stack frame. If we interrupt in the
++# critical region we know that the entire frame is present and correct
++# so we can simply throw away the new one.
++ENTRY(hypervisor_callback)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ movl EIP(%esp),%eax
++ cmpl $scrit,%eax
++ jb 11f
++ cmpl $ecrit,%eax
++ jb critical_region_fixup
++ cmpl $sysexit_scrit,%eax
++ jb 11f
++ cmpl $sysexit_ecrit,%eax
++ ja 11f
++ addl $OLDESP,%esp # Remove eflags...ebx from stack frame.
++11: push %esp
++ CFI_ADJUST_CFA_OFFSET 4
++ call evtchn_do_upcall
++ add $4,%esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp ret_from_intr
++ CFI_ENDPROC
++
++# [How we do the fixup]. We want to merge the current stack frame with the
++# just-interrupted frame. How we do this depends on where in the critical
++# region the interrupted handler was executing, and so how many saved
++# registers are in each frame. We do this quickly using the lookup table
++# 'critical_fixup_table'. For each byte offset in the critical region, it
++# provides the number of bytes which have already been popped from the
++# interrupted stack frame.
++critical_region_fixup:
++ movzbl critical_fixup_table-scrit(%eax),%ecx # %eax contains num bytes popped
++ cmpb $0xff,%cl # 0xff => vcpu_info critical region
++ jne 15f
++ xorl %ecx,%ecx
++15: leal (%esp,%ecx),%esi # %esi points at end of src region
++ leal OLDESP(%esp),%edi # %edi points at end of dst region
++ shrl $2,%ecx # convert words to bytes
++ je 17f # skip loop if nothing to copy
++16: subl $4,%esi # pre-decrementing copy loop
++ subl $4,%edi
++ movl (%esi),%eax
++ movl %eax,(%edi)
++ loop 16b
++17: movl %edi,%esp # final %edi is top of merged stack
++ jmp 11b
++
++.section .rodata,"a"
++critical_fixup_table:
++ .byte 0xff,0xff,0xff # testb $0xff,(%esi) = __TEST_PENDING
++ .byte 0xff,0xff # jnz 14f
++ .byte 0x00 # pop %ebx
++ .byte 0x04 # pop %ecx
++ .byte 0x08 # pop %edx
++ .byte 0x0c # pop %esi
++ .byte 0x10 # pop %edi
++ .byte 0x14 # pop %ebp
++ .byte 0x18 # pop %eax
++ .byte 0x1c # pop %ds
++ .byte 0x20 # pop %es
++ .byte 0x24,0x24,0x24 # add $4,%esp
++ .byte 0x28 # iret
++ .byte 0xff,0xff,0xff,0xff # movb $1,1(%esi)
++ .byte 0x00,0x00 # jmp 11b
++.previous
++
++# Hypervisor uses this for application faults while it executes.
++# We get here for two reasons:
++# 1. Fault while reloading DS, ES, FS or GS
++# 2. Fault while executing IRET
++# Category 1 we fix up by reattempting the load, and zeroing the segment
++# register if the load fails.
++# Category 2 we fix up by jumping to do_iret_error. We cannot use the
++# normal Linux return path in this case because if we use the IRET hypercall
++# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
++# We distinguish between categories by maintaining a status value in EAX.
++ENTRY(failsafe_callback)
++ pushl %eax
++ movl $1,%eax
++1: mov 4(%esp),%ds
++2: mov 8(%esp),%es
++3: mov 12(%esp),%fs
++4: mov 16(%esp),%gs
++ testl %eax,%eax
++ popl %eax
++ jz 5f
++ addl $16,%esp # EAX != 0 => Category 2 (Bad IRET)
++ jmp iret_exc
++5: addl $16,%esp # EAX == 0 => Category 1 (Bad segment)
++ RING0_INT_FRAME
++ pushl $0
++ SAVE_ALL
++ jmp ret_from_exception
++.section .fixup,"ax"; \
++6: xorl %eax,%eax; \
++ movl %eax,4(%esp); \
++ jmp 1b; \
++7: xorl %eax,%eax; \
++ movl %eax,8(%esp); \
++ jmp 2b; \
++8: xorl %eax,%eax; \
++ movl %eax,12(%esp); \
++ jmp 3b; \
++9: xorl %eax,%eax; \
++ movl %eax,16(%esp); \
++ jmp 4b; \
++.previous; \
++.section __ex_table,"a"; \
++ .align 4; \
++ .long 1b,6b; \
++ .long 2b,7b; \
++ .long 3b,8b; \
++ .long 4b,9b; \
++.previous
++#endif
++ CFI_ENDPROC
++
++ENTRY(coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(simd_coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_simd_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(device_not_available)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++#ifndef CONFIG_XEN
++ movl %cr0, %eax
++ testl $0x4, %eax # EM (math emulation bit)
++ je device_available_emulate
++ pushl $0 # temporary storage for ORIG_EIP
++ CFI_ADJUST_CFA_OFFSET 4
++ call math_emulate
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp ret_from_exception
++device_available_emulate:
++#endif
++ preempt_stop
++ call math_state_restore
++ jmp ret_from_exception
++ CFI_ENDPROC
++
++#ifndef CONFIG_XEN
++/*
++ * Debug traps and NMI can happen at the one SYSENTER instruction
++ * that sets up the real kernel stack. Check here, since we can't
++ * allow the wrong stack to be used.
++ *
++ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
++ * already pushed 3 words if it hits on the sysenter instruction:
++ * eflags, cs and eip.
++ *
++ * We just load the right stack, and push the three (known) values
++ * by hand onto the new stack - while updating the return eip past
++ * the instruction that would have done it for sysenter.
++ */
++#define FIX_STACK(offset, ok, label) \
++ cmpw $__KERNEL_CS,4(%esp); \
++ jne ok; \
++label: \
++ movl SYSENTER_stack_esp0+offset(%esp),%esp; \
++ pushfl; \
++ pushl $__KERNEL_CS; \
++ pushl $sysenter_past_esp
++#endif /* CONFIG_XEN */
++
++KPROBE_ENTRY(debug)
++ RING0_INT_FRAME
++#ifndef CONFIG_XEN
++ cmpl $sysenter_entry,(%esp)
++ jne debug_stack_correct
++ FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
++debug_stack_correct:
++#endif /* !CONFIG_XEN */
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # error code 0
++ movl %esp,%eax # pt_regs pointer
++ call do_debug
++ jmp ret_from_exception
++ CFI_ENDPROC
++ .previous .text
++#ifndef CONFIG_XEN
++/*
++ * NMI is doubly nasty. It can happen _while_ we're handling
++ * a debug fault, and the debug fault hasn't yet been able to
++ * clear up the stack. So we first check whether we got an
++ * NMI on the sysenter entry path, but after that we need to
++ * check whether we got an NMI on the debug path where the debug
++ * fault happened on the sysenter path.
++ */
++ENTRY(nmi)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %ss, %eax
++ cmpw $__ESPFIX_SS, %ax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ je nmi_16bit_stack
++ cmpl $sysenter_entry,(%esp)
++ je nmi_stack_fixup
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %esp,%eax
++ /* Do not access memory above the end of our stack page,
++ * it might not exist.
++ */
++ andl $(THREAD_SIZE-1),%eax
++ cmpl $(THREAD_SIZE-20),%eax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ jae nmi_stack_correct
++ cmpl $sysenter_entry,12(%esp)
++ je nmi_debug_stack_check
++nmi_stack_correct:
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_nmi
++ jmp restore_nocheck_notrace
++ CFI_ENDPROC
++
++nmi_stack_fixup:
++ FIX_STACK(12,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++nmi_debug_stack_check:
++ cmpw $__KERNEL_CS,16(%esp)
++ jne nmi_stack_correct
++ cmpl $debug,(%esp)
++ jb nmi_stack_correct
++ cmpl $debug_esp_fix_insn,(%esp)
++ ja nmi_stack_correct
++ FIX_STACK(24,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++
++nmi_16bit_stack:
++ RING0_INT_FRAME
++ /* create the pointer to lss back */
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl %esp
++ CFI_ADJUST_CFA_OFFSET 4
++ movzwl %sp, %esp
++ addw $4, (%esp)
++ /* copy the iret frame of 12 bytes */
++ .rept 3
++ pushl 16(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ .endr
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ FIXUP_ESPFIX_STACK # %eax == %esp
++ CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved
++ xorl %edx,%edx # zero error code
++ call do_nmi
++ RESTORE_REGS
++ lss 12+4(%esp), %esp # back to 16bit stack
++1: iret
++ CFI_ENDPROC
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++#else
++ENTRY(nmi)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_nmi
++ orl $NMI_MASK, EFLAGS(%esp)
++ jmp restore_all
++ CFI_ENDPROC
++#endif
++
++KPROBE_ENTRY(int3)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_int3
++ jmp ret_from_exception
++ CFI_ENDPROC
++ .previous .text
++
++ENTRY(overflow)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_overflow
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(bounds)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_bounds
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(invalid_op)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_invalid_op
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(coprocessor_segment_overrun)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_segment_overrun
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(invalid_TSS)
++ RING0_EC_FRAME
++ pushl $do_invalid_TSS
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(segment_not_present)
++ RING0_EC_FRAME
++ pushl $do_segment_not_present
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(stack_segment)
++ RING0_EC_FRAME
++ pushl $do_stack_segment
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++KPROBE_ENTRY(general_protection)
++ RING0_EC_FRAME
++ pushl $do_general_protection
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++ .previous .text
++
++ENTRY(alignment_check)
++ RING0_EC_FRAME
++ pushl $do_alignment_check
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++KPROBE_ENTRY(page_fault)
++ RING0_EC_FRAME
++ pushl $do_page_fault
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++ .previous .text
++
++#ifdef CONFIG_X86_MCE
++ENTRY(machine_check)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl machine_check_vector
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++#endif
++
++#ifndef CONFIG_XEN
++ENTRY(spurious_interrupt_bug)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_spurious_interrupt_bug
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++#endif /* !CONFIG_XEN */
++
++#ifdef CONFIG_STACK_UNWIND
++ENTRY(arch_unwind_init_running)
++ CFI_STARTPROC
++ movl 4(%esp), %edx
++ movl (%esp), %ecx
++ leal 4(%esp), %eax
++ movl %ebx, EBX(%edx)
++ xorl %ebx, %ebx
++ movl %ebx, ECX(%edx)
++ movl %ebx, EDX(%edx)
++ movl %esi, ESI(%edx)
++ movl %edi, EDI(%edx)
++ movl %ebp, EBP(%edx)
++ movl %ebx, EAX(%edx)
++ movl $__USER_DS, DS(%edx)
++ movl $__USER_DS, ES(%edx)
++ movl %ebx, ORIG_EAX(%edx)
++ movl %ecx, EIP(%edx)
++ movl 12(%esp), %ecx
++ movl $__KERNEL_CS, CS(%edx)
++ movl %ebx, EFLAGS(%edx)
++ movl %eax, OLDESP(%edx)
++ movl 8(%esp), %eax
++ movl %ecx, 8(%esp)
++ movl EBX(%edx), %ebx
++ movl $__KERNEL_DS, OLDSS(%edx)
++ jmpl *%eax
++ CFI_ENDPROC
++ENDPROC(arch_unwind_init_running)
++#endif
++
++ENTRY(fixup_4gb_segment)
++ RING0_EC_FRAME
++ pushl $do_fixup_4gb_segment
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++.section .rodata,"a"
++.align 4
++#include "syscall_table.S"
++
++syscall_table_size=(.-sys_call_table)
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/fixup.c 2007-08-27 14:01:24.000000000 -0400
+@@ -0,0 +1,88 @@
++/******************************************************************************
++ * fixup.c
++ *
++ * Binary-rewriting of certain IA32 instructions, on notification by Xen.
++ * Used to avoid repeated slow emulation of common instructions used by the
++ * user-space TLS (Thread-Local Storage) libraries.
++ *
++ * **** NOTE ****
++ * Issues with the binary rewriting have caused it to be removed. Instead
++ * we rely on Xen's emulator to boot the kernel, and then print a banner
++ * message recommending that the user disables /lib/tls.
++ *
++ * Copyright (c) 2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/version.h>
++
++#define DP(_f, _args...) printk(KERN_ALERT " " _f "\n" , ## _args )
++
++fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
++{
++ static unsigned long printed = 0;
++ char info[100];
++ int i;
++
++ /* Ignore statically-linked init. */
++ if (current->tgid == 1)
++ return;
++
++ HYPERVISOR_vm_assist(
++ VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify);
++
++ if (test_and_set_bit(0, &printed))
++ return;
++
++ sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
++
++ DP("");
++ DP("***************************************************************");
++ DP("***************************************************************");
++ DP("** WARNING: Currently emulating unsupported memory accesses **");
++ DP("** in /lib/tls glibc libraries. The emulation is **");
++ DP("** slow. To ensure full performance you should **");
++ DP("** install a 'xen-friendly' (nosegneg) version of **");
++ DP("** the library, or disable tls support by executing **");
++ DP("** the following as root: **");
++ DP("** mv /lib/tls /lib/tls.disabled **");
++ DP("** Offending process: %-38.38s **", info);
++ DP("***************************************************************");
++ DP("***************************************************************");
++ DP("");
++
++ for (i = 5; i > 0; i--) {
++ touch_softlockup_watchdog();
++ printk("Pausing... %d", i);
++ mdelay(1000);
++ printk("\b\b\b\b\b\b\b\b\b\b\b\b");
++ }
++
++ printk("Continuing...\n\n");
++}
++
++static int __init fixup_init(void)
++{
++ HYPERVISOR_vm_assist(
++ VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
++ return 0;
++}
++__initcall(fixup_init);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/head-xen.S 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,207 @@
++
++
++.text
++#include <linux/elfnote.h>
++#include <linux/threads.h>
++#include <linux/linkage.h>
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/cache.h>
++#include <asm/thread_info.h>
++#include <asm/asm-offsets.h>
++#include <asm/dwarf2.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/elfnote.h>
++
++/*
++ * References to members of the new_cpu_data structure.
++ */
++
++#define X86 new_cpu_data+CPUINFO_x86
++#define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
++#define X86_MODEL new_cpu_data+CPUINFO_x86_model
++#define X86_MASK new_cpu_data+CPUINFO_x86_mask
++#define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
++#define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
++#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
++#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
++
++#define VIRT_ENTRY_OFFSET 0x0
++.org VIRT_ENTRY_OFFSET
++ENTRY(startup_32)
++ movl %esi,xen_start_info
++ cld
++
++ /* Set up the stack pointer */
++ movl $(init_thread_union+THREAD_SIZE),%esp
++
++ /* get vendor info */
++ xorl %eax,%eax # call CPUID with 0 -> return vendor ID
++ XEN_CPUID
++ movl %eax,X86_CPUID # save CPUID level
++ movl %ebx,X86_VENDOR_ID # lo 4 chars
++ movl %edx,X86_VENDOR_ID+4 # next 4 chars
++ movl %ecx,X86_VENDOR_ID+8 # last 4 chars
++
++ movl $1,%eax # Use the CPUID instruction to get CPU type
++ XEN_CPUID
++ movb %al,%cl # save reg for future use
++ andb $0x0f,%ah # mask processor family
++ movb %ah,X86
++ andb $0xf0,%al # mask model
++ shrb $4,%al
++ movb %al,X86_MODEL
++	andb $0x0f,%cl		# mask the stepping (mask revision)
++ movb %cl,X86_MASK
++ movl %edx,X86_CAPABILITY
++
++ movb $1,X86_HARD_MATH
++
++ xorl %eax,%eax # Clear FS/GS and LDT
++ movl %eax,%fs
++ movl %eax,%gs
++ cld # gcc2 wants the direction flag cleared at all times
++
++ pushl %eax # fake return address
++ jmp start_kernel
++
++#define HYPERCALL_PAGE_OFFSET 0x1000
++.org HYPERCALL_PAGE_OFFSET
++ENTRY(hypercall_page)
++ CFI_STARTPROC
++.skip 0x1000
++ CFI_ENDPROC
++
++/*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
++
++/*
++ * BSS section
++ */
++.section ".bss.page_aligned","w"
++ENTRY(empty_zero_page)
++ .fill 4096,1,0
++
++/*
++ * This starts the data section.
++ */
++.data
++
++/*
++ * The Global Descriptor Table contains 32 quadwords, per-CPU.
++ */
++ .align L1_CACHE_BYTES
++ENTRY(cpu_gdt_table)
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x0000000000000000 /* 0x0b reserved */
++ .quad 0x0000000000000000 /* 0x13 reserved */
++ .quad 0x0000000000000000 /* 0x1b reserved */
++ .quad 0x0000000000000000 /* 0x20 unused */
++ .quad 0x0000000000000000 /* 0x28 unused */
++ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
++ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
++ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
++ .quad 0x0000000000000000 /* 0x4b reserved */
++ .quad 0x0000000000000000 /* 0x53 reserved */
++ .quad 0x0000000000000000 /* 0x5b reserved */
++
++ .quad 0x00cf9a000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
++ .quad 0x00cf92000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
++ .quad 0x00cffa000000ffff /* 0x73 user 4GB code at 0x00000000 */
++ .quad 0x00cff2000000ffff /* 0x7b user 4GB data at 0x00000000 */
++
++ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
++ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
++
++ /*
++ * Segments used for calling PnP BIOS have byte granularity.
++	 * Their code segments and data segments have fixed 64k limits,
++ * the transfer segment sizes are set at run time.
++ */
++ .quad 0x0000000000000000 /* 0x90 32-bit code */
++ .quad 0x0000000000000000 /* 0x98 16-bit code */
++ .quad 0x0000000000000000 /* 0xa0 16-bit data */
++ .quad 0x0000000000000000 /* 0xa8 16-bit data */
++ .quad 0x0000000000000000 /* 0xb0 16-bit data */
++
++ /*
++ * The APM segments have byte granularity and their bases
++ * are set at run time. All have 64k limits.
++ */
++ .quad 0x0000000000000000 /* 0xb8 APM CS code */
++ .quad 0x0000000000000000 /* 0xc0 APM CS 16 code (16 bit) */
++ .quad 0x0000000000000000 /* 0xc8 APM DS data */
++
++ .quad 0x0000000000000000 /* 0xd0 - ESPFIX 16-bit SS */
++ .quad 0x0000000000000000 /* 0xd8 - unused */
++ .quad 0x0000000000000000 /* 0xe0 - unused */
++ .quad 0x0000000000000000 /* 0xe8 - unused */
++ .quad 0x0000000000000000 /* 0xf0 - unused */
++ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++/*
++ * __xen_guest information
++ */
++.macro utoa value
++ .if (\value) < 0 || (\value) >= 0x10
++ utoa (((\value)>>4)&0x0fffffff)
++ .endif
++ .if ((\value) & 0xf) < 10
++ .byte '0' + ((\value) & 0xf)
++ .else
++ .byte 'A' + ((\value) & 0xf) - 10
++ .endif
++.endm
++
++.section __xen_guest
++ .ascii "GUEST_OS=linux,GUEST_VER=2.6"
++ .ascii ",XEN_VER=xen-3.0"
++ .ascii ",VIRT_BASE=0x"
++ utoa __PAGE_OFFSET
++ .ascii ",ELF_PADDR_OFFSET=0x"
++ utoa __PAGE_OFFSET
++ .ascii ",VIRT_ENTRY=0x"
++ utoa (__PAGE_OFFSET + __PHYSICAL_START + VIRT_ENTRY_OFFSET)
++ .ascii ",HYPERCALL_PAGE=0x"
++ utoa ((__PHYSICAL_START+HYPERCALL_PAGE_OFFSET)>>PAGE_SHIFT)
++ .ascii ",FEATURES=writable_page_tables"
++ .ascii "|writable_descriptor_tables"
++ .ascii "|auto_translated_physmap"
++ .ascii "|pae_pgdir_above_4gb"
++ .ascii "|supervisor_mode_kernel"
++#ifdef CONFIG_X86_PAE
++ .ascii ",PAE=yes[extended-cr3]"
++#else
++ .ascii ",PAE=no"
++#endif
++ .ascii ",LOADER=generic"
++ .byte 0
++#endif /* CONFIG_XEN_COMPAT <= 0x030002 */
++
++
++ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "linux")
++ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "2.6")
++ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
++ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .long, __PAGE_OFFSET)
++#if CONFIG_XEN_COMPAT <= 0x030002
++ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, __PAGE_OFFSET)
++#else
++ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, 0)
++#endif
++ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long, startup_32)
++ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long, hypercall_page)
++ ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW, .long, HYPERVISOR_VIRT_START)
++ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
++#ifdef CONFIG_X86_PAE
++ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "yes")
++ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT)
++#else
++ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "no")
++ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .long, _PAGE_PRESENT,_PAGE_PRESENT)
++#endif
++ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
++ ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1)
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/init_task-xen.c 2007-08-27 14:01:24.000000000 -0400
+@@ -0,0 +1,51 @@
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/init.h>
++#include <linux/init_task.h>
++#include <linux/fs.h>
++#include <linux/mqueue.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/desc.h>
++
++static struct fs_struct init_fs = INIT_FS;
++static struct files_struct init_files = INIT_FILES;
++static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
++static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
++
++#define swapper_pg_dir ((pgd_t *)NULL)
++struct mm_struct init_mm = INIT_MM(init_mm);
++#undef swapper_pg_dir
++
++EXPORT_SYMBOL(init_mm);
++
++/*
++ * Initial thread structure.
++ *
++ * We need to make sure that this is THREAD_SIZE aligned due to the
++ * way process stacks are handled. This is done by having a special
++ * "init_task" linker map entry..
++ */
++union thread_union init_thread_union
++ __attribute__((__section__(".data.init_task"))) =
++ { INIT_THREAD_INFO(init_task) };
++
++/*
++ * Initial task structure.
++ *
++ * All other task structs will be allocated on slabs in fork.c
++ */
++struct task_struct init_task = INIT_TASK(init_task);
++
++EXPORT_SYMBOL(init_task);
++
++#ifndef CONFIG_X86_NO_TSS
++/*
++ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
++ * no more per-task TSS's.
++ */
++DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
++#endif
++
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/io_apic-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,2777 @@
++/*
++ * Intel IO-APIC support for multi-Pentium hosts.
++ *
++ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
++ *
++ * Many thanks to Stig Venaas for trying out countless experimental
++ * patches and reporting/debugging problems patiently!
++ *
++ * (c) 1999, Multiple IO-APIC support, developed by
++ * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
++ * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
++ * further tested and cleaned up by Zach Brown <zab@redhat.com>
++ * and Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
++ * thanks to Eric Gilmore
++ * and Rolf G. Tews
++ * for testing these extensively
++ * Paul Diefenbaugh : Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/smp_lock.h>
++#include <linux/mc146818rtc.h>
++#include <linux/compiler.h>
++#include <linux/acpi.h>
++#include <linux/module.h>
++#include <linux/sysdev.h>
++
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/desc.h>
++#include <asm/timer.h>
++#include <asm/i8259.h>
++#include <asm/nmi.h>
++
++#include <mach_apic.h>
++
++#include "io_ports.h"
++
++#ifdef CONFIG_XEN
++
++#include <xen/interface/xen.h>
++#include <xen/interface/physdev.h>
++
++/* Fake i8259 */
++#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
++#define disable_8259A_irq(_irq) ((void)0)
++#define i8259A_irq_pending(_irq) (0)
++
++unsigned long io_apic_irqs;
++
++static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
++{
++ struct physdev_apic apic_op;
++ int ret;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
++ if (ret)
++ return ret;
++ return apic_op.value;
++}
++
++static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
++{
++ struct physdev_apic apic_op;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ apic_op.value = value;
++ HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
++}
++
++#define io_apic_read(a,r) xen_io_apic_read(a,r)
++#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
++
++#endif /* CONFIG_XEN */
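++
++/*
++ * Minimal sketch (not used elsewhere in this file) of how register accesses
++ * below are funnelled through io_apic_read()/io_apic_write(): under
++ * CONFIG_XEN these expand to the PHYSDEVOP_apic_read/PHYSDEVOP_apic_write
++ * hypercalls above, on native kernels to the usual MMIO accessors.  Callers
++ * would normally hold ioapic_lock around such reads.
++ */
++static inline unsigned int ioapic_redir_entries_sketch(unsigned int apic)
++{
++	union IO_APIC_reg_01 reg_01;
++
++	reg_01.raw = io_apic_read(apic, 1);	/* register 1: version / # entries */
++	return reg_01.bits.entries + 1;		/* redirection table size */
++}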
++
++int (*ioapic_renumber_irq)(int ioapic, int irq);
++atomic_t irq_mis_count;
++
++/* Where, if anywhere, the i8259 is connected in external int mode */
++static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
++
++static DEFINE_SPINLOCK(ioapic_lock);
++static DEFINE_SPINLOCK(vector_lock);
++
++int timer_over_8254 __initdata = 1;
++
++/*
++ * Is the SiS APIC rmw bug present ?
++ * -1 = don't know, 0 = no, 1 = yes
++ */
++int sis_apic_bug = -1;
++
++/*
++ * # of IRQ routing registers
++ */
++int nr_ioapic_registers[MAX_IO_APICS];
++
++int disable_timer_pin_1 __initdata;
++
++/*
++ * Rough estimation of how many shared IRQs there are, can
++ * be changed anytime.
++ */
++#define MAX_PLUS_SHARED_IRQS NR_IRQS
++#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
++
++/*
++ * This is performance-critical, we want to do it O(1)
++ *
++ * the indexing order of this array favors 1:1 mappings
++ * between pins and IRQs.
++ */
++
++static struct irq_pin_list {
++ int apic, pin, next;
++} irq_2_pin[PIN_MAP_SIZE];
++
++int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
++#ifdef CONFIG_PCI_MSI
++#define vector_to_irq(vector) \
++ (platform_legacy_irq(vector) ? vector : vector_irq[vector])
++#else
++#define vector_to_irq(vector) (vector)
++#endif
++
++/*
++ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
++ * shared ISA-space IRQs, so we have to support them. We are super
++ * fast in the common case, and fast for shared ISA-space IRQs.
++ */
++static void add_pin_to_irq(unsigned int irq, int apic, int pin)
++{
++ static int first_free_entry = NR_IRQS;
++ struct irq_pin_list *entry = irq_2_pin + irq;
++
++ while (entry->next)
++ entry = irq_2_pin + entry->next;
++
++ if (entry->pin != -1) {
++ entry->next = first_free_entry;
++ entry = irq_2_pin + entry->next;
++ if (++first_free_entry >= PIN_MAP_SIZE)
++ panic("io_apic.c: whoops");
++ }
++ entry->apic = apic;
++ entry->pin = pin;
++}
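++
++/*
++ * Illustrative sketch, not called anywhere: walking the irq_2_pin chain
++ * that add_pin_to_irq() above builds.  Each IRQ starts at irq_2_pin[irq]
++ * and follows ->next indices into the shared overflow slots of the array;
++ * the visit() callback is only part of this example.
++ */
++static inline void walk_irq_pins_sketch(unsigned int irq,
++					void (*visit)(int apic, int pin))
++{
++	struct irq_pin_list *entry = irq_2_pin + irq;
++
++	for (;;) {
++		if (entry->pin != -1)
++			visit(entry->apic, entry->pin);
++		if (!entry->next)
++			break;
++		entry = irq_2_pin + entry->next;
++	}
++}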
++
++#ifdef CONFIG_XEN
++#define clear_IO_APIC() ((void)0)
++#else
++/*
++ * Reroute an IRQ to a different pin.
++ */
++static void __init replace_pin_at_irq(unsigned int irq,
++ int oldapic, int oldpin,
++ int newapic, int newpin)
++{
++ struct irq_pin_list *entry = irq_2_pin + irq;
++
++ while (1) {
++ if (entry->apic == oldapic && entry->pin == oldpin) {
++ entry->apic = newapic;
++ entry->pin = newpin;
++ }
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++}
++
++static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
++{
++ struct irq_pin_list *entry = irq_2_pin + irq;
++ unsigned int pin, reg;
++
++ for (;;) {
++ pin = entry->pin;
++ if (pin == -1)
++ break;
++ reg = io_apic_read(entry->apic, 0x10 + pin*2);
++ reg &= ~disable;
++ reg |= enable;
++ io_apic_modify(entry->apic, 0x10 + pin*2, reg);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++}
++
++/* mask = 1 */
++static void __mask_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0x00010000, 0);
++}
++
++/* mask = 0 */
++static void __unmask_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0, 0x00010000);
++}
++
++/* mask = 1, trigger = 0 */
++static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000);
++}
++
++/* mask = 0, trigger = 1 */
++static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000);
++}
++
++static void mask_IO_APIC_irq (unsigned int irq)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __mask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void unmask_IO_APIC_irq (unsigned int irq)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __unmask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ /* Check delivery_mode to be sure we're not clearing an SMI pin */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ if (entry.delivery_mode == dest_SMI)
++ return;
++
++ /*
++ * Disable it in the IO-APIC irq-routing table:
++ */
++ memset(&entry, 0, sizeof(entry));
++ entry.mask = 1;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC (void)
++{
++ int apic, pin;
++
++ for (apic = 0; apic < nr_ioapics; apic++)
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
++ clear_IO_APIC_pin(apic, pin);
++}
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
++{
++ unsigned long flags;
++ int pin;
++ struct irq_pin_list *entry = irq_2_pin + irq;
++ unsigned int apicid_value;
++ cpumask_t tmp;
++
++ cpus_and(tmp, cpumask, cpu_online_map);
++ if (cpus_empty(tmp))
++ tmp = TARGET_CPUS;
++
++ cpus_and(cpumask, tmp, CPU_MASK_ALL);
++
++ apicid_value = cpu_mask_to_apicid(cpumask);
++ /* Prepare to do the io_apic_write */
++ apicid_value = apicid_value << 24;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ for (;;) {
++ pin = entry->pin;
++ if (pin == -1)
++ break;
++ io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++ set_irq_info(irq, cpumask);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++#if defined(CONFIG_IRQBALANCE)
++# include <asm/processor.h> /* kernel_thread() */
++# include <linux/kernel_stat.h> /* kstat */
++# include <linux/slab.h> /* kmalloc() */
++# include <linux/timer.h> /* time_after() */
++
++#ifdef CONFIG_BALANCED_IRQ_DEBUG
++# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
++# define Dprintk(x...) do { TDprintk(x); } while (0)
++# else
++# define TDprintk(x...)
++# define Dprintk(x...)
++# endif
++
++#define IRQBALANCE_CHECK_ARCH -999
++#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
++#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
++#define BALANCED_IRQ_MORE_DELTA (HZ/10)
++#define BALANCED_IRQ_LESS_DELTA (HZ)
++
++static int irqbalance_disabled __read_mostly = IRQBALANCE_CHECK_ARCH;
++static int physical_balance __read_mostly;
++static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;
++
++static struct irq_cpu_info {
++ unsigned long * last_irq;
++ unsigned long * irq_delta;
++ unsigned long irq;
++} irq_cpu_data[NR_CPUS];
++
++#define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq)
++#define LAST_CPU_IRQ(cpu,irq) (irq_cpu_data[cpu].last_irq[irq])
++#define IRQ_DELTA(cpu,irq) (irq_cpu_data[cpu].irq_delta[irq])
++
++#define IDLE_ENOUGH(cpu,now) \
++ (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
++
++#define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask)
++
++#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
++
++static cpumask_t balance_irq_affinity[NR_IRQS] = {
++ [0 ... NR_IRQS-1] = CPU_MASK_ALL
++};
++
++void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
++{
++ balance_irq_affinity[irq] = mask;
++}
++
++static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
++ unsigned long now, int direction)
++{
++ int search_idle = 1;
++ int cpu = curr_cpu;
++
++ goto inside;
++
++ do {
++ if (unlikely(cpu == curr_cpu))
++ search_idle = 0;
++inside:
++ if (direction == 1) {
++ cpu++;
++ if (cpu >= NR_CPUS)
++ cpu = 0;
++ } else {
++ cpu--;
++ if (cpu == -1)
++ cpu = NR_CPUS-1;
++ }
++ } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
++ (search_idle && !IDLE_ENOUGH(cpu,now)));
++
++ return cpu;
++}
++
++static inline void balance_irq(int cpu, int irq)
++{
++ unsigned long now = jiffies;
++ cpumask_t allowed_mask;
++ unsigned int new_cpu;
++
++ if (irqbalance_disabled)
++ return;
++
++ cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
++ new_cpu = move(cpu, allowed_mask, now, 1);
++ if (cpu != new_cpu) {
++ set_pending_irq(irq, cpumask_of_cpu(new_cpu));
++ }
++}
++
++static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
++{
++ int i, j;
++ Dprintk("Rotating IRQs among CPUs.\n");
++ for_each_online_cpu(i) {
++ for (j = 0; j < NR_IRQS; j++) {
++ if (!irq_desc[j].action)
++ continue;
++ /* Is it a significant load ? */
++ if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
++ useful_load_threshold)
++ continue;
++ balance_irq(i, j);
++ }
++ }
++ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
++ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
++ return;
++}
++
++static void do_irq_balance(void)
++{
++ int i, j;
++ unsigned long max_cpu_irq = 0, min_cpu_irq = (~0);
++ unsigned long move_this_load = 0;
++ int max_loaded = 0, min_loaded = 0;
++ int load;
++ unsigned long useful_load_threshold = balanced_irq_interval + 10;
++ int selected_irq;
++ int tmp_loaded, first_attempt = 1;
++ unsigned long tmp_cpu_irq;
++ unsigned long imbalance = 0;
++ cpumask_t allowed_mask, target_cpu_mask, tmp;
++
++ for_each_possible_cpu(i) {
++ int package_index;
++ CPU_IRQ(i) = 0;
++ if (!cpu_online(i))
++ continue;
++ package_index = CPU_TO_PACKAGEINDEX(i);
++ for (j = 0; j < NR_IRQS; j++) {
++ unsigned long value_now, delta;
++ /* Is this an active IRQ? */
++ if (!irq_desc[j].action)
++ continue;
++ if ( package_index == i )
++ IRQ_DELTA(package_index,j) = 0;
++ /* Determine the total count per processor per IRQ */
++ value_now = (unsigned long) kstat_cpu(i).irqs[j];
++
++ /* Determine the activity per processor per IRQ */
++ delta = value_now - LAST_CPU_IRQ(i,j);
++
++ /* Update last_cpu_irq[][] for the next time */
++ LAST_CPU_IRQ(i,j) = value_now;
++
++ /* Ignore IRQs whose rate is less than the clock */
++ if (delta < useful_load_threshold)
++ continue;
++ /* update the load for the processor or package total */
++ IRQ_DELTA(package_index,j) += delta;
++
++ /* Keep track of the higher numbered sibling as well */
++ if (i != package_index)
++ CPU_IRQ(i) += delta;
++ /*
++ * We have sibling A and sibling B in the package
++ *
++ * cpu_irq[A] = load for cpu A + load for cpu B
++ * cpu_irq[B] = load for cpu B
++ */
++ CPU_IRQ(package_index) += delta;
++ }
++ }
++ /* Find the least loaded processor package */
++ for_each_online_cpu(i) {
++ if (i != CPU_TO_PACKAGEINDEX(i))
++ continue;
++ if (min_cpu_irq > CPU_IRQ(i)) {
++ min_cpu_irq = CPU_IRQ(i);
++ min_loaded = i;
++ }
++ }
++ max_cpu_irq = ULONG_MAX;
++
++tryanothercpu:
++ /* Look for heaviest loaded processor.
++ * We may come back to get the next heaviest loaded processor.
++ * Skip processors with trivial loads.
++ */
++ tmp_cpu_irq = 0;
++ tmp_loaded = -1;
++ for_each_online_cpu(i) {
++ if (i != CPU_TO_PACKAGEINDEX(i))
++ continue;
++ if (max_cpu_irq <= CPU_IRQ(i))
++ continue;
++ if (tmp_cpu_irq < CPU_IRQ(i)) {
++ tmp_cpu_irq = CPU_IRQ(i);
++ tmp_loaded = i;
++ }
++ }
++
++ if (tmp_loaded == -1) {
++		/* With a small number of heavy interrupt sources, some of the
++		 * cpus end up loaded too much.  We use Ingo's original
++		 * approach to rotate them around.
++ */
++ if (!first_attempt && imbalance >= useful_load_threshold) {
++ rotate_irqs_among_cpus(useful_load_threshold);
++ return;
++ }
++ goto not_worth_the_effort;
++ }
++
++ first_attempt = 0; /* heaviest search */
++ max_cpu_irq = tmp_cpu_irq; /* load */
++ max_loaded = tmp_loaded; /* processor */
++ imbalance = (max_cpu_irq - min_cpu_irq) / 2;
++
++ Dprintk("max_loaded cpu = %d\n", max_loaded);
++ Dprintk("min_loaded cpu = %d\n", min_loaded);
++ Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq);
++ Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq);
++ Dprintk("load imbalance = %lu\n", imbalance);
++
++	/* if the imbalance is less than approx 10% of the max load,
++	 * we hit diminishing returns - quit
++ */
++ if (imbalance < (max_cpu_irq >> 3)) {
++ Dprintk("Imbalance too trivial\n");
++ goto not_worth_the_effort;
++ }
++
++tryanotherirq:
++ /* if we select an IRQ to move that can't go where we want, then
++ * see if there is another one to try.
++ */
++ move_this_load = 0;
++ selected_irq = -1;
++ for (j = 0; j < NR_IRQS; j++) {
++ /* Is this an active IRQ? */
++ if (!irq_desc[j].action)
++ continue;
++ if (imbalance <= IRQ_DELTA(max_loaded,j))
++ continue;
++ /* Try to find the IRQ that is closest to the imbalance
++ * without going over.
++ */
++ if (move_this_load < IRQ_DELTA(max_loaded,j)) {
++ move_this_load = IRQ_DELTA(max_loaded,j);
++ selected_irq = j;
++ }
++ }
++ if (selected_irq == -1) {
++ goto tryanothercpu;
++ }
++
++ imbalance = move_this_load;
++
++	/* For the physical_balance case, we accumulated both load
++	 * values in one of the siblings' cpu_irq[], so that the same
++	 * code can be used for physical and logical processors as much
++	 * as possible.
++ *
++ * NOTE: the cpu_irq[] array holds the sum of the load for
++ * sibling A and sibling B in the slot for the lowest numbered
++ * sibling (A), _AND_ the load for sibling B in the slot for
++ * the higher numbered sibling.
++ *
++ * We seek the least loaded sibling by making the comparison
++ * (A+B)/2 vs B
++ */
++ load = CPU_IRQ(min_loaded) >> 1;
++ for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
++ if (load > CPU_IRQ(j)) {
++ /* This won't change cpu_sibling_map[min_loaded] */
++ load = CPU_IRQ(j);
++ min_loaded = j;
++ }
++ }
++
++ cpus_and(allowed_mask,
++ cpu_online_map,
++ balance_irq_affinity[selected_irq]);
++ target_cpu_mask = cpumask_of_cpu(min_loaded);
++ cpus_and(tmp, target_cpu_mask, allowed_mask);
++
++ if (!cpus_empty(tmp)) {
++
++ Dprintk("irq = %d moved to cpu = %d\n",
++ selected_irq, min_loaded);
++ /* mark for change destination */
++ set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
++
++ /* Since we made a change, come back sooner to
++ * check for more variation.
++ */
++ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
++ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
++ return;
++ }
++ goto tryanotherirq;
++
++not_worth_the_effort:
++ /*
++ * if we did not find an IRQ to move, then adjust the time interval
++ * upward
++ */
++ balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
++ balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
++ Dprintk("IRQ worth rotating not found\n");
++ return;
++}
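++
++/*
++ * Worked example for the sibling comparison in do_irq_balance() above
++ * (numbers are illustrative only): with hyperthreading, CPU_IRQ() of the
++ * lower-numbered sibling A holds the package total, so if CPU_IRQ(A) = 1000
++ * (load A + load B) and CPU_IRQ(B) = 300, the loop compares
++ * (A + B) / 2 = 500 against B = 300 and retargets the IRQ to sibling B,
++ * the less loaded of the two.
++ */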
++
++static int balanced_irq(void *unused)
++{
++ int i;
++ unsigned long prev_balance_time = jiffies;
++ long time_remaining = balanced_irq_interval;
++
++ daemonize("kirqd");
++
++ /* push everything to CPU 0 to give us a starting point. */
++ for (i = 0 ; i < NR_IRQS ; i++) {
++ irq_desc[i].pending_mask = cpumask_of_cpu(0);
++ set_pending_irq(i, cpumask_of_cpu(0));
++ }
++
++ for ( ; ; ) {
++ time_remaining = schedule_timeout_interruptible(time_remaining);
++ try_to_freeze();
++ if (time_after(jiffies,
++ prev_balance_time+balanced_irq_interval)) {
++ preempt_disable();
++ do_irq_balance();
++ prev_balance_time = jiffies;
++ time_remaining = balanced_irq_interval;
++ preempt_enable();
++ }
++ }
++ return 0;
++}
++
++static int __init balanced_irq_init(void)
++{
++ int i;
++ struct cpuinfo_x86 *c;
++ cpumask_t tmp;
++
++ cpus_shift_right(tmp, cpu_online_map, 2);
++ c = &boot_cpu_data;
++ /* When not overwritten by the command line ask subarchitecture. */
++ if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
++ irqbalance_disabled = NO_BALANCE_IRQ;
++ if (irqbalance_disabled)
++ return 0;
++
++ /* disable irqbalance completely if there is only one processor online */
++ if (num_online_cpus() < 2) {
++ irqbalance_disabled = 1;
++ return 0;
++ }
++ /*
++ * Enable physical balance only if more than 1 physical processor
++ * is present
++ */
++ if (smp_num_siblings > 1 && !cpus_empty(tmp))
++ physical_balance = 1;
++
++ for_each_online_cpu(i) {
++ irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
++ irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
++ if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
++ printk(KERN_ERR "balanced_irq_init: out of memory");
++ goto failed;
++ }
++ memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS);
++ memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS);
++ }
++
++ printk(KERN_INFO "Starting balanced_irq\n");
++ if (kernel_thread(balanced_irq, NULL, CLONE_KERNEL) >= 0)
++ return 0;
++ else
++ printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
++failed:
++ for_each_possible_cpu(i) {
++ kfree(irq_cpu_data[i].irq_delta);
++ irq_cpu_data[i].irq_delta = NULL;
++ kfree(irq_cpu_data[i].last_irq);
++ irq_cpu_data[i].last_irq = NULL;
++ }
++ return 0;
++}
++
++int __init irqbalance_disable(char *str)
++{
++ irqbalance_disabled = 1;
++ return 1;
++}
++
++__setup("noirqbalance", irqbalance_disable);
++
++late_initcall(balanced_irq_init);
++#endif /* CONFIG_IRQBALANCE */
++#endif /* CONFIG_SMP */
++#endif
++
++#ifndef CONFIG_SMP
++void fastcall send_IPI_self(int vector)
++{
++#ifndef CONFIG_XEN
++ unsigned int cfg;
++
++ /*
++ * Wait for idle.
++ */
++ apic_wait_icr_idle();
++ cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
++ /*
++ * Send the IPI. The write to APIC_ICR fires this off.
++ */
++ apic_write_around(APIC_ICR, cfg);
++#endif
++}
++#endif /* !CONFIG_SMP */
++
++
++/*
++ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
++ * specific CPU-side IRQs.
++ */
++
++#define MAX_PIRQS 8
++static int pirq_entries [MAX_PIRQS];
++static int pirqs_enabled;
++int skip_ioapic_setup;
++
++static int __init ioapic_setup(char *str)
++{
++ skip_ioapic_setup = 1;
++ return 1;
++}
++
++__setup("noapic", ioapic_setup);
++
++static int __init ioapic_pirq_setup(char *str)
++{
++ int i, max;
++ int ints[MAX_PIRQS+1];
++
++ get_options(str, ARRAY_SIZE(ints), ints);
++
++ for (i = 0; i < MAX_PIRQS; i++)
++ pirq_entries[i] = -1;
++
++ pirqs_enabled = 1;
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "PIRQ redirection, working around broken MP-BIOS.\n");
++ max = MAX_PIRQS;
++ if (ints[0] < MAX_PIRQS)
++ max = ints[0];
++
++ for (i = 0; i < max; i++) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
++ /*
++ * PIRQs are mapped upside down, usually.
++ */
++ pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
++ }
++ return 1;
++}
++
++__setup("pirq=", ioapic_pirq_setup);
++
++/*
++ * Find the IRQ entry number of a certain pin.
++ */
++static int find_irq_entry(int apic, int pin, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++)
++ if (mp_irqs[i].mpc_irqtype == type &&
++ (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
++ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
++ mp_irqs[i].mpc_dstirq == pin)
++ return i;
++
++ return -1;
++}
++
++/*
++ * Find the pin to which IRQ[irq] (ISA) is connected
++ */
++static int __init find_isa_irq_pin(int irq, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_NEC98
++ ) &&
++ (mp_irqs[i].mpc_irqtype == type) &&
++ (mp_irqs[i].mpc_srcbusirq == irq))
++
++ return mp_irqs[i].mpc_dstirq;
++ }
++ return -1;
++}
++
++static int __init find_isa_irq_apic(int irq, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_NEC98
++ ) &&
++ (mp_irqs[i].mpc_irqtype == type) &&
++ (mp_irqs[i].mpc_srcbusirq == irq))
++ break;
++ }
++ if (i < mp_irq_entries) {
++ int apic;
++ for(apic = 0; apic < nr_ioapics; apic++) {
++ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
++ return apic;
++ }
++ }
++
++ return -1;
++}
++
++/*
++ * Find a specific PCI IRQ entry.
++ * Not an __init, possibly needed by modules
++ */
++static int pin_2_irq(int idx, int apic, int pin);
++
++int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
++{
++ int apic, i, best_guess = -1;
++
++ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
++ "slot:%d, pin:%d.\n", bus, slot, pin);
++ if (mp_bus_id_to_pci_bus[bus] == -1) {
++ printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
++ return -1;
++ }
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ for (apic = 0; apic < nr_ioapics; apic++)
++ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
++ mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
++ break;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
++ !mp_irqs[i].mpc_irqtype &&
++ (bus == lbus) &&
++ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
++ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
++
++ if (!(apic || IO_APIC_IRQ(irq)))
++ continue;
++
++ if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
++ return irq;
++ /*
++ * Use the first all-but-pin matching entry as a
++ * best-guess fuzzy result for broken mptables.
++ */
++ if (best_guess < 0)
++ best_guess = irq;
++ }
++ }
++ return best_guess;
++}
++EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
++
++/*
++ * This function is currently only a helper for the i386 smp boot process,
++ * where we need to reprogram the ioredtbls to cater for the cpus which have
++ * come online, so the mask in all cases should simply be TARGET_CPUS.
++ */
++#ifdef CONFIG_SMP
++#ifndef CONFIG_XEN
++void __init setup_ioapic_dest(void)
++{
++ int pin, ioapic, irq, irq_entry;
++
++ if (skip_ioapic_setup == 1)
++ return;
++
++ for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
++ for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
++ irq_entry = find_irq_entry(ioapic, pin, mp_INT);
++ if (irq_entry == -1)
++ continue;
++ irq = pin_2_irq(irq_entry, ioapic, pin);
++ set_ioapic_affinity_irq(irq, TARGET_CPUS);
++ }
++
++ }
++}
++#endif /* !CONFIG_XEN */
++#endif
++
++/*
++ * EISA Edge/Level control register, ELCR
++ */
++static int EISA_ELCR(unsigned int irq)
++{
++ if (irq < 16) {
++ unsigned int port = 0x4d0 + (irq >> 3);
++ return (inb(port) >> (irq & 7)) & 1;
++ }
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "Broken MPtable reports ISA irq %d\n", irq);
++ return 0;
++}
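++
++/*
++ * Example of the ELCR addressing above (IRQ number is illustrative): for
++ * ISA IRQ 10, port = 0x4d0 + (10 >> 3) = 0x4d1 and bit = 10 & 7 = 2, so a
++ * set bit 2 in the second ELCR register marks IRQ 10 as level triggered.
++ */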
++
++/* EISA interrupts are always polarity zero and can be edge or level
++ * trigger depending on the ELCR value. If an interrupt is listed as
++ * EISA conforming in the MP table, that means its trigger type must
++ * be read in from the ELCR */
++
++#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
++#define default_EISA_polarity(idx) (0)
++
++/* ISA interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_ISA_trigger(idx) (0)
++#define default_ISA_polarity(idx) (0)
++
++/* PCI interrupts are always polarity one level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_PCI_trigger(idx) (1)
++#define default_PCI_polarity(idx) (1)
++
++/* MCA interrupts are always polarity zero level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_MCA_trigger(idx) (1)
++#define default_MCA_polarity(idx) (0)
++
++/* NEC98 interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_NEC98_trigger(idx) (0)
++#define default_NEC98_polarity(idx) (0)
++
++static int __init MPBIOS_polarity(int idx)
++{
++ int bus = mp_irqs[idx].mpc_srcbus;
++ int polarity;
++
++ /*
++ * Determine IRQ line polarity (high active or low active):
++ */
++ switch (mp_irqs[idx].mpc_irqflag & 3)
++ {
++ case 0: /* conforms, ie. bus-type dependent polarity */
++ {
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ {
++ polarity = default_ISA_polarity(idx);
++ break;
++ }
++ case MP_BUS_EISA: /* EISA pin */
++ {
++ polarity = default_EISA_polarity(idx);
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ polarity = default_PCI_polarity(idx);
++ break;
++ }
++ case MP_BUS_MCA: /* MCA pin */
++ {
++ polarity = default_MCA_polarity(idx);
++ break;
++ }
++ case MP_BUS_NEC98: /* NEC 98 pin */
++ {
++ polarity = default_NEC98_polarity(idx);
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ }
++ break;
++ }
++ case 1: /* high active */
++ {
++ polarity = 0;
++ break;
++ }
++ case 2: /* reserved */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ case 3: /* low active */
++ {
++ polarity = 1;
++ break;
++ }
++ default: /* invalid */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ }
++ return polarity;
++}
++
++static int MPBIOS_trigger(int idx)
++{
++ int bus = mp_irqs[idx].mpc_srcbus;
++ int trigger;
++
++ /*
++ * Determine IRQ trigger mode (edge or level sensitive):
++ */
++ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
++ {
++ case 0: /* conforms, ie. bus-type dependent */
++ {
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ {
++ trigger = default_ISA_trigger(idx);
++ break;
++ }
++ case MP_BUS_EISA: /* EISA pin */
++ {
++ trigger = default_EISA_trigger(idx);
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ trigger = default_PCI_trigger(idx);
++ break;
++ }
++ case MP_BUS_MCA: /* MCA pin */
++ {
++ trigger = default_MCA_trigger(idx);
++ break;
++ }
++ case MP_BUS_NEC98: /* NEC 98 pin */
++ {
++ trigger = default_NEC98_trigger(idx);
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 1;
++ break;
++ }
++ }
++ break;
++ }
++ case 1: /* edge */
++ {
++ trigger = 0;
++ break;
++ }
++ case 2: /* reserved */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 1;
++ break;
++ }
++ case 3: /* level */
++ {
++ trigger = 1;
++ break;
++ }
++ default: /* invalid */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 0;
++ break;
++ }
++ }
++ return trigger;
++}
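++
++/*
++ * Worked example for the mpc_irqflag decoding above (value is illustrative):
++ * for mpc_irqflag = 0x0d, the polarity bits are 0x0d & 3 = 1 (high active,
++ * so polarity 0) and the trigger bits are (0x0d >> 2) & 3 = 3 (level, so
++ * trigger 1).
++ */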
++
++static inline int irq_polarity(int idx)
++{
++ return MPBIOS_polarity(idx);
++}
++
++static inline int irq_trigger(int idx)
++{
++ return MPBIOS_trigger(idx);
++}
++
++static int pin_2_irq(int idx, int apic, int pin)
++{
++ int irq, i;
++ int bus = mp_irqs[idx].mpc_srcbus;
++
++ /*
++ * Debugging check, we are in big trouble if this message pops up!
++ */
++ if (mp_irqs[idx].mpc_dstirq != pin)
++ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
++
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ case MP_BUS_EISA:
++ case MP_BUS_MCA:
++ case MP_BUS_NEC98:
++ {
++ irq = mp_irqs[idx].mpc_srcbusirq;
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ /*
++ * PCI IRQs are mapped in order
++ */
++ i = irq = 0;
++ while (i < apic)
++ irq += nr_ioapic_registers[i++];
++ irq += pin;
++
++ /*
++ * For MPS mode, so far only needed by ES7000 platform
++ */
++ if (ioapic_renumber_irq)
++ irq = ioapic_renumber_irq(apic, irq);
++
++ break;
++ }
++ default:
++ {
++ printk(KERN_ERR "unknown bus type %d.\n",bus);
++ irq = 0;
++ break;
++ }
++ }
++
++ /*
++ * PCI IRQ command line redirection. Yes, limits are hardcoded.
++ */
++ if ((pin >= 16) && (pin <= 23)) {
++ if (pirq_entries[pin-16] != -1) {
++ if (!pirq_entries[pin-16]) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ "disabling PIRQ%d\n", pin-16);
++ } else {
++ irq = pirq_entries[pin-16];
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ "using PIRQ%d -> IRQ %d\n",
++ pin-16, irq);
++ }
++ }
++ }
++ return irq;
++}
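++
++/*
++ * Example of the PCI accumulation in pin_2_irq() above (register counts are
++ * illustrative): with two IO-APICs of 24 pins each, apic 1 / pin 3 yields
++ * irq = 24 + 3 = 27; the "pirq=" command line override applied above only
++ * affects pins 16-23.
++ */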
++
++static inline int IO_APIC_irq_trigger(int irq)
++{
++ int apic, idx, pin;
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ idx = find_irq_entry(apic,pin,mp_INT);
++ if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
++ return irq_trigger(idx);
++ }
++ }
++ /*
++ * nonexistent IRQs are edge default
++ */
++ return 0;
++}
++
++/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
++u8 irq_vector[NR_IRQ_VECTORS] __read_mostly; /* = { FIRST_DEVICE_VECTOR , 0 }; */
++
++int assign_irq_vector(int irq)
++{
++ unsigned long flags;
++ int vector;
++ struct physdev_irq irq_op;
++
++ BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
++
++ spin_lock_irqsave(&vector_lock, flags);
++
++ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
++ spin_unlock_irqrestore(&vector_lock, flags);
++ return IO_APIC_VECTOR(irq);
++ }
++
++ irq_op.irq = irq;
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
++ spin_unlock_irqrestore(&vector_lock, flags);
++ return -ENOSPC;
++ }
++
++ vector = irq_op.vector;
++ vector_irq[vector] = irq;
++ if (irq != AUTO_ASSIGN)
++ IO_APIC_VECTOR(irq) = vector;
++
++ spin_unlock_irqrestore(&vector_lock, flags);
++
++ return vector;
++}
++
++#ifndef CONFIG_XEN
++static struct hw_interrupt_type ioapic_level_type;
++static struct hw_interrupt_type ioapic_edge_type;
++
++#define IOAPIC_AUTO -1
++#define IOAPIC_EDGE 0
++#define IOAPIC_LEVEL 1
++
++static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
++{
++ unsigned idx;
++
++ idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
++
++ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
++ trigger == IOAPIC_LEVEL)
++ irq_desc[idx].chip = &ioapic_level_type;
++ else
++ irq_desc[idx].chip = &ioapic_edge_type;
++ set_intr_gate(vector, interrupt[idx]);
++}
++#else
++#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
++#endif
++
++static void __init setup_IO_APIC_irqs(void)
++{
++ struct IO_APIC_route_entry entry;
++ int apic, pin, idx, irq, first_notcon = 1, vector;
++ unsigned long flags;
++
++ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++
++ /*
++ * add it to the IO-APIC irq-routing table:
++ */
++ memset(&entry,0,sizeof(entry));
++
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* enable IRQ */
++ entry.dest.logical.logical_dest =
++ cpu_mask_to_apicid(TARGET_CPUS);
++
++ idx = find_irq_entry(apic,pin,mp_INT);
++ if (idx == -1) {
++ if (first_notcon) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ " IO-APIC (apicid-pin) %d-%d",
++ mp_ioapics[apic].mpc_apicid,
++ pin);
++ first_notcon = 0;
++ } else
++ apic_printk(APIC_VERBOSE, ", %d-%d",
++ mp_ioapics[apic].mpc_apicid, pin);
++ continue;
++ }
++
++ entry.trigger = irq_trigger(idx);
++ entry.polarity = irq_polarity(idx);
++
++ if (irq_trigger(idx)) {
++ entry.trigger = 1;
++ entry.mask = 1;
++ }
++
++ irq = pin_2_irq(idx, apic, pin);
++ /*
++ * skip adding the timer int on secondary nodes, which causes
++ * a small but painful rift in the time-space continuum
++ */
++ if (multi_timer_check(apic, irq))
++ continue;
++ else
++ add_pin_to_irq(irq, apic, pin);
++
++ if (/*!apic &&*/ !IO_APIC_IRQ(irq))
++ continue;
++
++ if (IO_APIC_IRQ(irq)) {
++ vector = assign_irq_vector(irq);
++ entry.vector = vector;
++ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
++
++ if (!apic && (irq < 16))
++ disable_8259A_irq(irq);
++ }
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++ set_native_irq_info(irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ }
++ }
++
++ if (!first_notcon)
++ apic_printk(APIC_VERBOSE, " not connected.\n");
++}
++
++/*
++ * Set up the 8259A-master output pin:
++ */
++#ifndef CONFIG_XEN
++static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ memset(&entry,0,sizeof(entry));
++
++ disable_8259A_irq(0);
++
++ /* mask LVT0 */
++ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++
++ /*
++ * We use logical delivery to get the timer IRQ
++ * to the first CPU.
++ */
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* unmask IRQ now */
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.polarity = 0;
++ entry.trigger = 0;
++ entry.vector = vector;
++
++ /*
++ * The timer IRQ doesn't have to know that behind the
++ * scene we have a 8259A-master in AEOI mode ...
++ */
++ irq_desc[0].chip = &ioapic_edge_type;
++
++ /*
++ * Add it to the IO-APIC irq-routing table:
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ enable_8259A_irq(0);
++}
++
++static inline void UNEXPECTED_IO_APIC(void)
++{
++}
++
++void __init print_IO_APIC(void)
++{
++ int apic, i;
++ union IO_APIC_reg_00 reg_00;
++ union IO_APIC_reg_01 reg_01;
++ union IO_APIC_reg_02 reg_02;
++ union IO_APIC_reg_03 reg_03;
++ unsigned long flags;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
++ for (i = 0; i < nr_ioapics; i++)
++ printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
++ mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
++
++ /*
++ * We are a bit conservative about what we expect. We have to
++ * know about every hardware change ASAP.
++ */
++ printk(KERN_INFO "testing the IO APIC.......................\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ reg_01.raw = io_apic_read(apic, 1);
++ if (reg_01.bits.version >= 0x10)
++ reg_02.raw = io_apic_read(apic, 2);
++ if (reg_01.bits.version >= 0x20)
++ reg_03.raw = io_apic_read(apic, 3);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
++ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
++ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
++ printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
++ printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
++ if (reg_00.bits.ID >= get_physical_broadcast())
++ UNEXPECTED_IO_APIC();
++ if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++
++ printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
++ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
++ if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
++ (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
++ (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
++ (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
++ (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
++ (reg_01.bits.entries != 0x2E) &&
++ (reg_01.bits.entries != 0x3F)
++ )
++ UNEXPECTED_IO_APIC();
++
++ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
++ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
++ if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
++ (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
++ (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
++ (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
++ (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
++ )
++ UNEXPECTED_IO_APIC();
++ if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++
++ /*
++ * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
++ * but the value of reg_02 is read as the previous read register
++ * value, so ignore it if reg_02 == reg_01.
++ */
++ if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
++ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
++ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
++ if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++ }
++
++ /*
++ * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
++ * or reg_03, but the value of reg_0[23] is read as the previous read
++ * register value, so ignore it if reg_03 == reg_0[12].
++ */
++ if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
++ reg_03.raw != reg_01.raw) {
++ printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
++ printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
++ if (reg_03.bits.__reserved_1)
++ UNEXPECTED_IO_APIC();
++ }
++
++ printk(KERN_DEBUG ".... IRQ redirection table:\n");
++
++ printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
++ " Stat Dest Deli Vect: \n");
++
++ for (i = 0; i <= reg_01.bits.entries; i++) {
++ struct IO_APIC_route_entry entry;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
++ *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ printk(KERN_DEBUG " %02x %03X %02X ",
++ i,
++ entry.dest.logical.logical_dest,
++ entry.dest.physical.physical_dest
++ );
++
++ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
++ entry.mask,
++ entry.trigger,
++ entry.irr,
++ entry.polarity,
++ entry.delivery_status,
++ entry.dest_mode,
++ entry.delivery_mode,
++ entry.vector
++ );
++ }
++ }
++ if (use_pci_vector())
++ printk(KERN_INFO "Using vector-based indexing\n");
++ printk(KERN_DEBUG "IRQ to pin mappings:\n");
++ for (i = 0; i < NR_IRQS; i++) {
++ struct irq_pin_list *entry = irq_2_pin + i;
++ if (entry->pin < 0)
++ continue;
++ if (use_pci_vector() && !platform_legacy_irq(i))
++ printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
++ else
++ printk(KERN_DEBUG "IRQ%d ", i);
++ for (;;) {
++ printk("-> %d:%d", entry->apic, entry->pin);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++ printk("\n");
++ }
++
++ printk(KERN_INFO ".................................... done.\n");
++
++ return;
++}
++
++#if 0
++
++static void print_APIC_bitfield (int base)
++{
++ unsigned int v;
++ int i, j;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
++ for (i = 0; i < 8; i++) {
++ v = apic_read(base + i*0x10);
++ for (j = 0; j < 32; j++) {
++ if (v & (1<<j))
++ printk("1");
++ else
++ printk("0");
++ }
++ printk("\n");
++ }
++}
++
++void /*__init*/ print_local_APIC(void * dummy)
++{
++ unsigned int v, ver, maxlvt;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
++ smp_processor_id(), hard_smp_processor_id());
++ v = apic_read(APIC_ID);
++ printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
++ v = apic_read(APIC_LVR);
++ printk(KERN_INFO "... APIC VERSION: %08x\n", v);
++ ver = GET_APIC_VERSION(v);
++ maxlvt = get_maxlvt();
++
++ v = apic_read(APIC_TASKPRI);
++ printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
++
++ if (APIC_INTEGRATED(ver)) { /* !82489DX */
++ v = apic_read(APIC_ARBPRI);
++ printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
++ v & APIC_ARBPRI_MASK);
++ v = apic_read(APIC_PROCPRI);
++ printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
++ }
++
++ v = apic_read(APIC_EOI);
++ printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
++ v = apic_read(APIC_RRR);
++ printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
++ v = apic_read(APIC_LDR);
++ printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
++ v = apic_read(APIC_DFR);
++ printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
++ v = apic_read(APIC_SPIV);
++ printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
++
++ printk(KERN_DEBUG "... APIC ISR field:\n");
++ print_APIC_bitfield(APIC_ISR);
++ printk(KERN_DEBUG "... APIC TMR field:\n");
++ print_APIC_bitfield(APIC_TMR);
++ printk(KERN_DEBUG "... APIC IRR field:\n");
++ print_APIC_bitfield(APIC_IRR);
++
++ if (APIC_INTEGRATED(ver)) { /* !82489DX */
++ if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
++ apic_write(APIC_ESR, 0);
++ v = apic_read(APIC_ESR);
++ printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
++ }
++
++ v = apic_read(APIC_ICR);
++ printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
++ v = apic_read(APIC_ICR2);
++ printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
++
++ v = apic_read(APIC_LVTT);
++ printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
++
++ if (maxlvt > 3) { /* PC is LVT#4. */
++ v = apic_read(APIC_LVTPC);
++ printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
++ }
++ v = apic_read(APIC_LVT0);
++ printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
++ v = apic_read(APIC_LVT1);
++ printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
++
++ if (maxlvt > 2) { /* ERR is LVT#3. */
++ v = apic_read(APIC_LVTERR);
++ printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
++ }
++
++ v = apic_read(APIC_TMICT);
++ printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
++ v = apic_read(APIC_TMCCT);
++ printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
++ v = apic_read(APIC_TDCR);
++ printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
++ printk("\n");
++}
++
++void print_all_local_APICs (void)
++{
++ on_each_cpu(print_local_APIC, NULL, 1, 1);
++}
++
++void /*__init*/ print_PIC(void)
++{
++ unsigned int v;
++ unsigned long flags;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "\nprinting PIC contents\n");
++
++ spin_lock_irqsave(&i8259A_lock, flags);
++
++ v = inb(0xa1) << 8 | inb(0x21);
++ printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
++
++ v = inb(0xa0) << 8 | inb(0x20);
++ printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
++
++ outb(0x0b,0xa0);
++ outb(0x0b,0x20);
++ v = inb(0xa0) << 8 | inb(0x20);
++ outb(0x0a,0xa0);
++ outb(0x0a,0x20);
++
++ spin_unlock_irqrestore(&i8259A_lock, flags);
++
++ printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
++
++ v = inb(0x4d1) << 8 | inb(0x4d0);
++ printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
++}
++
++#endif /* 0 */
++
++#else
++void __init print_IO_APIC(void) { }
++#endif /* !CONFIG_XEN */
++
++static void __init enable_IO_APIC(void)
++{
++ union IO_APIC_reg_01 reg_01;
++ int i8259_apic, i8259_pin;
++ int i, apic;
++ unsigned long flags;
++
++ for (i = 0; i < PIN_MAP_SIZE; i++) {
++ irq_2_pin[i].pin = -1;
++ irq_2_pin[i].next = 0;
++ }
++ if (!pirqs_enabled)
++ for (i = 0; i < MAX_PIRQS; i++)
++ pirq_entries[i] = -1;
++
++ /*
++ * The number of IO-APIC IRQ registers (== #pins):
++ */
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(apic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
++ }
++ for(apic = 0; apic < nr_ioapics; apic++) {
++ int pin;
++ /* See if any of the pins is in ExtINT mode */
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ struct IO_APIC_route_entry entry;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++
++ /* If the interrupt line is enabled and in ExtInt mode
++ * I have found the pin where the i8259 is connected.
++ */
++ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
++ ioapic_i8259.apic = apic;
++ ioapic_i8259.pin = pin;
++ goto found_i8259;
++ }
++ }
++ }
++ found_i8259:
++	/* Look to see if the MP table has reported the ExtINT */
++	/* If we could not find the appropriate pin by looking at the ioapic,
++	 * the i8259 probably is not connected to the ioapic, but give the
++	 * mptable a chance anyway.
++ */
++ i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
++ i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
++ /* Trust the MP table if nothing is setup in the hardware */
++ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
++ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
++ ioapic_i8259.pin = i8259_pin;
++ ioapic_i8259.apic = i8259_apic;
++ }
++ /* Complain if the MP table and the hardware disagree */
++ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
++ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
++ {
++ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
++ }
++
++ /*
++ * Do not trust the IO-APIC being empty at bootup
++ */
++ clear_IO_APIC();
++}
++
++/*
++ * Not an __init, needed by the reboot code
++ */
++void disable_IO_APIC(void)
++{
++ /*
++ * Clear the IO-APIC before rebooting:
++ */
++ clear_IO_APIC();
++
++#ifndef CONFIG_XEN
++ /*
++ * If the i8259 is routed through an IOAPIC
++ * Put that IOAPIC in virtual wire mode
++ * so legacy interrupts can be delivered.
++ */
++ if (ioapic_i8259.pin != -1) {
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ memset(&entry, 0, sizeof(entry));
++ entry.mask = 0; /* Enabled */
++ entry.trigger = 0; /* Edge */
++ entry.irr = 0;
++ entry.polarity = 0; /* High */
++ entry.delivery_status = 0;
++ entry.dest_mode = 0; /* Physical */
++ entry.delivery_mode = dest_ExtINT; /* ExtInt */
++ entry.vector = 0;
++ entry.dest.physical.physical_dest =
++ GET_APIC_ID(apic_read(APIC_ID));
++
++ /*
++ * Add it to the IO-APIC irq-routing table:
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
++ *(((int *)&entry)+1));
++ io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
++ *(((int *)&entry)+0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ }
++ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
++#endif
++}
++
++/*
++ * function to set the IO-APIC physical IDs based on the
++ * values stored in the MPC table.
++ *
++ * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
++ */
++
++#if !defined(CONFIG_XEN) && !defined(CONFIG_X86_NUMAQ)
++static void __init setup_ioapic_ids_from_mpc(void)
++{
++ union IO_APIC_reg_00 reg_00;
++ physid_mask_t phys_id_present_map;
++ int apic;
++ int i;
++ unsigned char old_id;
++ unsigned long flags;
++
++ /*
++ * Don't check I/O APIC IDs for xAPIC systems. They have
++ * no meaning without the serial APIC bus.
++ */
++ if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
++ return;
++ /*
++ * This is broken; anything with a real cpu count has to
++ * circumvent this idiocy regardless.
++ */
++ phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
++
++ /*
++ * Set the IOAPIC ID to the value stored in the MPC table.
++ */
++ for (apic = 0; apic < nr_ioapics; apic++) {
++
++ /* Read the register 0 value */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ old_id = mp_ioapics[apic].mpc_apicid;
++
++ if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
++ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
++ apic, mp_ioapics[apic].mpc_apicid);
++ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
++ reg_00.bits.ID);
++ mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
++ }
++
++ /*
++ * Sanity check, is the ID really free? Every APIC in a
++ * system must have a unique ID or we get lots of nice
++ * 'stuck on smp_invalidate_needed IPI wait' messages.
++ */
++ if (check_apicid_used(phys_id_present_map,
++ mp_ioapics[apic].mpc_apicid)) {
++ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
++ apic, mp_ioapics[apic].mpc_apicid);
++ for (i = 0; i < get_physical_broadcast(); i++)
++ if (!physid_isset(i, phys_id_present_map))
++ break;
++ if (i >= get_physical_broadcast())
++ panic("Max APIC ID exceeded!\n");
++ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
++ i);
++ physid_set(i, phys_id_present_map);
++ mp_ioapics[apic].mpc_apicid = i;
++ } else {
++ physid_mask_t tmp;
++ tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
++ apic_printk(APIC_VERBOSE, "Setting %d in the "
++ "phys_id_present_map\n",
++ mp_ioapics[apic].mpc_apicid);
++ physids_or(phys_id_present_map, phys_id_present_map, tmp);
++ }
++
++
++ /*
++ * We need to adjust the IRQ routing table
++ * if the ID changed.
++ */
++ if (old_id != mp_ioapics[apic].mpc_apicid)
++ for (i = 0; i < mp_irq_entries; i++)
++ if (mp_irqs[i].mpc_dstapic == old_id)
++ mp_irqs[i].mpc_dstapic
++ = mp_ioapics[apic].mpc_apicid;
++
++ /*
++ * Read the right value from the MPC table and
++ * write it into the ID register.
++ */
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "...changing IO-APIC physical APIC ID to %d ...",
++ mp_ioapics[apic].mpc_apicid);
++
++ reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0, reg_00.raw);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ /*
++ * Sanity check
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
++ printk("could not set ID!\n");
++ else
++ apic_printk(APIC_VERBOSE, " ok.\n");
++ }
++}
++#else
++static void __init setup_ioapic_ids_from_mpc(void) { }
++#endif
++
++#ifndef CONFIG_XEN
++/*
++ * There is a nasty bug in some older SMP boards, their mptable lies
++ * about the timer IRQ. We do the following to work around the situation:
++ *
++ * - timer IRQ defaults to IO-APIC IRQ
++ * - if this function detects that timer IRQs are defunct, then we fall
++ * back to ISA timer IRQs
++ */
++static int __init timer_irq_works(void)
++{
++ unsigned long t1 = jiffies;
++
++ local_irq_enable();
++ /* Let ten ticks pass... */
++ mdelay((10 * 1000) / HZ);
++
++ /*
++ * Expect a few ticks at least, to be sure some possible
++	 * glue logic does not lock up after the first one or two
++	 * ticks in a non-ExtINT mode. Also the local APIC
++ * might have cached one ExtINT interrupt. Finally, at
++ * least one tick may be lost due to delays.
++ */
++ if (jiffies - t1 > 4)
++ return 1;
++
++ return 0;
++}
++
++/*
++ * In the SMP+IOAPIC case it might happen that there is an unspecified
++ * number of pending IRQ events left unhandled. These cases are very rare,
++ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
++ * better to do it this way as thus we do not have to be aware of
++ * 'pending' interrupts in the IRQ path, except at this point.
++ */
++/*
++ * Edge triggered needs to resend any interrupt
++ * that was delayed but this is now handled in the device
++ * independent code.
++ */
++
++/*
++ * Starting up an edge-triggered IO-APIC interrupt is
++ * nasty - we need to make sure that we get the edge.
++ * If it is already asserted for some reason, we need to
++ * return 1 to indicate that it was pending.
++ *
++ * This is not complete - we should be able to fake
++ * an edge even if it isn't on the 8259A...
++ */
++static unsigned int startup_edge_ioapic_irq(unsigned int irq)
++{
++ int was_pending = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ if (irq < 16) {
++ disable_8259A_irq(irq);
++ if (i8259A_irq_pending(irq))
++ was_pending = 1;
++ }
++ __unmask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return was_pending;
++}
++
++/*
++ * Once we have recorded IRQ_PENDING already, we can mask the
++ * interrupt for real. This prevents IRQ storms from unhandled
++ * devices.
++ */
++static void ack_edge_ioapic_irq(unsigned int irq)
++{
++ move_irq(irq);
++ if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
++ == (IRQ_PENDING | IRQ_DISABLED))
++ mask_IO_APIC_irq(irq);
++ ack_APIC_irq();
++}
++
++/*
++ * Level triggered interrupts can just be masked,
++ * and shutting down and starting up the interrupt
++ * is the same as enabling and disabling them -- except
++ * with a startup need to return a "was pending" value.
++ *
++ * Level triggered interrupts are special because we
++ * do not touch any IO-APIC register while handling
++ * them. We ack the APIC in the end-IRQ handler, not
++ * in the start-IRQ-handler. Protection against reentrance
++ * from the same interrupt is still provided, both by the
++ * generic IRQ layer and by the fact that an unacked local
++ * APIC does not accept IRQs.
++ */
++static unsigned int startup_level_ioapic_irq (unsigned int irq)
++{
++ unmask_IO_APIC_irq(irq);
++
++ return 0; /* don't check for pending */
++}
++
++static void end_level_ioapic_irq (unsigned int irq)
++{
++ unsigned long v;
++ int i;
++
++ move_irq(irq);
++/*
++ * It appears there is an erratum which affects at least version 0x11
++ * of I/O APIC (that's the 82093AA and cores integrated into various
++ * chipsets). Under certain conditions a level-triggered interrupt is
++ * erroneously delivered as an edge-triggered one, but the respective IRR
++ * bit gets set nevertheless. As a result the I/O unit expects an EOI
++ * message but it will never arrive and further interrupts are blocked
++ * from the source. The exact reason is so far unknown, but the
++ * phenomenon was observed when two consecutive interrupt requests
++ * from a given source get delivered to the same CPU and the source is
++ * temporarily disabled in between.
++ *
++ * A workaround is to simulate an EOI message manually. We achieve it
++ * by setting the trigger mode to edge and then to level when the edge
++ * trigger mode gets detected in the TMR of a local APIC for a
++ * level-triggered interrupt. We mask the source for the time of the
++ * operation to prevent an edge-triggered interrupt escaping meanwhile.
++ * The idea is from Manfred Spraul. --macro
++ */
++ i = IO_APIC_VECTOR(irq);
++
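++	/*
++	 * The TMR is spread over eight 32-bit APIC registers spaced 0x10
++	 * apart; (i & ~0x1f) >> 1 is the offset of the word covering
++	 * vector i, and bit (i & 0x1f) within it is the trigger-mode bit.
++	 */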
++ v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
++
++ ack_APIC_irq();
++
++ if (!(v & (1 << (i & 0x1f)))) {
++ atomic_inc(&irq_mis_count);
++ spin_lock(&ioapic_lock);
++ __mask_and_edge_IO_APIC_irq(irq);
++ __unmask_and_level_IO_APIC_irq(irq);
++ spin_unlock(&ioapic_lock);
++ }
++}
++
++#ifdef CONFIG_PCI_MSI
++static unsigned int startup_edge_ioapic_vector(unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ return startup_edge_ioapic_irq(irq);
++}
++
++static void ack_edge_ioapic_vector(unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ move_native_irq(vector);
++ ack_edge_ioapic_irq(irq);
++}
++
++static unsigned int startup_level_ioapic_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ return startup_level_ioapic_irq (irq);
++}
++
++static void end_level_ioapic_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ move_native_irq(vector);
++ end_level_ioapic_irq(irq);
++}
++
++static void mask_IO_APIC_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ mask_IO_APIC_irq(irq);
++}
++
++static void unmask_IO_APIC_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ unmask_IO_APIC_irq(irq);
++}
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_vector (unsigned int vector,
++ cpumask_t cpu_mask)
++{
++ int irq = vector_to_irq(vector);
++
++ set_native_irq_info(vector, cpu_mask);
++ set_ioapic_affinity_irq(irq, cpu_mask);
++}
++#endif
++#endif
++
++static int ioapic_retrigger(unsigned int irq)
++{
++ send_IPI_self(IO_APIC_VECTOR(irq));
++
++ return 1;
++}
++
++/*
++ * Level and edge triggered IO-APIC interrupts need different handling,
++ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
++ * handled with the level-triggered descriptor, but that one has slightly
++ * more overhead. Level-triggered interrupts cannot be handled with the
++ * edge-triggered handler, without risking IRQ storms and other ugly
++ * races.
++ */
++static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
++ .typename = "IO-APIC-edge",
++ .startup = startup_edge_ioapic,
++ .shutdown = shutdown_edge_ioapic,
++ .enable = enable_edge_ioapic,
++ .disable = disable_edge_ioapic,
++ .ack = ack_edge_ioapic,
++ .end = end_edge_ioapic,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ioapic_affinity,
++#endif
++ .retrigger = ioapic_retrigger,
++};
++
++static struct hw_interrupt_type ioapic_level_type __read_mostly = {
++ .typename = "IO-APIC-level",
++ .startup = startup_level_ioapic,
++ .shutdown = shutdown_level_ioapic,
++ .enable = enable_level_ioapic,
++ .disable = disable_level_ioapic,
++ .ack = mask_and_ack_level_ioapic,
++ .end = end_level_ioapic,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ioapic_affinity,
++#endif
++ .retrigger = ioapic_retrigger,
++};
++#endif /* !CONFIG_XEN */
++
++static inline void init_IO_APIC_traps(void)
++{
++ int irq;
++
++ /*
++ * NOTE! The local APIC isn't very good at handling
++ * multiple interrupts at the same interrupt level.
++ * As the interrupt level is determined by taking the
++ * vector number and shifting that right by 4, we
++ * want to spread these out a bit so that they don't
++ * all fall in the same interrupt level.
++ *
++ * Also, we've got to be careful not to trash gate
++ * 0x80, because int 0x80 is hm, kind of importantish. ;)
++ */
++ for (irq = 0; irq < NR_IRQS ; irq++) {
++ int tmp = irq;
++ if (use_pci_vector()) {
++ if (!platform_legacy_irq(tmp))
++ if ((tmp = vector_to_irq(tmp)) == -1)
++ continue;
++ }
++ if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
++ /*
++ * Hmm.. We don't have an entry for this,
++ * so default to an old-fashioned 8259
++ * interrupt if we can..
++ */
++ if (irq < 16)
++ make_8259A_irq(irq);
++#ifndef CONFIG_XEN
++ else
++ /* Strange. Oh, well.. */
++ irq_desc[irq].chip = &no_irq_type;
++#endif
++ }
++ }
++}
++
++#ifndef CONFIG_XEN
++static void enable_lapic_irq (unsigned int irq)
++{
++ unsigned long v;
++
++ v = apic_read(APIC_LVT0);
++ apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
++}
++
++static void disable_lapic_irq (unsigned int irq)
++{
++ unsigned long v;
++
++ v = apic_read(APIC_LVT0);
++ apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
++}
++
++static void ack_lapic_irq (unsigned int irq)
++{
++ ack_APIC_irq();
++}
++
++static void end_lapic_irq (unsigned int i) { /* nothing */ }
++
++static struct hw_interrupt_type lapic_irq_type __read_mostly = {
++ .typename = "local-APIC-edge",
++ .startup = NULL, /* startup_irq() not used for IRQ0 */
++ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
++ .enable = enable_lapic_irq,
++ .disable = disable_lapic_irq,
++ .ack = ack_lapic_irq,
++ .end = end_lapic_irq
++};
++
++static void setup_nmi (void)
++{
++ /*
++ * Dirty trick to enable the NMI watchdog ...
++ * We put the 8259A master into AEOI mode and
++ * unmask on all local APICs LVT0 as NMI.
++ *
++ * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
++ * is from Maciej W. Rozycki - so we do not have to EOI from
++ * the NMI handler or the timer interrupt.
++ */
++ apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
++
++ on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
++
++ apic_printk(APIC_VERBOSE, " done.\n");
++}
++
++/*
++ * This looks a bit hackish, but it's about the only way of sending
++ * a few INTA cycles to 8259As and any associated glue logic. ICR does
++ * not support the ExtINT mode, unfortunately. We need to send these
++ * cycles as some i82489DX-based boards have glue logic that keeps the
++ * 8259A interrupt line asserted until INTA. --macro
++ */
++static inline void unlock_ExtINT_logic(void)
++{
++ int apic, pin, i;
++ struct IO_APIC_route_entry entry0, entry1;
++ unsigned char save_control, save_freq_select;
++ unsigned long flags;
++
++ pin = find_isa_irq_pin(8, mp_INT);
++ apic = find_isa_irq_apic(8, mp_INT);
++ if (pin == -1)
++ return;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ clear_IO_APIC_pin(apic, pin);
++
++ memset(&entry1, 0, sizeof(entry1));
++
++ entry1.dest_mode = 0; /* physical delivery */
++ entry1.mask = 0; /* unmask IRQ now */
++ entry1.dest.physical.physical_dest = hard_smp_processor_id();
++ entry1.delivery_mode = dest_ExtINT;
++ entry1.polarity = entry0.polarity;
++ entry1.trigger = 0;
++ entry1.vector = 0;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ save_control = CMOS_READ(RTC_CONTROL);
++ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
++ CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
++ RTC_FREQ_SELECT);
++ CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
++
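++	/*
++	 * Poll for up to a second; each RTC periodic-interrupt flag we
++	 * observe knocks another 100ms off the remaining wait, so the 8259
++	 * only needs to be fed a few INTA cycles before we move on.
++	 */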
++ i = 100;
++ while (i-- > 0) {
++ mdelay(10);
++ if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
++ i -= 10;
++ }
++
++ CMOS_WRITE(save_control, RTC_CONTROL);
++ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
++ clear_IO_APIC_pin(apic, pin);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++int timer_uses_ioapic_pin_0;
++
++/*
++ * This code may look a bit paranoid, but it's supposed to cooperate with
++ * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
++ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
++ * fanatically on his truly buggy board.
++ */
++static inline void check_timer(void)
++{
++ int apic1, pin1, apic2, pin2;
++ int vector;
++
++ /*
++ * get/set the timer IRQ vector:
++ */
++ disable_8259A_irq(0);
++ vector = assign_irq_vector(0);
++ set_intr_gate(vector, interrupt[0]);
++
++ /*
++ * Subtle, code in do_timer_interrupt() expects an AEOI
++ * mode for the 8259A whenever interrupts are routed
++ * through I/O APICs. Also IRQ0 has to be enabled in
++ * the 8259A which implies the virtual wire has to be
++ * disabled in the local APIC.
++ */
++ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++ init_8259A(1);
++ timer_ack = 1;
++ if (timer_over_8254 > 0)
++ enable_8259A_irq(0);
++
++ pin1 = find_isa_irq_pin(0, mp_INT);
++ apic1 = find_isa_irq_apic(0, mp_INT);
++ pin2 = ioapic_i8259.pin;
++ apic2 = ioapic_i8259.apic;
++
++ if (pin1 == 0)
++ timer_uses_ioapic_pin_0 = 1;
++
++ printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
++ vector, apic1, pin1, apic2, pin2);
++
++ if (pin1 != -1) {
++ /*
++ * Ok, does IRQ0 through the IOAPIC work?
++ */
++ unmask_IO_APIC_irq(0);
++ if (timer_irq_works()) {
++ if (nmi_watchdog == NMI_IO_APIC) {
++ disable_8259A_irq(0);
++ setup_nmi();
++ enable_8259A_irq(0);
++ }
++ if (disable_timer_pin_1 > 0)
++ clear_IO_APIC_pin(0, pin1);
++ return;
++ }
++ clear_IO_APIC_pin(apic1, pin1);
++ printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
++ "IO-APIC\n");
++ }
++
++ printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
++ if (pin2 != -1) {
++ printk("\n..... (found pin %d) ...", pin2);
++ /*
++ * legacy devices should be connected to IO APIC #0
++ */
++ setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
++ if (timer_irq_works()) {
++ printk("works.\n");
++ if (pin1 != -1)
++ replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
++ else
++ add_pin_to_irq(0, apic2, pin2);
++ if (nmi_watchdog == NMI_IO_APIC) {
++ setup_nmi();
++ }
++ return;
++ }
++ /*
++ * Cleanup, just in case ...
++ */
++ clear_IO_APIC_pin(apic2, pin2);
++ }
++ printk(" failed.\n");
++
++ if (nmi_watchdog == NMI_IO_APIC) {
++ printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
++ nmi_watchdog = 0;
++ }
++
++ printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
++
++ disable_8259A_irq(0);
++ irq_desc[0].chip = &lapic_irq_type;
++ apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
++ enable_8259A_irq(0);
++
++ if (timer_irq_works()) {
++ printk(" works.\n");
++ return;
++ }
++ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
++ printk(" failed.\n");
++
++ printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
++
++ timer_ack = 0;
++ init_8259A(0);
++ make_8259A_irq(0);
++ apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
++
++ unlock_ExtINT_logic();
++
++ if (timer_irq_works()) {
++ printk(" works.\n");
++ return;
++ }
++ printk(" failed :(.\n");
++ panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
++ "report. Then try booting with the 'noapic' option");
++}
++#else
++int timer_uses_ioapic_pin_0 = 0;
++#define check_timer() ((void)0)
++#endif
++
++/*
++ *
++ * IRQs that are handled by the PIC in the MPS IOAPIC case.
++ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
++ * Linux doesn't really care, as it's not actually used
++ * for any interrupt handling anyway.
++ */
++#define PIC_IRQS (1 << PIC_CASCADE_IR)
++
++void __init setup_IO_APIC(void)
++{
++ enable_IO_APIC();
++
++ if (acpi_ioapic)
++ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
++ else
++ io_apic_irqs = ~PIC_IRQS;
++
++ printk("ENABLING IO-APIC IRQs\n");
++
++ /*
++ * Set up IO-APIC IRQ routing.
++ */
++ if (!acpi_ioapic)
++ setup_ioapic_ids_from_mpc();
++#ifndef CONFIG_XEN
++ sync_Arb_IDs();
++#endif
++ setup_IO_APIC_irqs();
++ init_IO_APIC_traps();
++ check_timer();
++ if (!acpi_ioapic)
++ print_IO_APIC();
++}
++
++static int __init setup_disable_8254_timer(char *s)
++{
++ timer_over_8254 = -1;
++ return 1;
++}
++static int __init setup_enable_8254_timer(char *s)
++{
++ timer_over_8254 = 2;
++ return 1;
++}
++
++__setup("disable_8254_timer", setup_disable_8254_timer);
++__setup("enable_8254_timer", setup_enable_8254_timer);
++
++/*
++ * Called after all the initialization is done. If we didn't find any
++ * APIC bugs then we can allow the modify fast path
++ */
++
++static int __init io_apic_bug_finalize(void)
++{
++ if(sis_apic_bug == -1)
++ sis_apic_bug = 0;
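++	/*
++	 * In the initial (privileged) domain, tell the hypervisor whether
++	 * the SiS register-select bug was found so it can apply the same
++	 * IO-APIC access quirk.
++	 */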
++ if (is_initial_xendomain()) {
++ struct xen_platform_op op = { .cmd = XENPF_platform_quirk };
++ op.u.platform_quirk.quirk_id = sis_apic_bug ?
++ QUIRK_IOAPIC_BAD_REGSEL : QUIRK_IOAPIC_GOOD_REGSEL;
++ HYPERVISOR_platform_op(&op);
++ }
++ return 0;
++}
++
++late_initcall(io_apic_bug_finalize);
++
++struct sysfs_ioapic_data {
++ struct sys_device dev;
++ struct IO_APIC_route_entry entry[0];
++};
++static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
++
++static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
++{
++ struct IO_APIC_route_entry *entry;
++ struct sysfs_ioapic_data *data;
++ unsigned long flags;
++ int i;
++
++ data = container_of(dev, struct sysfs_ioapic_data, dev);
++ entry = data->entry;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++ *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
++ *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
++ }
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++static int ioapic_resume(struct sys_device *dev)
++{
++ struct IO_APIC_route_entry *entry;
++ struct sysfs_ioapic_data *data;
++ unsigned long flags;
++ union IO_APIC_reg_00 reg_00;
++ int i;
++
++ data = container_of(dev, struct sysfs_ioapic_data, dev);
++ entry = data->entry;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(dev->id, 0);
++ if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
++ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
++ io_apic_write(dev->id, 0, reg_00.raw);
++ }
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++ io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
++ io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
++ }
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++static struct sysdev_class ioapic_sysdev_class = {
++ set_kset_name("ioapic"),
++ .suspend = ioapic_suspend,
++ .resume = ioapic_resume,
++};
++
++static int __init ioapic_init_sysfs(void)
++{
++ struct sys_device * dev;
++ int i, size, error = 0;
++
++ error = sysdev_class_register(&ioapic_sysdev_class);
++ if (error)
++ return error;
++
++ for (i = 0; i < nr_ioapics; i++ ) {
++ size = sizeof(struct sys_device) + nr_ioapic_registers[i]
++ * sizeof(struct IO_APIC_route_entry);
++ mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
++ if (!mp_ioapic_data[i]) {
++ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++ continue;
++ }
++ memset(mp_ioapic_data[i], 0, size);
++ dev = &mp_ioapic_data[i]->dev;
++ dev->id = i;
++ dev->cls = &ioapic_sysdev_class;
++ error = sysdev_register(dev);
++ if (error) {
++ kfree(mp_ioapic_data[i]);
++ mp_ioapic_data[i] = NULL;
++ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++ continue;
++ }
++ }
++
++ return 0;
++}
++
++device_initcall(ioapic_init_sysfs);
++
++/* --------------------------------------------------------------------------
++ ACPI-based IOAPIC Configuration
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++int __init io_apic_get_unique_id (int ioapic, int apic_id)
++{
++#ifndef CONFIG_XEN
++ union IO_APIC_reg_00 reg_00;
++ static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
++ physid_mask_t tmp;
++ unsigned long flags;
++ int i = 0;
++
++ /*
++ * The P4 platform supports up to 256 APIC IDs on two separate APIC
++	 * buses (one for LAPICs, one for IOAPICs), whereas its predecessors only
++	 * support up to 16 on one shared APIC bus.
++ *
++ * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
++ * advantage of new APIC bus architecture.
++ */
++
++ if (physids_empty(apic_id_map))
++ apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(ioapic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ if (apic_id >= get_physical_broadcast()) {
++ printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
++ "%d\n", ioapic, apic_id, reg_00.bits.ID);
++ apic_id = reg_00.bits.ID;
++ }
++
++ /*
++ * Every APIC in a system must have a unique ID or we get lots of nice
++ * 'stuck on smp_invalidate_needed IPI wait' messages.
++ */
++ if (check_apicid_used(apic_id_map, apic_id)) {
++
++ for (i = 0; i < get_physical_broadcast(); i++) {
++ if (!check_apicid_used(apic_id_map, i))
++ break;
++ }
++
++ if (i == get_physical_broadcast())
++ panic("Max apic_id exceeded!\n");
++
++ printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
++ "trying %d\n", ioapic, apic_id, i);
++
++ apic_id = i;
++ }
++
++ tmp = apicid_to_cpu_present(apic_id);
++ physids_or(apic_id_map, apic_id_map, tmp);
++
++ if (reg_00.bits.ID != apic_id) {
++ reg_00.bits.ID = apic_id;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic, 0, reg_00.raw);
++ reg_00.raw = io_apic_read(ioapic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ /* Sanity check */
++ if (reg_00.bits.ID != apic_id) {
++ printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
++ return -1;
++ }
++ }
++
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
++#endif /* !CONFIG_XEN */
++
++ return apic_id;
++}
++
++
++int __init io_apic_get_version (int ioapic)
++{
++ union IO_APIC_reg_01 reg_01;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(ioapic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return reg_01.bits.version;
++}
++
++
++int __init io_apic_get_redir_entries (int ioapic)
++{
++ union IO_APIC_reg_01 reg_01;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(ioapic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return reg_01.bits.entries;
++}
++
++
++int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ if (!IO_APIC_IRQ(irq)) {
++ printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
++ ioapic);
++ return -EINVAL;
++ }
++
++ /*
++ * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
++ * Note that we mask (disable) IRQs now -- these get enabled when the
++ * corresponding device driver registers for this IRQ.
++ */
++
++ memset(&entry,0,sizeof(entry));
++
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.trigger = edge_level;
++ entry.polarity = active_high_low;
++ entry.mask = 1;
++
++ /*
++ * IRQs < 16 are already in the irq_2_pin[] map
++ */
++ if (irq >= 16)
++ add_pin_to_irq(irq, ioapic, pin);
++
++ entry.vector = assign_irq_vector(irq);
++
++ apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
++ "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
++ mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
++ edge_level, active_high_low);
++
++ ioapic_register_intr(irq, entry.vector, edge_level);
++
++ if (!ioapic && (irq < 16))
++ disable_8259A_irq(irq);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
++ set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++#endif /* CONFIG_ACPI */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/ioport-xen.c 2007-08-27 14:01:58.000000000 -0400
+@@ -0,0 +1,122 @@
++/*
++ * linux/arch/i386/kernel/ioport.c
++ *
++ * This contains the io-permission bitmap code - written by obz, with changes
++ * by Linus.
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/ioport.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/thread_info.h>
++#include <xen/interface/physdev.h>
++
++/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
++static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
++{
++ unsigned long mask;
++ unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG);
++ unsigned int low_index = base & (BITS_PER_LONG-1);
++ int length = low_index + extent;
++
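++	/*
++	 * Three phases: a partially covered leading word, any number of
++	 * fully covered words, then a partially covered trailing word.
++	 */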
++ if (low_index != 0) {
++ mask = (~0UL << low_index);
++ if (length < BITS_PER_LONG)
++ mask &= ~(~0UL << length);
++ if (new_value)
++ *bitmap_base++ |= mask;
++ else
++ *bitmap_base++ &= ~mask;
++ length -= BITS_PER_LONG;
++ }
++
++ mask = (new_value ? ~0UL : 0UL);
++ while (length >= BITS_PER_LONG) {
++ *bitmap_base++ = mask;
++ length -= BITS_PER_LONG;
++ }
++
++ if (length > 0) {
++ mask = ~(~0UL << length);
++ if (new_value)
++ *bitmap_base++ |= mask;
++ else
++ *bitmap_base++ &= ~mask;
++ }
++}
++
++
++/*
++ * this changes the io permissions bitmap in the current task.
++ */
++asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
++{
++ struct thread_struct * t = &current->thread;
++ unsigned long *bitmap;
++ struct physdev_set_iobitmap set_iobitmap;
++
++ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
++ return -EINVAL;
++ if (turn_on && !capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
++ /*
++ * If it's the first ioperm() call in this thread's lifetime, set the
++	 * IO bitmap up. ioperm() is much less timing critical than clone(),
++	 * which is why we delay this operation until now:
++ */
++ if (!t->io_bitmap_ptr) {
++ bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++ if (!bitmap)
++ return -ENOMEM;
++
++ memset(bitmap, 0xff, IO_BITMAP_BYTES);
++ t->io_bitmap_ptr = bitmap;
++ set_thread_flag(TIF_IO_BITMAP);
++
++ set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
++ set_iobitmap.nr_ports = IO_BITMAP_BITS;
++ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
++ }
++
++ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
++
++ return 0;
++}
++
++/*
++ * sys_iopl has to be used when you want to access the IO ports
++ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
++ * you'd need 8kB of bitmaps/process, which is a bit excessive.
++ *
++ * Here we just change the eflags value on the stack: we allow
++ * only the super-user to do it. This depends on the stack-layout
++ * on system-call entry - see also fork() and the signal handling
++ * code.
++ */
++
++asmlinkage long sys_iopl(unsigned long unused)
++{
++ volatile struct pt_regs * regs = (struct pt_regs *) &unused;
++ unsigned int level = regs->ebx;
++ struct thread_struct *t = &current->thread;
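++	/* IOPL occupies bits 12-13 of EFLAGS; t->iopl keeps it pre-shifted. */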
++ unsigned int old = (t->iopl >> 12) & 3;
++
++ if (level > 3)
++ return -EINVAL;
++ /* Trying to gain more privileges? */
++ if (level > old) {
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
++ }
++ t->iopl = level << 12;
++ set_iopl_mask(t->iopl);
++ return 0;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/irq-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,324 @@
++/*
++ * linux/arch/i386/kernel/irq.c
++ *
++ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
++ *
++ * This file contains the lowest level x86-specific interrupt
++ * entry, irq-stacks and irq statistics code. All the remaining
++ * irq logic is done by the generic kernel/irq/ code and
++ * by the x86-specific irq controller code. (e.g. i8259.c and
++ * io_apic.c.)
++ */
++
++#include <asm/uaccess.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <linux/interrupt.h>
++#include <linux/kernel_stat.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <linux/delay.h>
++
++DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
++EXPORT_PER_CPU_SYMBOL(irq_stat);
++
++#ifndef CONFIG_X86_LOCAL_APIC
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * each architecture has to answer this themselves.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++ printk("unexpected IRQ trap at vector %02x\n", irq);
++}
++#endif
++
++#ifdef CONFIG_4KSTACKS
++/*
++ * per-CPU IRQ handling contexts (thread information and stack)
++ */
++union irq_ctx {
++ struct thread_info tinfo;
++ u32 stack[THREAD_SIZE/sizeof(u32)];
++};
++
++static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
++static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
++#endif
++
++/*
++ * do_IRQ handles all normal device IRQ's (the special
++ * SMP cross-CPU interrupts have their own specific
++ * handlers).
++ */
++fastcall unsigned int do_IRQ(struct pt_regs *regs)
++{
++ /* high bit used in ret_from_ code */
++ int irq = ~regs->orig_eax;
++#ifdef CONFIG_4KSTACKS
++ union irq_ctx *curctx, *irqctx;
++ u32 *isp;
++#endif
++
++ if (unlikely((unsigned)irq >= NR_IRQS)) {
++ printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
++ __FUNCTION__, irq);
++ BUG();
++ }
++
++ irq_enter();
++#ifdef CONFIG_DEBUG_STACKOVERFLOW
++ /* Debugging check for stack overflow: is there less than 1KB free? */
++ {
++ long esp;
++
++ __asm__ __volatile__("andl %%esp,%0" :
++ "=r" (esp) : "0" (THREAD_SIZE - 1));
++ if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
++ printk("do_IRQ: stack overflow: %ld\n",
++ esp - sizeof(struct thread_info));
++ dump_stack();
++ }
++ }
++#endif
++
++#ifdef CONFIG_4KSTACKS
++
++ curctx = (union irq_ctx *) current_thread_info();
++ irqctx = hardirq_ctx[smp_processor_id()];
++
++ /*
++ * this is where we switch to the IRQ stack. However, if we are
++ * already using the IRQ stack (because we interrupted a hardirq
++ * handler) we can't do that and just have to keep using the
++ * current stack (which is the irq stack already after all)
++ */
++ if (curctx != irqctx) {
++ int arg1, arg2, ebx;
++
++ /* build the stack frame on the IRQ stack */
++ isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
++ irqctx->tinfo.task = curctx->tinfo.task;
++ irqctx->tinfo.previous_esp = current_stack_pointer;
++
++ /*
++ * Copy the softirq bits in preempt_count so that the
++ * softirq checks work in the hardirq context.
++ */
++ irqctx->tinfo.preempt_count =
++ (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
++ (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
++
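++		/*
++		 * xchgl swaps %esp with the IRQ-stack pointer held in %ebx,
++		 * __do_IRQ() then runs on the IRQ stack, and the final movl
++		 * restores the original %esp.
++		 */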
++ asm volatile(
++ " xchgl %%ebx,%%esp \n"
++ " call __do_IRQ \n"
++ " movl %%ebx,%%esp \n"
++ : "=a" (arg1), "=d" (arg2), "=b" (ebx)
++ : "0" (irq), "1" (regs), "2" (isp)
++ : "memory", "cc", "ecx"
++ );
++ } else
++#endif
++ __do_IRQ(irq, regs);
++
++ irq_exit();
++
++ return 1;
++}
++
++#ifdef CONFIG_4KSTACKS
++
++/*
++ * These should really be __section__(".bss.page_aligned") as well, but
++ * gcc 3.0 and earlier don't handle that correctly.
++ */
++static char softirq_stack[NR_CPUS * THREAD_SIZE]
++ __attribute__((__aligned__(THREAD_SIZE)));
++
++static char hardirq_stack[NR_CPUS * THREAD_SIZE]
++ __attribute__((__aligned__(THREAD_SIZE)));
++
++/*
++ * allocate per-cpu stacks for hardirq and for softirq processing
++ */
++void irq_ctx_init(int cpu)
++{
++ union irq_ctx *irqctx;
++
++ if (hardirq_ctx[cpu])
++ return;
++
++ irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
++ irqctx->tinfo.task = NULL;
++ irqctx->tinfo.exec_domain = NULL;
++ irqctx->tinfo.cpu = cpu;
++ irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
++ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
++
++ hardirq_ctx[cpu] = irqctx;
++
++ irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
++ irqctx->tinfo.task = NULL;
++ irqctx->tinfo.exec_domain = NULL;
++ irqctx->tinfo.cpu = cpu;
++ irqctx->tinfo.preempt_count = 0;
++ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
++
++ softirq_ctx[cpu] = irqctx;
++
++ printk("CPU %u irqstacks, hard=%p soft=%p\n",
++ cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
++}
++
++void irq_ctx_exit(int cpu)
++{
++ hardirq_ctx[cpu] = NULL;
++}
++
++extern asmlinkage void __do_softirq(void);
++
++asmlinkage void do_softirq(void)
++{
++ unsigned long flags;
++ struct thread_info *curctx;
++ union irq_ctx *irqctx;
++ u32 *isp;
++
++ if (in_interrupt())
++ return;
++
++ local_irq_save(flags);
++
++ if (local_softirq_pending()) {
++ curctx = current_thread_info();
++ irqctx = softirq_ctx[smp_processor_id()];
++ irqctx->tinfo.task = curctx->task;
++ irqctx->tinfo.previous_esp = current_stack_pointer;
++
++ /* build the stack frame on the softirq stack */
++ isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
++
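++		/* Same stack-switch trick as do_IRQ(): run __do_softirq()
++		 * on the per-CPU softirq stack, then restore %esp. */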
++ asm volatile(
++ " xchgl %%ebx,%%esp \n"
++ " call __do_softirq \n"
++ " movl %%ebx,%%esp \n"
++ : "=b"(isp)
++ : "0"(isp)
++ : "memory", "cc", "edx", "ecx", "eax"
++ );
++ /*
++		 * Shouldn't happen, we returned above if in_interrupt():
++ */
++ WARN_ON_ONCE(softirq_count());
++ }
++
++ local_irq_restore(flags);
++}
++
++EXPORT_SYMBOL(do_softirq);
++#endif
++
++/*
++ * Interrupt statistics:
++ */
++
++atomic_t irq_err_count;
++
++/*
++ * /proc/interrupts printing:
++ */
++
++int show_interrupts(struct seq_file *p, void *v)
++{
++ int i = *(loff_t *) v, j;
++ struct irqaction * action;
++ unsigned long flags;
++
++ if (i == 0) {
++ seq_printf(p, " ");
++ for_each_online_cpu(j)
++ seq_printf(p, "CPU%-8d",j);
++ seq_putc(p, '\n');
++ }
++
++ if (i < NR_IRQS) {
++ spin_lock_irqsave(&irq_desc[i].lock, flags);
++ action = irq_desc[i].action;
++ if (!action)
++ goto skip;
++ seq_printf(p, "%3d: ",i);
++#ifndef CONFIG_SMP
++ seq_printf(p, "%10u ", kstat_irqs(i));
++#else
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++#endif
++ seq_printf(p, " %14s", irq_desc[i].chip->typename);
++ seq_printf(p, " %s", action->name);
++
++ for (action=action->next; action; action = action->next)
++ seq_printf(p, ", %s", action->name);
++
++ seq_putc(p, '\n');
++skip:
++ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
++ } else if (i == NR_IRQS) {
++ seq_printf(p, "NMI: ");
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", nmi_count(j));
++ seq_putc(p, '\n');
++#ifdef CONFIG_X86_LOCAL_APIC
++ seq_printf(p, "LOC: ");
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ",
++ per_cpu(irq_stat,j).apic_timer_irqs);
++ seq_putc(p, '\n');
++#endif
++ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
++#if defined(CONFIG_X86_IO_APIC)
++ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
++#endif
++ }
++ return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++void fixup_irqs(cpumask_t map)
++{
++ unsigned int irq;
++ static int warned;
++
++ for (irq = 0; irq < NR_IRQS; irq++) {
++ cpumask_t mask;
++ if (irq == 2)
++ continue;
++
++ cpus_and(mask, irq_desc[irq].affinity, map);
++ if (any_online_cpu(mask) == NR_CPUS) {
++ /*printk("Breaking affinity for irq %i\n", irq);*/
++ mask = map;
++ }
++ if (irq_desc[irq].chip->set_affinity)
++ irq_desc[irq].chip->set_affinity(irq, mask);
++ else if (irq_desc[irq].action && !(warned++))
++ printk("Cannot set affinity for irq %i\n", irq);
++ }
++
++#if 0
++ barrier();
++ /* Ingo Molnar says: "after the IO-APIC masks have been redirected
++ [note the nop - the interrupt-enable boundary on x86 is two
++ instructions from sti] - to flush out pending hardirqs and
++ IPIs. After this point nothing is supposed to reach this CPU." */
++ __asm__ __volatile__("sti; nop; cli");
++ barrier();
++#else
++ /* That doesn't seem sufficient. Give it 1ms. */
++ local_irq_enable();
++ mdelay(1);
++ local_irq_disable();
++#endif
++}
++#endif
++
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/ldt-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,270 @@
++/*
++ * linux/kernel/ldt.c
++ *
++ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
++ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
++
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/ldt.h>
++#include <asm/desc.h>
++#include <asm/mmu_context.h>
++
++#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
++static void flush_ldt(void *null)
++{
++ if (current->active_mm)
++ load_LDT(&current->active_mm->context);
++}
++#endif
++
++static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
++{
++ void *oldldt;
++ void *newldt;
++ int oldsize;
++
++ if (mincount <= pc->size)
++ return 0;
++ oldsize = pc->size;
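++	/* Round up to a multiple of 512 entries so the LDT grows in
++	 * whole pages (512 * LDT_ENTRY_SIZE == PAGE_SIZE). */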
++ mincount = (mincount+511)&(~511);
++ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
++ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
++ else
++ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
++
++ if (!newldt)
++ return -ENOMEM;
++
++ if (oldsize)
++ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
++ oldldt = pc->ldt;
++ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
++ pc->ldt = newldt;
++ wmb();
++ pc->size = mincount;
++ wmb();
++
++ if (reload) {
++#ifdef CONFIG_SMP
++ cpumask_t mask;
++ preempt_disable();
++#endif
++ make_pages_readonly(
++ pc->ldt,
++ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ load_LDT(pc);
++#ifdef CONFIG_SMP
++ mask = cpumask_of_cpu(smp_processor_id());
++ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
++ smp_call_function(flush_ldt, NULL, 1, 1);
++ preempt_enable();
++#endif
++ }
++ if (oldsize) {
++ make_pages_writable(
++ oldldt,
++ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(oldldt);
++ else
++ kfree(oldldt);
++ }
++ return 0;
++}
++
++static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++{
++ int err = alloc_ldt(new, old->size, 0);
++ if (err < 0)
++ return err;
++ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
++ make_pages_readonly(
++ new->ldt,
++ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ return 0;
++}
++
++/*
++ * we do not have to muck with descriptors here, that is
++ * done in switch_mm() as needed.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
++{
++ struct mm_struct * old_mm;
++ int retval = 0;
++
++ init_MUTEX(&mm->context.sem);
++ mm->context.size = 0;
++ mm->context.has_foreign_mappings = 0;
++ old_mm = current->mm;
++ if (old_mm && old_mm->context.size > 0) {
++ down(&old_mm->context.sem);
++ retval = copy_ldt(&mm->context, &old_mm->context);
++ up(&old_mm->context.sem);
++ }
++ return retval;
++}
++
++/*
++ * No need to lock the MM as we are the last user
++ */
++void destroy_context(struct mm_struct *mm)
++{
++ if (mm->context.size) {
++ if (mm == current->active_mm)
++ clear_LDT();
++ make_pages_writable(
++ mm->context.ldt,
++ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(mm->context.ldt);
++ else
++ kfree(mm->context.ldt);
++ mm->context.size = 0;
++ }
++}
++
++static int read_ldt(void __user * ptr, unsigned long bytecount)
++{
++ int err;
++ unsigned long size;
++ struct mm_struct * mm = current->mm;
++
++ if (!mm->context.size)
++ return 0;
++ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
++ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
++
++ down(&mm->context.sem);
++ size = mm->context.size*LDT_ENTRY_SIZE;
++ if (size > bytecount)
++ size = bytecount;
++
++ err = 0;
++ if (copy_to_user(ptr, mm->context.ldt, size))
++ err = -EFAULT;
++ up(&mm->context.sem);
++ if (err < 0)
++ goto error_return;
++ if (size != bytecount) {
++ /* zero-fill the rest */
++ if (clear_user(ptr+size, bytecount-size) != 0) {
++ err = -EFAULT;
++ goto error_return;
++ }
++ }
++ return bytecount;
++error_return:
++ return err;
++}
++
++static int read_default_ldt(void __user * ptr, unsigned long bytecount)
++{
++ int err;
++ unsigned long size;
++ void *address;
++
++ err = 0;
++ address = &default_ldt[0];
++ size = 5*sizeof(struct desc_struct);
++ if (size > bytecount)
++ size = bytecount;
++
++ err = size;
++ if (copy_to_user(ptr, address, size))
++ err = -EFAULT;
++
++ return err;
++}
++
++static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
++{
++ struct mm_struct * mm = current->mm;
++ __u32 entry_1, entry_2;
++ int error;
++ struct user_desc ldt_info;
++
++ error = -EINVAL;
++ if (bytecount != sizeof(ldt_info))
++ goto out;
++ error = -EFAULT;
++ if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
++ goto out;
++
++ error = -EINVAL;
++ if (ldt_info.entry_number >= LDT_ENTRIES)
++ goto out;
++ if (ldt_info.contents == 3) {
++ if (oldmode)
++ goto out;
++ if (ldt_info.seg_not_present == 0)
++ goto out;
++ }
++
++ down(&mm->context.sem);
++ if (ldt_info.entry_number >= mm->context.size) {
++ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
++ if (error < 0)
++ goto out_unlock;
++ }
++
++ /* Allow LDTs to be cleared by the user. */
++ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
++ if (oldmode || LDT_empty(&ldt_info)) {
++ entry_1 = 0;
++ entry_2 = 0;
++ goto install;
++ }
++ }
++
++ entry_1 = LDT_entry_a(&ldt_info);
++ entry_2 = LDT_entry_b(&ldt_info);
++ if (oldmode)
++ entry_2 &= ~(1 << 20);
++
++ /* Install the new entry ... */
++install:
++ error = write_ldt_entry(mm->context.ldt, ldt_info.entry_number,
++ entry_1, entry_2);
++
++out_unlock:
++ up(&mm->context.sem);
++out:
++ return error;
++}
++
++asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
++{
++ int ret = -ENOSYS;
++
++ switch (func) {
++ case 0:
++ ret = read_ldt(ptr, bytecount);
++ break;
++ case 1:
++ ret = write_ldt(ptr, bytecount, 1);
++ break;
++ case 2:
++ ret = read_default_ldt(ptr, bytecount);
++ break;
++ case 0x11:
++ ret = write_ldt(ptr, bytecount, 0);
++ break;
++ }
++ return ret;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/microcode-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,144 @@
++/*
++ * Intel CPU Microcode Update Driver for Linux
++ *
++ * Copyright (C) 2000-2004 Tigran Aivazian
++ *
++ * This driver allows upgrading microcode on Intel processors
++ * belonging to the IA-32 family - PentiumPro, Pentium II,
++ * Pentium III, Xeon, Pentium 4, etc.
++ *
++ * Reference: Section 8.10 of Volume III, Intel Pentium 4 Manual,
++ * Order Number 245472 or free download from:
++ *
++ * http://developer.intel.com/design/pentium4/manuals/245472.htm
++ *
++ * For more information, go to http://www.urbanmyth.org/microcode
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++//#define DEBUG /* pr_debug */
++#include <linux/capability.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/cpumask.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/miscdevice.h>
++#include <linux/spinlock.h>
++#include <linux/mm.h>
++#include <linux/mutex.h>
++#include <linux/syscalls.h>
++
++#include <asm/msr.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++
++MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
++MODULE_AUTHOR("Tigran Aivazian <tigran@veritas.com>");
++MODULE_LICENSE("GPL");
++
++static int verbose;
++module_param(verbose, int, 0644);
++
++#define MICROCODE_VERSION "1.14a-xen"
++
++#define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */
++#define MC_HEADER_SIZE (sizeof (microcode_header_t)) /* 48 bytes */
++#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) /* 2048 bytes */
++
++/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
++static DEFINE_MUTEX(microcode_mutex);
++
++static int microcode_open (struct inode *unused1, struct file *unused2)
++{
++ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
++
++
++static int do_microcode_update (const void __user *ubuf, size_t len)
++{
++ int err;
++ void *kbuf;
++
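++	/*
++	 * Unlike the native driver, no parsing is done here: the raw
++	 * buffer is handed to the hypervisor, which applies the update
++	 * itself.
++	 */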
++ kbuf = vmalloc(len);
++ if (!kbuf)
++ return -ENOMEM;
++
++ if (copy_from_user(kbuf, ubuf, len) == 0) {
++ struct xen_platform_op op;
++
++ op.cmd = XENPF_microcode_update;
++ set_xen_guest_handle(op.u.microcode.data, kbuf);
++ op.u.microcode.length = len;
++ err = HYPERVISOR_platform_op(&op);
++ } else
++ err = -EFAULT;
++
++ vfree(kbuf);
++
++ return err;
++}
++
++static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
++{
++ ssize_t ret;
++
++ if (len < MC_HEADER_SIZE) {
++ printk(KERN_ERR "microcode: not enough data\n");
++ return -EINVAL;
++ }
++
++ mutex_lock(&microcode_mutex);
++
++ ret = do_microcode_update(buf, len);
++ if (!ret)
++ ret = (ssize_t)len;
++
++ mutex_unlock(&microcode_mutex);
++
++ return ret;
++}
++
++static struct file_operations microcode_fops = {
++ .owner = THIS_MODULE,
++ .write = microcode_write,
++ .open = microcode_open,
++};
++
++static struct miscdevice microcode_dev = {
++ .minor = MICROCODE_MINOR,
++ .name = "microcode",
++ .fops = &microcode_fops,
++};
++
++static int __init microcode_init (void)
++{
++ int error;
++
++ error = misc_register(&microcode_dev);
++ if (error) {
++ printk(KERN_ERR
++ "microcode: can't misc_register on minor=%d\n",
++ MICROCODE_MINOR);
++ return error;
++ }
++
++ printk(KERN_INFO
++ "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@veritas.com>\n");
++ return 0;
++}
++
++static void __exit microcode_exit (void)
++{
++ misc_deregister(&microcode_dev);
++}
++
++module_init(microcode_init)
++module_exit(microcode_exit)
++MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/mpparse-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,1185 @@
++/*
++ * Intel Multiprocessor Specification 1.1 and 1.4
++ * compliant MP-table parsing routines.
++ *
++ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
++ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Erich Boleyn : MP v1.4 and additional changes.
++ * Alan Cox : Added EBDA scanning
++ * Ingo Molnar : various cleanups and rewrites
++ * Maciej W. Rozycki: Bits for default MP configurations
++ * Paul Diefenbaugh: Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/acpi.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/bitops.h>
++
++#include <asm/smp.h>
++#include <asm/acpi.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/io_apic.h>
++
++#include <mach_apic.h>
++#include <mach_mpparse.h>
++#include <bios_ebda.h>
++
++/* Have we found an MP table */
++int smp_found_config;
++unsigned int __initdata maxcpus = NR_CPUS;
++
++/*
++ * Various Linux-internal data structures created from the
++ * MP-table.
++ */
++int apic_version [MAX_APICS];
++int mp_bus_id_to_type [MAX_MP_BUSSES];
++int mp_bus_id_to_node [MAX_MP_BUSSES];
++int mp_bus_id_to_local [MAX_MP_BUSSES];
++int quad_local_to_mp_bus_id [NR_CPUS/4][4];
++int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++static int mp_current_pci_id;
++
++/* I/O APIC entries */
++struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
++
++/* # of MP IRQ source entries */
++struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
++
++/* MP IRQ source entries */
++int mp_irq_entries;
++
++int nr_ioapics;
++
++int pic_mode;
++unsigned long mp_lapic_addr;
++
++unsigned int def_to_bigsmp = 0;
++
++/* Processor that is doing the boot up */
++unsigned int boot_cpu_physical_apicid = -1U;
++/* Internal processor count */
++static unsigned int __devinitdata num_processors;
++
++/* Bitmask of physically existing CPUs */
++physid_mask_t phys_cpu_present_map;
++
++u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++/*
++ * Intel MP BIOS table parsing routines:
++ */
++
++
++/*
++ * Checksum an MP configuration block.
++ */
++
++static int __init mpf_checksum(unsigned char *mp, int len)
++{
++ int sum = 0;
++
++ while (len--)
++ sum += *mp++;
++
++ return sum & 0xFF;
++}
++
++/*
++ * Have to match translation table entries to main table entries by counter
++ * hence the mpc_record variable .... can't see a less disgusting way of
++ * doing this ....
++ */
++
++static int mpc_record;
++static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
++
++#ifndef CONFIG_XEN
++static void __devinit MP_processor_info (struct mpc_config_processor *m)
++{
++ int ver, apicid;
++ physid_mask_t phys_cpu;
++
++ if (!(m->mpc_cpuflag & CPU_ENABLED))
++ return;
++
++ apicid = mpc_apic_id(m, translation_table[mpc_record]);
++
++ if (m->mpc_featureflag&(1<<0))
++ Dprintk(" Floating point unit present.\n");
++ if (m->mpc_featureflag&(1<<7))
++ Dprintk(" Machine Exception supported.\n");
++ if (m->mpc_featureflag&(1<<8))
++ Dprintk(" 64 bit compare & exchange supported.\n");
++ if (m->mpc_featureflag&(1<<9))
++ Dprintk(" Internal APIC present.\n");
++ if (m->mpc_featureflag&(1<<11))
++ Dprintk(" SEP present.\n");
++ if (m->mpc_featureflag&(1<<12))
++ Dprintk(" MTRR present.\n");
++ if (m->mpc_featureflag&(1<<13))
++ Dprintk(" PGE present.\n");
++ if (m->mpc_featureflag&(1<<14))
++ Dprintk(" MCA present.\n");
++ if (m->mpc_featureflag&(1<<15))
++ Dprintk(" CMOV present.\n");
++ if (m->mpc_featureflag&(1<<16))
++ Dprintk(" PAT present.\n");
++ if (m->mpc_featureflag&(1<<17))
++ Dprintk(" PSE present.\n");
++ if (m->mpc_featureflag&(1<<18))
++ Dprintk(" PSN present.\n");
++ if (m->mpc_featureflag&(1<<19))
++ Dprintk(" Cache Line Flush Instruction present.\n");
++ /* 20 Reserved */
++ if (m->mpc_featureflag&(1<<21))
++ Dprintk(" Debug Trace and EMON Store present.\n");
++ if (m->mpc_featureflag&(1<<22))
++ Dprintk(" ACPI Thermal Throttle Registers present.\n");
++ if (m->mpc_featureflag&(1<<23))
++ Dprintk(" MMX present.\n");
++ if (m->mpc_featureflag&(1<<24))
++ Dprintk(" FXSR present.\n");
++ if (m->mpc_featureflag&(1<<25))
++ Dprintk(" XMM present.\n");
++ if (m->mpc_featureflag&(1<<26))
++ Dprintk(" Willamette New Instructions present.\n");
++ if (m->mpc_featureflag&(1<<27))
++ Dprintk(" Self Snoop present.\n");
++ if (m->mpc_featureflag&(1<<28))
++ Dprintk(" HT present.\n");
++ if (m->mpc_featureflag&(1<<29))
++ Dprintk(" Thermal Monitor present.\n");
++ /* 30, 31 Reserved */
++
++
++ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
++ Dprintk(" Bootup CPU\n");
++ boot_cpu_physical_apicid = m->mpc_apicid;
++ }
++
++ ver = m->mpc_apicver;
++
++ /*
++ * Validate version
++ */
++ if (ver == 0x0) {
++ printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
++ "fixing up to 0x10. (tell your hw vendor)\n",
++ m->mpc_apicid);
++ ver = 0x10;
++ }
++ apic_version[m->mpc_apicid] = ver;
++
++ phys_cpu = apicid_to_cpu_present(apicid);
++ physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
++
++ if (num_processors >= NR_CPUS) {
++ printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
++ " Processor ignored.\n", NR_CPUS);
++ return;
++ }
++
++ if (num_processors >= maxcpus) {
++ printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
++ " Processor ignored.\n", maxcpus);
++ return;
++ }
++
++ cpu_set(num_processors, cpu_possible_map);
++ num_processors++;
++
++ /*
++ * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
++	 * but we need to work out other dependencies like SMP_SUSPEND etc
++ * before this can be done without some confusion.
++ * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
++ * - Ashok Raj <ashok.raj@intel.com>
++ */
++ if (num_processors > 8) {
++ switch (boot_cpu_data.x86_vendor) {
++ case X86_VENDOR_INTEL:
++ if (!APIC_XAPIC(ver)) {
++ def_to_bigsmp = 0;
++ break;
++ }
++ /* If P4 and above fall through */
++ case X86_VENDOR_AMD:
++ def_to_bigsmp = 1;
++ }
++ }
++ bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
++}
++#else
++void __init MP_processor_info (struct mpc_config_processor *m)
++{
++ num_processors++;
++}
++#endif /* CONFIG_XEN */
++
++static void __init MP_bus_info (struct mpc_config_bus *m)
++{
++ char str[7];
++
++ memcpy(str, m->mpc_bustype, 6);
++ str[6] = 0;
++
++ mpc_oem_bus_info(m, str, translation_table[mpc_record]);
++
++ if (m->mpc_busid >= MAX_MP_BUSSES) {
++ printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
++ " is too large, max. supported is %d\n",
++ m->mpc_busid, str, MAX_MP_BUSSES - 1);
++ return;
++ }
++
++ if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
++ } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
++ } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
++ mpc_oem_pci_bus(m, translation_table[mpc_record]);
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
++ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
++ mp_current_pci_id++;
++ } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
++ } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
++ } else {
++ printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
++ }
++}
++
++static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
++{
++ if (!(m->mpc_flags & MPC_APIC_USABLE))
++ return;
++
++ printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n",
++ m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
++ if (nr_ioapics >= MAX_IO_APICS) {
++ printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
++ MAX_IO_APICS, nr_ioapics);
++ panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
++ }
++ if (!m->mpc_apicaddr) {
++ printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
++ " found in MP table, skipping!\n");
++ return;
++ }
++ mp_ioapics[nr_ioapics] = *m;
++ nr_ioapics++;
++}
++
++static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
++{
++ mp_irqs [mp_irq_entries] = *m;
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
++ " IRQ %02x, APIC ID %x, APIC INT %02x\n",
++ m->mpc_irqtype, m->mpc_irqflag & 3,
++ (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
++ m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!!\n");
++}
++
++static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
++{
++ Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
++ " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
++ m->mpc_irqtype, m->mpc_irqflag & 3,
++ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
++ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
++ /*
++ * Well it seems all SMP boards in existence
++ * use ExtINT/LVT1 == LINT0 and
++ * NMI/LVT2 == LINT1 - the following check
++	 * will show us if this assumption is false.
++ * Until then we do not have to add baggage.
++ */
++ if ((m->mpc_irqtype == mp_ExtINT) &&
++ (m->mpc_destapiclint != 0))
++ BUG();
++ if ((m->mpc_irqtype == mp_NMI) &&
++ (m->mpc_destapiclint != 1))
++ BUG();
++}
++
++#ifdef CONFIG_X86_NUMAQ
++static void __init MP_translation_info (struct mpc_config_translation *m)
++{
++ printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
++
++ if (mpc_record >= MAX_MPC_ENTRY)
++ printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
++ else
++ translation_table[mpc_record] = m; /* stash this for later */
++ if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
++ node_set_online(m->trans_quad);
++}
++
++/*
++ * Read/parse the MPC oem tables
++ */
++
++static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
++ unsigned short oemsize)
++{
++ int count = sizeof (*oemtable); /* the header size */
++ unsigned char *oemptr = ((unsigned char *)oemtable)+count;
++
++ mpc_record = 0;
++ printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
++ if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
++ {
++ printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
++ oemtable->oem_signature[0],
++ oemtable->oem_signature[1],
++ oemtable->oem_signature[2],
++ oemtable->oem_signature[3]);
++ return;
++ }
++ if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
++ {
++ printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
++ return;
++ }
++ while (count < oemtable->oem_length) {
++ switch (*oemptr) {
++ case MP_TRANSLATION:
++ {
++ struct mpc_config_translation *m=
++ (struct mpc_config_translation *)oemptr;
++ MP_translation_info(m);
++ oemptr += sizeof(*m);
++ count += sizeof(*m);
++ ++mpc_record;
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
++ return;
++ }
++ }
++ }
++}
++
++static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
++ char *productid)
++{
++ if (strncmp(oem, "IBM NUMA", 8))
++ printk("Warning! May not be a NUMA-Q system!\n");
++ if (mpc->mpc_oemptr)
++ smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
++ mpc->mpc_oemsize);
++}
++#endif /* CONFIG_X86_NUMAQ */
++
++/*
++ * Read/parse the MPC
++ */
++
++static int __init smp_read_mpc(struct mp_config_table *mpc)
++{
++ char str[16];
++ char oem[10];
++ int count=sizeof(*mpc);
++ unsigned char *mpt=((unsigned char *)mpc)+count;
++
++ if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
++ printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
++ *(u32 *)mpc->mpc_signature);
++ return 0;
++ }
++ if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
++ printk(KERN_ERR "SMP mptable: checksum error!\n");
++ return 0;
++ }
++ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
++ printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
++ mpc->mpc_spec);
++ return 0;
++ }
++ if (!mpc->mpc_lapic) {
++ printk(KERN_ERR "SMP mptable: null local APIC address!\n");
++ return 0;
++ }
++ memcpy(oem,mpc->mpc_oem,8);
++ oem[8]=0;
++ printk(KERN_INFO "OEM ID: %s ",oem);
++
++ memcpy(str,mpc->mpc_productid,12);
++ str[12]=0;
++ printk("Product ID: %s ",str);
++
++ mps_oem_check(mpc, oem, str);
++
++ printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
++
++ /*
++ * Save the local APIC address (it might be non-default) -- but only
++ * if we're not using ACPI.
++ */
++ if (!acpi_lapic)
++ mp_lapic_addr = mpc->mpc_lapic;
++
++ /*
++ * Now process the configuration blocks.
++ */
++ mpc_record = 0;
++ while (count < mpc->mpc_length) {
++ switch(*mpt) {
++ case MP_PROCESSOR:
++ {
++ struct mpc_config_processor *m=
++ (struct mpc_config_processor *)mpt;
++ /* ACPI may have already provided this data */
++ if (!acpi_lapic)
++ MP_processor_info(m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
++ break;
++ }
++ case MP_BUS:
++ {
++ struct mpc_config_bus *m=
++ (struct mpc_config_bus *)mpt;
++ MP_bus_info(m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
++ break;
++ }
++ case MP_IOAPIC:
++ {
++ struct mpc_config_ioapic *m=
++ (struct mpc_config_ioapic *)mpt;
++ MP_ioapic_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ case MP_INTSRC:
++ {
++ struct mpc_config_intsrc *m=
++ (struct mpc_config_intsrc *)mpt;
++
++ MP_intsrc_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ case MP_LINTSRC:
++ {
++ struct mpc_config_lintsrc *m=
++ (struct mpc_config_lintsrc *)mpt;
++ MP_lintsrc_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ default:
++ {
++ count = mpc->mpc_length;
++ break;
++ }
++ }
++ ++mpc_record;
++ }
++ clustered_apic_check();
++ if (!num_processors)
++ printk(KERN_ERR "SMP mptable: no processors registered!\n");
++ return num_processors;
++}
++
++static int __init ELCR_trigger(unsigned int irq)
++{
++ unsigned int port;
++
++ port = 0x4d0 + (irq >> 3);
++ return (inb(port) >> (irq & 7)) & 1;
++}
++
++static void __init construct_default_ioirq_mptable(int mpc_default_type)
++{
++ struct mpc_config_intsrc intsrc;
++ int i;
++ int ELCR_fallback = 0;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqflag = 0; /* conforming */
++ intsrc.mpc_srcbus = 0;
++ intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
++
++ intsrc.mpc_irqtype = mp_INT;
++
++ /*
++ * If true, we have an ISA/PCI system with no IRQ entries
++ * in the MP table. To prevent the PCI interrupts from being set up
++ * incorrectly, we try to use the ELCR. The sanity check to see if
++ * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
++ * never be level sensitive, so we simply see if the ELCR agrees.
++ * If it does, we assume it's valid.
++ */
++ if (mpc_default_type == 5) {
++ printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
++
++ if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
++ printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
++ else {
++ printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
++ ELCR_fallback = 1;
++ }
++ }
++
++ for (i = 0; i < 16; i++) {
++ switch (mpc_default_type) {
++ case 2:
++ if (i == 0 || i == 13)
++ continue; /* IRQ0 & IRQ13 not connected */
++ /* fall through */
++ default:
++ if (i == 2)
++ continue; /* IRQ2 is never connected */
++ }
++
++ if (ELCR_fallback) {
++ /*
++ * If the ELCR indicates a level-sensitive interrupt, we
++ * copy that information over to the MP table in the
++ * irqflag field (level sensitive, active high polarity).
++ */
++ if (ELCR_trigger(i))
++ intsrc.mpc_irqflag = 13;
++ else
++ intsrc.mpc_irqflag = 0;
++ }
++
++ intsrc.mpc_srcbusirq = i;
++ intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
++ MP_intsrc_info(&intsrc);
++ }
++
++ intsrc.mpc_irqtype = mp_ExtINT;
++ intsrc.mpc_srcbusirq = 0;
++ intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
++ MP_intsrc_info(&intsrc);
++}
++
++static inline void __init construct_default_ISA_mptable(int mpc_default_type)
++{
++ struct mpc_config_processor processor;
++ struct mpc_config_bus bus;
++ struct mpc_config_ioapic ioapic;
++ struct mpc_config_lintsrc lintsrc;
++ int linttypes[2] = { mp_ExtINT, mp_NMI };
++ int i;
++
++ /*
++ * local APIC has default address
++ */
++ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
++
++ /*
++ * 2 CPUs, numbered 0 & 1.
++ */
++ processor.mpc_type = MP_PROCESSOR;
++ /* Either an integrated APIC or a discrete 82489DX. */
++ processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++ processor.mpc_cpuflag = CPU_ENABLED;
++ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++ (boot_cpu_data.x86_model << 4) |
++ boot_cpu_data.x86_mask;
++ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++ processor.mpc_reserved[0] = 0;
++ processor.mpc_reserved[1] = 0;
++ for (i = 0; i < 2; i++) {
++ processor.mpc_apicid = i;
++ MP_processor_info(&processor);
++ }
++
++ bus.mpc_type = MP_BUS;
++ bus.mpc_busid = 0;
++ switch (mpc_default_type) {
++ default:
++ printk("???\n");
++ printk(KERN_ERR "Unknown standard configuration %d\n",
++ mpc_default_type);
++ /* fall through */
++ case 1:
++ case 5:
++ memcpy(bus.mpc_bustype, "ISA ", 6);
++ break;
++ case 2:
++ case 6:
++ case 3:
++ memcpy(bus.mpc_bustype, "EISA ", 6);
++ break;
++ case 4:
++ case 7:
++ memcpy(bus.mpc_bustype, "MCA ", 6);
++ }
++ MP_bus_info(&bus);
++ if (mpc_default_type > 4) {
++ bus.mpc_busid = 1;
++ memcpy(bus.mpc_bustype, "PCI ", 6);
++ MP_bus_info(&bus);
++ }
++
++ ioapic.mpc_type = MP_IOAPIC;
++ ioapic.mpc_apicid = 2;
++ ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++ ioapic.mpc_flags = MPC_APIC_USABLE;
++ ioapic.mpc_apicaddr = 0xFEC00000;
++ MP_ioapic_info(&ioapic);
++
++ /*
++ * We set up most of the low 16 IO-APIC pins according to MPS rules.
++ */
++ construct_default_ioirq_mptable(mpc_default_type);
++
++ lintsrc.mpc_type = MP_LINTSRC;
++ lintsrc.mpc_irqflag = 0; /* conforming */
++ lintsrc.mpc_srcbusid = 0;
++ lintsrc.mpc_srcbusirq = 0;
++ lintsrc.mpc_destapic = MP_APIC_ALL;
++ for (i = 0; i < 2; i++) {
++ lintsrc.mpc_irqtype = linttypes[i];
++ lintsrc.mpc_destapiclint = i;
++ MP_lintsrc_info(&lintsrc);
++ }
++}
++
++static struct intel_mp_floating *mpf_found;
++
++/*
++ * Scan the memory blocks for an SMP configuration block.
++ */
++void __init get_smp_config (void)
++{
++ struct intel_mp_floating *mpf = mpf_found;
++
++ /*
++ * ACPI supports both logical (e.g. Hyper-Threading) and physical
++ * processors, where MPS only supports physical.
++ */
++ if (acpi_lapic && acpi_ioapic) {
++ printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
++ return;
++ }
++ else if (acpi_lapic)
++ printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
++
++ printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
++ if (mpf->mpf_feature2 & (1<<7)) {
++ printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
++ pic_mode = 1;
++ } else {
++ printk(KERN_INFO " Virtual Wire compatibility mode.\n");
++ pic_mode = 0;
++ }
++
++ /*
++ * Now see if we need to read further.
++ */
++ if (mpf->mpf_feature1 != 0) {
++
++ printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
++ construct_default_ISA_mptable(mpf->mpf_feature1);
++
++ } else if (mpf->mpf_physptr) {
++
++ /*
++ * Read the physical hardware table. Anything here will
++ * override the defaults.
++ */
++ if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
++ smp_found_config = 0;
++ printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
++ printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
++ return;
++ }
++ /*
++ * If there are no explicit MP IRQ entries, then we are
++ * broken. We set up most of the low 16 IO-APIC pins to
++ * ISA defaults and hope it will work.
++ */
++ if (!mp_irq_entries) {
++ struct mpc_config_bus bus;
++
++ printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
++
++ bus.mpc_type = MP_BUS;
++ bus.mpc_busid = 0;
++ memcpy(bus.mpc_bustype, "ISA ", 6);
++ MP_bus_info(&bus);
++
++ construct_default_ioirq_mptable(0);
++ }
++
++ } else
++ BUG();
++
++ printk(KERN_INFO "Processors: %d\n", num_processors);
++ /*
++ * Only use the first configuration found.
++ */
++}
++
++static int __init smp_scan_config (unsigned long base, unsigned long length)
++{
++ unsigned long *bp = isa_bus_to_virt(base);
++ struct intel_mp_floating *mpf;
++
++ Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
++ if (sizeof(*mpf) != 16)
++ printk("Error: MPF size\n");
++
++ while (length > 0) {
++ mpf = (struct intel_mp_floating *)bp;
++ if ((*bp == SMP_MAGIC_IDENT) &&
++ (mpf->mpf_length == 1) &&
++ !mpf_checksum((unsigned char *)bp, 16) &&
++ ((mpf->mpf_specification == 1)
++ || (mpf->mpf_specification == 4)) ) {
++
++ smp_found_config = 1;
++#ifndef CONFIG_XEN
++ printk(KERN_INFO "found SMP MP-table at %08lx\n",
++ virt_to_phys(mpf));
++ reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
++ if (mpf->mpf_physptr) {
++ /*
++			 * We cannot access the MPC table to compute
++			 * its size yet, as only a few megabytes from
++			 * the bottom are mapped at this point.
++			 * The PC-9800's MPC table sits at the very end
++			 * of physical memory, so simply reserving
++			 * PAGE_SIZE from mpf->mpf_physptr would trigger
++			 * a BUG() in reserve_bootmem.
++ */
++ unsigned long size = PAGE_SIZE;
++ unsigned long end = max_low_pfn * PAGE_SIZE;
++ if (mpf->mpf_physptr + size > end)
++ size = end - mpf->mpf_physptr;
++ reserve_bootmem(mpf->mpf_physptr, size);
++ }
++#else
++ printk(KERN_INFO "found SMP MP-table at %08lx\n",
++ ((unsigned long)bp - (unsigned long)isa_bus_to_virt(base)) + base);
++#endif
++
++ mpf_found = mpf;
++ return 1;
++ }
++ bp += 4;
++ length -= 16;
++ }
++ return 0;
++}
++
++void __init find_smp_config (void)
++{
++#ifndef CONFIG_XEN
++ unsigned int address;
++#endif
++
++ /*
++ * FIXME: Linux assumes you have 640K of base ram..
++ * this continues the error...
++ *
++ * 1) Scan the bottom 1K for a signature
++ * 2) Scan the top 1K of base RAM
++ * 3) Scan the 64K of bios
++ */
++ if (smp_scan_config(0x0,0x400) ||
++ smp_scan_config(639*0x400,0x400) ||
++ smp_scan_config(0xF0000,0x10000))
++ return;
++ /*
++ * If it is an SMP machine we should know now, unless the
++ * configuration is in an EISA/MCA bus machine with an
++ * extended bios data area.
++ *
++ * there is a real-mode segmented pointer pointing to the
++ * 4K EBDA area at 0x40E, calculate and scan it here.
++ *
++ * NOTE! There are Linux loaders that will corrupt the EBDA
++ * area, and as such this kind of SMP config may be less
++ * trustworthy, simply because the SMP table may have been
++ * stomped on during early boot. These loaders are buggy and
++ * should be fixed.
++ *
++ * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
++ */
++
++#ifndef CONFIG_XEN
++ address = get_bios_ebda();
++ if (address)
++ smp_scan_config(address, 0x400);
++#endif
++}
++
++int es7000_plat;
++
++/* --------------------------------------------------------------------------
++ ACPI-based MP Configuration
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++void __init mp_register_lapic_address (
++ u64 address)
++{
++#ifndef CONFIG_XEN
++ mp_lapic_addr = (unsigned long) address;
++
++ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
++
++ if (boot_cpu_physical_apicid == -1U)
++ boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
++
++ Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
++#endif
++}
++
++
++void __devinit mp_register_lapic (
++ u8 id,
++ u8 enabled)
++{
++ struct mpc_config_processor processor;
++ int boot_cpu = 0;
++
++ if (MAX_APICS - id <= 0) {
++ printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
++ id, MAX_APICS);
++ return;
++ }
++
++ if (id == boot_cpu_physical_apicid)
++ boot_cpu = 1;
++
++#ifndef CONFIG_XEN
++ processor.mpc_type = MP_PROCESSOR;
++ processor.mpc_apicid = id;
++ processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
++ processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
++ processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
++ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
++ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++ processor.mpc_reserved[0] = 0;
++ processor.mpc_reserved[1] = 0;
++#endif
++
++ MP_processor_info(&processor);
++}
++
++#ifdef CONFIG_X86_IO_APIC
++
++#define MP_ISA_BUS 0
++#define MP_MAX_IOAPIC_PIN 127
++
++static struct mp_ioapic_routing {
++ int apic_id;
++ int gsi_base;
++ int gsi_end;
++ u32 pin_programmed[4];
++} mp_ioapic_routing[MAX_IO_APICS];
++
++
++static int mp_find_ioapic (
++ int gsi)
++{
++ int i = 0;
++
++ /* Find the IOAPIC that manages this GSI. */
++ for (i = 0; i < nr_ioapics; i++) {
++ if ((gsi >= mp_ioapic_routing[i].gsi_base)
++ && (gsi <= mp_ioapic_routing[i].gsi_end))
++ return i;
++ }
++
++ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
++
++ return -1;
++}
++
++
++void __init mp_register_ioapic (
++ u8 id,
++ u32 address,
++ u32 gsi_base)
++{
++ int idx = 0;
++ int tmpid;
++
++ if (nr_ioapics >= MAX_IO_APICS) {
++ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
++ "(found %d)\n", MAX_IO_APICS, nr_ioapics);
++ panic("Recompile kernel with bigger MAX_IO_APICS!\n");
++ }
++ if (!address) {
++ printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
++ " found in MADT table, skipping!\n");
++ return;
++ }
++
++ idx = nr_ioapics++;
++
++ mp_ioapics[idx].mpc_type = MP_IOAPIC;
++ mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
++ mp_ioapics[idx].mpc_apicaddr = address;
++
++#ifndef CONFIG_XEN
++ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
++#endif
++ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
++ tmpid = io_apic_get_unique_id(idx, id);
++ else
++ tmpid = id;
++ if (tmpid == -1) {
++ nr_ioapics--;
++ return;
++ }
++ mp_ioapics[idx].mpc_apicid = tmpid;
++ mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
++
++ /*
++ * Build basic GSI lookup table to facilitate gsi->io_apic lookups
++ * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
++ */
++ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
++ mp_ioapic_routing[idx].gsi_base = gsi_base;
++ mp_ioapic_routing[idx].gsi_end = gsi_base +
++ io_apic_get_redir_entries(idx);
++
++ printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
++ "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
++ mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
++ mp_ioapic_routing[idx].gsi_base,
++ mp_ioapic_routing[idx].gsi_end);
++
++ return;
++}
++
++
++void __init mp_override_legacy_irq (
++ u8 bus_irq,
++ u8 polarity,
++ u8 trigger,
++ u32 gsi)
++{
++ struct mpc_config_intsrc intsrc;
++ int ioapic = -1;
++ int pin = -1;
++
++ /*
++ * Convert 'gsi' to 'ioapic.pin'.
++ */
++ ioapic = mp_find_ioapic(gsi);
++ if (ioapic < 0)
++ return;
++ pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
++
++ /*
++ * TBD: This check is for faulty timer entries, where the override
++ * erroneously sets the trigger to level, resulting in a HUGE
++ * increase of timer interrupts!
++ */
++ if ((bus_irq == 0) && (trigger == 3))
++ trigger = 1;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqtype = mp_INT;
++ intsrc.mpc_irqflag = (trigger << 2) | polarity;
++ intsrc.mpc_srcbus = MP_ISA_BUS;
++ intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
++ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
++ intsrc.mpc_dstirq = pin; /* INTIN# */
++
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
++ intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
++ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
++ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
++
++ mp_irqs[mp_irq_entries] = intsrc;
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!\n");
++
++ return;
++}
++
++void __init mp_config_acpi_legacy_irqs (void)
++{
++ struct mpc_config_intsrc intsrc;
++ int i = 0;
++ int ioapic = -1;
++
++ /*
++ * Fabricate the legacy ISA bus (bus #31).
++ */
++ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
++ Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
++
++ /*
++ * Older generations of ES7000 have no legacy identity mappings
++ */
++ if (es7000_plat == 1)
++ return;
++
++ /*
++ * Locate the IOAPIC that manages the ISA IRQs (0-15).
++ */
++ ioapic = mp_find_ioapic(0);
++ if (ioapic < 0)
++ return;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqflag = 0; /* Conforming */
++ intsrc.mpc_srcbus = MP_ISA_BUS;
++ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
++
++ /*
++	 * Use the default configuration for IRQs 0-15, unless
++	 * overridden by (MADT) interrupt source override entries.
++ */
++ for (i = 0; i < 16; i++) {
++ int idx;
++
++ for (idx = 0; idx < mp_irq_entries; idx++) {
++ struct mpc_config_intsrc *irq = mp_irqs + idx;
++
++ /* Do we already have a mapping for this ISA IRQ? */
++ if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
++ break;
++
++ /* Do we already have a mapping for this IOAPIC pin */
++ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
++ (irq->mpc_dstirq == i))
++ break;
++ }
++
++ if (idx != mp_irq_entries) {
++ printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
++ continue; /* IRQ already used */
++ }
++
++ intsrc.mpc_irqtype = mp_INT;
++ intsrc.mpc_srcbusirq = i; /* Identity mapped */
++ intsrc.mpc_dstirq = i;
++
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
++ "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
++ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
++ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
++ intsrc.mpc_dstirq);
++
++ mp_irqs[mp_irq_entries] = intsrc;
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!\n");
++ }
++}
++
++#define MAX_GSI_NUM 4096
++
++int mp_register_gsi (u32 gsi, int triggering, int polarity)
++{
++ int ioapic = -1;
++ int ioapic_pin = 0;
++ int idx, bit = 0;
++ static int pci_irq = 16;
++ /*
++	 * Mapping between Global System Interrupts, which
++ * represent all possible interrupts, and IRQs
++ * assigned to actual devices.
++ */
++ static int gsi_to_irq[MAX_GSI_NUM];
++
++ /* Don't set up the ACPI SCI because it's already set up */
++ if (acpi_fadt.sci_int == gsi)
++ return gsi;
++
++ ioapic = mp_find_ioapic(gsi);
++ if (ioapic < 0) {
++ printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
++ return gsi;
++ }
++
++ ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
++
++ if (ioapic_renumber_irq)
++ gsi = ioapic_renumber_irq(ioapic, gsi);
++
++ /*
++ * Avoid pin reprogramming. PRTs typically include entries
++ * with redundant pin->gsi mappings (but unique PCI devices);
++ * we only program the IOAPIC on the first.
++ */
++ bit = ioapic_pin % 32;
++ idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
++ if (idx > 3) {
++ printk(KERN_ERR "Invalid reference to IOAPIC pin "
++ "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
++ ioapic_pin);
++ return gsi;
++ }
++ if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
++ Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
++ mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
++ return gsi_to_irq[gsi];
++ }
++
++ mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
++
++ if (triggering == ACPI_LEVEL_SENSITIVE) {
++ /*
++ * For PCI devices assign IRQs in order, avoiding gaps
++ * due to unused I/O APIC pins.
++ */
++ int irq = gsi;
++ if (gsi < MAX_GSI_NUM) {
++ /*
++ * Retain the VIA chipset work-around (gsi > 15), but
++ * avoid a problem where the 8254 timer (IRQ0) is setup
++ * via an override (so it's not on pin 0 of the ioapic),
++ * and at the same time, the pin 0 interrupt is a PCI
++ * type. The gsi > 15 test could cause these two pins
++ * to be shared as IRQ0, and they are not shareable.
++ * So test for this condition, and if necessary, avoid
++ * the pin collision.
++ */
++ if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
++ gsi = pci_irq++;
++ /*
++ * Don't assign IRQ used by ACPI SCI
++ */
++ if (gsi == acpi_fadt.sci_int)
++ gsi = pci_irq++;
++ gsi_to_irq[irq] = gsi;
++ } else {
++ printk(KERN_ERR "GSI %u is too high\n", gsi);
++ return gsi;
++ }
++ }
++
++ io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
++ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
++ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
++ return gsi;
++}
++
++#endif /* CONFIG_X86_IO_APIC */
++#endif /* CONFIG_ACPI */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/pci-dma-xen.c 2007-08-27 14:02:10.000000000 -0400
+@@ -0,0 +1,366 @@
++/*
++ * Dynamic DMA mapping support.
++ *
++ * On i386 there is no hardware dynamic DMA address translation,
++ * so consistent alloc/free are merely page allocation/freeing.
++ * The rest of the dynamic DMA mapping interface is implemented
++ * in asm/pci.h.
++ */
++
++#include <linux/types.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/pci.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <asm/io.h>
++#include <xen/balloon.h>
++#include <asm/swiotlb.h>
++#include <asm/tlbflush.h>
++#include <asm-i386/mach-xen/asm/swiotlb.h>
++#include <asm/bug.h>
++
++#ifdef __x86_64__
++#include <asm/proto.h>
++
++int iommu_merge __read_mostly = 0;
++EXPORT_SYMBOL(iommu_merge);
++
++dma_addr_t bad_dma_address __read_mostly;
++EXPORT_SYMBOL(bad_dma_address);
++
++/* This tells the BIO block layer to assume merging. Default to off
++ because we cannot guarantee merging later. */
++int iommu_bio_merge __read_mostly = 0;
++EXPORT_SYMBOL(iommu_bio_merge);
++
++int force_iommu __read_mostly= 0;
++
++__init int iommu_setup(char *p)
++{
++ return 1;
++}
++
++void __init pci_iommu_alloc(void)
++{
++#ifdef CONFIG_SWIOTLB
++ pci_swiotlb_init();
++#endif
++}
++
++static int __init pci_iommu_init(void)
++{
++ no_iommu_init();
++ return 0;
++}
++
++/* Must execute after PCI subsystem */
++fs_initcall(pci_iommu_init);
++#endif
++
++struct dma_coherent_mem {
++ void *virt_base;
++ u32 device_base;
++ int size;
++ int flags;
++ unsigned long *bitmap;
++};
++
++#define IOMMU_BUG_ON(test) \
++do { \
++ if (unlikely(test)) { \
++ printk(KERN_ALERT "Fatal DMA error! " \
++ "Please use 'swiotlb=force'\n"); \
++ BUG(); \
++ } \
++} while (0)
++
++int
++dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++ enum dma_data_direction direction)
++{
++ int i, rc;
++
++ if (direction == DMA_NONE)
++ BUG();
++ WARN_ON(nents == 0 || sg[0].length == 0);
++
++ if (swiotlb) {
++ rc = swiotlb_map_sg(hwdev, sg, nents, direction);
++ } else {
++ for (i = 0; i < nents; i++ ) {
++ sg[i].dma_address =
++ page_to_bus(sg[i].page) + sg[i].offset;
++ sg[i].dma_length = sg[i].length;
++ BUG_ON(!sg[i].page);
++ IOMMU_BUG_ON(address_needs_mapping(
++ hwdev, sg[i].dma_address));
++ }
++ rc = nents;
++ }
++
++ flush_write_buffers();
++ return rc;
++}
++EXPORT_SYMBOL(dma_map_sg);
++
++void
++dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++ enum dma_data_direction direction)
++{
++ BUG_ON(direction == DMA_NONE);
++ if (swiotlb)
++ swiotlb_unmap_sg(hwdev, sg, nents, direction);
++}
++EXPORT_SYMBOL(dma_unmap_sg);
++
++#ifdef CONFIG_HIGHMEM
++dma_addr_t
++dma_map_page(struct device *dev, struct page *page, unsigned long offset,
++ size_t size, enum dma_data_direction direction)
++{
++ dma_addr_t dma_addr;
++
++ BUG_ON(direction == DMA_NONE);
++
++ if (swiotlb) {
++ dma_addr = swiotlb_map_page(
++ dev, page, offset, size, direction);
++ } else {
++ dma_addr = page_to_bus(page) + offset;
++ IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
++ }
++
++ return dma_addr;
++}
++EXPORT_SYMBOL(dma_map_page);
++
++void
++dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
++ enum dma_data_direction direction)
++{
++ BUG_ON(direction == DMA_NONE);
++ if (swiotlb)
++ swiotlb_unmap_page(dev, dma_address, size, direction);
++}
++EXPORT_SYMBOL(dma_unmap_page);
++#endif /* CONFIG_HIGHMEM */
++
++int
++dma_mapping_error(dma_addr_t dma_addr)
++{
++ if (swiotlb)
++ return swiotlb_dma_mapping_error(dma_addr);
++ return 0;
++}
++EXPORT_SYMBOL(dma_mapping_error);
++
++int
++dma_supported(struct device *dev, u64 mask)
++{
++ if (swiotlb)
++ return swiotlb_dma_supported(dev, mask);
++ /*
++ * By default we'll BUG when an infeasible DMA is requested, and
++ * request swiotlb=force (see IOMMU_BUG_ON).
++ */
++ return 1;
++}
++EXPORT_SYMBOL(dma_supported);
++
++void *dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp)
++{
++ void *ret;
++ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++ unsigned int order = get_order(size);
++ unsigned long vstart;
++ u64 mask;
++
++ /* ignore region specifiers */
++ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
++
++ if (mem) {
++ int page = bitmap_find_free_region(mem->bitmap, mem->size,
++ order);
++ if (page >= 0) {
++ *dma_handle = mem->device_base + (page << PAGE_SHIFT);
++ ret = mem->virt_base + (page << PAGE_SHIFT);
++ memset(ret, 0, size);
++ return ret;
++ }
++ if (mem->flags & DMA_MEMORY_EXCLUSIVE)
++ return NULL;
++ }
++
++ if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
++ gfp |= GFP_DMA;
++
++ vstart = __get_free_pages(gfp, order);
++ ret = (void *)vstart;
++
++ if (dev != NULL && dev->coherent_dma_mask)
++ mask = dev->coherent_dma_mask;
++ else
++ mask = 0xffffffff;
++
++ if (ret != NULL) {
++ if (xen_create_contiguous_region(vstart, order,
++ fls64(mask)) != 0) {
++ free_pages(vstart, order);
++ return NULL;
++ }
++ memset(ret, 0, size);
++ *dma_handle = virt_to_bus(ret);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(dma_alloc_coherent);
++
++void dma_free_coherent(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_handle)
++{
++ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++ int order = get_order(size);
++
++ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
++ int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
++
++ bitmap_release_region(mem->bitmap, page, order);
++ } else {
++ xen_destroy_contiguous_region((unsigned long)vaddr, order);
++ free_pages((unsigned long)vaddr, order);
++ }
++}
++EXPORT_SYMBOL(dma_free_coherent);
++
++#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
++int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
++ dma_addr_t device_addr, size_t size, int flags)
++{
++ void __iomem *mem_base;
++ int pages = size >> PAGE_SHIFT;
++ int bitmap_size = (pages + 31)/32;
++
++ if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
++ goto out;
++ if (!size)
++ goto out;
++ if (dev->dma_mem)
++ goto out;
++
++ /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
++
++ mem_base = ioremap(bus_addr, size);
++ if (!mem_base)
++ goto out;
++
++ dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
++ if (!dev->dma_mem)
++ goto out;
++ memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
++ dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
++ if (!dev->dma_mem->bitmap)
++ goto free1_out;
++ memset(dev->dma_mem->bitmap, 0, bitmap_size);
++
++ dev->dma_mem->virt_base = mem_base;
++ dev->dma_mem->device_base = device_addr;
++ dev->dma_mem->size = pages;
++ dev->dma_mem->flags = flags;
++
++ if (flags & DMA_MEMORY_MAP)
++ return DMA_MEMORY_MAP;
++
++ return DMA_MEMORY_IO;
++
++ free1_out:
++ kfree(dev->dma_mem->bitmap);
++ out:
++ return 0;
++}
++EXPORT_SYMBOL(dma_declare_coherent_memory);
++
++void dma_release_declared_memory(struct device *dev)
++{
++ struct dma_coherent_mem *mem = dev->dma_mem;
++
++ if(!mem)
++ return;
++ dev->dma_mem = NULL;
++ iounmap(mem->virt_base);
++ kfree(mem->bitmap);
++ kfree(mem);
++}
++EXPORT_SYMBOL(dma_release_declared_memory);
++
++void *dma_mark_declared_memory_occupied(struct device *dev,
++ dma_addr_t device_addr, size_t size)
++{
++ struct dma_coherent_mem *mem = dev->dma_mem;
++ int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ int pos, err;
++
++ if (!mem)
++ return ERR_PTR(-EINVAL);
++
++ pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
++ err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
++ if (err != 0)
++ return ERR_PTR(err);
++ return mem->virt_base + (pos << PAGE_SHIFT);
++}
++EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
++#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
++
++dma_addr_t
++dma_map_single(struct device *dev, void *ptr, size_t size,
++ enum dma_data_direction direction)
++{
++ dma_addr_t dma;
++
++ if (direction == DMA_NONE)
++ BUG();
++ WARN_ON(size == 0);
++
++ if (swiotlb) {
++ dma = swiotlb_map_single(dev, ptr, size, direction);
++ } else {
++ dma = virt_to_bus(ptr);
++ IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
++ IOMMU_BUG_ON(address_needs_mapping(dev, dma));
++ }
++
++ flush_write_buffers();
++ return dma;
++}
++EXPORT_SYMBOL(dma_map_single);
++
++void
++dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++ enum dma_data_direction direction)
++{
++ if (direction == DMA_NONE)
++ BUG();
++ if (swiotlb)
++ swiotlb_unmap_single(dev, dma_addr, size, direction);
++}
++EXPORT_SYMBOL(dma_unmap_single);
++
++void
++dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
++{
++ if (swiotlb)
++ swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
++}
++EXPORT_SYMBOL(dma_sync_single_for_cpu);
++
++void
++dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
++{
++ if (swiotlb)
++ swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
++}
++EXPORT_SYMBOL(dma_sync_single_for_device);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/process-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,853 @@
++/*
++ * linux/arch/i386/kernel/process.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Pentium III FXSR, SSE support
++ * Gareth Hughes <gareth@valinux.com>, May 2000
++ */
++
++/*
++ * This file handles the architecture-dependent parts of process handling..
++ */
++
++#include <stdarg.h>
++
++#include <linux/cpu.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/elfcore.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/user.h>
++#include <linux/a.out.h>
++#include <linux/interrupt.h>
++#include <linux/utsname.h>
++#include <linux/delay.h>
++#include <linux/reboot.h>
++#include <linux/init.h>
++#include <linux/mc146818rtc.h>
++#include <linux/module.h>
++#include <linux/kallsyms.h>
++#include <linux/ptrace.h>
++#include <linux/random.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/ldt.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/desc.h>
++#include <asm/vm86.h>
++#ifdef CONFIG_MATH_EMULATION
++#include <asm/math_emu.h>
++#endif
++
++#include <xen/interface/physdev.h>
++#include <xen/interface/vcpu.h>
++#include <xen/cpu_hotplug.h>
++
++#include <linux/err.h>
++
++#include <asm/tlbflush.h>
++#include <asm/cpu.h>
++
++asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
++
++static int hlt_counter;
++
++unsigned long boot_option_idle_override = 0;
++EXPORT_SYMBOL(boot_option_idle_override);
++
++/*
++ * Return saved PC of a blocked thread.
++ */
++unsigned long thread_saved_pc(struct task_struct *tsk)
++{
++ return ((unsigned long *)tsk->thread.esp)[3];
++}
++
++/*
++ * Powermanagement idle function, if any..
++ */
++void (*pm_idle)(void);
++EXPORT_SYMBOL(pm_idle);
++static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
++
++void disable_hlt(void)
++{
++ hlt_counter++;
++}
++
++EXPORT_SYMBOL(disable_hlt);
++
++void enable_hlt(void)
++{
++ hlt_counter--;
++}
++
++EXPORT_SYMBOL(enable_hlt);
++
++/*
++ * On SMP it's slightly faster (but much more power-consuming!)
++ * to poll the ->work.need_resched flag instead of waiting for the
++ * cross-CPU IPI to arrive. Use this option with caution.
++ */
++static void poll_idle (void)
++{
++ local_irq_enable();
++
++ asm volatile(
++ "2:"
++ "testl %0, %1;"
++ "rep; nop;"
++ "je 2b;"
++ : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
++}
++
++static void xen_idle(void)
++{
++ local_irq_disable();
++
++ if (need_resched())
++ local_irq_enable();
++ else {
++ current_thread_info()->status &= ~TS_POLLING;
++ smp_mb__after_clear_bit();
++ safe_halt();
++ current_thread_info()->status |= TS_POLLING;
++ }
++}
++#ifdef CONFIG_APM_MODULE
++EXPORT_SYMBOL(default_idle);
++#endif
++
++#ifdef CONFIG_HOTPLUG_CPU
++extern cpumask_t cpu_initialized;
++static inline void play_dead(void)
++{
++ idle_task_exit();
++ local_irq_disable();
++ cpu_clear(smp_processor_id(), cpu_initialized);
++ preempt_enable_no_resched();
++ HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
++ cpu_bringup();
++}
++#else
++static inline void play_dead(void)
++{
++ BUG();
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++/*
++ * The idle thread. There's no useful work to be
++ * done, so just try to conserve power and have a
++ * low exit latency (ie sit in a loop waiting for
++ * somebody to say that they'd like to reschedule)
++ */
++void cpu_idle(void)
++{
++ int cpu = smp_processor_id();
++
++ current_thread_info()->status |= TS_POLLING;
++
++ /* endless idle loop with no priority at all */
++ while (1) {
++ while (!need_resched()) {
++ void (*idle)(void);
++
++ if (__get_cpu_var(cpu_idle_state))
++ __get_cpu_var(cpu_idle_state) = 0;
++
++ rmb();
++ idle = xen_idle; /* no alternatives */
++
++ if (cpu_is_offline(cpu))
++ play_dead();
++
++ __get_cpu_var(irq_stat).idle_timestamp = jiffies;
++ idle();
++ }
++ preempt_enable_no_resched();
++ schedule();
++ preempt_disable();
++ }
++}
++
++void cpu_idle_wait(void)
++{
++ unsigned int cpu, this_cpu = get_cpu();
++ cpumask_t map;
++
++ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
++ put_cpu();
++
++ cpus_clear(map);
++ for_each_online_cpu(cpu) {
++ per_cpu(cpu_idle_state, cpu) = 1;
++ cpu_set(cpu, map);
++ }
++
++ __get_cpu_var(cpu_idle_state) = 0;
++
++ wmb();
++ do {
++ ssleep(1);
++ for_each_online_cpu(cpu) {
++ if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
++ cpu_clear(cpu, map);
++ }
++ cpus_and(map, map, cpu_online_map);
++ } while (!cpus_empty(map));
++}
++EXPORT_SYMBOL_GPL(cpu_idle_wait);
++
++void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
++{
++}
++
++static int __init idle_setup (char *str)
++{
++ if (!strncmp(str, "poll", 4)) {
++ printk("using polling idle threads.\n");
++ pm_idle = poll_idle;
++ }
++
++ boot_option_idle_override = 1;
++ return 1;
++}
++
++__setup("idle=", idle_setup);
++
++void show_regs(struct pt_regs * regs)
++{
++ unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
++
++ printk("\n");
++ printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
++ printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
++ print_symbol("EIP is at %s\n", regs->eip);
++
++ if (user_mode_vm(regs))
++ printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
++ printk(" EFLAGS: %08lx %s (%s %.*s)\n",
++ regs->eflags, print_tainted(), system_utsname.release,
++ (int)strcspn(system_utsname.version, " "),
++ system_utsname.version);
++ printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
++ regs->eax,regs->ebx,regs->ecx,regs->edx);
++ printk("ESI: %08lx EDI: %08lx EBP: %08lx",
++ regs->esi, regs->edi, regs->ebp);
++ printk(" DS: %04x ES: %04x\n",
++ 0xffff & regs->xds,0xffff & regs->xes);
++
++ cr0 = read_cr0();
++ cr2 = read_cr2();
++ cr3 = read_cr3();
++ cr4 = read_cr4_safe();
++ printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
++ show_trace(NULL, regs, &regs->esp);
++}
++
++/*
++ * This gets run with %ebx containing the
++ * function to call, and %edx containing
++ * the "args".
++ */
++extern void kernel_thread_helper(void);
++__asm__(".section .text\n"
++ ".align 4\n"
++ "kernel_thread_helper:\n\t"
++ "movl %edx,%eax\n\t"
++ "pushl %edx\n\t"
++ "call *%ebx\n\t"
++ "pushl %eax\n\t"
++ "call do_exit\n"
++ ".previous");
++
++/*
++ * Create a kernel thread
++ */
++int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
++{
++ struct pt_regs regs;
++
++ memset(&regs, 0, sizeof(regs));
++
++ regs.ebx = (unsigned long) fn;
++ regs.edx = (unsigned long) arg;
++
++ regs.xds = __USER_DS;
++ regs.xes = __USER_DS;
++ regs.orig_eax = -1;
++ regs.eip = (unsigned long) kernel_thread_helper;
++ regs.xcs = GET_KERNEL_CS();
++ regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
++
++ /* Ok, create the new process.. */
++ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
++}
++EXPORT_SYMBOL(kernel_thread);
++
++/*
++ * Free current thread data structures etc..
++ */
++void exit_thread(void)
++{
++ /* The process may have allocated an io port bitmap... nuke it. */
++ if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
++ struct task_struct *tsk = current;
++ struct thread_struct *t = &tsk->thread;
++ struct physdev_set_iobitmap set_iobitmap;
++ memset(&set_iobitmap, 0, sizeof(set_iobitmap));
++ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
++ kfree(t->io_bitmap_ptr);
++ t->io_bitmap_ptr = NULL;
++ clear_thread_flag(TIF_IO_BITMAP);
++ }
++}
++
++void flush_thread(void)
++{
++ struct task_struct *tsk = current;
++
++ memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
++ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
++ clear_tsk_thread_flag(tsk, TIF_DEBUG);
++ /*
++ * Forget coprocessor state..
++ */
++ clear_fpu(tsk);
++ clear_used_math();
++}
++
++void release_thread(struct task_struct *dead_task)
++{
++ BUG_ON(dead_task->mm);
++ release_vm86_irqs(dead_task);
++}
++
++/*
++ * This gets called before we allocate a new thread and copy
++ * the current task into it.
++ */
++void prepare_to_copy(struct task_struct *tsk)
++{
++ unlazy_fpu(tsk);
++}
++
++int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
++ unsigned long unused,
++ struct task_struct * p, struct pt_regs * regs)
++{
++ struct pt_regs * childregs;
++ struct task_struct *tsk;
++ int err;
++
++ childregs = task_pt_regs(p);
++ *childregs = *regs;
++ childregs->eax = 0;
++ childregs->esp = esp;
++
++ p->thread.esp = (unsigned long) childregs;
++ p->thread.esp0 = (unsigned long) (childregs+1);
++
++ p->thread.eip = (unsigned long) ret_from_fork;
++
++ savesegment(fs,p->thread.fs);
++ savesegment(gs,p->thread.gs);
++
++ tsk = current;
++ if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
++ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++ if (!p->thread.io_bitmap_ptr) {
++ p->thread.io_bitmap_max = 0;
++ return -ENOMEM;
++ }
++ memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
++ IO_BITMAP_BYTES);
++ set_tsk_thread_flag(p, TIF_IO_BITMAP);
++ }
++
++ /*
++ * Set a new TLS for the child thread?
++ */
++ if (clone_flags & CLONE_SETTLS) {
++ struct desc_struct *desc;
++ struct user_desc info;
++ int idx;
++
++ err = -EFAULT;
++ if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
++ goto out;
++ err = -EINVAL;
++ if (LDT_empty(&info))
++ goto out;
++
++ idx = info.entry_number;
++ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++ goto out;
++
++ desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
++ desc->a = LDT_entry_a(&info);
++ desc->b = LDT_entry_b(&info);
++ }
++
++ p->thread.iopl = current->thread.iopl;
++
++ err = 0;
++ out:
++ if (err && p->thread.io_bitmap_ptr) {
++ kfree(p->thread.io_bitmap_ptr);
++ p->thread.io_bitmap_max = 0;
++ }
++ return err;
++}
++
++/*
++ * fill in the user structure for a core dump..
++ */
++void dump_thread(struct pt_regs * regs, struct user * dump)
++{
++ int i;
++
++/* changed the size calculations - should hopefully work better. lbt */
++ dump->magic = CMAGIC;
++ dump->start_code = 0;
++ dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
++ dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
++ dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
++ dump->u_dsize -= dump->u_tsize;
++ dump->u_ssize = 0;
++ for (i = 0; i < 8; i++)
++ dump->u_debugreg[i] = current->thread.debugreg[i];
++
++ if (dump->start_stack < TASK_SIZE)
++ dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
++
++ dump->regs.ebx = regs->ebx;
++ dump->regs.ecx = regs->ecx;
++ dump->regs.edx = regs->edx;
++ dump->regs.esi = regs->esi;
++ dump->regs.edi = regs->edi;
++ dump->regs.ebp = regs->ebp;
++ dump->regs.eax = regs->eax;
++ dump->regs.ds = regs->xds;
++ dump->regs.es = regs->xes;
++ savesegment(fs,dump->regs.fs);
++ savesegment(gs,dump->regs.gs);
++ dump->regs.orig_eax = regs->orig_eax;
++ dump->regs.eip = regs->eip;
++ dump->regs.cs = regs->xcs;
++ dump->regs.eflags = regs->eflags;
++ dump->regs.esp = regs->esp;
++ dump->regs.ss = regs->xss;
++
++ dump->u_fpvalid = dump_fpu (regs, &dump->i387);
++}
++EXPORT_SYMBOL(dump_thread);
++
++/*
++ * Capture the user space registers if the task is not running (in user space)
++ */
++int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
++{
++ struct pt_regs ptregs = *task_pt_regs(tsk);
++ ptregs.xcs &= 0xffff;
++ ptregs.xds &= 0xffff;
++ ptregs.xes &= 0xffff;
++ ptregs.xss &= 0xffff;
++
++ elf_core_copy_regs(regs, &ptregs);
++
++ return 1;
++}
++
++static noinline void __switch_to_xtra(struct task_struct *next_p)
++{
++ struct thread_struct *next;
++
++ next = &next_p->thread;
++
++ if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
++ set_debugreg(next->debugreg[0], 0);
++ set_debugreg(next->debugreg[1], 1);
++ set_debugreg(next->debugreg[2], 2);
++ set_debugreg(next->debugreg[3], 3);
++ /* no 4 and 5 */
++ set_debugreg(next->debugreg[6], 6);
++ set_debugreg(next->debugreg[7], 7);
++ }
++}
++
++/*
++ * This function selects if the context switch from prev to next
++ * has to tweak the TSC disable bit in the cr4.
++ */
++static inline void disable_tsc(struct task_struct *prev_p,
++ struct task_struct *next_p)
++{
++ struct thread_info *prev, *next;
++
++ /*
++ * gcc should eliminate the ->thread_info dereference if
++ * has_secure_computing returns 0 at compile time (SECCOMP=n).
++ */
++ prev = task_thread_info(prev_p);
++ next = task_thread_info(next_p);
++
++ if (has_secure_computing(prev) || has_secure_computing(next)) {
++ /* slow path here */
++ if (has_secure_computing(prev) &&
++ !has_secure_computing(next)) {
++ write_cr4(read_cr4() & ~X86_CR4_TSD);
++ } else if (!has_secure_computing(prev) &&
++ has_secure_computing(next))
++ write_cr4(read_cr4() | X86_CR4_TSD);
++ }
++}
++
++/*
++ * switch_to(x,y) should switch tasks from x to y.
++ *
++ * We fsave/fwait so that an exception goes off at the right time
++ * (as a call from the fsave or fwait in effect) rather than to
++ * the wrong process. Lazy FP saving no longer makes any sense
++ * with modern CPU's, and this simplifies a lot of things (SMP
++ * and UP become the same).
++ *
++ * NOTE! We used to use the x86 hardware context switching. The
++ * reason for not using it any more becomes apparent when you
++ * try to recover gracefully from saved state that is no longer
++ * valid (stale segment register values in particular). With the
++ * hardware task-switch, there is no way to fix up bad state in
++ * a reasonable manner.
++ *
++ * The fact that Intel documents the hardware task-switching to
++ * be slow is a fairly red herring - this code is not noticeably
++ * faster. However, there _is_ some room for improvement here,
++ * so the performance issues may eventually be a valid point.
++ * More important, however, is the fact that this allows us much
++ * more flexibility.
++ *
++ * The return value (in %eax) will be the "prev" task after
++ * the task-switch, and shows up in ret_from_fork in entry.S,
++ * for example.
++ */
++struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ struct thread_struct *prev = &prev_p->thread,
++ *next = &next_p->thread;
++ int cpu = smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct *tss = &per_cpu(init_tss, cpu);
++#endif
++ struct physdev_set_iopl iopl_op;
++ struct physdev_set_iobitmap iobmp_op;
++ multicall_entry_t _mcl[8], *mcl = _mcl;
++
++ /* XEN NOTE: FS/GS saved in switch_mm(), not here. */
++
++ /*
++ * This is basically '__unlazy_fpu', except that we queue a
++ * multicall to indicate FPU task switch, rather than
++ * synchronously trapping to Xen.
++ */
++ if (prev_p->thread_info->status & TS_USEDFPU) {
++ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
++ mcl->op = __HYPERVISOR_fpu_taskswitch;
++ mcl->args[0] = 1;
++ mcl++;
++ }
++#if 0 /* lazy fpu sanity check */
++ else BUG_ON(!(read_cr0() & 8));
++#endif
++
++ /*
++ * Reload esp0.
++ * This is load_esp0(tss, next) with a multicall.
++ */
++ mcl->op = __HYPERVISOR_stack_switch;
++ mcl->args[0] = __KERNEL_DS;
++ mcl->args[1] = next->esp0;
++ mcl++;
++
++ /*
++ * Load the per-thread Thread-Local Storage descriptor.
++ * This is load_TLS(next, cpu) with multicalls.
++ */
++#define C(i) do { \
++ if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
++ next->tls_array[i].b != prev->tls_array[i].b)) { \
++ mcl->op = __HYPERVISOR_update_descriptor; \
++ *(u64 *)&mcl->args[0] = virt_to_machine( \
++ &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
++ *(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i]; \
++ mcl++; \
++ } \
++} while (0)
++ C(0); C(1); C(2);
++#undef C
++
++ if (unlikely(prev->iopl != next->iopl)) {
++ iopl_op.iopl = (next->iopl == 0) ? 1 : (next->iopl >> 12) & 3;
++ mcl->op = __HYPERVISOR_physdev_op;
++ mcl->args[0] = PHYSDEVOP_set_iopl;
++ mcl->args[1] = (unsigned long)&iopl_op;
++ mcl++;
++ }
++
++ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
++ set_xen_guest_handle(iobmp_op.bitmap,
++ (char *)next->io_bitmap_ptr);
++ iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
++ mcl->op = __HYPERVISOR_physdev_op;
++ mcl->args[0] = PHYSDEVOP_set_iobitmap;
++ mcl->args[1] = (unsigned long)&iobmp_op;
++ mcl++;
++ }
++
++ (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
++
++ /*
++ * Restore %fs and %gs if needed.
++ *
++ * Glibc normally makes %fs be zero, and %gs is one of
++ * the TLS segments.
++ */
++ if (unlikely(next->fs))
++ loadsegment(fs, next->fs);
++
++ if (next->gs)
++ loadsegment(gs, next->gs);
++
++ /*
++ * Now maybe handle debug registers
++ */
++ if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
++ __switch_to_xtra(next_p);
++
++ disable_tsc(prev_p, next_p);
++
++ return prev_p;
++}
++
++asmlinkage int sys_fork(struct pt_regs regs)
++{
++ return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
++}
++
++asmlinkage int sys_clone(struct pt_regs regs)
++{
++ unsigned long clone_flags;
++ unsigned long newsp;
++ int __user *parent_tidptr, *child_tidptr;
++
++ clone_flags = regs.ebx;
++ newsp = regs.ecx;
++ parent_tidptr = (int __user *)regs.edx;
++ child_tidptr = (int __user *)regs.edi;
++ if (!newsp)
++ newsp = regs.esp;
++ return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
++}
++
++/*
++ * This is trivial, and on the face of it looks like it
++ * could equally well be done in user mode.
++ *
++ * Not so, for quite unobvious reasons - register pressure.
++ * In user mode vfork() cannot have a stack frame, and if
++ * done by calling the "clone()" system call directly, you
++ * do not have enough call-clobbered registers to hold all
++ * the information you need.
++ */
++asmlinkage int sys_vfork(struct pt_regs regs)
++{
++ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
++}
++
++/*
++ * sys_execve() executes a new program.
++ */
++asmlinkage int sys_execve(struct pt_regs regs)
++{
++ int error;
++ char * filename;
++
++ filename = getname((char __user *) regs.ebx);
++ error = PTR_ERR(filename);
++ if (IS_ERR(filename))
++ goto out;
++ error = do_execve(filename,
++ (char __user * __user *) regs.ecx,
++ (char __user * __user *) regs.edx,
++ &regs);
++ if (error == 0) {
++ task_lock(current);
++ current->ptrace &= ~PT_DTRACE;
++ task_unlock(current);
++ /* Make sure we don't return using sysenter.. */
++ set_thread_flag(TIF_IRET);
++ }
++ putname(filename);
++out:
++ return error;
++}
++
++#define top_esp (THREAD_SIZE - sizeof(unsigned long))
++#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
++
++unsigned long get_wchan(struct task_struct *p)
++{
++ unsigned long ebp, esp, eip;
++ unsigned long stack_page;
++ int count = 0;
++ if (!p || p == current || p->state == TASK_RUNNING)
++ return 0;
++ stack_page = (unsigned long)task_stack_page(p);
++ esp = p->thread.esp;
++ if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
++ return 0;
++ /* include/asm-i386/system.h:switch_to() pushes ebp last. */
++ ebp = *(unsigned long *) esp;
++ do {
++ if (ebp < stack_page || ebp > top_ebp+stack_page)
++ return 0;
++ eip = *(unsigned long *) (ebp+4);
++ if (!in_sched_functions(eip))
++ return eip;
++ ebp = *(unsigned long *) ebp;
++ } while (count++ < 16);
++ return 0;
++}
++
++/*
++ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
++ */
++static int get_free_idx(void)
++{
++ struct thread_struct *t = &current->thread;
++ int idx;
++
++ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
++ if (desc_empty(t->tls_array + idx))
++ return idx + GDT_ENTRY_TLS_MIN;
++ return -ESRCH;
++}
++
++/*
++ * Set a given TLS descriptor:
++ */
++asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
++{
++ struct thread_struct *t = &current->thread;
++ struct user_desc info;
++ struct desc_struct *desc;
++ int cpu, idx;
++
++ if (copy_from_user(&info, u_info, sizeof(info)))
++ return -EFAULT;
++ idx = info.entry_number;
++
++ /*
++ * index -1 means the kernel should try to find and
++ * allocate an empty descriptor:
++ */
++ if (idx == -1) {
++ idx = get_free_idx();
++ if (idx < 0)
++ return idx;
++ if (put_user(idx, &u_info->entry_number))
++ return -EFAULT;
++ }
++
++ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++ return -EINVAL;
++
++ desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
++
++ /*
++ * We must not get preempted while modifying the TLS.
++ */
++ cpu = get_cpu();
++
++ if (LDT_empty(&info)) {
++ desc->a = 0;
++ desc->b = 0;
++ } else {
++ desc->a = LDT_entry_a(&info);
++ desc->b = LDT_entry_b(&info);
++ }
++ load_TLS(t, cpu);
++
++ put_cpu();
++
++ return 0;
++}
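++
++/*
++ * A minimal userspace sketch of how this interface is typically used;
++ * tls_block and the flag choices here are illustrative assumptions.
++ * Passing entry_number == -1 asks the kernel to pick a free GDT slot
++ * and write the chosen index back:
++ *
++ *	struct user_desc ud = {
++ *		.entry_number   = -1,		// let the kernel choose
++ *		.base_addr      = (unsigned long)tls_block,
++ *		.limit          = 0xfffff,
++ *		.seg_32bit      = 1,
++ *		.limit_in_pages = 1,
++ *		.useable        = 1,
++ *	};
++ *	if (syscall(SYS_set_thread_area, &ud) == 0)
++ *		selector = (ud.entry_number << 3) | 3;	// GDT, RPL 3
++ *
++ * The thread library then loads that selector into %gs.
++ */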
++
++/*
++ * Get the current Thread-Local Storage area:
++ */
++
++#define GET_BASE(desc) ( \
++ (((desc)->a >> 16) & 0x0000ffff) | \
++ (((desc)->b << 16) & 0x00ff0000) | \
++ ( (desc)->b & 0xff000000) )
++
++#define GET_LIMIT(desc) ( \
++ ((desc)->a & 0x0ffff) | \
++ ((desc)->b & 0xf0000) )
++
++#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
++#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
++#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
++#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
++#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
++#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
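++
++/*
++ * Worked example of the layout these macros decode, using hypothetical
++ * descriptor words for a data segment with base 0xb7f12000 and a 4GB
++ * page-granular limit:
++ *
++ *	desc->a = 0x2000ffff	// base[15:0]=0x2000, limit[15:0]=0xffff
++ *	desc->b = 0xb7cff3f1	// base[31:24]=0xb7, flags, base[23:16]=0xf1
++ *
++ * GET_BASE() rebuilds 0x00002000 | 0x00f10000 | 0xb7000000 = 0xb7f12000,
++ * and GET_LIMIT() gives 0xfffff with GET_LIMIT_PAGES() set, i.e. 4GB.
++ */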
++
++asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
++{
++ struct user_desc info;
++ struct desc_struct *desc;
++ int idx;
++
++ if (get_user(idx, &u_info->entry_number))
++ return -EFAULT;
++ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++ return -EINVAL;
++
++ memset(&info, 0, sizeof(info));
++
++ desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
++
++ info.entry_number = idx;
++ info.base_addr = GET_BASE(desc);
++ info.limit = GET_LIMIT(desc);
++ info.seg_32bit = GET_32BIT(desc);
++ info.contents = GET_CONTENTS(desc);
++ info.read_exec_only = !GET_WRITABLE(desc);
++ info.limit_in_pages = GET_LIMIT_PAGES(desc);
++ info.seg_not_present = !GET_PRESENT(desc);
++ info.useable = GET_USEABLE(desc);
++
++ if (copy_to_user(u_info, &info, sizeof(info)))
++ return -EFAULT;
++ return 0;
++}
++
++unsigned long arch_align_stack(unsigned long sp)
++{
++ if (randomize_va_space)
++ sp -= get_random_int() % 8192;
++ return sp & ~0xf;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/quirks-xen.c 2007-08-27 14:02:01.000000000 -0400
+@@ -0,0 +1,47 @@
++/*
++ * This file contains work-arounds for x86 and x86_64 platform bugs.
++ */
++#include <linux/pci.h>
++#include <linux/irq.h>
++
++#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI)
++
++static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
++{
++ u8 config, rev;
++ u32 word;
++
++ /* BIOS may enable hardware IRQ balancing for
++	 * E7520/E7320/E7525 (revision ID 0x9 and below)
++ * based platforms.
++ * Disable SW irqbalance/affinity on those platforms.
++ */
++ pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
++ if (rev > 0x9)
++ return;
++
++ printk(KERN_INFO "Intel E7520/7320/7525 detected.");
++
++	/* enable access to config space */
++ pci_read_config_byte(dev, 0xf4, &config);
++ pci_write_config_byte(dev, 0xf4, config|0x2);
++
++ /* read xTPR register */
++ raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
++
++ if (!(word & (1 << 13))) {
++ struct xen_platform_op op;
++ printk(KERN_INFO "Disabling irq balancing and affinity\n");
++ op.cmd = XENPF_platform_quirk;
++ op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING;
++ (void)HYPERVISOR_platform_op(&op);
++ }
++
++	/* put back the original value for config space */
++ if (!(config & 0x2))
++ pci_write_config_byte(dev, 0xf4, config);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance);
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/setup-xen.c 2007-08-27 14:02:09.000000000 -0400
+@@ -0,0 +1,1871 @@
++/*
++ * linux/arch/i386/kernel/setup.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ *
++ * Memory region support
++ * David Parsons <orc@pell.chi.il.us>, July-August 1999
++ *
++ * Added E820 sanitization routine (removes overlapping memory regions);
++ * Brian Moyle <bmoyle@mvista.com>, February 2001
++ *
++ * Moved CPU detection code to cpu/${cpu}.c
++ * Patrick Mochel <mochel@osdl.org>, March 2002
++ *
++ * Provisions for empty E820 memory regions (reported by certain BIOSes).
++ * Alex Achenbach <xela@slit.de>, December 2002.
++ *
++ */
++
++/*
++ * This file handles the architecture-dependent parts of initialization
++ */
++
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/mmzone.h>
++#include <linux/screen_info.h>
++#include <linux/ioport.h>
++#include <linux/acpi.h>
++#include <linux/apm_bios.h>
++#include <linux/initrd.h>
++#include <linux/bootmem.h>
++#include <linux/seq_file.h>
++#include <linux/platform_device.h>
++#include <linux/console.h>
++#include <linux/mca.h>
++#include <linux/root_dev.h>
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/efi.h>
++#include <linux/init.h>
++#include <linux/edd.h>
++#include <linux/nodemask.h>
++#include <linux/kernel.h>
++#include <linux/percpu.h>
++#include <linux/notifier.h>
++#include <linux/kexec.h>
++#include <linux/crash_dump.h>
++#include <linux/dmi.h>
++#include <linux/pfn.h>
++
++#include <video/edid.h>
++
++#include <asm/apic.h>
++#include <asm/e820.h>
++#include <asm/mpspec.h>
++#include <asm/setup.h>
++#include <asm/arch_hooks.h>
++#include <asm/sections.h>
++#include <asm/io_apic.h>
++#include <asm/ist.h>
++#include <asm/io.h>
++#include <asm/hypervisor.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/memory.h>
++#include <xen/features.h>
++#include <xen/xencons.h>
++#include <setup_arch.h>
++#include <bios_ebda.h>
++
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#endif
++
++/* Forward Declaration. */
++void __init find_max_pfn(void);
++
++static int xen_panic_event(struct notifier_block *, unsigned long, void *);
++static struct notifier_block xen_panic_block = {
++ xen_panic_event, NULL, 0 /* try to go last */
++};
++
++extern char hypercall_page[PAGE_SIZE];
++EXPORT_SYMBOL(hypercall_page);
++
++int disable_pse __devinitdata = 0;
++
++/*
++ * Machine setup..
++ */
++
++#ifdef CONFIG_EFI
++int efi_enabled = 0;
++EXPORT_SYMBOL(efi_enabled);
++#endif
++
++/* cpu data as detected by the assembly code in head.S */
++struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
++/* common cpu data for all cpus */
++struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
++EXPORT_SYMBOL(boot_cpu_data);
++
++unsigned long mmu_cr4_features;
++
++#ifdef CONFIG_ACPI
++ int acpi_disabled = 0;
++#else
++ int acpi_disabled = 1;
++#endif
++EXPORT_SYMBOL(acpi_disabled);
++
++#ifdef CONFIG_ACPI
++int __initdata acpi_force = 0;
++extern acpi_interrupt_flags acpi_sci_flags;
++#endif
++
++/* for MCA, but anyone else can use it if they want */
++unsigned int machine_id;
++#ifdef CONFIG_MCA
++EXPORT_SYMBOL(machine_id);
++#endif
++unsigned int machine_submodel_id;
++unsigned int BIOS_revision;
++unsigned int mca_pentium_flag;
++
++/* For PCI or other memory-mapped resources */
++unsigned long pci_mem_start = 0x10000000;
++#ifdef CONFIG_PCI
++EXPORT_SYMBOL(pci_mem_start);
++#endif
++
++/* Boot loader ID as an integer, for the benefit of proc_dointvec */
++int bootloader_type;
++
++/* user-defined highmem size */
++static unsigned int highmem_pages = -1;
++
++/*
++ * Setup options
++ */
++struct drive_info_struct { char dummy[32]; } drive_info;
++#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \
++ defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
++EXPORT_SYMBOL(drive_info);
++#endif
++struct screen_info screen_info;
++EXPORT_SYMBOL(screen_info);
++struct apm_info apm_info;
++EXPORT_SYMBOL(apm_info);
++struct sys_desc_table_struct {
++ unsigned short length;
++ unsigned char table[0];
++};
++struct edid_info edid_info;
++EXPORT_SYMBOL_GPL(edid_info);
++struct ist_info ist_info;
++#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
++ defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
++EXPORT_SYMBOL(ist_info);
++#endif
++struct e820map e820;
++#ifdef CONFIG_XEN
++struct e820map machine_e820;
++#endif
++
++extern void early_cpu_init(void);
++extern void generic_apic_probe(char *);
++extern int root_mountflags;
++
++unsigned long saved_videomode;
++
++#define RAMDISK_IMAGE_START_MASK 0x07FF
++#define RAMDISK_PROMPT_FLAG 0x8000
++#define RAMDISK_LOAD_FLAG 0x4000
++
++static char command_line[COMMAND_LINE_SIZE];
++
++unsigned char __initdata boot_params[PARAM_SIZE];
++
++static struct resource data_resource = {
++ .name = "Kernel data",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++static struct resource code_resource = {
++ .name = "Kernel code",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++static struct resource system_rom_resource = {
++ .name = "System ROM",
++ .start = 0xf0000,
++ .end = 0xfffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource extension_rom_resource = {
++ .name = "Extension ROM",
++ .start = 0xe0000,
++ .end = 0xeffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource adapter_rom_resources[] = { {
++ .name = "Adapter ROM",
++ .start = 0xc8000,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++} };
++
++#define ADAPTER_ROM_RESOURCES \
++ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
++
++static struct resource video_rom_resource = {
++ .name = "Video ROM",
++ .start = 0xc0000,
++ .end = 0xc7fff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource video_ram_resource = {
++ .name = "Video RAM area",
++ .start = 0xa0000,
++ .end = 0xbffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++static struct resource standard_io_resources[] = { {
++ .name = "dma1",
++ .start = 0x0000,
++ .end = 0x001f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "pic1",
++ .start = 0x0020,
++ .end = 0x0021,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "timer0",
++ .start = 0x0040,
++ .end = 0x0043,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "timer1",
++ .start = 0x0050,
++ .end = 0x0053,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "keyboard",
++ .start = 0x0060,
++ .end = 0x006f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "dma page reg",
++ .start = 0x0080,
++ .end = 0x008f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "pic2",
++ .start = 0x00a0,
++ .end = 0x00a1,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "dma2",
++ .start = 0x00c0,
++ .end = 0x00df,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "fpu",
++ .start = 0x00f0,
++ .end = 0x00ff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++} };
++
++#define STANDARD_IO_RESOURCES \
++ (sizeof standard_io_resources / sizeof standard_io_resources[0])
++
++#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
++
++static int __init romchecksum(unsigned char *rom, unsigned long length)
++{
++ unsigned char *p, sum = 0;
++
++ for (p = rom; p < rom + length; p++)
++ sum += *p;
++ return sum == 0;
++}
++
++static void __init probe_roms(void)
++{
++ unsigned long start, length, upper;
++ unsigned char *rom;
++ int i;
++
++#ifdef CONFIG_XEN
++ /* Nothing to do if not running in dom0. */
++ if (!is_initial_xendomain())
++ return;
++#endif
++
++ /* video rom */
++ upper = adapter_rom_resources[0].start;
++ for (start = video_rom_resource.start; start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ video_rom_resource.start = start;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = rom[2] * 512;
++
++ /* if checksum okay, trust length byte */
++ if (length && romchecksum(rom, length))
++ video_rom_resource.end = start + length - 1;
++
++ request_resource(&iomem_resource, &video_rom_resource);
++ break;
++ }
++
++ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
++ if (start < upper)
++ start = upper;
++
++ /* system rom */
++ request_resource(&iomem_resource, &system_rom_resource);
++ upper = system_rom_resource.start;
++
++ /* check for extension rom (ignore length byte!) */
++ rom = isa_bus_to_virt(extension_rom_resource.start);
++ if (romsignature(rom)) {
++ length = extension_rom_resource.end - extension_rom_resource.start + 1;
++ if (romchecksum(rom, length)) {
++ request_resource(&iomem_resource, &extension_rom_resource);
++ upper = extension_rom_resource.start;
++ }
++ }
++
++ /* check for adapter roms on 2k boundaries */
++ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = rom[2] * 512;
++
++ /* but accept any length that fits if checksum okay */
++ if (!length || start + length > upper || !romchecksum(rom, length))
++ continue;
++
++ adapter_rom_resources[i].start = start;
++ adapter_rom_resources[i].end = start + length - 1;
++ request_resource(&iomem_resource, &adapter_rom_resources[i]);
++
++ start = adapter_rom_resources[i++].end & ~2047UL;
++ }
++}
++
++/*
++ * Point at the empty zero page to start with. We map the real shared_info
++ * page as soon as fixmap is up and running.
++ */
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
++
++unsigned long *phys_to_machine_mapping;
++unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[16];
++EXPORT_SYMBOL(phys_to_machine_mapping);
++
++/* Raw start-of-day parameters from the hypervisor. */
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
++
++void __init add_memory_region(unsigned long long start,
++ unsigned long long size, int type)
++{
++ int x;
++
++ if (!efi_enabled) {
++ x = e820.nr_map;
++
++ if (x == E820MAX) {
++ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
++ return;
++ }
++
++ e820.map[x].addr = start;
++ e820.map[x].size = size;
++ e820.map[x].type = type;
++ e820.nr_map++;
++ }
++} /* add_memory_region */
++
++static void __init limit_regions(unsigned long long size)
++{
++ unsigned long long current_addr = 0;
++ int i;
++
++ if (efi_enabled) {
++ efi_memory_desc_t *md;
++ void *p;
++
++ for (p = memmap.map, i = 0; p < memmap.map_end;
++ p += memmap.desc_size, i++) {
++ md = p;
++ current_addr = md->phys_addr + (md->num_pages << 12);
++ if (md->type == EFI_CONVENTIONAL_MEMORY) {
++ if (current_addr >= size) {
++ md->num_pages -=
++ (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
++ memmap.nr_map = i + 1;
++ return;
++ }
++ }
++ }
++ }
++ for (i = 0; i < e820.nr_map; i++) {
++ current_addr = e820.map[i].addr + e820.map[i].size;
++ if (current_addr < size)
++ continue;
++
++ if (e820.map[i].type != E820_RAM)
++ continue;
++
++ if (e820.map[i].addr >= size) {
++ /*
++ * This region starts past the end of the
++ * requested size, skip it completely.
++ */
++ e820.nr_map = i;
++ } else {
++ e820.nr_map = i + 1;
++ e820.map[i].size -= current_addr - size;
++ }
++ return;
++ }
++#ifdef CONFIG_XEN
++ if (i==e820.nr_map && current_addr < size) {
++ /*
++ * The e820 map finished before our requested size so
++ * extend the final entry to the requested address.
++ */
++ --i;
++ if (e820.map[i].type == E820_RAM)
++ e820.map[i].size -= current_addr - size;
++ else
++ add_memory_region(current_addr, size - current_addr, E820_RAM);
++ }
++#endif
++}
++
++#define E820_DEBUG 1
++
++static void __init print_memory_map(char *who)
++{
++ int i;
++
++ for (i = 0; i < e820.nr_map; i++) {
++ printk(" %s: %016Lx - %016Lx ", who,
++ e820.map[i].addr,
++ e820.map[i].addr + e820.map[i].size);
++ switch (e820.map[i].type) {
++ case E820_RAM: printk("(usable)\n");
++ break;
++ case E820_RESERVED:
++ printk("(reserved)\n");
++ break;
++ case E820_ACPI:
++ printk("(ACPI data)\n");
++ break;
++ case E820_NVS:
++ printk("(ACPI NVS)\n");
++ break;
++ default: printk("type %lu\n", e820.map[i].type);
++ break;
++ }
++ }
++}
++
++/*
++ * Sanitize the BIOS e820 map.
++ *
++ * Some e820 responses include overlapping entries. The following
++ * replaces the original e820 map with a new one, removing overlaps.
++ *
++ */
++struct change_member {
++ struct e820entry *pbios; /* pointer to original bios entry */
++ unsigned long long addr; /* address for this change point */
++};
++static struct change_member change_point_list[2*E820MAX] __initdata;
++static struct change_member *change_point[2*E820MAX] __initdata;
++static struct e820entry *overlap_list[E820MAX] __initdata;
++static struct e820entry new_bios[E820MAX] __initdata;
++
++int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
++{
++ struct change_member *change_tmp;
++ unsigned long current_type, last_type;
++ unsigned long long last_addr;
++ int chgidx, still_changing;
++ int overlap_entries;
++ int new_bios_entry;
++ int old_nr, new_nr, chg_nr;
++ int i;
++
++ /*
++ Visually we're performing the following (1,2,3,4 = memory types)...
++
++ Sample memory map (w/overlaps):
++ ____22__________________
++ ______________________4_
++ ____1111________________
++ _44_____________________
++ 11111111________________
++ ____________________33__
++ ___________44___________
++ __________33333_________
++ ______________22________
++ ___________________2222_
++ _________111111111______
++ _____________________11_
++ _________________4______
++
++ Sanitized equivalent (no overlap):
++ 1_______________________
++ _44_____________________
++ ___1____________________
++ ____22__________________
++ ______11________________
++ _________1______________
++ __________3_____________
++ ___________44___________
++ _____________33_________
++ _______________2________
++ ________________1_______
++ _________________4______
++ ___________________2____
++ ____________________33__
++ ______________________4_
++ */
++
++ /* if there's only one memory region, don't bother */
++ if (*pnr_map < 2)
++ return -1;
++
++ old_nr = *pnr_map;
++
++ /* bail out if we find any unreasonable addresses in bios map */
++ for (i=0; i<old_nr; i++)
++ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
++ return -1;
++
++ /* create pointers for initial change-point information (for sorting) */
++ for (i=0; i < 2*old_nr; i++)
++ change_point[i] = &change_point_list[i];
++
++ /* record all known change-points (starting and ending addresses),
++ omitting those that are for empty memory regions */
++ chgidx = 0;
++ for (i=0; i < old_nr; i++) {
++ if (biosmap[i].size != 0) {
++ change_point[chgidx]->addr = biosmap[i].addr;
++ change_point[chgidx++]->pbios = &biosmap[i];
++ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
++ change_point[chgidx++]->pbios = &biosmap[i];
++ }
++ }
++ chg_nr = chgidx; /* true number of change-points */
++
++ /* sort change-point list by memory addresses (low -> high) */
++ still_changing = 1;
++ while (still_changing) {
++ still_changing = 0;
++ for (i=1; i < chg_nr; i++) {
++ /* if <current_addr> > <last_addr>, swap */
++ /* or, if current=<start_addr> & last=<end_addr>, swap */
++ if ((change_point[i]->addr < change_point[i-1]->addr) ||
++ ((change_point[i]->addr == change_point[i-1]->addr) &&
++ (change_point[i]->addr == change_point[i]->pbios->addr) &&
++ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
++ )
++ {
++ change_tmp = change_point[i];
++ change_point[i] = change_point[i-1];
++ change_point[i-1] = change_tmp;
++ still_changing=1;
++ }
++ }
++ }
++
++ /* create a new bios memory map, removing overlaps */
++ overlap_entries=0; /* number of entries in the overlap table */
++ new_bios_entry=0; /* index for creating new bios map entries */
++ last_type = 0; /* start with undefined memory type */
++ last_addr = 0; /* start with 0 as last starting address */
++	/* loop through change-points, determining effect on the new bios map */
++ for (chgidx=0; chgidx < chg_nr; chgidx++)
++ {
++ /* keep track of all overlapping bios entries */
++ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
++ {
++ /* add map entry to overlap list (> 1 entry implies an overlap) */
++ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
++ }
++ else
++ {
++ /* remove entry from list (order independent, so swap with last) */
++ for (i=0; i<overlap_entries; i++)
++ {
++ if (overlap_list[i] == change_point[chgidx]->pbios)
++ overlap_list[i] = overlap_list[overlap_entries-1];
++ }
++ overlap_entries--;
++ }
++ /* if there are overlapping entries, decide which "type" to use */
++ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
++ current_type = 0;
++ for (i=0; i<overlap_entries; i++)
++ if (overlap_list[i]->type > current_type)
++ current_type = overlap_list[i]->type;
++ /* continue building up new bios map based on this information */
++ if (current_type != last_type) {
++ if (last_type != 0) {
++ new_bios[new_bios_entry].size =
++ change_point[chgidx]->addr - last_addr;
++ /* move forward only if the new size was non-zero */
++ if (new_bios[new_bios_entry].size != 0)
++ if (++new_bios_entry >= E820MAX)
++ break; /* no more space left for new bios entries */
++ }
++ if (current_type != 0) {
++ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
++ new_bios[new_bios_entry].type = current_type;
++ last_addr=change_point[chgidx]->addr;
++ }
++ last_type = current_type;
++ }
++ }
++ new_nr = new_bios_entry; /* retain count for new bios entries */
++
++ /* copy new bios mapping into original location */
++ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
++ *pnr_map = new_nr;
++
++ return 0;
++}
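++
++/*
++ * A small worked example of the precedence rule above (hypothetical
++ * BIOS entries): if the BIOS reports RAM (type 1) at
++ * 0x00000000-0x0009ffff and also marks 0x0009f000-0x0009ffff reserved
++ * (type 2), the overlapping span takes the larger type, so the
++ * sanitized map is RAM 0x00000000-0x0009efff followed by reserved
++ * 0x0009f000-0x0009ffff.
++ */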
++
++/*
++ * Copy the BIOS e820 map into a safe place.
++ *
++ * Sanity-check it while we're at it..
++ *
++ * If we're lucky and live on a modern system, the setup code
++ * will have given us a memory map that we can use to properly
++ * set up memory. If we aren't, we'll fake a memory map.
++ *
++ * We check to see that the memory map contains at least 2 elements
++ * before we'll use it, because the detection code in setup.S may
++ * not be perfect and most every PC known to man has two memory
++ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
++ * thinkpad 560x, for example, does not cooperate with the memory
++ * detection code.)
++ */
++int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
++{
++#ifndef CONFIG_XEN
++ /* Only one memory region (or negative)? Ignore it */
++ if (nr_map < 2)
++ return -1;
++#else
++ BUG_ON(nr_map < 1);
++#endif
++
++ do {
++ unsigned long long start = biosmap->addr;
++ unsigned long long size = biosmap->size;
++ unsigned long long end = start + size;
++ unsigned long type = biosmap->type;
++
++ /* Overflow in 64 bits? Ignore the memory map. */
++ if (start > end)
++ return -1;
++
++#ifndef CONFIG_XEN
++ /*
++ * Some BIOSes claim RAM in the 640k - 1M region.
++ * Not right. Fix it up.
++ */
++ if (type == E820_RAM) {
++ if (start < 0x100000ULL && end > 0xA0000ULL) {
++ if (start < 0xA0000ULL)
++ add_memory_region(start, 0xA0000ULL-start, type);
++ if (end <= 0x100000ULL)
++ continue;
++ start = 0x100000ULL;
++ size = end - start;
++ }
++ }
++#endif
++ add_memory_region(start, size, type);
++ } while (biosmap++,--nr_map);
++ return 0;
++}
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++struct edd edd;
++#ifdef CONFIG_EDD_MODULE
++EXPORT_SYMBOL(edd);
++#endif
++/**
++ * copy_edd() - Copy the BIOS EDD information
++ * from boot_params into a safe place.
++ *
++ */
++static inline void copy_edd(void)
++{
++ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
++ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
++ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
++ edd.edd_info_nr = EDD_NR;
++}
++#else
++static inline void copy_edd(void)
++{
++}
++#endif
++
++static void __init parse_cmdline_early (char ** cmdline_p)
++{
++ char c = ' ', *to = command_line, *from = saved_command_line;
++ int len = 0, max_cmdline;
++ int userdef = 0;
++
++ if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
++ max_cmdline = COMMAND_LINE_SIZE;
++ memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
++ /* Save unparsed command line copy for /proc/cmdline */
++ saved_command_line[max_cmdline-1] = '\0';
++
++ for (;;) {
++ if (c != ' ')
++ goto next_char;
++ /*
++ * "mem=nopentium" disables the 4MB page tables.
++ * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
++ * to <mem>, overriding the bios size.
++ * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
++ * <start> to <start>+<mem>, overriding the bios size.
++ *
++ * HPA tells me bootloaders need to parse mem=, so no new
++ * option should be mem= [also see Documentation/i386/boot.txt]
++ */
++ if (!memcmp(from, "mem=", 4)) {
++ if (to != command_line)
++ to--;
++ if (!memcmp(from+4, "nopentium", 9)) {
++ from += 9+4;
++ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
++ disable_pse = 1;
++ } else {
++ /* If the user specifies memory size, we
++ * limit the BIOS-provided memory map to
++ * that size. exactmap can be used to specify
++ * the exact map. mem=number can be used to
++ * trim the existing memory map.
++ */
++ unsigned long long mem_size;
++
++ mem_size = memparse(from+4, &from);
++ limit_regions(mem_size);
++ userdef=1;
++ }
++ }
++
++ else if (!memcmp(from, "memmap=", 7)) {
++ if (to != command_line)
++ to--;
++ if (!memcmp(from+7, "exactmap", 8)) {
++#ifdef CONFIG_CRASH_DUMP
++ /* If we are doing a crash dump, we
++ * still need to know the real mem
++ * size before original memory map is
++ * reset.
++ */
++ find_max_pfn();
++ saved_max_pfn = max_pfn;
++#endif
++ from += 8+7;
++ e820.nr_map = 0;
++ userdef = 1;
++ } else {
++ /* If the user specifies memory size, we
++ * limit the BIOS-provided memory map to
++ * that size. exactmap can be used to specify
++ * the exact map. mem=number can be used to
++ * trim the existing memory map.
++ */
++ unsigned long long start_at, mem_size;
++
++ mem_size = memparse(from+7, &from);
++ if (*from == '@') {
++ start_at = memparse(from+1, &from);
++ add_memory_region(start_at, mem_size, E820_RAM);
++ } else if (*from == '#') {
++ start_at = memparse(from+1, &from);
++ add_memory_region(start_at, mem_size, E820_ACPI);
++ } else if (*from == '$') {
++ start_at = memparse(from+1, &from);
++ add_memory_region(start_at, mem_size, E820_RESERVED);
++ } else {
++ limit_regions(mem_size);
++ userdef=1;
++ }
++ }
++ }
++
++ else if (!memcmp(from, "noexec=", 7))
++ noexec_setup(from + 7);
++
++
++#ifdef CONFIG_X86_MPPARSE
++ /*
++ * If the BIOS enumerates physical processors before logical,
++ * maxcpus=N at enumeration-time can be used to disable HT.
++ */
++ else if (!memcmp(from, "maxcpus=", 8)) {
++ extern unsigned int maxcpus;
++
++ maxcpus = simple_strtoul(from + 8, NULL, 0);
++ }
++#endif
++
++#ifdef CONFIG_ACPI
++ /* "acpi=off" disables both ACPI table parsing and interpreter */
++ else if (!memcmp(from, "acpi=off", 8)) {
++ disable_acpi();
++ }
++
++ /* acpi=force to over-ride black-list */
++ else if (!memcmp(from, "acpi=force", 10)) {
++ acpi_force = 1;
++ acpi_ht = 1;
++ acpi_disabled = 0;
++ }
++
++ /* acpi=strict disables out-of-spec workarounds */
++ else if (!memcmp(from, "acpi=strict", 11)) {
++ acpi_strict = 1;
++ }
++
++ /* Limit ACPI just to boot-time to enable HT */
++ else if (!memcmp(from, "acpi=ht", 7)) {
++ if (!acpi_force)
++ disable_acpi();
++ acpi_ht = 1;
++ }
++
++ /* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
++ else if (!memcmp(from, "pci=noacpi", 10)) {
++ acpi_disable_pci();
++ }
++ /* "acpi=noirq" disables ACPI interrupt routing */
++ else if (!memcmp(from, "acpi=noirq", 10)) {
++ acpi_noirq_set();
++ }
++
++ else if (!memcmp(from, "acpi_sci=edge", 13))
++ acpi_sci_flags.trigger = 1;
++
++ else if (!memcmp(from, "acpi_sci=level", 14))
++ acpi_sci_flags.trigger = 3;
++
++ else if (!memcmp(from, "acpi_sci=high", 13))
++ acpi_sci_flags.polarity = 1;
++
++ else if (!memcmp(from, "acpi_sci=low", 12))
++ acpi_sci_flags.polarity = 3;
++
++#ifdef CONFIG_X86_IO_APIC
++ else if (!memcmp(from, "acpi_skip_timer_override", 24))
++ acpi_skip_timer_override = 1;
++
++ if (!memcmp(from, "disable_timer_pin_1", 19))
++ disable_timer_pin_1 = 1;
++ if (!memcmp(from, "enable_timer_pin_1", 18))
++ disable_timer_pin_1 = -1;
++
++ /* disable IO-APIC */
++ else if (!memcmp(from, "noapic", 6))
++ disable_ioapic_setup();
++#endif /* CONFIG_X86_IO_APIC */
++#endif /* CONFIG_ACPI */
++
++#ifdef CONFIG_X86_LOCAL_APIC
++ /* enable local APIC */
++ else if (!memcmp(from, "lapic", 5))
++ lapic_enable();
++
++ /* disable local APIC */
++ else if (!memcmp(from, "nolapic", 6))
++ lapic_disable();
++#endif /* CONFIG_X86_LOCAL_APIC */
++
++#ifdef CONFIG_KEXEC
++ /* crashkernel=size@addr specifies the location to reserve for
++ * a crash kernel. By reserving this memory we guarantee
++		 * that linux never sets it up as a DMA target.
++ * Useful for holding code to do something appropriate
++ * after a kernel panic.
++ */
++ else if (!memcmp(from, "crashkernel=", 12)) {
++#ifndef CONFIG_XEN
++ unsigned long size, base;
++ size = memparse(from+12, &from);
++ if (*from == '@') {
++ base = memparse(from+1, &from);
++ /* FIXME: Do I want a sanity check
++ * to validate the memory range?
++ */
++ crashk_res.start = base;
++ crashk_res.end = base + size - 1;
++ }
++#else
++ printk("Ignoring crashkernel command line, "
++ "parameter will be supplied by xen\n");
++#endif
++ }
++#endif
++#ifdef CONFIG_PROC_VMCORE
++ /* elfcorehdr= specifies the location of elf core header
++ * stored by the crashed kernel.
++ */
++ else if (!memcmp(from, "elfcorehdr=", 11))
++ elfcorehdr_addr = memparse(from+11, &from);
++#endif
++
++ /*
++ * highmem=size forces highmem to be exactly 'size' bytes.
++ * This works even on boxes that have no highmem otherwise.
++ * This also works to reduce highmem size on bigger boxes.
++ */
++ else if (!memcmp(from, "highmem=", 8))
++ highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
++
++ /*
++ * vmalloc=size forces the vmalloc area to be exactly 'size'
++ * bytes. This can be used to increase (or decrease) the
++ * vmalloc area - the default is 128m.
++ */
++ else if (!memcmp(from, "vmalloc=", 8))
++ __VMALLOC_RESERVE = memparse(from+8, &from);
++
++ next_char:
++ c = *(from++);
++ if (!c)
++ break;
++ if (COMMAND_LINE_SIZE <= ++len)
++ break;
++ *(to++) = c;
++ }
++ *to = '\0';
++ *cmdline_p = command_line;
++ if (userdef) {
++ printk(KERN_INFO "user-defined physical RAM map:\n");
++ print_memory_map("user");
++ }
++}
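++
++/*
++ * For reference, the memory options parsed above combine as in these
++ * hypothetical command lines: "mem=512M" trims the BIOS-provided map
++ * to the first 512MB; "memmap=64M@1024M" adds a 64MB RAM region
++ * starting at 1GB; "memmap=exactmap memmap=640K@0 memmap=511M@1M"
++ * discards the BIOS map and describes RAM explicitly.
++ */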
++
++/*
++ * Callback for efi_memory_walk.
++ */
++static int __init
++efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
++{
++ unsigned long *max_pfn = arg, pfn;
++
++ if (start < end) {
++ pfn = PFN_UP(end -1);
++ if (pfn > *max_pfn)
++ *max_pfn = pfn;
++ }
++ return 0;
++}
++
++static int __init
++efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
++{
++ memory_present(0, start, end);
++ return 0;
++}
++
++ /*
++ * This function checks if the entire range <start,end> is mapped with type.
++ *
++  * Note: this function only works correctly if the e820 table is sorted and
++  * non-overlapping, which is the case.
++ */
++int __init
++e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
++{
++ u64 start = s;
++ u64 end = e;
++ int i;
++
++#ifndef CONFIG_XEN
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++#else
++ if (!is_initial_xendomain())
++ return 0;
++ for (i = 0; i < machine_e820.nr_map; ++i) {
++ const struct e820entry *ei = &machine_e820.map[i];
++#endif
++ if (type && ei->type != type)
++ continue;
++		/* does this region overlap (at least partly) the range being checked? */
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++ /* if the region is at the beginning of <start,end> we move
++ * start to the end of the region since it's ok until there
++ */
++ if (ei->addr <= start)
++ start = ei->addr + ei->size;
++ /* if start is now at or beyond end, we're done, full
++ * coverage */
++ if (start >= end)
++ return 1; /* we're done */
++ }
++ return 0;
++}
++
++/*
++ * Find the highest page frame number we have available
++ */
++void __init find_max_pfn(void)
++{
++ int i;
++
++ max_pfn = 0;
++ if (efi_enabled) {
++ efi_memmap_walk(efi_find_max_pfn, &max_pfn);
++ efi_memmap_walk(efi_memory_present_wrapper, NULL);
++ return;
++ }
++
++ for (i = 0; i < e820.nr_map; i++) {
++ unsigned long start, end;
++ /* RAM? */
++ if (e820.map[i].type != E820_RAM)
++ continue;
++ start = PFN_UP(e820.map[i].addr);
++ end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
++ if (start >= end)
++ continue;
++ if (end > max_pfn)
++ max_pfn = end;
++ memory_present(0, start, end);
++ }
++}
++
++/*
++ * Determine low and high memory ranges:
++ */
++unsigned long __init find_max_low_pfn(void)
++{
++ unsigned long max_low_pfn;
++
++ max_low_pfn = max_pfn;
++ if (max_low_pfn > MAXMEM_PFN) {
++ if (highmem_pages == -1)
++ highmem_pages = max_pfn - MAXMEM_PFN;
++ if (highmem_pages + MAXMEM_PFN < max_pfn)
++ max_pfn = MAXMEM_PFN + highmem_pages;
++ if (highmem_pages + MAXMEM_PFN > max_pfn) {
++ printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
++ highmem_pages = 0;
++ }
++ max_low_pfn = MAXMEM_PFN;
++#ifndef CONFIG_HIGHMEM
++ /* Maximum memory usable is what is directly addressable */
++ printk(KERN_WARNING "Warning only %ldMB will be used.\n",
++ MAXMEM>>20);
++ if (max_pfn > MAX_NONPAE_PFN)
++ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
++ else
++ printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
++ max_pfn = MAXMEM_PFN;
++#else /* !CONFIG_HIGHMEM */
++#ifndef CONFIG_X86_PAE
++ if (max_pfn > MAX_NONPAE_PFN) {
++ max_pfn = MAX_NONPAE_PFN;
++ printk(KERN_WARNING "Warning only 4GB will be used.\n");
++ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
++ }
++#endif /* !CONFIG_X86_PAE */
++#endif /* !CONFIG_HIGHMEM */
++ } else {
++ if (highmem_pages == -1)
++ highmem_pages = 0;
++#ifdef CONFIG_HIGHMEM
++ if (highmem_pages >= max_pfn) {
++ printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
++ highmem_pages = 0;
++ }
++ if (highmem_pages) {
++ if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
++ printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
++ highmem_pages = 0;
++ }
++ max_low_pfn -= highmem_pages;
++ }
++#else
++ if (highmem_pages)
++ printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
++#endif
++ }
++ return max_low_pfn;
++}
++
++/*
++ * Free all available memory for boot time allocation. Used
++ * as a callback function by efi_memory_walk()
++ */
++
++static int __init
++free_available_memory(unsigned long start, unsigned long end, void *arg)
++{
++ /* check max_low_pfn */
++ if (start >= (max_low_pfn << PAGE_SHIFT))
++ return 0;
++ if (end >= (max_low_pfn << PAGE_SHIFT))
++ end = max_low_pfn << PAGE_SHIFT;
++ if (start < end)
++ free_bootmem(start, end - start);
++
++ return 0;
++}
++/*
++ * Register fully available low RAM pages with the bootmem allocator.
++ */
++static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
++{
++ int i;
++
++ if (efi_enabled) {
++ efi_memmap_walk(free_available_memory, NULL);
++ return;
++ }
++ for (i = 0; i < e820.nr_map; i++) {
++ unsigned long curr_pfn, last_pfn, size;
++ /*
++ * Reserve usable low memory
++ */
++ if (e820.map[i].type != E820_RAM)
++ continue;
++ /*
++ * We are rounding up the start address of usable memory:
++ */
++ curr_pfn = PFN_UP(e820.map[i].addr);
++ if (curr_pfn >= max_low_pfn)
++ continue;
++ /*
++ * ... and at the end of the usable range downwards:
++ */
++ last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
++
++#ifdef CONFIG_XEN
++ /*
++ * Truncate to the number of actual pages currently
++ * present.
++ */
++ if (last_pfn > xen_start_info->nr_pages)
++ last_pfn = xen_start_info->nr_pages;
++#endif
++
++ if (last_pfn > max_low_pfn)
++ last_pfn = max_low_pfn;
++
++ /*
++ * .. finally, did all the rounding and playing
++ * around just make the area go away?
++ */
++ if (last_pfn <= curr_pfn)
++ continue;
++
++ size = last_pfn - curr_pfn;
++ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
++ }
++}
++
++#ifndef CONFIG_XEN
++/*
++ * workaround for Dell systems that neglect to reserve EBDA
++ */
++static void __init reserve_ebda_region(void)
++{
++ unsigned int addr;
++ addr = get_bios_ebda();
++ if (addr)
++ reserve_bootmem(addr, PAGE_SIZE);
++}
++#endif
++
++#ifndef CONFIG_NEED_MULTIPLE_NODES
++void __init setup_bootmem_allocator(void);
++static unsigned long __init setup_memory(void)
++{
++ /*
++ * partially used pages are not usable - thus
++ * we are rounding upwards:
++ */
++ min_low_pfn = PFN_UP(__pa(xen_start_info->pt_base)) +
++ xen_start_info->nr_pt_frames;
++
++ find_max_pfn();
++
++ max_low_pfn = find_max_low_pfn();
++
++#ifdef CONFIG_HIGHMEM
++ highstart_pfn = highend_pfn = max_pfn;
++ if (max_pfn > max_low_pfn) {
++ highstart_pfn = max_low_pfn;
++ }
++ printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
++ pages_to_mb(highend_pfn - highstart_pfn));
++#endif
++ printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
++ pages_to_mb(max_low_pfn));
++
++ setup_bootmem_allocator();
++
++ return max_low_pfn;
++}
++
++void __init zone_sizes_init(void)
++{
++ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
++ unsigned int max_dma, low;
++
++ /*
++ * XEN: Our notion of "DMA memory" is fake when running over Xen.
++ * We simply put all RAM in the DMA zone so that those drivers which
++ * needlessly specify GFP_DMA do not get starved of RAM unnecessarily.
++ * Those drivers that *do* require lowmem are screwed anyway when
++ * running over Xen!
++ */
++ max_dma = max_low_pfn;
++ low = max_low_pfn;
++
++ if (low < max_dma)
++ zones_size[ZONE_DMA] = low;
++ else {
++ zones_size[ZONE_DMA] = max_dma;
++ zones_size[ZONE_NORMAL] = low - max_dma;
++#ifdef CONFIG_HIGHMEM
++ zones_size[ZONE_HIGHMEM] = highend_pfn - low;
++#endif
++ }
++ free_area_init(zones_size);
++}
++#else
++extern unsigned long __init setup_memory(void);
++extern void zone_sizes_init(void);
++#endif /* !CONFIG_NEED_MULTIPLE_NODES */
++
++void __init setup_bootmem_allocator(void)
++{
++ unsigned long bootmap_size;
++ /*
++ * Initialize the boot-time allocator (with low memory only):
++ */
++ bootmap_size = init_bootmem(min_low_pfn, max_low_pfn);
++
++ register_bootmem_low_pages(max_low_pfn);
++
++ /*
++ * Reserve the bootmem bitmap itself as well. We do this in two
++ * steps (first step was init_bootmem()) because this catches
++ * the (very unlikely) case of us accidentally initializing the
++ * bootmem allocator with an invalid RAM area.
++ */
++ reserve_bootmem(__PHYSICAL_START, (PFN_PHYS(min_low_pfn) +
++ bootmap_size + PAGE_SIZE-1) - (__PHYSICAL_START));
++
++#ifndef CONFIG_XEN
++ /*
++ * reserve physical page 0 - it's a special BIOS page on many boxes,
++ * enabling clean reboots, SMP operation, laptop functions.
++ */
++ reserve_bootmem(0, PAGE_SIZE);
++
++ /* reserve EBDA region, it's a 4K region */
++ reserve_ebda_region();
++
++ /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
++ PCI prefetch into it (errata #56). Usually the page is reserved anyways,
++ unless you have no PS/2 mouse plugged in. */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++ boot_cpu_data.x86 == 6)
++ reserve_bootmem(0xa0000 - 4096, 4096);
++
++#ifdef CONFIG_SMP
++ /*
++ * But first pinch a few for the stack/trampoline stuff
++ * FIXME: Don't need the extra page at 4K, but need to fix
++ * trampoline before removing it. (see the GDT stuff)
++ */
++ reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
++#endif
++#ifdef CONFIG_ACPI_SLEEP
++ /*
++ * Reserve low memory region for sleep support.
++ */
++ acpi_reserve_bootmem();
++#endif
++#endif /* !CONFIG_XEN */
++
++#ifdef CONFIG_BLK_DEV_INITRD
++ if (xen_start_info->mod_start) {
++ if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
++ /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
++ initrd_start = INITRD_START + PAGE_OFFSET;
++ initrd_end = initrd_start+INITRD_SIZE;
++ initrd_below_start_ok = 1;
++ }
++ else {
++ printk(KERN_ERR "initrd extends beyond end of memory "
++ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++ INITRD_START + INITRD_SIZE,
++ max_low_pfn << PAGE_SHIFT);
++ initrd_start = 0;
++ }
++ }
++#endif
++#ifdef CONFIG_KEXEC
++#ifdef CONFIG_XEN
++ xen_machine_kexec_setup_resources();
++#else
++ if (crashk_res.start != crashk_res.end)
++ reserve_bootmem(crashk_res.start,
++ crashk_res.end - crashk_res.start + 1);
++#endif
++#endif
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap))
++ phys_to_machine_mapping =
++ (unsigned long *)xen_start_info->mfn_list;
++}
++
++/*
++ * The node 0 pgdat is initialized before all of these because
++ * it's needed for bootmem. node>0 pgdats have their virtual
++ * space allocated before the pagetables are in place to access
++ * them, so they can't be cleared then.
++ *
++ * This should all compile down to nothing when NUMA is off.
++ */
++void __init remapped_pgdat_init(void)
++{
++ int nid;
++
++ for_each_online_node(nid) {
++ if (nid != 0)
++ memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
++ }
++}
++
++/*
++ * Request address space for all standard RAM and ROM resources
++ * and also for regions reported as reserved by the e820.
++ */
++static void __init
++legacy_init_iomem_resources(struct e820entry *e820, int nr_map,
++ struct resource *code_resource,
++ struct resource *data_resource)
++{
++ int i;
++
++ probe_roms();
++
++ for (i = 0; i < nr_map; i++) {
++ struct resource *res;
++#ifndef CONFIG_RESOURCES_64BIT
++ if (e820[i].addr + e820[i].size > 0x100000000ULL)
++ continue;
++#endif
++ res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
++ switch (e820[i].type) {
++ case E820_RAM: res->name = "System RAM"; break;
++ case E820_ACPI: res->name = "ACPI Tables"; break;
++ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
++ default: res->name = "reserved";
++ }
++ res->start = e820[i].addr;
++ res->end = res->start + e820[i].size - 1;
++ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++ if (request_resource(&iomem_resource, res)) {
++ kfree(res);
++ continue;
++ }
++ if (e820[i].type == E820_RAM) {
++ /*
++ * We don't know which RAM region contains kernel data,
++ * so we try it repeatedly and let the resource manager
++ * test it.
++ */
++#ifndef CONFIG_XEN
++ request_resource(res, code_resource);
++ request_resource(res, data_resource);
++#endif
++#ifdef CONFIG_KEXEC
++ if (crashk_res.start != crashk_res.end)
++ request_resource(res, &crashk_res);
++#ifdef CONFIG_XEN
++ xen_machine_kexec_register_resources(res);
++#endif
++#endif
++ }
++ }
++}
++
++/*
++ * Locate an unused range of the physical address space below 4G which
++ * can be used for PCI mappings.
++ */
++static void __init
++e820_setup_gap(struct e820entry *e820, int nr_map)
++{
++ unsigned long gapstart, gapsize, round;
++ unsigned long long last;
++ int i;
++
++ /*
++	 * Search for the biggest gap in the low 32 bits of the e820
++ * memory space.
++ */
++ last = 0x100000000ull;
++ gapstart = 0x10000000;
++ gapsize = 0x400000;
++ i = nr_map;
++ while (--i >= 0) {
++ unsigned long long start = e820[i].addr;
++ unsigned long long end = start + e820[i].size;
++
++ /*
++ * Since "last" is at most 4GB, we know we'll
++ * fit in 32 bits if this condition is true
++ */
++ if (last > end) {
++ unsigned long gap = last - end;
++
++ if (gap > gapsize) {
++ gapsize = gap;
++ gapstart = end;
++ }
++ }
++ if (start < last)
++ last = start;
++ }
++
++ /*
++ * See how much we want to round up: start off with
++ * rounding to the next 1MB area.
++ */
++ round = 0x100000;
++ while ((gapsize >> 4) > round)
++ round += round;
++ /* Fun with two's complement */
++ pci_mem_start = (gapstart + round) & -round;
++
++ printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
++ pci_mem_start, gapstart, gapsize);
++}
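++
++/*
++ * Worked example of the rounding above (hypothetical e820 layout): if
++ * the largest hole below 4G starts at 0xd0000000 and is 0x30000000
++ * long, 'round' doubles from 0x100000 until it reaches at least
++ * gapsize/16, ending at 0x4000000 (64MB), and pci_mem_start becomes
++ * (0xd0000000 + 0x4000000) & -0x4000000 == 0xd4000000.
++ */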
++
++/*
++ * Request address space for all standard resources
++ *
++ * This is called just before pcibios_init(), which is also a
++ * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
++ */
++static int __init request_standard_resources(void)
++{
++ int i;
++
++ /* Nothing to do if not running in dom0. */
++ if (!is_initial_xendomain())
++ return 0;
++
++ printk("Setting up standard PCI resources\n");
++#ifdef CONFIG_XEN
++ legacy_init_iomem_resources(machine_e820.map, machine_e820.nr_map,
++ &code_resource, &data_resource);
++#else
++ if (efi_enabled)
++ efi_initialize_iomem_resources(&code_resource, &data_resource);
++ else
++ legacy_init_iomem_resources(e820.map, e820.nr_map,
++ &code_resource, &data_resource);
++#endif
++
++ /* EFI systems may still have VGA */
++ request_resource(&iomem_resource, &video_ram_resource);
++
++ /* request I/O space for devices used on all i[345]86 PCs */
++ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
++ request_resource(&ioport_resource, &standard_io_resources[i]);
++ return 0;
++}
++
++subsys_initcall(request_standard_resources);
++
++static void __init register_memory(void)
++{
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain()) {
++ struct xen_memory_map memmap;
++
++ memmap.nr_entries = E820MAX;
++ set_xen_guest_handle(memmap.buffer, machine_e820.map);
++
++ if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
++ BUG();
++
++ machine_e820.nr_map = memmap.nr_entries;
++ e820_setup_gap(machine_e820.map, machine_e820.nr_map);
++ }
++ else
++#endif
++ e820_setup_gap(e820.map, e820.nr_map);
++}
++
++#ifdef CONFIG_MCA
++static void set_mca_bus(int x)
++{
++ MCA_bus = x;
++}
++#else
++static void set_mca_bus(int x) { }
++#endif
++
++/*
++ * Determine if we were loaded by an EFI loader. If so, then we have also been
++ * passed the efi memmap, systab, etc., so we should use these data structures
++ * for initialization. Note, the efi init code path is determined by the
++ * global efi_enabled. This allows the same kernel image to be used on existing
++ * systems (with a traditional BIOS) as well as on EFI systems.
++ */
++void __init setup_arch(char **cmdline_p)
++{
++ int i, j, k, fpp;
++ struct physdev_set_iopl set_iopl;
++ unsigned long max_low_pfn;
++
++ /* Force a quick death if the kernel panics (not domain 0). */
++ extern int panic_timeout;
++ if (!panic_timeout && !is_initial_xendomain())
++ panic_timeout = 1;
++
++ /* Register a call for panic conditions. */
++ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
++
++ HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
++ HYPERVISOR_vm_assist(VMASST_CMD_enable,
++ VMASST_TYPE_writable_pagetables);
++
++ memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
++ early_cpu_init();
++#ifdef CONFIG_SMP
++ prefill_possible_map();
++#endif
++
++ /*
++ * FIXME: This isn't an official loader_type right
++ * now but does currently work with elilo.
++ * If we were configured as an EFI kernel, check to make
++ * sure that we were loaded correctly from elilo and that
++ * the system table is valid. If not, then initialize normally.
++ */
++#ifdef CONFIG_EFI
++ if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
++ efi_enabled = 1;
++#endif
++
++ /* This must be initialized to UNNAMED_MAJOR for ipconfig to work
++ properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
++ */
++ ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
++ drive_info = DRIVE_INFO;
++ screen_info = SCREEN_INFO;
++ edid_info = EDID_INFO;
++ apm_info.bios = APM_BIOS_INFO;
++ ist_info = IST_INFO;
++ saved_videomode = VIDEO_MODE;
++ if( SYS_DESC_TABLE.length != 0 ) {
++ set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
++ machine_id = SYS_DESC_TABLE.table[0];
++ machine_submodel_id = SYS_DESC_TABLE.table[1];
++ BIOS_revision = SYS_DESC_TABLE.table[2];
++ }
++ bootloader_type = LOADER_TYPE;
++
++ if (is_initial_xendomain()) {
++ /* This is drawn from a dump from vgacon:startup in
++ * standard Linux. */
++ screen_info.orig_video_mode = 3;
++ screen_info.orig_video_isVGA = 1;
++ screen_info.orig_video_lines = 25;
++ screen_info.orig_video_cols = 80;
++ screen_info.orig_video_ega_bx = 3;
++ screen_info.orig_video_points = 16;
++ screen_info.orig_y = screen_info.orig_video_lines - 1;
++ if (xen_start_info->console.dom0.info_size >=
++ sizeof(struct dom0_vga_console_info)) {
++ const struct dom0_vga_console_info *info =
++ (struct dom0_vga_console_info *)(
++ (char *)xen_start_info +
++ xen_start_info->console.dom0.info_off);
++ dom0_init_screen_info(info);
++ }
++ xen_start_info->console.domU.mfn = 0;
++ xen_start_info->console.domU.evtchn = 0;
++ } else
++ screen_info.orig_video_isVGA = 0;
++
++#ifdef CONFIG_BLK_DEV_RAM
++ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
++ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
++ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
++#endif
++
++ setup_xen_features();
++
++ ARCH_SETUP
++ if (efi_enabled)
++ efi_init();
++ else {
++ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++ print_memory_map(machine_specific_memory_setup());
++ }
++
++ copy_edd();
++
++ if (!MOUNT_ROOT_RDONLY)
++ root_mountflags &= ~MS_RDONLY;
++ init_mm.start_code = (unsigned long) _text;
++ init_mm.end_code = (unsigned long) _etext;
++ init_mm.end_data = (unsigned long) _edata;
++ init_mm.brk = (PFN_UP(__pa(xen_start_info->pt_base)) +
++ xen_start_info->nr_pt_frames) << PAGE_SHIFT;
++
++ code_resource.start = virt_to_phys(_text);
++ code_resource.end = virt_to_phys(_etext)-1;
++ data_resource.start = virt_to_phys(_etext);
++ data_resource.end = virt_to_phys(_edata)-1;
++
++ parse_cmdline_early(cmdline_p);
++
++#ifdef CONFIG_EARLY_PRINTK
++ {
++ char *s = strstr(*cmdline_p, "earlyprintk=");
++ if (s) {
++ setup_early_printk(strchr(s, '=') + 1);
++ printk("early console enabled\n");
++ }
++ }
++#endif
++
++ max_low_pfn = setup_memory();
++
++ /*
++ * NOTE: before this point _nobody_ is allowed to allocate
++ * any memory using the bootmem allocator. Although the
++	 * allocator is now initialised, only the first 8Mb of the kernel
++ * virtual address space has been mapped. All allocations before
++ * paging_init() has completed must use the alloc_bootmem_low_pages()
++ * variant (which allocates DMA'able memory) and care must be taken
++ * not to exceed the 8Mb limit.
++ */
++
++#ifdef CONFIG_SMP
++ smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
++#endif
++ paging_init();
++ remapped_pgdat_init();
++ sparse_init();
++ zone_sizes_init();
++
++#ifdef CONFIG_X86_FIND_SMP_CONFIG
++ /*
++ * Find and reserve possible boot-time SMP configuration:
++ */
++ find_smp_config();
++#endif
++
++ /* Make sure we have a correctly sized P->M table. */
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ phys_to_machine_mapping = alloc_bootmem_low_pages(
++ max_pfn * sizeof(unsigned long));
++ memset(phys_to_machine_mapping, ~0,
++ max_pfn * sizeof(unsigned long));
++ memcpy(phys_to_machine_mapping,
++ (unsigned long *)xen_start_info->mfn_list,
++ xen_start_info->nr_pages * sizeof(unsigned long));
++ free_bootmem(
++ __pa(xen_start_info->mfn_list),
++ PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
++ sizeof(unsigned long))));
++
++ /*
++		 * Initialise the frame-list-list: the frames that hold the list
++		 * of frames making up the p2m table. Used by save/restore.
++ */
++ pfn_to_mfn_frame_list_list = alloc_bootmem_low_pages(PAGE_SIZE);
++
++ fpp = PAGE_SIZE/sizeof(unsigned long);
++ for (i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++) {
++ if ((j % fpp) == 0) {
++ k++;
++ BUG_ON(k>=16);
++ pfn_to_mfn_frame_list[k] =
++ alloc_bootmem_low_pages(PAGE_SIZE);
++ pfn_to_mfn_frame_list_list[k] =
++ virt_to_mfn(pfn_to_mfn_frame_list[k]);
++ j=0;
++ }
++ pfn_to_mfn_frame_list[k][j] =
++ virt_to_mfn(&phys_to_machine_mapping[i]);
++ }
++ HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
++ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++ virt_to_mfn(pfn_to_mfn_frame_list_list);
++ }
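++
++	/*
++	 * Size check for the scheme above, assuming an i386 guest with
++	 * 4K pages and 4-byte longs: fpp == 1024, each frame-list page
++	 * names 1024 p2m pages covering 1024*1024 pfns == 4GB, so the
++	 * 16-slot pfn_to_mfn_frame_list[] caps the p2m table at 64GB
++	 * (hence the BUG_ON(k >= 16)).
++	 */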
++
++ /*
++ * NOTE: at this point the bootmem allocator is fully available.
++ */
++
++ if (is_initial_xendomain())
++ dmi_scan_machine();
++
++#ifdef CONFIG_X86_GENERICARCH
++ generic_apic_probe(*cmdline_p);
++#endif
++ if (efi_enabled)
++ efi_map_memmap();
++
++ set_iopl.iopl = 1;
++ HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
++
++#ifdef CONFIG_ACPI
++ if (!is_initial_xendomain()) {
++ printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
++ acpi_disabled = 1;
++ acpi_ht = 0;
++ }
++
++ /*
++ * Parse the ACPI tables for possible boot-time SMP configuration.
++ */
++ acpi_boot_table_init();
++#endif
++
++#ifdef CONFIG_X86_IO_APIC
++ check_acpi_pci(); /* Checks more than just ACPI actually */
++#endif
++
++#ifdef CONFIG_ACPI
++ acpi_boot_init();
++
++#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
++ if (def_to_bigsmp)
++ printk(KERN_WARNING "More than 8 CPUs detected and "
++ "CONFIG_X86_PC cannot handle it.\nUse "
++ "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
++#endif
++#endif
++#ifdef CONFIG_X86_LOCAL_APIC
++ if (smp_found_config)
++ get_smp_config();
++#endif
++
++ register_memory();
++
++ if (is_initial_xendomain()) {
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++ if (!efi_enabled ||
++ (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
++ conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++#endif
++ } else {
++#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++ }
++ tsc_init();
++
++ xencons_early_setup();
++}
++
++static int
++xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
++{
++ HYPERVISOR_shutdown(SHUTDOWN_crash);
++ /* we're never actually going to get here... */
++ return NOTIFY_DONE;
++}
++
++static __init int add_pcspkr(void)
++{
++ struct platform_device *pd;
++ int ret;
++
++ if (!is_initial_xendomain())
++ return 0;
++
++ pd = platform_device_alloc("pcspkr", -1);
++ if (!pd)
++ return -ENOMEM;
++
++ ret = platform_device_add(pd);
++ if (ret)
++ platform_device_put(pd);
++
++ return ret;
++}
++device_initcall(add_pcspkr);
++
++/*
++ * Local Variables:
++ * mode:c
++ * c-file-style:"k&r"
++ * c-basic-offset:8
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/smp-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,624 @@
++/*
++ * Intel SMP support routines.
++ *
++ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
++ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * This code is released under the GNU General Public License version 2 or
++ * later.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/cache.h>
++#include <linux/interrupt.h>
++#include <linux/cpu.h>
++#include <linux/module.h>
++
++#include <asm/mtrr.h>
++#include <asm/tlbflush.h>
++#if 0
++#include <mach_apic.h>
++#endif
++#include <xen/evtchn.h>
++
++/*
++ * Some notes on x86 processor bugs affecting SMP operation:
++ *
++ * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
++ * The Linux implications for SMP are handled as follows:
++ *
++ * Pentium III / [Xeon]
++ * None of the E1AP-E3AP errata are visible to the user.
++ *
++ * E1AP. see PII A1AP
++ * E2AP. see PII A2AP
++ * E3AP. see PII A3AP
++ *
++ * Pentium II / [Xeon]
++ * None of the A1AP-A3AP errata are visible to the user.
++ *
++ * A1AP. see PPro 1AP
++ * A2AP. see PPro 2AP
++ * A3AP. see PPro 7AP
++ *
++ * Pentium Pro
++ * None of 1AP-9AP errata are visible to the normal user,
++ * except occasional delivery of 'spurious interrupt' as trap #15.
++ * This is very rare and a non-problem.
++ *
++ * 1AP. Linux maps APIC as non-cacheable
++ * 2AP. worked around in hardware
++ * 3AP. fixed in C0 and above steppings microcode update.
++ * Linux does not use excessive STARTUP_IPIs.
++ * 4AP. worked around in hardware
++ * 5AP. symmetric IO mode (normal Linux operation) not affected.
++ * 'noapic' mode has vector 0xf filled out properly.
++ * 6AP. 'noapic' mode might be affected - fixed in later steppings
++ *	7AP.	We do not assume writes to the LVT deasserting IRQs
++ * 8AP. We do not enable low power mode (deep sleep) during MP bootup
++ * 9AP. We do not use mixed mode
++ *
++ * Pentium
++ * There is a marginal case where REP MOVS on 100MHz SMP
++ * machines with B stepping processors can fail. XXX should provide
++ * an L1cache=Writethrough or L1cache=off option.
++ *
++ * B stepping CPUs may hang. There are hardware work arounds
++ * for this. We warn about it in case your board doesn't have the work
++ *		arounds. Basically that's so I can tell anyone with a B stepping
++ * CPU and SMP problems "tough".
++ *
++ * Specific items [From Pentium Processor Specification Update]
++ *
++ * 1AP. Linux doesn't use remote read
++ * 2AP. Linux doesn't trust APIC errors
++ * 3AP. We work around this
++ * 4AP. Linux never generated 3 interrupts of the same priority
++ * to cause a lost local interrupt.
++ * 5AP. Remote read is never used
++ * 6AP. not affected - worked around in hardware
++ * 7AP. not affected - worked around in hardware
++ * 8AP. worked around in hardware - we get explicit CS errors if not
++ * 9AP. only 'noapic' mode affected. Might generate spurious
++ * interrupts, we log only the first one and count the
++ * rest silently.
++ * 10AP. not affected - worked around in hardware
++ * 11AP. Linux reads the APIC between writes to avoid this, as per
++ * the documentation. Make sure you preserve this as it affects
++ * the C stepping chips too.
++ * 12AP. not affected - worked around in hardware
++ * 13AP. not affected - worked around in hardware
++ * 14AP. we always deassert INIT during bootup
++ * 15AP. not affected - worked around in hardware
++ * 16AP. not affected - worked around in hardware
++ * 17AP. not affected - worked around in hardware
++ * 18AP. not affected - worked around in hardware
++ * 19AP. not affected - worked around in BIOS
++ *
++ * If this sounds worrying, believe me these bugs are either ___RARE___,
++ * or are signal timing bugs worked around in hardware, and there's
++ * nothing of note with C stepping upwards.
++ */
++
++DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
++
++/*
++ * the following functions deal with sending IPIs between CPUs.
++ *
++ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
++ */
++
++static inline int __prepare_ICR (unsigned int shortcut, int vector)
++{
++ unsigned int icr = shortcut | APIC_DEST_LOGICAL;
++
++ switch (vector) {
++ default:
++ icr |= APIC_DM_FIXED | vector;
++ break;
++ case NMI_VECTOR:
++ icr |= APIC_DM_NMI;
++ break;
++ }
++ return icr;
++}
++
++static inline int __prepare_ICR2 (unsigned int mask)
++{
++ return SET_APIC_DEST_FIELD(mask);
++}
++
++DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++
++static inline void __send_IPI_one(unsigned int cpu, int vector)
++{
++ int irq = per_cpu(ipi_to_irq, cpu)[vector];
++ BUG_ON(irq < 0);
++ notify_remote_via_irq(irq);
++}
++
++void __send_IPI_shortcut(unsigned int shortcut, int vector)
++{
++ int cpu;
++
++ switch (shortcut) {
++ case APIC_DEST_SELF:
++ __send_IPI_one(smp_processor_id(), vector);
++ break;
++ case APIC_DEST_ALLBUT:
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu == smp_processor_id())
++ continue;
++ if (cpu_isset(cpu, cpu_online_map)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++ break;
++ default:
++ printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
++ vector);
++ break;
++ }
++}
++
++void fastcall send_IPI_self(int vector)
++{
++ __send_IPI_shortcut(APIC_DEST_SELF, vector);
++}
++
++/*
++ * This is only used on smaller machines.
++ */
++void send_IPI_mask_bitmask(cpumask_t mask, int vector)
++{
++ unsigned long flags;
++ unsigned int cpu;
++
++ local_irq_save(flags);
++ WARN_ON(cpus_addr(mask)[0] & ~cpus_addr(cpu_online_map)[0]);
++
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu_isset(cpu, mask)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++
++ local_irq_restore(flags);
++}
++
++void send_IPI_mask_sequence(cpumask_t mask, int vector)
++{
++
++ send_IPI_mask_bitmask(mask, vector);
++}
++
++#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
++
++#if 0 /* XEN */
++/*
++ * Smarter SMP flushing macros.
++ * c/o Linus Torvalds.
++ *
++ * These mean you can really definitely utterly forget about
++ * writing to user space from interrupts. (It's not allowed anyway.)
++ *
++ * Optimizations Manfred Spraul <manfred@colorfullife.com>
++ */
++
++static cpumask_t flush_cpumask;
++static struct mm_struct * flush_mm;
++static unsigned long flush_va;
++static DEFINE_SPINLOCK(tlbstate_lock);
++#define FLUSH_ALL 0xffffffff
++
++/*
++ * We cannot call mmdrop() because we are in interrupt context,
++ * instead update mm->cpu_vm_mask.
++ *
++ * We need to reload %cr3 since the page tables may be going
++ * away from under us..
++ */
++static inline void leave_mm (unsigned long cpu)
++{
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
++ BUG();
++ cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
++ load_cr3(swapper_pg_dir);
++}
++
++/*
++ *
++ * The flush IPI assumes that a thread switch happens in this order:
++ * [cpu0: the cpu that switches]
++ * 1) switch_mm() either 1a) or 1b)
++ * 1a) thread switch to a different mm
++ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
++ * Stop ipi delivery for the old mm. This is not synchronized with
++ *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
++ *	for the wrong mm, and in the worst case we perform a superfluous
++ * tlb flush.
++ * 1a2) set cpu_tlbstate to TLBSTATE_OK
++ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
++ * was in lazy tlb mode.
++ * 1a3) update cpu_tlbstate[].active_mm
++ * Now cpu0 accepts tlb flushes for the new mm.
++ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
++ * Now the other cpus will send tlb flush ipis.
++ * 1a4) change cr3.
++ * 1b) thread switch without mm change
++ * cpu_tlbstate[].active_mm is correct, cpu0 already handles
++ * flush ipis.
++ * 1b1) set cpu_tlbstate to TLBSTATE_OK
++ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
++ * Atomically set the bit [other cpus will start sending flush ipis],
++ * and test the bit.
++ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
++ * 2) switch %%esp, ie current
++ *
++ * The interrupt must handle 2 special cases:
++ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
++ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
++ * runs in kernel space, the cpu could load tlb entries for user space
++ * pages.
++ *
++ * The good news is that cpu_tlbstate is local to each cpu, no
++ * write/read ordering problems.
++ */
++
++/*
++ * TLB flush IPI:
++ *
++ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
++ * 2) Leave the mm if we are in the lazy tlb mode.
++ */
++
++irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ unsigned long cpu;
++
++ cpu = get_cpu();
++
++ if (!cpu_isset(cpu, flush_cpumask))
++ goto out;
++ /*
++ * This was a BUG() but until someone can quote me the
++ * line from the intel manual that guarantees an IPI to
++ * multiple CPUs is retried _only_ on the erroring CPUs
++ * its staying as a return
++ *
++ * BUG();
++ */
++
++ if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
++ if (flush_va == FLUSH_ALL)
++ local_flush_tlb();
++ else
++ __flush_tlb_one(flush_va);
++ } else
++ leave_mm(cpu);
++ }
++ smp_mb__before_clear_bit();
++ cpu_clear(cpu, flush_cpumask);
++ smp_mb__after_clear_bit();
++out:
++ put_cpu_no_resched();
++
++ return IRQ_HANDLED;
++}
++
++static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
++ unsigned long va)
++{
++ /*
++ * A couple of (to be removed) sanity checks:
++ *
++ * - current CPU must not be in mask
++ * - mask must exist :)
++ */
++ BUG_ON(cpus_empty(cpumask));
++ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
++ BUG_ON(!mm);
++
++ /* If a CPU which we ran on has gone down, OK. */
++ cpus_and(cpumask, cpumask, cpu_online_map);
++ if (cpus_empty(cpumask))
++ return;
++
++ /*
++	 * I'm not happy about this global shared spinlock in the
++ * MM hot path, but we'll see how contended it is.
++ * Temporarily this turns IRQs off, so that lockups are
++ * detected by the NMI watchdog.
++ */
++ spin_lock(&tlbstate_lock);
++
++ flush_mm = mm;
++ flush_va = va;
++#if NR_CPUS <= BITS_PER_LONG
++ atomic_set_mask(cpumask, &flush_cpumask);
++#else
++ {
++ int k;
++ unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
++ unsigned long *cpu_mask = (unsigned long *)&cpumask;
++ for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
++ atomic_set_mask(cpu_mask[k], &flush_mask[k]);
++ }
++#endif
++ /*
++ * We have to send the IPI only to
++ * CPUs affected.
++ */
++ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
++
++ while (!cpus_empty(flush_cpumask))
++ /* nothing. lockup detection does not belong here */
++ mb();
++
++ flush_mm = NULL;
++ flush_va = 0;
++ spin_unlock(&tlbstate_lock);
++}
++
++void flush_tlb_current_task(void)
++{
++ struct mm_struct *mm = current->mm;
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ local_flush_tlb();
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++ preempt_enable();
++}
++
++void flush_tlb_mm (struct mm_struct * mm)
++{
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ if (current->active_mm == mm) {
++ if (current->mm)
++ local_flush_tlb();
++ else
++ leave_mm(smp_processor_id());
++ }
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++
++ preempt_enable();
++}
++
++void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ if (current->active_mm == mm) {
++ if(current->mm)
++ __flush_tlb_one(va);
++ else
++ leave_mm(smp_processor_id());
++ }
++
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, va);
++
++ preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_page);
++
++static void do_flush_tlb_all(void* info)
++{
++ unsigned long cpu = smp_processor_id();
++
++ __flush_tlb_all();
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
++ leave_mm(cpu);
++}
++
++void flush_tlb_all(void)
++{
++ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
++}
++
++#else
++
++irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{ return 0; }
++void flush_tlb_current_task(void)
++{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
++void flush_tlb_mm(struct mm_struct * mm)
++{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
++void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
++{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
++EXPORT_SYMBOL(flush_tlb_page);
++void flush_tlb_all(void)
++{ xen_tlb_flush_all(); }
++
++#endif /* XEN */
++
++/*
++ * This function sends a 'reschedule' IPI to another CPU.
++ * It goes straight through and wastes no time serializing
++ * anything. Worst case is that we lose a reschedule ...
++ */
++void smp_send_reschedule(int cpu)
++{
++ WARN_ON(cpu_is_offline(cpu));
++ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
++}
++
++/*
++ * Structure and data for smp_call_function(). This is designed to minimise
++ * static memory requirements. It also looks cleaner.
++ */
++static DEFINE_SPINLOCK(call_lock);
++
++struct call_data_struct {
++ void (*func) (void *info);
++ void *info;
++ atomic_t started;
++ atomic_t finished;
++ int wait;
++};
++
++void lock_ipi_call_lock(void)
++{
++ spin_lock_irq(&call_lock);
++}
++
++void unlock_ipi_call_lock(void)
++{
++ spin_unlock_irq(&call_lock);
++}
++
++static struct call_data_struct *call_data;
++
++/**
++ * smp_call_function(): Run a function on all other CPUs.
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: currently unused.
++ * @wait: If true, wait (atomically) until function has completed on other CPUs.
++ *
++ * Returns 0 on success, else a negative status code. Does not return until
++ * remote CPUs are nearly ready to execute <<func>> or have executed it.
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler or from a bottom half handler.
++ */
++int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
++ int wait)
++{
++ struct call_data_struct data;
++ int cpus;
++
++ /* Holding any lock stops cpus from going down. */
++ spin_lock(&call_lock);
++ cpus = num_online_cpus() - 1;
++ if (!cpus) {
++ spin_unlock(&call_lock);
++ return 0;
++ }
++
++ /* Can deadlock when called with interrupts disabled */
++ WARN_ON(irqs_disabled());
++
++ data.func = func;
++ data.info = info;
++ atomic_set(&data.started, 0);
++ data.wait = wait;
++ if (wait)
++ atomic_set(&data.finished, 0);
++
++ call_data = &data;
++ mb();
++
++ /* Send a message to all other CPUs and wait for them to respond */
++ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++
++ /* Wait for response */
++ while (atomic_read(&data.started) != cpus)
++ barrier();
++
++ if (wait)
++ while (atomic_read(&data.finished) != cpus)
++ barrier();
++ spin_unlock(&call_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(smp_call_function);
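
Purely as a usage illustration (not part of this patch, with hypothetical names), a caller invokes smp_call_function() from process context with interrupts enabled; the callback runs on every other online CPU in interrupt context, so it must stay short and non-blocking:

#include <linux/smp.h>
#include <asm/atomic.h>

static atomic_t demo_hits = ATOMIC_INIT(0);	/* hypothetical counter */

/* Runs on each other online CPU, in interrupt context: keep it brief. */
static void demo_count_cpu(void *info)
{
	atomic_inc(&demo_hits);
}

static void demo_broadcast(void)
{
	/* nonatomic is unused; wait=1 blocks until every CPU has run it */
	smp_call_function(demo_count_cpu, NULL, 0, 1);
}
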
++
++static void stop_this_cpu (void * dummy)
++{
++ /*
++ * Remove this CPU:
++ */
++ cpu_clear(smp_processor_id(), cpu_online_map);
++ local_irq_disable();
++#if 0
++ disable_local_APIC();
++#endif
++ if (cpu_data[smp_processor_id()].hlt_works_ok)
++ for(;;) halt();
++ for (;;);
++}
++
++/*
++ * This function calls the 'stop' function on all other CPUs in the system.
++ */
++
++void smp_send_stop(void)
++{
++ smp_call_function(stop_this_cpu, NULL, 1, 0);
++
++ local_irq_disable();
++#if 0
++ disable_local_APIC();
++#endif
++ local_irq_enable();
++}
++
++/*
++ * Reschedule call back. Nothing to do,
++ * all the work is done automatically when
++ * we return from the interrupt.
++ */
++irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++
++ return IRQ_HANDLED;
++}
++
++#include <linux/kallsyms.h>
++irqreturn_t smp_call_function_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ void (*func) (void *info) = call_data->func;
++ void *info = call_data->info;
++ int wait = call_data->wait;
++
++ /*
++ * Notify initiating CPU that I've grabbed the data and am
++ * about to execute the function
++ */
++ mb();
++ atomic_inc(&call_data->started);
++ /*
++ * At this point the info structure may be out of scope unless wait==1
++ */
++ irq_enter();
++ (*func)(info);
++ irq_exit();
++
++ if (wait) {
++ mb();
++ atomic_inc(&call_data->finished);
++ }
++
++ return IRQ_HANDLED;
++}
++
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/swiotlb.c 2007-08-27 14:02:10.000000000 -0400
+@@ -0,0 +1,716 @@
++/*
++ * Dynamic DMA mapping support.
++ *
++ * This implementation is a fallback for platforms that do not support
++ * I/O TLBs (aka DMA address translation hardware).
++ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
++ * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
++ * Copyright (C) 2000, 2003 Hewlett-Packard Co
++ * David Mosberger-Tang <davidm@hpl.hp.com>
++ * Copyright (C) 2005 Keir Fraser <keir@xensource.com>
++ */
++
++#include <linux/cache.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#include <linux/bootmem.h>
++#include <linux/highmem.h>
++#include <asm/io.h>
++#include <asm/pci.h>
++#include <asm/dma.h>
++#include <asm/uaccess.h>
++#include <xen/interface/memory.h>
++
++int swiotlb;
++EXPORT_SYMBOL(swiotlb);
++
++#define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))
++
++#define SG_ENT_PHYS_ADDRESS(sg) (page_to_bus((sg)->page) + (sg)->offset)
++
++/*
++ * Maximum allowable number of contiguous slabs to map,
++ * must be a power of 2. What is the appropriate value?
++ * The complexity of {map,unmap}_single is linearly dependent on this value.
++ */
++#define IO_TLB_SEGSIZE 128
++
++/*
++ * log of the size of each IO TLB slab. The number of slabs is command line
++ * controllable.
++ */
++#define IO_TLB_SHIFT 11
++
++int swiotlb_force;
++
++static char *iotlb_virt_start;
++static unsigned long iotlb_nslabs;
++
++/*
++ * Used to do a quick range check in swiotlb_unmap_single and
++ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
++ * API.
++ */
++static unsigned long iotlb_pfn_start, iotlb_pfn_end;
++
++/* Does the given dma address reside within the swiotlb aperture? */
++static inline int in_swiotlb_aperture(dma_addr_t dev_addr)
++{
++ unsigned long pfn = mfn_to_local_pfn(dev_addr >> PAGE_SHIFT);
++ return (pfn_valid(pfn)
++ && (pfn >= iotlb_pfn_start)
++ && (pfn < iotlb_pfn_end));
++}
++
++/*
++ * When the IOMMU overflows we return a fallback buffer. This sets the size.
++ */
++static unsigned long io_tlb_overflow = 32*1024;
++
++void *io_tlb_overflow_buffer;
++
++/*
++ * This is a free list describing the number of free entries available from
++ * each index
++ */
++static unsigned int *io_tlb_list;
++static unsigned int io_tlb_index;
++
++/*
++ * We need to save away the original address corresponding to a mapped entry
++ * for the sync operations.
++ */
++static struct phys_addr {
++ struct page *page;
++ unsigned int offset;
++} *io_tlb_orig_addr;
++
++/*
++ * Protect the above data structures in the map and unmap calls
++ */
++static DEFINE_SPINLOCK(io_tlb_lock);
++
++static unsigned int dma_bits;
++static unsigned int __initdata max_dma_bits = 32;
++static int __init
++setup_dma_bits(char *str)
++{
++ max_dma_bits = simple_strtoul(str, NULL, 0);
++ return 0;
++}
++__setup("dma_bits=", setup_dma_bits);
++
++static int __init
++setup_io_tlb_npages(char *str)
++{
++	/* Unlike ia64, the size is the aperture in megabytes, not 'slabs'! */
++ if (isdigit(*str)) {
++ iotlb_nslabs = simple_strtoul(str, &str, 0) <<
++ (20 - IO_TLB_SHIFT);
++ iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
++ /* Round up to power of two (xen_create_contiguous_region). */
++ while (iotlb_nslabs & (iotlb_nslabs-1))
++ iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
++ }
++ if (*str == ',')
++ ++str;
++ /*
++ * NB. 'force' enables the swiotlb, but doesn't force its use for
++ * every DMA like it does on native Linux. 'off' forcibly disables
++ * use of the swiotlb.
++ */
++ if (!strcmp(str, "force"))
++ swiotlb_force = 1;
++ else if (!strcmp(str, "off"))
++ swiotlb_force = -1;
++ return 1;
++}
++__setup("swiotlb=", setup_io_tlb_npages);
++/* make io_tlb_overflow tunable too? */
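
The round-up loop in setup_io_tlb_npages() is worth unpacking: repeatedly adding the lowest set bit carries upward until only one bit remains, i.e. the value is rounded up to the next power of two. A small stand-alone sketch of the same arithmetic (hypothetical helper name, plain user-space C, illustrative only):

#include <stdio.h>

/* Same idea as the loop above: while more than one bit is set, add the
 * lowest set bit; the carries eventually leave a single (higher) bit. */
static unsigned long round_up_pow2(unsigned long n)
{
	while (n & (n - 1))
		n += n & ~(n - 1);
	return n;
}

int main(void)
{
	/* e.g. "swiotlb=24": 24 MB -> 12288 slabs of 2 KB, rounded to 16384 */
	printf("%lu\n", round_up_pow2(12288));	/* prints 16384 */
	return 0;
}
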
++
++/*
++ * Statically reserve bounce buffer space and initialize bounce buffer data
++ * structures for the software IO TLB used to implement the PCI DMA API.
++ */
++void
++swiotlb_init_with_default_size (size_t default_size)
++{
++ unsigned long i, bytes;
++ int rc;
++
++ if (!iotlb_nslabs) {
++ iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
++ iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
++ /* Round up to power of two (xen_create_contiguous_region). */
++ while (iotlb_nslabs & (iotlb_nslabs-1))
++ iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
++ }
++
++ bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
++
++ /*
++ * Get IO TLB memory from the low pages
++ */
++ iotlb_virt_start = alloc_bootmem_low_pages(bytes);
++ if (!iotlb_virt_start)
++ panic("Cannot allocate SWIOTLB buffer!\n");
++
++ dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
++ for (i = 0; i < iotlb_nslabs; i += IO_TLB_SEGSIZE) {
++ do {
++ rc = xen_create_contiguous_region(
++ (unsigned long)iotlb_virt_start + (i << IO_TLB_SHIFT),
++ get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
++ dma_bits);
++ } while (rc && dma_bits++ < max_dma_bits);
++ if (rc) {
++ if (i == 0)
++ panic("No suitable physical memory available for SWIOTLB buffer!\n"
++ "Use dom0_mem Xen boot parameter to reserve\n"
++ "some DMA memory (e.g., dom0_mem=-128M).\n");
++ iotlb_nslabs = i;
++ i <<= IO_TLB_SHIFT;
++ free_bootmem(__pa(iotlb_virt_start + i), bytes - i);
++ bytes = i;
++ for (dma_bits = 0; i > 0; i -= IO_TLB_SEGSIZE << IO_TLB_SHIFT) {
++ unsigned int bits = fls64(virt_to_bus(iotlb_virt_start + i - 1));
++
++ if (bits > dma_bits)
++ dma_bits = bits;
++ }
++ break;
++ }
++ }
++
++ /*
++ * Allocate and initialize the free list array. This array is used
++ * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
++ */
++ io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
++ for (i = 0; i < iotlb_nslabs; i++)
++ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
++ io_tlb_index = 0;
++ io_tlb_orig_addr = alloc_bootmem(
++ iotlb_nslabs * sizeof(*io_tlb_orig_addr));
++
++ /*
++ * Get the overflow emergency buffer
++ */
++ io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
++ if (!io_tlb_overflow_buffer)
++ panic("Cannot allocate SWIOTLB overflow buffer!\n");
++
++ do {
++ rc = xen_create_contiguous_region(
++ (unsigned long)io_tlb_overflow_buffer,
++ get_order(io_tlb_overflow),
++ dma_bits);
++ } while (rc && dma_bits++ < max_dma_bits);
++ if (rc)
++ panic("No suitable physical memory available for SWIOTLB overflow buffer!\n");
++
++ iotlb_pfn_start = __pa(iotlb_virt_start) >> PAGE_SHIFT;
++ iotlb_pfn_end = iotlb_pfn_start + (bytes >> PAGE_SHIFT);
++
++ printk(KERN_INFO "Software IO TLB enabled: \n"
++ " Aperture: %lu megabytes\n"
++ " Kernel range: %p - %p\n"
++ " Address size: %u bits\n",
++ bytes >> 20,
++ iotlb_virt_start, iotlb_virt_start + bytes,
++ dma_bits);
++}
++
++void
++swiotlb_init(void)
++{
++ long ram_end;
++ size_t defsz = 64 * (1 << 20); /* 64MB default size */
++
++ if (swiotlb_force == 1) {
++ swiotlb = 1;
++ } else if ((swiotlb_force != -1) &&
++ is_running_on_xen() &&
++ is_initial_xendomain()) {
++ /* Domain 0 always has a swiotlb. */
++ ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
++ if (ram_end <= 0x7ffff)
++			defsz = 2 * (1 << 20); /* 2MB on <2GB systems. */
++ swiotlb = 1;
++ }
++
++ if (swiotlb)
++ swiotlb_init_with_default_size(defsz);
++ else
++ printk(KERN_INFO "Software IO TLB disabled\n");
++}
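
XENMEM_maximum_ram_page reports the highest machine page number, so with 4 KiB pages the 0x7ffff threshold above corresponds roughly to 0x80000 * 4 KiB = 2 GiB of RAM. A tiny stand-alone check of that arithmetic (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned long max_page = 0x7ffff;			/* highest page number */
	unsigned long long bytes = (max_page + 1ULL) << 12;	/* assuming 4 KiB pages */

	printf("%llu MiB\n", bytes >> 20);			/* prints 2048 MiB */
	return 0;
}
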
++
++/*
++ * We use __copy_to_user_inatomic to transfer to the host buffer because the
++ * buffer may be mapped read-only (e.g, in blkback driver) but lower-level
++ * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an
++ * unnecessary copy from the aperture to the host buffer, and a page fault.
++ */
++static void
++__sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
++{
++ if (PageHighMem(buffer.page)) {
++ size_t len, bytes;
++ char *dev, *host, *kmp;
++ len = size;
++ while (len != 0) {
++ unsigned long flags;
++
++ if (((bytes = len) + buffer.offset) > PAGE_SIZE)
++ bytes = PAGE_SIZE - buffer.offset;
++ local_irq_save(flags); /* protects KM_BOUNCE_READ */
++ kmp = kmap_atomic(buffer.page, KM_BOUNCE_READ);
++ dev = dma_addr + size - len;
++ host = kmp + buffer.offset;
++ if (dir == DMA_FROM_DEVICE) {
++ if (__copy_to_user_inatomic(host, dev, bytes))
++ /* inaccessible */;
++ } else
++ memcpy(dev, host, bytes);
++ kunmap_atomic(kmp, KM_BOUNCE_READ);
++ local_irq_restore(flags);
++ len -= bytes;
++ buffer.page++;
++ buffer.offset = 0;
++ }
++ } else {
++ char *host = (char *)phys_to_virt(
++ page_to_pseudophys(buffer.page)) + buffer.offset;
++ if (dir == DMA_FROM_DEVICE) {
++ if (__copy_to_user_inatomic(host, dma_addr, size))
++ /* inaccessible */;
++ } else if (dir == DMA_TO_DEVICE)
++ memcpy(dma_addr, host, size);
++ }
++}
++
++/*
++ * Allocates bounce buffer and returns its kernel virtual address.
++ */
++static void *
++map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
++{
++ unsigned long flags;
++ char *dma_addr;
++ unsigned int nslots, stride, index, wrap;
++ int i;
++
++ /*
++ * For mappings greater than a page, we limit the stride (and
++ * hence alignment) to a page size.
++ */
++ nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++ if (size > PAGE_SIZE)
++ stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
++ else
++ stride = 1;
++
++ BUG_ON(!nslots);
++
++ /*
++ * Find suitable number of IO TLB entries size that will fit this
++ * request and allocate a buffer from that IO TLB pool.
++ */
++ spin_lock_irqsave(&io_tlb_lock, flags);
++ {
++ wrap = index = ALIGN(io_tlb_index, stride);
++
++ if (index >= iotlb_nslabs)
++ wrap = index = 0;
++
++ do {
++ /*
++ * If we find a slot that indicates we have 'nslots'
++ * number of contiguous buffers, we allocate the
++ * buffers from that slot and mark the entries as '0'
++ * indicating unavailable.
++ */
++ if (io_tlb_list[index] >= nslots) {
++ int count = 0;
++
++ for (i = index; i < (int)(index + nslots); i++)
++ io_tlb_list[i] = 0;
++ for (i = index - 1;
++ (OFFSET(i, IO_TLB_SEGSIZE) !=
++ IO_TLB_SEGSIZE -1) && io_tlb_list[i];
++ i--)
++ io_tlb_list[i] = ++count;
++ dma_addr = iotlb_virt_start +
++ (index << IO_TLB_SHIFT);
++
++ /*
++ * Update the indices to avoid searching in
++ * the next round.
++ */
++ io_tlb_index =
++ ((index + nslots) < iotlb_nslabs
++ ? (index + nslots) : 0);
++
++ goto found;
++ }
++ index += stride;
++ if (index >= iotlb_nslabs)
++ index = 0;
++ } while (index != wrap);
++
++ spin_unlock_irqrestore(&io_tlb_lock, flags);
++ return NULL;
++ }
++ found:
++ spin_unlock_irqrestore(&io_tlb_lock, flags);
++
++ /*
++ * Save away the mapping from the original address to the DMA address.
++ * This is needed when we sync the memory. Then we sync the buffer if
++ * needed.
++ */
++ io_tlb_orig_addr[index] = buffer;
++ if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
++ __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
++
++ return dma_addr;
++}
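
To make the io_tlb_list bookkeeping concrete: each slot stores how many free slots (up to the segment boundary) begin there, so an allocation zeroes the claimed slots and renumbers the free run just below them. A toy user-space model of that counting scheme, using a hypothetical 8-slot segment instead of IO_TLB_SEGSIZE (illustrative only):

#include <stdio.h>

#define SEG 8				/* toy segment size (the kernel uses 128) */
static unsigned int list[SEG];

static void init_list(void)
{
	int i;
	for (i = 0; i < SEG; i++)	/* slot i starts a run of SEG-i free slots */
		list[i] = SEG - i;
}

/* Claim 'n' slots starting at 'index', mirroring map_single()'s update. */
static void claim(int index, int n)
{
	int i, count = 0;
	for (i = index; i < index + n; i++)
		list[i] = 0;		/* mark allocated */
	for (i = index - 1; i >= 0 && list[i]; i--)
		list[i] = ++count;	/* free run below now ends at 'index' */
}

int main(void)
{
	int i;
	init_list();
	claim(3, 2);			/* take slots 3 and 4 */
	for (i = 0; i < SEG; i++)
		printf("%u ", list[i]);	/* prints: 3 2 1 0 0 3 2 1 */
	printf("\n");
	return 0;
}
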
++
++/*
++ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
++ */
++static void
++unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
++{
++ unsigned long flags;
++ int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++ int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
++ struct phys_addr buffer = io_tlb_orig_addr[index];
++
++ /*
++ * First, sync the memory before unmapping the entry
++ */
++ if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
++ __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
++
++ /*
++ * Return the buffer to the free list by setting the corresponding
++	 * entries to indicate the number of contiguous entries available.
++ * While returning the entries to the free list, we merge the entries
++ * with slots below and above the pool being returned.
++ */
++ spin_lock_irqsave(&io_tlb_lock, flags);
++ {
++ count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
++ io_tlb_list[index + nslots] : 0);
++ /*
++ * Step 1: return the slots to the free list, merging the
++		 * slots with the succeeding slots
++ */
++ for (i = index + nslots - 1; i >= index; i--)
++ io_tlb_list[i] = ++count;
++ /*
++ * Step 2: merge the returned slots with the preceding slots,
++ * if available (non zero)
++ */
++ for (i = index - 1;
++ (OFFSET(i, IO_TLB_SEGSIZE) !=
++ IO_TLB_SEGSIZE -1) && io_tlb_list[i];
++ i--)
++ io_tlb_list[i] = ++count;
++ }
++ spin_unlock_irqrestore(&io_tlb_lock, flags);
++}
++
++static void
++sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
++{
++ int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
++ struct phys_addr buffer = io_tlb_orig_addr[index];
++ BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
++ __sync_single(buffer, dma_addr, size, dir);
++}
++
++static void
++swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
++{
++ /*
++ * Ran out of IOMMU space for this operation. This is very bad.
++	 * Unfortunately the drivers cannot handle this operation properly
++	 * unless they check for pci_dma_mapping_error (most don't).
++ * When the mapping is small enough return a static buffer to limit
++ * the damage, or panic when the transfer is too big.
++ */
++ printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
++ "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");
++
++ if (size > io_tlb_overflow && do_panic) {
++ if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
++ panic("PCI-DMA: Memory would be corrupted\n");
++ if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
++ panic("PCI-DMA: Random memory would be DMAed\n");
++ }
++}
++
++/*
++ * Map a single buffer of the indicated size for DMA in streaming mode. The
++ * PCI address to use is returned.
++ *
++ * Once the device is given the dma address, the device owns this memory until
++ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
++ */
++dma_addr_t
++swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
++{
++ dma_addr_t dev_addr = virt_to_bus(ptr);
++ void *map;
++ struct phys_addr buffer;
++
++ BUG_ON(dir == DMA_NONE);
++
++ /*
++ * If the pointer passed in happens to be in the device's DMA window,
++ * we can safely return the device addr and not worry about bounce
++ * buffering it.
++ */
++ if (!range_straddles_page_boundary(ptr, size) &&
++ !address_needs_mapping(hwdev, dev_addr))
++ return dev_addr;
++
++ /*
++ * Oh well, have to allocate and map a bounce buffer.
++ */
++ buffer.page = virt_to_page(ptr);
++ buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
++ map = map_single(hwdev, buffer, size, dir);
++ if (!map) {
++ swiotlb_full(hwdev, size, dir, 1);
++ map = io_tlb_overflow_buffer;
++ }
++
++ dev_addr = virt_to_bus(map);
++ return dev_addr;
++}
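
Drivers normally reach this path through dma_map_single() rather than calling the swiotlb entry points directly; the sketch below calls them directly only to illustrate the map / check-for-overflow / unmap cycle (hypothetical device pointer and buffer, prototypes declared locally rather than assuming a particular header):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

extern dma_addr_t swiotlb_map_single(struct device *, void *, size_t, int);
extern void swiotlb_unmap_single(struct device *, dma_addr_t, size_t, int);
extern int swiotlb_dma_mapping_error(dma_addr_t);

/* Illustrative only: hand a 512-byte buffer to a device for reading. */
static int demo_dma_to_device(struct device *dev, void *buf)
{
	dma_addr_t bus = swiotlb_map_single(dev, buf, 512, DMA_TO_DEVICE);

	if (swiotlb_dma_mapping_error(bus))
		return -ENOMEM;	/* pool exhausted; only the overflow buffer was left */

	/* ... program the device with 'bus' and wait for completion ... */

	swiotlb_unmap_single(dev, bus, 512, DMA_TO_DEVICE);
	return 0;
}
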
++
++/*
++ * Unmap a single streaming mode DMA translation. The dma_addr and size must
++ * match what was provided for in a previous swiotlb_map_single call. All
++ * other usages are undefined.
++ *
++ * After this call, reads by the cpu to the buffer are guaranteed to see
++ * whatever the device wrote there.
++ */
++void
++swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
++ int dir)
++{
++ BUG_ON(dir == DMA_NONE);
++ if (in_swiotlb_aperture(dev_addr))
++ unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
++}
++
++/*
++ * Make physical memory consistent for a single streaming mode DMA translation
++ * after a transfer.
++ *
++ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
++ * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
++ * call this function before doing so. At the next point you give the PCI dma
++ * address back to the card, you must first perform a
++ * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
++ */
++void
++swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir)
++{
++ BUG_ON(dir == DMA_NONE);
++ if (in_swiotlb_aperture(dev_addr))
++ sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
++}
++
++void
++swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir)
++{
++ BUG_ON(dir == DMA_NONE);
++ if (in_swiotlb_aperture(dev_addr))
++ sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
++}
++
++/*
++ * Map a set of buffers described by scatterlist in streaming mode for DMA.
++ * This is the scatter-gather version of the above swiotlb_map_single
++ * interface. Here the scatter gather list elements are each tagged with the
++ * appropriate dma address and length. They are obtained via
++ * sg_dma_{address,length}(SG).
++ *
++ * NOTE: An implementation may be able to use a smaller number of
++ * DMA address/length pairs than there are SG table elements.
++ * (for example via virtual mapping capabilities)
++ * The routine returns the number of addr/length pairs actually
++ * used, at most nents.
++ *
++ * Device ownership issues as mentioned above for swiotlb_map_single are the
++ * same here.
++ */
++int
++swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++ int dir)
++{
++ struct phys_addr buffer;
++ dma_addr_t dev_addr;
++ char *map;
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++) {
++ dev_addr = SG_ENT_PHYS_ADDRESS(sg);
++ if (address_needs_mapping(hwdev, dev_addr)) {
++ buffer.page = sg->page;
++ buffer.offset = sg->offset;
++ map = map_single(hwdev, buffer, sg->length, dir);
++ if (!map) {
++ /* Don't panic here, we expect map_sg users
++ to do proper error handling. */
++ swiotlb_full(hwdev, sg->length, dir, 0);
++ swiotlb_unmap_sg(hwdev, sg - i, i, dir);
++ sg[0].dma_length = 0;
++ return 0;
++ }
++ sg->dma_address = (dma_addr_t)virt_to_bus(map);
++ } else
++ sg->dma_address = dev_addr;
++ sg->dma_length = sg->length;
++ }
++ return nelems;
++}
++
++/*
++ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
++ * concerning calls here are the same as for swiotlb_unmap_single() above.
++ */
++void
++swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++ int dir)
++{
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++)
++ if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++ unmap_single(hwdev,
++ (void *)bus_to_virt(sg->dma_address),
++ sg->dma_length, dir);
++}
++
++/*
++ * Make physical memory consistent for a set of streaming mode DMA translations
++ * after a transfer.
++ *
++ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
++ * and usage.
++ */
++void
++swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int dir)
++{
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++)
++ if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++ sync_single(hwdev,
++ (void *)bus_to_virt(sg->dma_address),
++ sg->dma_length, dir);
++}
++
++void
++swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int dir)
++{
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++)
++ if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++ sync_single(hwdev,
++ (void *)bus_to_virt(sg->dma_address),
++ sg->dma_length, dir);
++}
++
++#ifdef CONFIG_HIGHMEM
++
++dma_addr_t
++swiotlb_map_page(struct device *hwdev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction)
++{
++ struct phys_addr buffer;
++ dma_addr_t dev_addr;
++ char *map;
++
++ dev_addr = page_to_bus(page) + offset;
++ if (address_needs_mapping(hwdev, dev_addr)) {
++ buffer.page = page;
++ buffer.offset = offset;
++ map = map_single(hwdev, buffer, size, direction);
++ if (!map) {
++ swiotlb_full(hwdev, size, direction, 1);
++ map = io_tlb_overflow_buffer;
++ }
++ dev_addr = (dma_addr_t)virt_to_bus(map);
++ }
++
++ return dev_addr;
++}
++
++void
++swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
++ size_t size, enum dma_data_direction direction)
++{
++ BUG_ON(direction == DMA_NONE);
++ if (in_swiotlb_aperture(dma_address))
++ unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
++}
++
++#endif
++
++int
++swiotlb_dma_mapping_error(dma_addr_t dma_addr)
++{
++ return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
++}
++
++/*
++ * Return whether the given PCI device DMA address mask can be supported
++ * properly. For example, if your device can only drive the low 24-bits
++ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
++ * this function.
++ */
++int
++swiotlb_dma_supported (struct device *hwdev, u64 mask)
++{
++ return (mask >= ((1UL << dma_bits) - 1));
++}
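
With, say, dma_bits left at 31 after initialization (a hypothetical value; the real one depends on how the bounce pool was allocated), a 32-bit capable device (mask 0xffffffff) passes this test while a 24-bit ISA-style device (mask 0x00ffffff) does not. A minimal user-space sketch of that comparison:

#include <stdio.h>

int main(void)
{
	unsigned int dma_bits = 31;			/* assumed example value */
	unsigned long long limit = (1ULL << dma_bits) - 1;

	printf("%d\n", 0xffffffffULL >= limit);		/* 1: supported */
	printf("%d\n", 0x00ffffffULL >= limit);		/* 0: not supported */
	return 0;
}
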
++
++EXPORT_SYMBOL(swiotlb_init);
++EXPORT_SYMBOL(swiotlb_map_single);
++EXPORT_SYMBOL(swiotlb_unmap_single);
++EXPORT_SYMBOL(swiotlb_map_sg);
++EXPORT_SYMBOL(swiotlb_unmap_sg);
++EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_single_for_device);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
++EXPORT_SYMBOL(swiotlb_dma_mapping_error);
++EXPORT_SYMBOL(swiotlb_dma_supported);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/time-xen.c 2007-08-27 14:02:08.000000000 -0400
+@@ -0,0 +1,1141 @@
++/*
++ * linux/arch/i386/kernel/time.c
++ *
++ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
++ *
++ * This file contains the PC-specific time handling details:
++ * reading the RTC at bootup, etc..
++ * 1994-07-02 Alan Modra
++ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
++ * 1995-03-26 Markus Kuhn
++ * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
++ * precision CMOS clock update
++ * 1996-05-03 Ingo Molnar
++ * fixed time warps in do_[slow|fast]_gettimeoffset()
++ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
++ * "A Kernel Model for Precision Timekeeping" by Dave Mills
++ * 1998-09-05 (Various)
++ * More robust do_fast_gettimeoffset() algorithm implemented
++ * (works with APM, Cyrix 6x86MX and Centaur C6),
++ * monotonic gettimeofday() with fast_get_timeoffset(),
++ * drift-proof precision TSC calibration on boot
++ * (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
++ * Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
++ * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
++ * 1998-12-16 Andrea Arcangeli
++ * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
++ *	because it was not accounting lost_ticks.
++ * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
++ * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
++ * serialize accesses to xtime/lost_ticks).
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/param.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/time.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/smp.h>
++#include <linux/module.h>
++#include <linux/sysdev.h>
++#include <linux/bcd.h>
++#include <linux/efi.h>
++#include <linux/mca.h>
++#include <linux/sysctl.h>
++#include <linux/percpu.h>
++#include <linux/kernel_stat.h>
++#include <linux/posix-timers.h>
++
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/irq.h>
++#include <asm/msr.h>
++#include <asm/delay.h>
++#include <asm/mpspec.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++#include <asm/timer.h>
++#include <asm/sections.h>
++
++#include "mach_time.h"
++
++#include <linux/timex.h>
++
++#include <asm/hpet.h>
++
++#include <asm/arch_hooks.h>
++
++#include <xen/evtchn.h>
++#include <xen/interface/vcpu.h>
++
++#if defined (__i386__)
++#include <asm/i8259.h>
++#endif
++
++int pit_latch_buggy; /* extern */
++
++#if defined(__x86_64__)
++unsigned long vxtime_hz = PIT_TICK_RATE;
++struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */
++volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
++unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
++struct timespec __xtime __section_xtime;
++struct timezone __sys_tz __section_sys_tz;
++#endif
++
++unsigned int cpu_khz; /* Detected as we calibrate the TSC */
++EXPORT_SYMBOL(cpu_khz);
++
++extern unsigned long wall_jiffies;
++
++DEFINE_SPINLOCK(rtc_lock);
++EXPORT_SYMBOL(rtc_lock);
++
++extern struct init_timer_opts timer_tsc_init;
++extern struct timer_opts timer_tsc;
++#define timer_none timer_tsc
++
++/* These are periodically updated in shared_info, and then copied here. */
++struct shadow_time_info {
++ u64 tsc_timestamp; /* TSC at last update of time vals. */
++ u64 system_timestamp; /* Time, in nanosecs, since boot. */
++ u32 tsc_to_nsec_mul;
++ u32 tsc_to_usec_mul;
++ int tsc_shift;
++ u32 version;
++};
++static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
++static struct timespec shadow_tv;
++static u32 shadow_tv_version;
++
++/* Keep track of last time we did processing/updating of jiffies and xtime. */
++static u64 processed_system_time; /* System time (ns) at last processing. */
++static DEFINE_PER_CPU(u64, processed_system_time);
++
++/* How much CPU time was spent blocked and how much was 'stolen'? */
++static DEFINE_PER_CPU(u64, processed_stolen_time);
++static DEFINE_PER_CPU(u64, processed_blocked_time);
++
++/* Current runstate of each CPU (updated automatically by the hypervisor). */
++static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
++
++/* Must be signed, as it's compared with s64 quantities which can be -ve. */
++#define NS_PER_TICK (1000000000LL/HZ)
++
++static inline void __normalize_time(time_t *sec, s64 *nsec)
++{
++ while (*nsec >= NSEC_PER_SEC) {
++ (*nsec) -= NSEC_PER_SEC;
++ (*sec)++;
++ }
++ while (*nsec < 0) {
++ (*nsec) += NSEC_PER_SEC;
++ (*sec)--;
++ }
++}
++
++/* Does this guest OS track Xen time, or set its wall clock independently? */
++static int independent_wallclock = 0;
++static int __init __independent_wallclock(char *str)
++{
++ independent_wallclock = 1;
++ return 1;
++}
++__setup("independent_wallclock", __independent_wallclock);
++
++/* Permitted clock jitter, in nsecs, beyond which a warning will be printed. */
++static unsigned long permitted_clock_jitter = 10000000UL; /* 10ms */
++static int __init __permitted_clock_jitter(char *str)
++{
++ permitted_clock_jitter = simple_strtoul(str, NULL, 0);
++ return 1;
++}
++__setup("permitted_clock_jitter=", __permitted_clock_jitter);
++
++#if 0
++static void delay_tsc(unsigned long loops)
++{
++ unsigned long bclock, now;
++
++ rdtscl(bclock);
++ do {
++ rep_nop();
++ rdtscl(now);
++ } while ((now - bclock) < loops);
++}
++
++struct timer_opts timer_tsc = {
++ .name = "tsc",
++ .delay = delay_tsc,
++};
++#endif
++
++/*
++ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
++ * yielding a 64-bit result.
++ */
++static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
++{
++ u64 product;
++#ifdef __i386__
++ u32 tmp1, tmp2;
++#endif
++
++ if (shift < 0)
++ delta >>= -shift;
++ else
++ delta <<= shift;
++
++#ifdef __i386__
++ __asm__ (
++ "mul %5 ; "
++ "mov %4,%%eax ; "
++ "mov %%edx,%4 ; "
++ "mul %5 ; "
++ "xor %5,%5 ; "
++ "add %4,%%eax ; "
++ "adc %5,%%edx ; "
++ : "=A" (product), "=r" (tmp1), "=r" (tmp2)
++ : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
++#else
++ __asm__ (
++ "mul %%rdx ; shrd $32,%%rdx,%%rax"
++ : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
++#endif
++
++ return product;
++}
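
The assembly above computes the (shifted) delta times mul_frac divided by 2^32 without losing the high bits of the 96-bit intermediate product. A portable sketch of the same arithmetic using gcc's unsigned __int128 (user-space, purely illustrative, not a drop-in replacement); the numeric example assumes, for illustration only, shift = -1 and mul_frac = 2863311531 (about 2/3 * 2^32), which maps one second's worth of cycles on a 3 GHz TSC to roughly 1e9 ns:

#include <stdio.h>
#include <stdint.h>

/* Same math as above: (delta, shifted) * mul_frac / 2^32, keeping the
 * high bits of the wide intermediate product. */
static uint64_t scale_delta_portable(uint64_t delta, uint32_t mul_frac, int shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;
	return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}

int main(void)
{
	/* 3e9 TSC cycles scaled with shift=-1, mul_frac~2/3*2^32 -> ~1e9 ns */
	printf("%llu\n", (unsigned long long)
	       scale_delta_portable(3000000000ULL, 2863311531U, -1));
	return 0;
}
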
++
++#if 0 /* defined (__i386__) */
++int read_current_timer(unsigned long *timer_val)
++{
++ rdtscl(*timer_val);
++ return 0;
++}
++#endif
++
++void init_cpu_khz(void)
++{
++ u64 __cpu_khz = 1000000ULL << 32;
++ struct vcpu_time_info *info = &vcpu_info(0)->time;
++ do_div(__cpu_khz, info->tsc_to_system_mul);
++ if (info->tsc_shift < 0)
++ cpu_khz = __cpu_khz << -info->tsc_shift;
++ else
++ cpu_khz = __cpu_khz >> info->tsc_shift;
++}
++
++static u64 get_nsec_offset(struct shadow_time_info *shadow)
++{
++ u64 now, delta;
++ rdtscll(now);
++ delta = now - shadow->tsc_timestamp;
++ return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
++}
++
++static unsigned long get_usec_offset(struct shadow_time_info *shadow)
++{
++ u64 now, delta;
++ rdtscll(now);
++ delta = now - shadow->tsc_timestamp;
++ return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
++}
++
++static void __update_wallclock(time_t sec, long nsec)
++{
++ long wtm_nsec, xtime_nsec;
++ time_t wtm_sec, xtime_sec;
++ u64 tmp, wc_nsec;
++
++ /* Adjust wall-clock time base based on wall_jiffies ticks. */
++ wc_nsec = processed_system_time;
++ wc_nsec += sec * (u64)NSEC_PER_SEC;
++ wc_nsec += nsec;
++ wc_nsec -= (jiffies - wall_jiffies) * (u64)NS_PER_TICK;
++
++ /* Split wallclock base into seconds and nanoseconds. */
++ tmp = wc_nsec;
++ xtime_nsec = do_div(tmp, 1000000000);
++ xtime_sec = (time_t)tmp;
++
++ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
++ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
++
++ set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
++ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
++
++ ntp_clear();
++}
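
The seconds/nanoseconds split above relies on do_div(), which divides the 64-bit value in place and returns the remainder; the same split in plain user-space C, with / and % standing in for do_div (illustrative numbers only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t wc_nsec = 1234567890123ULL;	/* some wallclock base in ns */
	uint64_t sec  = wc_nsec / 1000000000ULL;
	uint64_t nsec = wc_nsec % 1000000000ULL;

	/* prints: 1234 s + 567890123 ns */
	printf("%llu s + %llu ns\n",
	       (unsigned long long)sec, (unsigned long long)nsec);
	return 0;
}
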
++
++static void update_wallclock(void)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++
++ do {
++ shadow_tv_version = s->wc_version;
++ rmb();
++ shadow_tv.tv_sec = s->wc_sec;
++ shadow_tv.tv_nsec = s->wc_nsec;
++ rmb();
++ } while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version));
++
++ if (!independent_wallclock)
++ __update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec);
++}
++
++/*
++ * Reads a consistent set of time-base values from Xen, into a shadow data
++ * area.
++ */
++static void get_time_values_from_xen(int cpu)
++{
++ struct vcpu_time_info *src;
++ struct shadow_time_info *dst;
++
++ src = &vcpu_info(cpu)->time;
++ dst = &per_cpu(shadow_time, cpu);
++
++ do {
++ dst->version = src->version;
++ rmb();
++ dst->tsc_timestamp = src->tsc_timestamp;
++ dst->system_timestamp = src->system_time;
++ dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
++ dst->tsc_shift = src->tsc_shift;
++ rmb();
++ } while ((src->version & 1) | (dst->version ^ src->version));
++
++ dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000;
++}
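
The version dance above is the usual Xen lock-free snapshot protocol: the producer bumps the version before and after writing (so it is odd while an update is in flight), and the consumer retries while the version is odd or has changed underneath it. A generic, stand-alone sketch of that reader loop (hypothetical structure and helper, user-space, with gcc's __sync_synchronize() standing in for rmb()):

#include <stdint.h>

struct sample {
	volatile uint32_t version;	/* odd while the writer is mid-update */
	uint64_t payload;
};

/* Retry until a consistent snapshot of 'src' has been copied. */
static uint64_t read_consistent(const struct sample *src)
{
	uint32_t ver;
	uint64_t val;

	do {
		ver = src->version;
		__sync_synchronize();	/* read barrier, like rmb() above */
		val = src->payload;
		__sync_synchronize();
	} while ((src->version & 1) | (ver ^ src->version));

	return val;
}

int main(void)
{
	struct sample s = { .version = 2, .payload = 42 };
	return read_consistent(&s) == 42 ? 0 : 1;
}
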
++
++static inline int time_values_up_to_date(int cpu)
++{
++ struct vcpu_time_info *src;
++ struct shadow_time_info *dst;
++
++ src = &vcpu_info(cpu)->time;
++ dst = &per_cpu(shadow_time, cpu);
++
++ rmb();
++ return (dst->version == src->version);
++}
++
++/*
++ * This is a special lock that is owned by the CPU and holds the index
++ * register we are working with. It is required for NMI access to the
++ * CMOS/RTC registers. See include/asm-i386/mc146818rtc.h for details.
++ */
++volatile unsigned long cmos_lock = 0;
++EXPORT_SYMBOL(cmos_lock);
++
++/* Routines for accessing the CMOS RAM/RTC. */
++unsigned char rtc_cmos_read(unsigned char addr)
++{
++ unsigned char val;
++ lock_cmos_prefix(addr);
++ outb_p(addr, RTC_PORT(0));
++ val = inb_p(RTC_PORT(1));
++ lock_cmos_suffix(addr);
++ return val;
++}
++EXPORT_SYMBOL(rtc_cmos_read);
++
++void rtc_cmos_write(unsigned char val, unsigned char addr)
++{
++ lock_cmos_prefix(addr);
++ outb_p(addr, RTC_PORT(0));
++ outb_p(val, RTC_PORT(1));
++ lock_cmos_suffix(addr);
++}
++EXPORT_SYMBOL(rtc_cmos_write);
++
++/*
++ * This version of gettimeofday has microsecond resolution
++ * and better than microsecond precision on fast x86 machines with TSC.
++ */
++void do_gettimeofday(struct timeval *tv)
++{
++ unsigned long seq;
++ unsigned long usec, sec;
++ unsigned long max_ntp_tick;
++ s64 nsec;
++ unsigned int cpu;
++ struct shadow_time_info *shadow;
++ u32 local_time_version;
++
++ cpu = get_cpu();
++ shadow = &per_cpu(shadow_time, cpu);
++
++ do {
++ unsigned long lost;
++
++ local_time_version = shadow->version;
++ seq = read_seqbegin(&xtime_lock);
++
++ usec = get_usec_offset(shadow);
++ lost = jiffies - wall_jiffies;
++
++ /*
++		 * If time_adjust is negative then NTP is slowing the clock,
++		 * so make sure not to go into the next possible interval.
++		 * Better to lose some accuracy than have time go backwards.
++ */
++ if (unlikely(time_adjust < 0)) {
++ max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
++ usec = min(usec, max_ntp_tick);
++
++ if (lost)
++ usec += lost * max_ntp_tick;
++ }
++ else if (unlikely(lost))
++ usec += lost * (USEC_PER_SEC / HZ);
++
++ sec = xtime.tv_sec;
++ usec += (xtime.tv_nsec / NSEC_PER_USEC);
++
++ nsec = shadow->system_timestamp - processed_system_time;
++ __normalize_time(&sec, &nsec);
++ usec += (long)nsec / NSEC_PER_USEC;
++
++ if (unlikely(!time_values_up_to_date(cpu))) {
++ /*
++ * We may have blocked for a long time,
++ * rendering our calculations invalid
++ * (e.g. the time delta may have
++ * overflowed). Detect that and recalculate
++ * with fresh values.
++ */
++ get_time_values_from_xen(cpu);
++ continue;
++ }
++ } while (read_seqretry(&xtime_lock, seq) ||
++ (local_time_version != shadow->version));
++
++ put_cpu();
++
++ while (usec >= USEC_PER_SEC) {
++ usec -= USEC_PER_SEC;
++ sec++;
++ }
++
++ tv->tv_sec = sec;
++ tv->tv_usec = usec;
++}
++
++EXPORT_SYMBOL(do_gettimeofday);
++
++int do_settimeofday(struct timespec *tv)
++{
++ time_t sec;
++ s64 nsec;
++ unsigned int cpu;
++ struct shadow_time_info *shadow;
++ struct xen_platform_op op;
++
++ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
++ return -EINVAL;
++
++ cpu = get_cpu();
++ shadow = &per_cpu(shadow_time, cpu);
++
++ write_seqlock_irq(&xtime_lock);
++
++ /*
++	 * We may have been blocked for a long time, so our time delta may
++	 * have overflowed. If that has happened then our shadow time values
++	 * are stale, so refresh them and retry.
++ */
++ for (;;) {
++ nsec = tv->tv_nsec - get_nsec_offset(shadow);
++ if (time_values_up_to_date(cpu))
++ break;
++ get_time_values_from_xen(cpu);
++ }
++ sec = tv->tv_sec;
++ __normalize_time(&sec, &nsec);
++
++ if (is_initial_xendomain() && !independent_wallclock) {
++ op.cmd = XENPF_settime;
++ op.u.settime.secs = sec;
++ op.u.settime.nsecs = nsec;
++ op.u.settime.system_time = shadow->system_timestamp;
++ HYPERVISOR_platform_op(&op);
++ update_wallclock();
++ } else if (independent_wallclock) {
++ nsec -= shadow->system_timestamp;
++ __normalize_time(&sec, &nsec);
++ __update_wallclock(sec, nsec);
++ }
++
++ write_sequnlock_irq(&xtime_lock);
++
++ put_cpu();
++
++ clock_was_set();
++ return 0;
++}
++
++EXPORT_SYMBOL(do_settimeofday);
++
++static void sync_xen_wallclock(unsigned long dummy);
++static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
++static void sync_xen_wallclock(unsigned long dummy)
++{
++ time_t sec;
++ s64 nsec;
++ struct xen_platform_op op;
++
++ if (!ntp_synced() || independent_wallclock || !is_initial_xendomain())
++ return;
++
++ write_seqlock_irq(&xtime_lock);
++
++ sec = xtime.tv_sec;
++ nsec = xtime.tv_nsec + ((jiffies - wall_jiffies) * (u64)NS_PER_TICK);
++ __normalize_time(&sec, &nsec);
++
++ op.cmd = XENPF_settime;
++ op.u.settime.secs = sec;
++ op.u.settime.nsecs = nsec;
++ op.u.settime.system_time = processed_system_time;
++ HYPERVISOR_platform_op(&op);
++
++ update_wallclock();
++
++ write_sequnlock_irq(&xtime_lock);
++
++ /* Once per minute. */
++ mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ);
++}
++
++static int set_rtc_mmss(unsigned long nowtime)
++{
++ int retval;
++ unsigned long flags;
++
++ if (independent_wallclock || !is_initial_xendomain())
++ return 0;
++
++ /* gets recalled with irq locally disabled */
++ /* XXX - does irqsave resolve this? -johnstul */
++ spin_lock_irqsave(&rtc_lock, flags);
++ if (efi_enabled)
++ retval = efi_set_rtc_mmss(nowtime);
++ else
++ retval = mach_set_rtc_mmss(nowtime);
++ spin_unlock_irqrestore(&rtc_lock, flags);
++
++ return retval;
++}
++
++/* monotonic_clock(): returns # of nanoseconds passed since time_init()
++ * Note: This function is required to return accurate
++ * time even in the absence of multiple timer ticks.
++ */
++unsigned long long monotonic_clock(void)
++{
++ int cpu = get_cpu();
++ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++ u64 time;
++ u32 local_time_version;
++
++ do {
++ local_time_version = shadow->version;
++ barrier();
++ time = shadow->system_timestamp + get_nsec_offset(shadow);
++ if (!time_values_up_to_date(cpu))
++ get_time_values_from_xen(cpu);
++ barrier();
++ } while (local_time_version != shadow->version);
++
++ put_cpu();
++
++ return time;
++}
++EXPORT_SYMBOL(monotonic_clock);
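
Because it extrapolates from the shadow TSC values rather than waiting for timer ticks, monotonic_clock() can serve as a free-running nanosecond timestamp; a purely illustrative caller (hypothetical, not from this patch):

#include <linux/delay.h>

extern unsigned long long monotonic_clock(void);

/* Illustrative: measure how long an mdelay(5) busy-wait really took. */
static unsigned long long demo_measure_5ms(void)
{
	unsigned long long t0 = monotonic_clock();

	mdelay(5);
	return monotonic_clock() - t0;	/* elapsed ns, roughly 5,000,000 */
}
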
++
++#ifdef __x86_64__
++unsigned long long sched_clock(void)
++{
++ return monotonic_clock();
++}
++#endif
++
++#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
++unsigned long profile_pc(struct pt_regs *regs)
++{
++ unsigned long pc = instruction_pointer(regs);
++
++#ifdef __x86_64__
++ /* Assume the lock function has either no stack frame or only a single word.
++ This checks if the address on the stack looks like a kernel text address.
++ There is a small window for false hits, but in that case the tick
++ is just accounted to the spinlock function.
++ Better would be to write these functions in assembler again
++ and check exactly. */
++ if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++ char *v = *(char **)regs->rsp;
++ if ((v >= _stext && v <= _etext) ||
++ (v >= _sinittext && v <= _einittext) ||
++ (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
++ return (unsigned long)v;
++ return ((unsigned long *)regs->rsp)[1];
++ }
++#else
++ if (!user_mode_vm(regs) && in_lock_functions(pc))
++ return *(unsigned long *)(regs->ebp + 4);
++#endif
++
++ return pc;
++}
++EXPORT_SYMBOL(profile_pc);
++#endif
++
++/*
++ * This is the same as the above, except we _also_ save the current
++ * Time Stamp Counter value at the time of the timer interrupt, so that
++ * we later on can estimate the time of day more exactly.
++ */
++irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
++{
++ s64 delta, delta_cpu, stolen, blocked;
++ u64 sched_time;
++ int i, cpu = smp_processor_id();
++ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++
++ /*
++ * Here we are in the timer irq handler. We just have irqs locally
++ * disabled but we don't know if the timer_bh is running on the other
++	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
++ * the irq version of write_lock because as just said we have irq
++ * locally disabled. -arca
++ */
++ write_seqlock(&xtime_lock);
++
++ do {
++ get_time_values_from_xen(cpu);
++
++ /* Obtain a consistent snapshot of elapsed wallclock cycles. */
++ delta = delta_cpu =
++ shadow->system_timestamp + get_nsec_offset(shadow);
++ delta -= processed_system_time;
++ delta_cpu -= per_cpu(processed_system_time, cpu);
++
++ /*
++ * Obtain a consistent snapshot of stolen/blocked cycles. We
++ * can use state_entry_time to detect if we get preempted here.
++ */
++ do {
++ sched_time = runstate->state_entry_time;
++ barrier();
++ stolen = runstate->time[RUNSTATE_runnable] +
++ runstate->time[RUNSTATE_offline] -
++ per_cpu(processed_stolen_time, cpu);
++ blocked = runstate->time[RUNSTATE_blocked] -
++ per_cpu(processed_blocked_time, cpu);
++ barrier();
++ } while (sched_time != runstate->state_entry_time);
++ } while (!time_values_up_to_date(cpu));
++
++ if ((unlikely(delta < -(s64)permitted_clock_jitter) ||
++ unlikely(delta_cpu < -(s64)permitted_clock_jitter))
++ && printk_ratelimit()) {
++ printk("Timer ISR/%d: Time went backwards: "
++ "delta=%lld delta_cpu=%lld shadow=%lld "
++ "off=%lld processed=%lld cpu_processed=%lld\n",
++ cpu, delta, delta_cpu, shadow->system_timestamp,
++ (s64)get_nsec_offset(shadow),
++ processed_system_time,
++ per_cpu(processed_system_time, cpu));
++ for (i = 0; i < num_online_cpus(); i++)
++ printk(" %d: %lld\n", i,
++ per_cpu(processed_system_time, i));
++ }
++
++ /* System-wide jiffy work. */
++ while (delta >= NS_PER_TICK) {
++ delta -= NS_PER_TICK;
++ processed_system_time += NS_PER_TICK;
++ do_timer(regs);
++ }
++
++ if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
++ update_wallclock();
++ clock_was_set();
++ }
++
++ write_sequnlock(&xtime_lock);
++
++ /*
++ * Account stolen ticks.
++ * HACK: Passing NULL to account_steal_time()
++ * ensures that the ticks are accounted as stolen.
++ */
++ if ((stolen > 0) && (delta_cpu > 0)) {
++ delta_cpu -= stolen;
++ if (unlikely(delta_cpu < 0))
++ stolen += delta_cpu; /* clamp local-time progress */
++ do_div(stolen, NS_PER_TICK);
++ per_cpu(processed_stolen_time, cpu) += stolen * NS_PER_TICK;
++ per_cpu(processed_system_time, cpu) += stolen * NS_PER_TICK;
++ account_steal_time(NULL, (cputime_t)stolen);
++ }
++
++ /*
++ * Account blocked ticks.
++ * HACK: Passing idle_task to account_steal_time()
++ * ensures that the ticks are accounted as idle/wait.
++ */
++ if ((blocked > 0) && (delta_cpu > 0)) {
++ delta_cpu -= blocked;
++ if (unlikely(delta_cpu < 0))
++ blocked += delta_cpu; /* clamp local-time progress */
++ do_div(blocked, NS_PER_TICK);
++ per_cpu(processed_blocked_time, cpu) += blocked * NS_PER_TICK;
++ per_cpu(processed_system_time, cpu) += blocked * NS_PER_TICK;
++ account_steal_time(idle_task(cpu), (cputime_t)blocked);
++ }
++
++ /* Account user/system ticks. */
++ if (delta_cpu > 0) {
++ do_div(delta_cpu, NS_PER_TICK);
++ per_cpu(processed_system_time, cpu) += delta_cpu * NS_PER_TICK;
++ if (user_mode_vm(regs))
++ account_user_time(current, (cputime_t)delta_cpu);
++ else
++ account_system_time(current, HARDIRQ_OFFSET,
++ (cputime_t)delta_cpu);
++ }
++
++ /* Offlined for more than a few seconds? Avoid lockup warnings. */
++ if (stolen > 5*HZ)
++ touch_softlockup_watchdog();
++
++ /* Local timer processing (see update_process_times()). */
++ run_local_timers();
++ if (rcu_pending(cpu))
++ rcu_check_callbacks(cpu, user_mode_vm(regs));
++ scheduler_tick();
++ run_posix_cpu_timers(current);
++ profile_tick(CPU_PROFILING, regs);
++
++ return IRQ_HANDLED;
++}
++
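++/*
++ * Register this VCPU's runstate area with Xen and snapshot the stolen and
++ * blocked time accumulated so far, so that timer_interrupt() only accounts
++ * what is stolen or blocked from this point on.
++ */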
++static void init_missing_ticks_accounting(int cpu)
++{
++ struct vcpu_register_runstate_memory_area area;
++ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++
++ memset(runstate, 0, sizeof(*runstate));
++
++ area.addr.v = runstate;
++ HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
++
++ per_cpu(processed_blocked_time, cpu) =
++ runstate->time[RUNSTATE_blocked];
++ per_cpu(processed_stolen_time, cpu) =
++ runstate->time[RUNSTATE_runnable] +
++ runstate->time[RUNSTATE_offline];
++}
++
++/* not static: needed by APM */
++unsigned long get_cmos_time(void)
++{
++ unsigned long retval;
++ unsigned long flags;
++
++ spin_lock_irqsave(&rtc_lock, flags);
++
++ if (efi_enabled)
++ retval = efi_get_time();
++ else
++ retval = mach_get_cmos_time();
++
++ spin_unlock_irqrestore(&rtc_lock, flags);
++
++ return retval;
++}
++EXPORT_SYMBOL(get_cmos_time);
++
++static void sync_cmos_clock(unsigned long dummy);
++
++static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
++
++static void sync_cmos_clock(unsigned long dummy)
++{
++ struct timeval now, next;
++ int fail = 1;
++
++ /*
++ * If we have an externally synchronized Linux clock, then update
++ * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
++ * called as close as possible to 500 ms before the new second starts.
++ * This code is run on a timer. If the clock is set, that timer
++ * may not expire at the correct time. Thus, we adjust...
++ */
++ if (!ntp_synced())
++ /*
++ * Not synced, exit, do not restart a timer (if one is
++ * running, let it run out).
++ */
++ return;
++
++ do_gettimeofday(&now);
++ if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
++ now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
++ fail = set_rtc_mmss(now.tv_sec);
++
++ next.tv_usec = USEC_AFTER - now.tv_usec;
++ if (next.tv_usec <= 0)
++ next.tv_usec += USEC_PER_SEC;
++
++ if (!fail)
++ next.tv_sec = 659;
++ else
++ next.tv_sec = 0;
++
++ if (next.tv_usec >= USEC_PER_SEC) {
++ next.tv_sec++;
++ next.tv_usec -= USEC_PER_SEC;
++ }
++ mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
++}
++
++void notify_arch_cmos_timer(void)
++{
++ mod_timer(&sync_cmos_timer, jiffies + 1);
++ mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
++}
++
++static long clock_cmos_diff, sleep_start;
++
++static int timer_suspend(struct sys_device *dev, pm_message_t state)
++{
++ /*
++ * Estimate time zone so that set_time can update the clock
++ */
++ clock_cmos_diff = -get_cmos_time();
++ clock_cmos_diff += get_seconds();
++ sleep_start = get_cmos_time();
++ return 0;
++}
++
++static int timer_resume(struct sys_device *dev)
++{
++ unsigned long flags;
++ unsigned long sec;
++ unsigned long sleep_length;
++
++#ifdef CONFIG_HPET_TIMER
++ if (is_hpet_enabled())
++ hpet_reenable();
++#endif
++ sec = get_cmos_time() + clock_cmos_diff;
++ sleep_length = (get_cmos_time() - sleep_start) * HZ;
++ write_seqlock_irqsave(&xtime_lock, flags);
++ xtime.tv_sec = sec;
++ xtime.tv_nsec = 0;
++ jiffies_64 += sleep_length;
++ wall_jiffies += sleep_length;
++ write_sequnlock_irqrestore(&xtime_lock, flags);
++ touch_softlockup_watchdog();
++ return 0;
++}
++
++static struct sysdev_class timer_sysclass = {
++ .resume = timer_resume,
++ .suspend = timer_suspend,
++ set_kset_name("timer"),
++};
++
++
++/* XXX this driverfs stuff should probably go elsewhere later -john */
++static struct sys_device device_timer = {
++ .id = 0,
++ .cls = &timer_sysclass,
++};
++
++static int time_init_device(void)
++{
++ int error = sysdev_class_register(&timer_sysclass);
++ if (!error)
++ error = sysdev_register(&device_timer);
++ return error;
++}
++
++device_initcall(time_init_device);
++
++#ifdef CONFIG_HPET_TIMER
++extern void (*late_time_init)(void);
++/* Duplicate of time_init() below, with hpet_enable part added */
++static void __init hpet_time_init(void)
++{
++ xtime.tv_sec = get_cmos_time();
++ xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
++ set_normalized_timespec(&wall_to_monotonic,
++ -xtime.tv_sec, -xtime.tv_nsec);
++
++ if ((hpet_enable() >= 0) && hpet_use_timer) {
++ printk("Using HPET for base-timer\n");
++ }
++
++ time_init_hook();
++}
++#endif
++
++/* Dynamically-mapped IRQ. */
++DEFINE_PER_CPU(int, timer_irq);
++
++extern void (*late_time_init)(void);
++static void setup_cpu0_timer_irq(void)
++{
++ per_cpu(timer_irq, 0) =
++ bind_virq_to_irqhandler(
++ VIRQ_TIMER,
++ 0,
++ timer_interrupt,
++ SA_INTERRUPT,
++ "timer0",
++ NULL);
++ BUG_ON(per_cpu(timer_irq, 0) < 0);
++}
++
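++/* Periodic tick request handed to VCPUOP_set_periodic_timer (see time_init(),
++ * time_resume() and local_setup_timer()): Xen raises VIRQ_TIMER on the VCPU
++ * every NS_PER_TICK nanoseconds. */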
++static struct vcpu_set_periodic_timer xen_set_periodic_tick = {
++ .period_ns = NS_PER_TICK
++};
++
++void __init time_init(void)
++{
++#ifdef CONFIG_HPET_TIMER
++ if (is_hpet_capable()) {
++ /*
++ * HPET initialization needs to do memory-mapped io. So, let
++ * us do a late initialization after mem_init().
++ */
++ late_time_init = hpet_time_init;
++ return;
++ }
++#endif
++
++ HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, 0,
++ &xen_set_periodic_tick);
++
++ get_time_values_from_xen(0);
++
++ processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++ per_cpu(processed_system_time, 0) = processed_system_time;
++ init_missing_ticks_accounting(0);
++
++ update_wallclock();
++
++ init_cpu_khz();
++ printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
++ cpu_khz / 1000, cpu_khz % 1000);
++
++#if defined(__x86_64__)
++ vxtime.mode = VXTIME_TSC;
++ vxtime.quot = (1000000L << 32) / vxtime_hz;
++ vxtime.tsc_quot = (1000L << 32) / cpu_khz;
++ sync_core();
++ rdtscll(vxtime.last_tsc);
++#endif
++
++ /* Cannot request_irq() until kmem is initialised. */
++ late_time_init = setup_cpu0_timer_irq;
++}
++
++/* Convert jiffies to system time. */
++u64 jiffies_to_st(unsigned long j)
++{
++ unsigned long seq;
++ long delta;
++ u64 st;
++
++ do {
++ seq = read_seqbegin(&xtime_lock);
++ delta = j - jiffies;
++ if (delta < 1) {
++ /* Triggers in some wrap-around cases, but that's okay:
++ * we just end up with a shorter timeout. */
++ st = processed_system_time + NS_PER_TICK;
++ } else if (((unsigned long)delta >> (BITS_PER_LONG-3)) != 0) {
++ /* Very long timeout means there is no pending timer.
++ * We indicate this to Xen by passing zero timeout. */
++ st = 0;
++ } else {
++ st = processed_system_time + delta * (u64)NS_PER_TICK;
++ }
++ } while (read_seqretry(&xtime_lock, seq));
++
++ return st;
++}
++EXPORT_SYMBOL(jiffies_to_st);
++
++/*
++ * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
++ * These functions are based on implementations from arch/s390/kernel/time.c
++ */
++static void stop_hz_timer(void)
++{
++ struct vcpu_set_singleshot_timer singleshot;
++ unsigned int cpu = smp_processor_id();
++ unsigned long j;
++ int rc;
++
++ cpu_set(cpu, nohz_cpu_mask);
++
++ /* See matching smp_mb in rcu_start_batch in rcupdate.c. These mbs */
++ /* ensure that if __rcu_pending (nested in rcu_needs_cpu) fetches a */
++ /* value of rcp->cur that matches rdp->quiescbatch and allows us to */
++ /* stop the hz timer then the cpumasks created for subsequent values */
++ /* of cur in rcu_start_batch are guaranteed to pick up the updated */
++ /* nohz_cpu_mask and so will not depend on this cpu. */
++
++ smp_mb();
++
++ /* Leave ourselves in tick mode if rcu or softirq or timer pending. */
++ if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
++ (j = next_timer_interrupt(), time_before_eq(j, jiffies))) {
++ cpu_clear(cpu, nohz_cpu_mask);
++ j = jiffies + 1;
++ }
++
++ singleshot.timeout_abs_ns = jiffies_to_st(j);
++ singleshot.flags = 0;
++ rc = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &singleshot);
++#if CONFIG_XEN_COMPAT <= 0x030004
++ if (rc) {
++ BUG_ON(rc != -ENOSYS);
++ rc = HYPERVISOR_set_timer_op(singleshot.timeout_abs_ns);
++ }
++#endif
++ BUG_ON(rc);
++}
++
++static void start_hz_timer(void)
++{
++ cpu_clear(smp_processor_id(), nohz_cpu_mask);
++}
++
++void raw_safe_halt(void)
++{
++ stop_hz_timer();
++ /* Blocking includes an implicit local_irq_enable(). */
++ HYPERVISOR_block();
++ start_hz_timer();
++}
++EXPORT_SYMBOL(raw_safe_halt);
++
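++/* A VCPU halted with interrupts disabled could never be woken, so take it
++ * down via VCPUOP_down instead of spinning. */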
++void halt(void)
++{
++ if (irqs_disabled())
++ HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
++}
++EXPORT_SYMBOL(halt);
++
++/* No locking required. Interrupts are disabled on all CPUs. */
++void time_resume(void)
++{
++ unsigned int cpu;
++
++ init_cpu_khz();
++
++ for_each_online_cpu(cpu) {
++ HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
++ &xen_set_periodic_tick);
++ get_time_values_from_xen(cpu);
++ per_cpu(processed_system_time, cpu) =
++ per_cpu(shadow_time, 0).system_timestamp;
++ init_missing_ticks_accounting(cpu);
++ }
++
++ processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++
++ update_wallclock();
++}
++
++#ifdef CONFIG_SMP
++static char timer_name[NR_CPUS][15];
++
++int local_setup_timer(unsigned int cpu)
++{
++ int seq, irq;
++
++ BUG_ON(cpu == 0);
++
++ HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
++ &xen_set_periodic_tick);
++
++ do {
++ seq = read_seqbegin(&xtime_lock);
++ /* Use cpu0 timestamp: cpu's shadow is not initialised yet. */
++ per_cpu(processed_system_time, cpu) =
++ per_cpu(shadow_time, 0).system_timestamp;
++ init_missing_ticks_accounting(cpu);
++ } while (read_seqretry(&xtime_lock, seq));
++
++ sprintf(timer_name[cpu], "timer%d", cpu);
++ irq = bind_virq_to_irqhandler(VIRQ_TIMER,
++ cpu,
++ timer_interrupt,
++ SA_INTERRUPT,
++ timer_name[cpu],
++ NULL);
++ if (irq < 0)
++ return irq;
++ per_cpu(timer_irq, cpu) = irq;
++
++ return 0;
++}
++
++void local_teardown_timer(unsigned int cpu)
++{
++ BUG_ON(cpu == 0);
++ unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
++}
++#endif
++
++/*
++ * /proc/sys/xen: This really belongs in another file. It can stay here for
++ * now however.
++ */
++static ctl_table xen_subtable[] = {
++ {
++ .ctl_name = 1,
++ .procname = "independent_wallclock",
++ .data = &independent_wallclock,
++ .maxlen = sizeof(independent_wallclock),
++ .mode = 0644,
++ .proc_handler = proc_dointvec
++ },
++ {
++ .ctl_name = 2,
++ .procname = "permitted_clock_jitter",
++ .data = &permitted_clock_jitter,
++ .maxlen = sizeof(permitted_clock_jitter),
++ .mode = 0644,
++ .proc_handler = proc_doulongvec_minmax
++ },
++ { 0 }
++};
++static ctl_table xen_table[] = {
++ {
++ .ctl_name = 123,
++ .procname = "xen",
++ .mode = 0555,
++ .child = xen_subtable},
++ { 0 }
++};
++static int __init xen_sysctl_init(void)
++{
++ (void)register_sysctl_table(xen_table, 0);
++ return 0;
++}
++__initcall(xen_sysctl_init);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/traps-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,1186 @@
++/*
++ * linux/arch/i386/traps.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ *
++ * Pentium III FXSR, SSE support
++ * Gareth Hughes <gareth@valinux.com>, May 2000
++ */
++
++/*
++ * 'Traps.c' handles hardware traps and faults after we have saved some
++ * state in 'asm.s'.
++ */
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/timer.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/highmem.h>
++#include <linux/kallsyms.h>
++#include <linux/ptrace.h>
++#include <linux/utsname.h>
++#include <linux/kprobes.h>
++#include <linux/kexec.h>
++#include <linux/unwind.h>
++
++#ifdef CONFIG_EISA
++#include <linux/ioport.h>
++#include <linux/eisa.h>
++#endif
++
++#ifdef CONFIG_MCA
++#include <linux/mca.h>
++#endif
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/debugreg.h>
++#include <asm/desc.h>
++#include <asm/i387.h>
++#include <asm/nmi.h>
++#include <asm/unwind.h>
++#include <asm/smp.h>
++#include <asm/arch_hooks.h>
++#include <asm/kdebug.h>
++
++#include <linux/module.h>
++
++#include "mach_traps.h"
++
++asmlinkage int system_call(void);
++
++struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
++ { 0, 0 }, { 0, 0 } };
++
++/* Do we ignore FPU interrupts ? */
++char ignore_fpu_irq = 0;
++
++#ifndef CONFIG_X86_NO_IDT
++/*
++ * The IDT has to be page-aligned to simplify the Pentium
++ * F0 0F bug workaround.. We have a special link segment
++ * for this.
++ */
++struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
++#endif
++
++asmlinkage void divide_error(void);
++asmlinkage void debug(void);
++asmlinkage void nmi(void);
++asmlinkage void int3(void);
++asmlinkage void overflow(void);
++asmlinkage void bounds(void);
++asmlinkage void invalid_op(void);
++asmlinkage void device_not_available(void);
++asmlinkage void coprocessor_segment_overrun(void);
++asmlinkage void invalid_TSS(void);
++asmlinkage void segment_not_present(void);
++asmlinkage void stack_segment(void);
++asmlinkage void general_protection(void);
++asmlinkage void page_fault(void);
++asmlinkage void coprocessor_error(void);
++asmlinkage void simd_coprocessor_error(void);
++asmlinkage void alignment_check(void);
++#ifndef CONFIG_XEN
++asmlinkage void spurious_interrupt_bug(void);
++#else
++asmlinkage void fixup_4gb_segment(void);
++#endif
++asmlinkage void machine_check(void);
++
++static int kstack_depth_to_print = 24;
++#ifdef CONFIG_STACK_UNWIND
++static int call_trace = 1;
++#else
++#define call_trace (-1)
++#endif
++ATOMIC_NOTIFIER_HEAD(i386die_chain);
++
++int register_die_notifier(struct notifier_block *nb)
++{
++ vmalloc_sync_all();
++ return atomic_notifier_chain_register(&i386die_chain, nb);
++}
++EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
++
++int unregister_die_notifier(struct notifier_block *nb)
++{
++ return atomic_notifier_chain_unregister(&i386die_chain, nb);
++}
++EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
++
++static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
++{
++ return p > (void *)tinfo &&
++ p < (void *)tinfo + THREAD_SIZE - 3;
++}
++
++/*
++ * Print one address/symbol entry per line.
++ */
++static inline void print_addr_and_symbol(unsigned long addr, char *log_lvl)
++{
++ printk(" [<%08lx>] ", addr);
++
++ print_symbol("%s\n", addr);
++}
++
++static inline unsigned long print_context_stack(struct thread_info *tinfo,
++ unsigned long *stack, unsigned long ebp,
++ char *log_lvl)
++{
++ unsigned long addr;
++
++#ifdef CONFIG_FRAME_POINTER
++ while (valid_stack_ptr(tinfo, (void *)ebp)) {
++ addr = *(unsigned long *)(ebp + 4);
++ print_addr_and_symbol(addr, log_lvl);
++ /*
++ * break out of recursive entries (such as
++ * end_of_stack_stop_unwind_function):
++ */
++ if (ebp == *(unsigned long *)ebp)
++ break;
++ ebp = *(unsigned long *)ebp;
++ }
++#else
++ while (valid_stack_ptr(tinfo, stack)) {
++ addr = *stack++;
++ if (__kernel_text_address(addr))
++ print_addr_and_symbol(addr, log_lvl);
++ }
++#endif
++ return ebp;
++}
++
++static asmlinkage int
++show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
++{
++ int n = 0;
++
++ while (unwind(info) == 0 && UNW_PC(info)) {
++ n++;
++ print_addr_and_symbol(UNW_PC(info), log_lvl);
++ if (arch_unw_user_mode(info))
++ break;
++ }
++ return n;
++}
++
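++/*
++ * Print a backtrace at the given log level: try the DWARF2 unwinder first
++ * (when call_trace >= 0) and fall back to the frame-pointer/stack-scan walk
++ * in print_context_stack() when it is stuck or disabled.
++ */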
++static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
++ unsigned long *stack, char *log_lvl)
++{
++ unsigned long ebp;
++
++ if (!task)
++ task = current;
++
++ if (call_trace >= 0) {
++ int unw_ret = 0;
++ struct unwind_frame_info info;
++
++ if (regs) {
++ if (unwind_init_frame_info(&info, task, regs) == 0)
++ unw_ret = show_trace_unwind(&info, log_lvl);
++ } else if (task == current)
++ unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl);
++ else {
++ if (unwind_init_blocked(&info, task) == 0)
++ unw_ret = show_trace_unwind(&info, log_lvl);
++ }
++ if (unw_ret > 0) {
++ if (call_trace == 1 && !arch_unw_user_mode(&info)) {
++ print_symbol("DWARF2 unwinder stuck at %s\n",
++ UNW_PC(&info));
++ if (UNW_SP(&info) >= PAGE_OFFSET) {
++ printk("Leftover inexact backtrace:\n");
++ stack = (void *)UNW_SP(&info);
++ } else
++ printk("Full inexact backtrace again:\n");
++ } else if (call_trace >= 1)
++ return;
++ else
++ printk("Full inexact backtrace again:\n");
++ } else
++ printk("Inexact backtrace:\n");
++ }
++
++ if (task == current) {
++ /* Grab ebp right from our regs */
++ asm ("movl %%ebp, %0" : "=r" (ebp) : );
++ } else {
++ /* ebp is the last reg pushed by switch_to */
++ ebp = *(unsigned long *) task->thread.esp;
++ }
++
++ while (1) {
++ struct thread_info *context;
++ context = (struct thread_info *)
++ ((unsigned long)stack & (~(THREAD_SIZE - 1)));
++ ebp = print_context_stack(context, stack, ebp, log_lvl);
++ stack = (unsigned long*)context->previous_esp;
++ if (!stack)
++ break;
++ printk("%s =======================\n", log_lvl);
++ }
++}
++
++void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack)
++{
++ show_trace_log_lvl(task, regs, stack, "");
++}
++
++static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
++ unsigned long *esp, char *log_lvl)
++{
++ unsigned long *stack;
++ int i;
++
++ if (esp == NULL) {
++ if (task)
++ esp = (unsigned long*)task->thread.esp;
++ else
++ esp = (unsigned long *)&esp;
++ }
++
++ stack = esp;
++ for(i = 0; i < kstack_depth_to_print; i++) {
++ if (kstack_end(stack))
++ break;
++ if (i && ((i % 8) == 0))
++ printk("\n%s ", log_lvl);
++ printk("%08lx ", *stack++);
++ }
++ printk("\n%sCall Trace:\n", log_lvl);
++ show_trace_log_lvl(task, regs, esp, log_lvl);
++}
++
++void show_stack(struct task_struct *task, unsigned long *esp)
++{
++ printk(" ");
++ show_stack_log_lvl(task, NULL, esp, "");
++}
++
++/*
++ * The architecture-independent dump_stack generator
++ */
++void dump_stack(void)
++{
++ unsigned long stack;
++
++ show_trace(current, NULL, &stack);
++}
++
++EXPORT_SYMBOL(dump_stack);
++
++void show_registers(struct pt_regs *regs)
++{
++ int i;
++ int in_kernel = 1;
++ unsigned long esp;
++ unsigned short ss;
++
++ esp = (unsigned long) (&regs->esp);
++ savesegment(ss, ss);
++ if (user_mode_vm(regs)) {
++ in_kernel = 0;
++ esp = regs->esp;
++ ss = regs->xss & 0xffff;
++ }
++ print_modules();
++ printk(KERN_EMERG "CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\n"
++ "EFLAGS: %08lx (%s %.*s) \n",
++ smp_processor_id(), 0xffff & regs->xcs, regs->eip,
++ print_tainted(), regs->eflags, system_utsname.release,
++ (int)strcspn(system_utsname.version, " "),
++ system_utsname.version);
++ print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
++ printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
++ regs->eax, regs->ebx, regs->ecx, regs->edx);
++ printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
++ regs->esi, regs->edi, regs->ebp, esp);
++ printk(KERN_EMERG "ds: %04x es: %04x ss: %04x\n",
++ regs->xds & 0xffff, regs->xes & 0xffff, ss);
++ printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
++ TASK_COMM_LEN, current->comm, current->pid,
++ current_thread_info(), current, current->thread_info);
++ /*
++ * When in-kernel, we also print out the stack and code at the
++ * time of the fault..
++ */
++ if (in_kernel) {
++ u8 __user *eip;
++
++ printk("\n" KERN_EMERG "Stack: ");
++ show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
++
++ printk(KERN_EMERG "Code: ");
++
++ eip = (u8 __user *)regs->eip - 43;
++ for (i = 0; i < 64; i++, eip++) {
++ unsigned char c;
++
++ if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
++ printk(" Bad EIP value.");
++ break;
++ }
++ if (eip == (u8 __user *)regs->eip)
++ printk("<%02x> ", c);
++ else
++ printk("%02x ", c);
++ }
++ }
++ printk("\n");
++}
++
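++/* Detect a BUG(): the trapping eip points at a ud2a opcode (0x0f 0x0b);
++ * with CONFIG_DEBUG_BUGVERBOSE the file/line record follows it. */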
++static void handle_BUG(struct pt_regs *regs)
++{
++ unsigned long eip = regs->eip;
++ unsigned short ud2;
++
++ if (eip < PAGE_OFFSET)
++ return;
++ if (__get_user(ud2, (unsigned short __user *)eip))
++ return;
++ if (ud2 != 0x0b0f)
++ return;
++
++ printk(KERN_EMERG "------------[ cut here ]------------\n");
++
++#ifdef CONFIG_DEBUG_BUGVERBOSE
++ do {
++ unsigned short line;
++ char *file;
++ char c;
++
++ if (__get_user(line, (unsigned short __user *)(eip + 2)))
++ break;
++ if (__get_user(file, (char * __user *)(eip + 4)) ||
++ (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
++ file = "<bad filename>";
++
++ printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
++ return;
++ } while (0);
++#endif
++ printk(KERN_EMERG "Kernel BUG at [verbose debug info unavailable]\n");
++}
++
++/* This is reached when something in the kernel has done
++ * something bad and is about to be terminated.
++ */
++void die(const char * str, struct pt_regs * regs, long err)
++{
++ static struct {
++ spinlock_t lock;
++ u32 lock_owner;
++ int lock_owner_depth;
++ } die = {
++ .lock = SPIN_LOCK_UNLOCKED,
++ .lock_owner = -1,
++ .lock_owner_depth = 0
++ };
++ static int die_counter;
++ unsigned long flags;
++
++ oops_enter();
++
++ if (die.lock_owner != raw_smp_processor_id()) {
++ console_verbose();
++ spin_lock_irqsave(&die.lock, flags);
++ die.lock_owner = smp_processor_id();
++ die.lock_owner_depth = 0;
++ bust_spinlocks(1);
++ }
++ else
++ local_save_flags(flags);
++
++ if (++die.lock_owner_depth < 3) {
++ int nl = 0;
++ unsigned long esp;
++ unsigned short ss;
++
++ handle_BUG(regs);
++ printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
++#ifdef CONFIG_PREEMPT
++ printk(KERN_EMERG "PREEMPT ");
++ nl = 1;
++#endif
++#ifdef CONFIG_SMP
++ if (!nl)
++ printk(KERN_EMERG);
++ printk("SMP ");
++ nl = 1;
++#endif
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ if (!nl)
++ printk(KERN_EMERG);
++ printk("DEBUG_PAGEALLOC");
++ nl = 1;
++#endif
++ if (nl)
++ printk("\n");
++ if (notify_die(DIE_OOPS, str, regs, err,
++ current->thread.trap_no, SIGSEGV) !=
++ NOTIFY_STOP) {
++ show_registers(regs);
++ /* Executive summary in case the oops scrolled away */
++ esp = (unsigned long) (&regs->esp);
++ savesegment(ss, ss);
++ if (user_mode(regs)) {
++ esp = regs->esp;
++ ss = regs->xss & 0xffff;
++ }
++ printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
++ print_symbol("%s", regs->eip);
++ printk(" SS:ESP %04x:%08lx\n", ss, esp);
++ }
++ else
++ regs = NULL;
++ } else
++ printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
++
++ bust_spinlocks(0);
++ die.lock_owner = -1;
++ spin_unlock_irqrestore(&die.lock, flags);
++
++ if (!regs)
++ return;
++
++ if (kexec_should_crash(current))
++ crash_kexec(regs);
++
++ if (in_interrupt())
++ panic("Fatal exception in interrupt");
++
++ if (panic_on_oops)
++ panic("Fatal exception");
++
++ oops_exit();
++ do_exit(SIGSEGV);
++}
++
++static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
++{
++ if (!user_mode_vm(regs))
++ die(str, regs, err);
++}
++
++static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
++ struct pt_regs * regs, long error_code,
++ siginfo_t *info)
++{
++ struct task_struct *tsk = current;
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = trapnr;
++
++ if (regs->eflags & VM_MASK) {
++ if (vm86)
++ goto vm86_trap;
++ goto trap_signal;
++ }
++
++ if (!user_mode(regs))
++ goto kernel_trap;
++
++ trap_signal: {
++ if (info)
++ force_sig_info(signr, info, tsk);
++ else
++ force_sig(signr, tsk);
++ return;
++ }
++
++ kernel_trap: {
++ if (!fixup_exception(regs))
++ die(str, regs, error_code);
++ return;
++ }
++
++ vm86_trap: {
++ int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
++ if (ret) goto trap_signal;
++ return;
++ }
++}
++
++#define DO_ERROR(trapnr, signr, str, name) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
++}
++
++#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ siginfo_t info; \
++ info.si_signo = signr; \
++ info.si_errno = 0; \
++ info.si_code = sicode; \
++ info.si_addr = (void __user *)siaddr; \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
++}
++
++#define DO_VM86_ERROR(trapnr, signr, str, name) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
++}
++
++#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ siginfo_t info; \
++ info.si_signo = signr; \
++ info.si_errno = 0; \
++ info.si_code = sicode; \
++ info.si_addr = (void __user *)siaddr; \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
++}
++
++DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
++#ifndef CONFIG_KPROBES
++DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
++#endif
++DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
++DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
++DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
++DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
++DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
++DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
++DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
++DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
++DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
++
++fastcall void __kprobes do_general_protection(struct pt_regs * regs,
++ long error_code)
++{
++ current->thread.error_code = error_code;
++ current->thread.trap_no = 13;
++
++ if (regs->eflags & VM_MASK)
++ goto gp_in_vm86;
++
++ if (!user_mode(regs))
++ goto gp_in_kernel;
++
++ current->thread.error_code = error_code;
++ current->thread.trap_no = 13;
++ force_sig(SIGSEGV, current);
++ return;
++
++gp_in_vm86:
++ local_irq_enable();
++ handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
++ return;
++
++gp_in_kernel:
++ if (!fixup_exception(regs)) {
++ if (notify_die(DIE_GPF, "general protection fault", regs,
++ error_code, 13, SIGSEGV) == NOTIFY_STOP)
++ return;
++ die("general protection fault", regs, error_code);
++ }
++}
++
++static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
++ "to continue\n");
++ printk(KERN_EMERG "You probably have a hardware problem with your RAM "
++ "chips\n");
++
++ /* Clear and disable the memory parity error line. */
++ clear_mem_error(reason);
++}
++
++static void io_check_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
++ show_registers(regs);
++
++ /* Re-enable the IOCK line, wait for a few seconds */
++ clear_io_check_error(reason);
++}
++
++static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
++{
++#ifdef CONFIG_MCA
++ /* Might actually be able to figure out what the guilty party
++ * is. */
++ if( MCA_bus ) {
++ mca_handle_nmi();
++ return;
++ }
++#endif
++ printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
++ reason, smp_processor_id());
++ printk("Dazed and confused, but trying to continue\n");
++ printk("Do you have a strange power saving mode enabled?\n");
++}
++
++static DEFINE_SPINLOCK(nmi_print_lock);
++
++void die_nmi (struct pt_regs *regs, const char *msg)
++{
++ if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
++ NOTIFY_STOP)
++ return;
++
++ spin_lock(&nmi_print_lock);
++ /*
++ * We are in trouble anyway, lets at least try
++ * to get a message out.
++ */
++ bust_spinlocks(1);
++ printk(KERN_EMERG "%s", msg);
++ printk(" on CPU%d, eip %08lx, registers:\n",
++ smp_processor_id(), regs->eip);
++ show_registers(regs);
++ printk(KERN_EMERG "console shuts up ...\n");
++ console_silent();
++ spin_unlock(&nmi_print_lock);
++ bust_spinlocks(0);
++
++ /* If we are in kernel we are probably nested up pretty bad
++	 * and might as well get out now while we still can.
++ */
++ if (!user_mode_vm(regs)) {
++ current->thread.trap_no = 2;
++ crash_kexec(regs);
++ }
++
++ do_exit(SIGSEGV);
++}
++
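++/*
++ * Dispatch an NMI. Only the BSP reads the reason port: bit 0x80 indicates
++ * a memory parity error, bit 0x40 an IOCK error; anything else is handed
++ * to the NMI watchdog (if configured) or reported as an unknown NMI.
++ */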
++static void default_do_nmi(struct pt_regs * regs)
++{
++ unsigned char reason = 0;
++
++ /* Only the BSP gets external NMIs from the system. */
++ if (!smp_processor_id())
++ reason = get_nmi_reason();
++
++ if (!(reason & 0xc0)) {
++ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
++ == NOTIFY_STOP)
++ return;
++#ifdef CONFIG_X86_LOCAL_APIC
++ /*
++ * Ok, so this is none of the documented NMI sources,
++ * so it must be the NMI watchdog.
++ */
++ if (nmi_watchdog) {
++ nmi_watchdog_tick(regs);
++ return;
++ }
++#endif
++ unknown_nmi_error(reason, regs);
++ return;
++ }
++ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
++ return;
++ if (reason & 0x80)
++ mem_parity_error(reason, regs);
++ if (reason & 0x40)
++ io_check_error(reason, regs);
++ /*
++ * Reassert NMI in case it became active meanwhile
++ * as it's edge-triggered.
++ */
++ reassert_nmi();
++}
++
++static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
++{
++ return 0;
++}
++
++static nmi_callback_t nmi_callback = dummy_nmi_callback;
++
++fastcall void do_nmi(struct pt_regs * regs, long error_code)
++{
++ int cpu;
++
++ nmi_enter();
++
++ cpu = smp_processor_id();
++
++ ++nmi_count(cpu);
++
++ if (!rcu_dereference(nmi_callback)(regs, cpu))
++ default_do_nmi(regs);
++
++ nmi_exit();
++}
++
++void set_nmi_callback(nmi_callback_t callback)
++{
++ vmalloc_sync_all();
++ rcu_assign_pointer(nmi_callback, callback);
++}
++EXPORT_SYMBOL_GPL(set_nmi_callback);
++
++void unset_nmi_callback(void)
++{
++ nmi_callback = dummy_nmi_callback;
++}
++EXPORT_SYMBOL_GPL(unset_nmi_callback);
++
++#ifdef CONFIG_KPROBES
++fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
++{
++ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
++ == NOTIFY_STOP)
++ return;
++ /* This is an interrupt gate, because kprobes wants interrupts
++ disabled. Normal trap handlers don't. */
++ restore_interrupts(regs);
++ do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
++}
++#endif
++
++/*
++ * Our handling of the processor debug registers is non-trivial.
++ * We do not clear them on entry and exit from the kernel. Therefore
++ * it is possible to get a watchpoint trap here from inside the kernel.
++ * However, the code in ./ptrace.c has ensured that the user can
++ * only set watchpoints on userspace addresses. Therefore the in-kernel
++ * watchpoint trap can only occur in code which is reading/writing
++ * from user space. Such code must not hold kernel locks (since it
++ * can equally take a page fault), therefore it is safe to call
++ * force_sig_info even though that claims and releases locks.
++ *
++ * Code in ./signal.c ensures that the debug control register
++ * is restored before we deliver any signal, and therefore that
++ * user code runs with the correct debug control register even though
++ * we clear it here.
++ *
++ * Being careful here means that we don't have to be as careful in a
++ * lot of more complicated places (task switching can be a bit lazy
++ * about restoring all the debug state, and ptrace doesn't have to
++ * find every occurrence of the TF bit that could be saved away even
++ * by user code)
++ */
++fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
++{
++ unsigned int condition;
++ struct task_struct *tsk = current;
++
++ get_debugreg(condition, 6);
++
++ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
++ SIGTRAP) == NOTIFY_STOP)
++ return;
++ /* It's safe to allow irq's after DR6 has been saved */
++ if (regs->eflags & X86_EFLAGS_IF)
++ local_irq_enable();
++
++ /* Mask out spurious debug traps due to lazy DR7 setting */
++ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
++ if (!tsk->thread.debugreg[7])
++ goto clear_dr7;
++ }
++
++ if (regs->eflags & VM_MASK)
++ goto debug_vm86;
++
++ /* Save debug status register where ptrace can see it */
++ tsk->thread.debugreg[6] = condition;
++
++ /*
++ * Single-stepping through TF: make sure we ignore any events in
++ * kernel space (but re-enable TF when returning to user mode).
++ */
++ if (condition & DR_STEP) {
++ /*
++ * We already checked v86 mode above, so we can
++ * check for kernel mode by just checking the CPL
++ * of CS.
++ */
++ if (!user_mode(regs))
++ goto clear_TF_reenable;
++ }
++
++ /* Ok, finally something we can handle */
++ send_sigtrap(tsk, regs, error_code);
++
++ /* Disable additional traps. They'll be re-enabled when
++ * the signal is delivered.
++ */
++clear_dr7:
++ set_debugreg(0, 7);
++ return;
++
++debug_vm86:
++ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
++ return;
++
++clear_TF_reenable:
++ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++ regs->eflags &= ~TF_MASK;
++ return;
++}
++
++/*
++ * Note that we play around with the 'TS' bit in an attempt to get
++ * the correct behaviour even in the presence of the asynchronous
++ * IRQ13 behaviour
++ */
++void math_error(void __user *eip)
++{
++ struct task_struct * task;
++ siginfo_t info;
++ unsigned short cwd, swd;
++
++ /*
++ * Save the info for the exception handler and clear the error.
++ */
++ task = current;
++ save_init_fpu(task);
++ task->thread.trap_no = 16;
++ task->thread.error_code = 0;
++ info.si_signo = SIGFPE;
++ info.si_errno = 0;
++ info.si_code = __SI_FAULT;
++ info.si_addr = eip;
++ /*
++ * (~cwd & swd) will mask out exceptions that are not set to unmasked
++ * status. 0x3f is the exception bits in these regs, 0x200 is the
++ * C1 reg you need in case of a stack fault, 0x040 is the stack
++ * fault bit. We should only be taking one exception at a time,
++ * so if this combination doesn't produce any single exception,
++	 * then we have a bad program that isn't synchronizing its FPU usage
++ * and it will suffer the consequences since we won't be able to
++ * fully reproduce the context of the exception
++ */
++ cwd = get_fpu_cwd(task);
++ swd = get_fpu_swd(task);
++ switch (swd & ~cwd & 0x3f) {
++ case 0x000: /* No unmasked exception */
++ return;
++ default: /* Multiple exceptions */
++ break;
++ case 0x001: /* Invalid Op */
++ /*
++ * swd & 0x240 == 0x040: Stack Underflow
++ * swd & 0x240 == 0x240: Stack Overflow
++ * User must clear the SF bit (0x40) if set
++ */
++ info.si_code = FPE_FLTINV;
++ break;
++ case 0x002: /* Denormalize */
++ case 0x010: /* Underflow */
++ info.si_code = FPE_FLTUND;
++ break;
++ case 0x004: /* Zero Divide */
++ info.si_code = FPE_FLTDIV;
++ break;
++ case 0x008: /* Overflow */
++ info.si_code = FPE_FLTOVF;
++ break;
++ case 0x020: /* Precision */
++ info.si_code = FPE_FLTRES;
++ break;
++ }
++ force_sig_info(SIGFPE, &info, task);
++}
++
++fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
++{
++ ignore_fpu_irq = 1;
++ math_error((void __user *)regs->eip);
++}
++
++static void simd_math_error(void __user *eip)
++{
++ struct task_struct * task;
++ siginfo_t info;
++ unsigned short mxcsr;
++
++ /*
++ * Save the info for the exception handler and clear the error.
++ */
++ task = current;
++ save_init_fpu(task);
++ task->thread.trap_no = 19;
++ task->thread.error_code = 0;
++ info.si_signo = SIGFPE;
++ info.si_errno = 0;
++ info.si_code = __SI_FAULT;
++ info.si_addr = eip;
++ /*
++ * The SIMD FPU exceptions are handled a little differently, as there
++ * is only a single status/control register. Thus, to determine which
++ * unmasked exception was caught we must mask the exception mask bits
++ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
++ */
++ mxcsr = get_fpu_mxcsr(task);
++ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
++ case 0x000:
++ default:
++ break;
++ case 0x001: /* Invalid Op */
++ info.si_code = FPE_FLTINV;
++ break;
++ case 0x002: /* Denormalize */
++ case 0x010: /* Underflow */
++ info.si_code = FPE_FLTUND;
++ break;
++ case 0x004: /* Zero Divide */
++ info.si_code = FPE_FLTDIV;
++ break;
++ case 0x008: /* Overflow */
++ info.si_code = FPE_FLTOVF;
++ break;
++ case 0x020: /* Precision */
++ info.si_code = FPE_FLTRES;
++ break;
++ }
++ force_sig_info(SIGFPE, &info, task);
++}
++
++fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
++ long error_code)
++{
++ if (cpu_has_xmm) {
++ /* Handle SIMD FPU exceptions on PIII+ processors. */
++ ignore_fpu_irq = 1;
++ simd_math_error((void __user *)regs->eip);
++ } else {
++ /*
++ * Handle strange cache flush from user space exception
++ * in all other cases. This is undocumented behaviour.
++ */
++ if (regs->eflags & VM_MASK) {
++ handle_vm86_fault((struct kernel_vm86_regs *)regs,
++ error_code);
++ return;
++ }
++ current->thread.trap_no = 19;
++ current->thread.error_code = error_code;
++ die_if_kernel("cache flush denied", regs, error_code);
++ force_sig(SIGSEGV, current);
++ }
++}
++
++#ifndef CONFIG_XEN
++fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
++ long error_code)
++{
++#if 0
++ /* No need to warn about this any longer. */
++ printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
++#endif
++}
++
++fastcall void setup_x86_bogus_stack(unsigned char * stk)
++{
++ unsigned long *switch16_ptr, *switch32_ptr;
++ struct pt_regs *regs;
++ unsigned long stack_top, stack_bot;
++ unsigned short iret_frame16_off;
++ int cpu = smp_processor_id();
++ /* reserve the space on 32bit stack for the magic switch16 pointer */
++ memmove(stk, stk + 8, sizeof(struct pt_regs));
++ switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
++ regs = (struct pt_regs *)stk;
++ /* now the switch32 on 16bit stack */
++ stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
++ stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
++ switch32_ptr = (unsigned long *)(stack_top - 8);
++ iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
++ /* copy iret frame on 16bit stack */
++ memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
++ /* fill in the switch pointers */
++ switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
++ switch16_ptr[1] = __ESPFIX_SS;
++ switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
++ 8 - CPU_16BIT_STACK_SIZE;
++ switch32_ptr[1] = __KERNEL_DS;
++}
++
++fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
++{
++ unsigned long *switch32_ptr;
++ unsigned char *stack16, *stack32;
++ unsigned long stack_top, stack_bot;
++ int len;
++ int cpu = smp_processor_id();
++ stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
++ stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
++ switch32_ptr = (unsigned long *)(stack_top - 8);
++ /* copy the data from 16bit stack to 32bit stack */
++ len = CPU_16BIT_STACK_SIZE - 8 - sp;
++ stack16 = (unsigned char *)(stack_bot + sp);
++ stack32 = (unsigned char *)
++ (switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
++ memcpy(stack32, stack16, len);
++ return stack32;
++}
++#endif
++
++/*
++ * 'math_state_restore()' saves the current math information in the
++ * old math state array, and gets the new ones from the current task
++ *
++ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
++ * Don't touch unless you *really* know how it works.
++ *
++ * Must be called with kernel preemption disabled (in this case,
++ * local interrupts are disabled at the call-site in entry.S).
++ */
++asmlinkage void math_state_restore(struct pt_regs regs)
++{
++ struct thread_info *thread = current_thread_info();
++ struct task_struct *tsk = thread->task;
++
++ /* NB. 'clts' is done for us by Xen during virtual trap. */
++ if (!tsk_used_math(tsk))
++ init_fpu(tsk);
++ restore_fpu(tsk);
++ thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
++}
++
++#ifndef CONFIG_MATH_EMULATION
++
++asmlinkage void math_emulate(long arg)
++{
++ printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
++ printk(KERN_EMERG "killing %s.\n",current->comm);
++ force_sig(SIGFPE,current);
++ schedule();
++}
++
++#endif /* CONFIG_MATH_EMULATION */
++
++#ifdef CONFIG_X86_F00F_BUG
++void __init trap_init_f00f_bug(void)
++{
++ __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
++
++ /*
++ * Update the IDT descriptor and reload the IDT so that
++ * it uses the read-only mapped virtual address.
++ */
++ idt_descr.address = fix_to_virt(FIX_F00F_IDT);
++ load_idt(&idt_descr);
++}
++#endif
++
++
++/*
++ * NB. All these are "trap gates" (i.e. events_mask isn't set) except
++ * for those that specify <dpl>|4 in the second field.
++ */
++static trap_info_t trap_table[] = {
++ { 0, 0, __KERNEL_CS, (unsigned long)divide_error },
++ { 1, 0|4, __KERNEL_CS, (unsigned long)debug },
++ { 3, 3|4, __KERNEL_CS, (unsigned long)int3 },
++ { 4, 3, __KERNEL_CS, (unsigned long)overflow },
++ { 5, 0, __KERNEL_CS, (unsigned long)bounds },
++ { 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
++ { 7, 0|4, __KERNEL_CS, (unsigned long)device_not_available },
++ { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
++ { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
++ { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
++ { 12, 0, __KERNEL_CS, (unsigned long)stack_segment },
++ { 13, 0, __KERNEL_CS, (unsigned long)general_protection },
++ { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault },
++ { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment },
++ { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error },
++ { 17, 0, __KERNEL_CS, (unsigned long)alignment_check },
++#ifdef CONFIG_X86_MCE
++ { 18, 0, __KERNEL_CS, (unsigned long)machine_check },
++#endif
++ { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
++ { SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call },
++ { 0, 0, 0, 0 }
++};
++
++void __init trap_init(void)
++{
++ HYPERVISOR_set_trap_table(trap_table);
++
++ if (cpu_has_fxsr) {
++ /*
++ * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
++ * Generates a compile-time "error: zero width for bit-field" if
++ * the alignment is wrong.
++ */
++ struct fxsrAlignAssert {
++ int _:!(offsetof(struct task_struct,
++ thread.i387.fxsave) & 15);
++ };
++
++ printk(KERN_INFO "Enabling fast FPU save and restore... ");
++ set_in_cr4(X86_CR4_OSFXSR);
++ printk("done.\n");
++ }
++ if (cpu_has_xmm) {
++ printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
++ "support... ");
++ set_in_cr4(X86_CR4_OSXMMEXCPT);
++ printk("done.\n");
++ }
++
++ /*
++ * Should be a barrier for any external CPU state.
++ */
++ cpu_init();
++}
++
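++/* Fill a VCPU trap context from trap_table, indexed by vector; presumably
++ * used when setting up the context of a secondary VCPU. */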
++void smp_trap_init(trap_info_t *trap_ctxt)
++{
++ trap_info_t *t = trap_table;
++
++ for (t = trap_table; t->address; t++) {
++ trap_ctxt[t->vector].flags = t->flags;
++ trap_ctxt[t->vector].cs = t->cs;
++ trap_ctxt[t->vector].address = t->address;
++ }
++}
++
++static int __init kstack_setup(char *s)
++{
++ kstack_depth_to_print = simple_strtoul(s, NULL, 0);
++ return 1;
++}
++__setup("kstack=", kstack_setup);
++
++#ifdef CONFIG_STACK_UNWIND
++static int __init call_trace_setup(char *s)
++{
++ if (strcmp(s, "old") == 0)
++ call_trace = -1;
++ else if (strcmp(s, "both") == 0)
++ call_trace = 0;
++ else if (strcmp(s, "newfallback") == 0)
++ call_trace = 1;
++	else if (strcmp(s, "new") == 0)
++ call_trace = 2;
++ return 1;
++}
++__setup("call_trace=", call_trace_setup);
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/kernel/vsyscall-note-xen.S 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,32 @@
++/*
++ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
++ * Here we can supply some information useful to userland.
++ * First we get the vanilla i386 note that supplies the kernel version info.
++ */
++
++#include "vsyscall-note.S"
++
++/*
++ * Now we add a special note telling glibc's dynamic linker a fake hardware
++ * flavor that it will use to choose the search path for libraries in the
++ * same way it uses real hardware capabilities like "mmx".
++ * We supply "nosegneg" as the fake capability, to indicate that we
++ * do not like negative offsets in instructions using segment overrides,
++ * since we implement those inefficiently. This makes it possible to
++ * install libraries optimized to avoid those access patterns in someplace
++ * like /lib/i686/tls/nosegneg. Note that an /etc/ld.so.conf.d/file
++ * corresponding to the bits here is needed to make ldconfig work right.
++ * It should contain:
++ * hwcap 0 nosegneg
++ * to match the mapping of bit to name that we give here.
++ */
++#define NOTE_KERNELCAP_BEGIN(ncaps, mask) \
++ ASM_ELF_NOTE_BEGIN(".note.kernelcap", "a", "GNU", 2) \
++ .long ncaps, mask
++#define NOTE_KERNELCAP(bit, name) \
++ .byte bit; .asciz name
++#define NOTE_KERNELCAP_END ASM_ELF_NOTE_END
++
++NOTE_KERNELCAP_BEGIN(1, 1)
++NOTE_KERNELCAP(0, "nosegneg")
++NOTE_KERNELCAP_END
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/mach-xen/Makefile 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,5 @@
++#
++# Makefile for the linux kernel.
++#
++
++obj-y := setup.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/mach-xen/setup.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,147 @@
++/*
++ * Machine specific setup for generic
++ */
++
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <asm/acpi.h>
++#include <asm/arch_hooks.h>
++#include <asm/e820.h>
++#include <asm/setup.h>
++#include <asm/fixmap.h>
++
++#include <xen/interface/callback.h>
++#include <xen/interface/memory.h>
++
++#ifdef CONFIG_HOTPLUG_CPU
++#define DEFAULT_SEND_IPI (1)
++#else
++#define DEFAULT_SEND_IPI (0)
++#endif
++
++int no_broadcast=DEFAULT_SEND_IPI;
++
++static __init int no_ipi_broadcast(char *str)
++{
++ get_option(&str, &no_broadcast);
++ printk ("Using %s mode\n", no_broadcast ? "No IPI Broadcast" :
++ "IPI Broadcast");
++ return 1;
++}
++
++__setup("no_ipi_broadcast", no_ipi_broadcast);
++
++static int __init print_ipi_mode(void)
++{
++ printk ("Using IPI %s mode\n", no_broadcast ? "No-Shortcut" :
++ "Shortcut");
++ return 0;
++}
++
++late_initcall(print_ipi_mode);
++
++/**
++ * machine_specific_memory_setup - Hook for machine specific memory setup.
++ *
++ * Description:
++ * This is included late in kernel/setup.c so that it can make
++ * use of all of the static functions.
++ **/
++
++char * __init machine_specific_memory_setup(void)
++{
++ int rc;
++ struct xen_memory_map memmap;
++ /*
++ * This is rather large for a stack variable but this early in
++	 * the boot process we know we have plenty of slack space.
++ */
++ struct e820entry map[E820MAX];
++
++ memmap.nr_entries = E820MAX;
++ set_xen_guest_handle(memmap.buffer, map);
++
++ rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
++ if ( rc == -ENOSYS ) {
++ memmap.nr_entries = 1;
++ map[0].addr = 0ULL;
++ map[0].size = PFN_PHYS((unsigned long long)xen_start_info->nr_pages);
++ /* 8MB slack (to balance backend allocations). */
++ map[0].size += 8ULL << 20;
++ map[0].type = E820_RAM;
++ rc = 0;
++ }
++ BUG_ON(rc);
++
++ sanitize_e820_map(map, (char *)&memmap.nr_entries);
++
++ BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
++
++ return "Xen";
++}
++
++
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void nmi(void);
++
++unsigned long *machine_to_phys_mapping;
++EXPORT_SYMBOL(machine_to_phys_mapping);
++unsigned int machine_to_phys_order;
++EXPORT_SYMBOL(machine_to_phys_order);
++
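++/*
++ * Register the event, failsafe and NMI callback entry points with Xen
++ * (falling back to the older set_callbacks/nmi_op hypercalls for 3.0.2-era
++ * hypervisors), pick up the hypervisor's fixmap top, and locate the
++ * machine-to-physical translation table.
++ */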
++void __init machine_specific_arch_setup(void)
++{
++ int ret;
++ struct xen_machphys_mapping mapping;
++ unsigned long machine_to_phys_nr_ents;
++ struct xen_platform_parameters pp;
++ static struct callback_register __initdata event = {
++ .type = CALLBACKTYPE_event,
++ .address = { __KERNEL_CS, (unsigned long)hypervisor_callback },
++ };
++ static struct callback_register __initdata failsafe = {
++ .type = CALLBACKTYPE_failsafe,
++ .address = { __KERNEL_CS, (unsigned long)failsafe_callback },
++ };
++ static struct callback_register __initdata nmi_cb = {
++ .type = CALLBACKTYPE_nmi,
++ .address = { __KERNEL_CS, (unsigned long)nmi },
++ };
++
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
++ if (ret == 0)
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret == -ENOSYS)
++ ret = HYPERVISOR_set_callbacks(
++ event.address.cs, event.address.eip,
++ failsafe.address.cs, failsafe.address.eip);
++#endif
++ BUG_ON(ret);
++
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret == -ENOSYS) {
++ static struct xennmi_callback __initdata cb = {
++ .handler_address = (unsigned long)nmi
++ };
++
++ HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
++ }
++#endif
++
++ if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
++ set_fixaddr_top(pp.virt_start);
++
++ machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
++ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
++ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
++ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
++ machine_to_phys_nr_ents = mapping.max_mfn + 1;
++ }
++ while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
++ machine_to_phys_order++;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/mm/fault-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,769 @@
++/*
++ * linux/arch/i386/mm/fault.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/tty.h>
++#include <linux/vt_kern.h> /* For unblank_screen() */
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/kprobes.h>
++
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/desc.h>
++#include <asm/kdebug.h>
++
++extern void die(const char *,struct pt_regs *,long);
++
++#ifdef CONFIG_KPROBES
++ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
++int register_page_fault_notifier(struct notifier_block *nb)
++{
++ vmalloc_sync_all();
++ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
++}
++
++int unregister_page_fault_notifier(struct notifier_block *nb)
++{
++ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
++}
++
++static inline int notify_page_fault(enum die_val val, const char *str,
++ struct pt_regs *regs, long err, int trap, int sig)
++{
++ struct die_args args = {
++ .regs = regs,
++ .str = str,
++ .err = err,
++ .trapnr = trap,
++ .signr = sig
++ };
++ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
++}
++#else
++static inline int notify_page_fault(enum die_val val, const char *str,
++ struct pt_regs *regs, long err, int trap, int sig)
++{
++ return NOTIFY_DONE;
++}
++#endif
++
++
++/*
++ * Unlock any spinlocks which will prevent us from getting the
++ * message out
++ */
++void bust_spinlocks(int yes)
++{
++ int loglevel_save = console_loglevel;
++
++ if (yes) {
++ oops_in_progress = 1;
++ return;
++ }
++#ifdef CONFIG_VT
++ unblank_screen();
++#endif
++ oops_in_progress = 0;
++ /*
++ * OK, the message is on the console. Now we call printk()
++ * without oops_in_progress set so that printk will give klogd
++ * a poke. Hold onto your hats...
++ */
++ console_loglevel = 15; /* NMI oopser may have shut the console up */
++ printk(" ");
++ console_loglevel = loglevel_save;
++}
++
++/*
++ * Return EIP plus the CS segment base. The segment limit is also
++ * adjusted, clamped to the kernel/user address space (whichever is
++ * appropriate), and returned in *eip_limit.
++ *
++ * The segment is checked, because it might have been changed by another
++ * task between the original faulting instruction and here.
++ *
++ * If CS is no longer a valid code segment, or if EIP is beyond the
++ * limit, or if it is a kernel address when CS is not a kernel segment,
++ * then the returned value will be greater than *eip_limit.
++ *
++ * This is slow, but is very rarely executed.
++ */
++static inline unsigned long get_segment_eip(struct pt_regs *regs,
++ unsigned long *eip_limit)
++{
++ unsigned long eip = regs->eip;
++ unsigned seg = regs->xcs & 0xffff;
++ u32 seg_ar, seg_limit, base, *desc;
++
++ /* Unlikely, but must come before segment checks. */
++ if (unlikely(regs->eflags & VM_MASK)) {
++ base = seg << 4;
++ *eip_limit = base + 0xffff;
++ return base + (eip & 0xffff);
++ }
++
++ /* The standard kernel/user address space limit. */
++ *eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
++
++ /* By far the most common cases. */
++ if (likely(seg == __USER_CS || seg == GET_KERNEL_CS()))
++ return eip;
++
++ /* Check the segment exists, is within the current LDT/GDT size,
++ that kernel/user (ring 0..3) has the appropriate privilege,
++ that it's a code segment, and get the limit. */
++ __asm__ ("larl %3,%0; lsll %3,%1"
++ : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
++ if ((~seg_ar & 0x9800) || eip > seg_limit) {
++ *eip_limit = 0;
++ return 1; /* So that returned eip > *eip_limit. */
++ }
++
++ /* Get the GDT/LDT descriptor base.
++ When you look for races in this code remember that
++ LDT and other horrors are only used in user space. */
++ if (seg & (1<<2)) {
++ /* Must lock the LDT while reading it. */
++ down(&current->mm->context.sem);
++ desc = current->mm->context.ldt;
++ desc = (void *)desc + (seg & ~7);
++ } else {
++ /* Must disable preemption while reading the GDT. */
++ desc = (u32 *)get_cpu_gdt_table(get_cpu());
++ desc = (void *)desc + (seg & ~7);
++ }
++
++ /* Decode the code segment base from the descriptor */
++ base = get_desc_base((unsigned long *)desc);
++
++ if (seg & (1<<2)) {
++ up(&current->mm->context.sem);
++ } else
++ put_cpu();
++
++ /* Adjust EIP and segment limit, and clamp at the kernel limit.
++ It's legitimate for segments to wrap at 0xffffffff. */
++ seg_limit += base;
++ if (seg_limit < *eip_limit && seg_limit >= base)
++ *eip_limit = seg_limit;
++ return eip + base;
++}
++
++/*
++ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
++ * Check that here and ignore it.
++ */
++static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
++{
++ unsigned long limit;
++ unsigned long instr = get_segment_eip (regs, &limit);
++ int scan_more = 1;
++ int prefetch = 0;
++ int i;
++
++ for (i = 0; scan_more && i < 15; i++) {
++ unsigned char opcode;
++ unsigned char instr_hi;
++ unsigned char instr_lo;
++
++ if (instr > limit)
++ break;
++ if (__get_user(opcode, (unsigned char __user *) instr))
++ break;
++
++ instr_hi = opcode & 0xf0;
++ instr_lo = opcode & 0x0f;
++ instr++;
++
++ switch (instr_hi) {
++ case 0x20:
++ case 0x30:
++ /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
++ scan_more = ((instr_lo & 7) == 0x6);
++ break;
++
++ case 0x60:
++ /* 0x64 thru 0x67 are valid prefixes in all modes. */
++ scan_more = (instr_lo & 0xC) == 0x4;
++ break;
++ case 0xF0:
++ /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
++ scan_more = !instr_lo || (instr_lo>>1) == 1;
++ break;
++ case 0x00:
++ /* Prefetch instruction is 0x0F0D or 0x0F18 */
++ scan_more = 0;
++ if (instr > limit)
++ break;
++ if (__get_user(opcode, (unsigned char __user *) instr))
++ break;
++ prefetch = (instr_lo == 0xF) &&
++ (opcode == 0x0D || opcode == 0x18);
++ break;
++ default:
++ scan_more = 0;
++ break;
++ }
++ }
++ return prefetch;
++}
++
++static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
++ unsigned long error_code)
++{
++ if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++ boot_cpu_data.x86 >= 6)) {
++ /* Catch an obscure case of prefetch inside an NX page. */
++ if (nx_enabled && (error_code & 16))
++ return 0;
++ return __is_prefetch(regs, addr);
++ }
++ return 0;
++}
++
++static noinline void force_sig_info_fault(int si_signo, int si_code,
++ unsigned long address, struct task_struct *tsk)
++{
++ siginfo_t info;
++
++ info.si_signo = si_signo;
++ info.si_errno = 0;
++ info.si_code = si_code;
++ info.si_addr = (void __user *)address;
++ force_sig_info(si_signo, &info, tsk);
++}
++
++fastcall void do_invalid_op(struct pt_regs *, unsigned long);
++
++#ifdef CONFIG_X86_PAE
++static void dump_fault_path(unsigned long address)
++{
++ unsigned long *p, page;
++ unsigned long mfn;
++
++ page = read_cr3();
++ p = (unsigned long *)__va(page);
++ p += (address >> 30) * 2;
++ printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
++ if (p[0] & 1) {
++ mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
++ page = mfn_to_pfn(mfn) << PAGE_SHIFT;
++ p = (unsigned long *)__va(page);
++ address &= 0x3fffffff;
++ p += (address >> 21) * 2;
++ printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n",
++ page, p[1], p[0]);
++ mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
++#ifdef CONFIG_HIGHPTE
++ if (mfn_to_pfn(mfn) >= highstart_pfn)
++ return;
++#endif
++ if (p[0] & 1) {
++ page = mfn_to_pfn(mfn) << PAGE_SHIFT;
++ p = (unsigned long *) __va(page);
++ address &= 0x001fffff;
++ p += (address >> 12) * 2;
++ printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
++ page, p[1], p[0]);
++ }
++ }
++}
++#else
++static void dump_fault_path(unsigned long address)
++{
++ unsigned long page;
++
++ page = read_cr3();
++ page = ((unsigned long *) __va(page))[address >> 22];
++ if (oops_may_print())
++ printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
++ machine_to_phys(page));
++ /*
++ * We must not directly access the pte in the highpte
++ * case if the page table is located in highmem.
++ * And let's rather not kmap-atomic the pte, just in case
++ * it's allocated already.
++ */
++#ifdef CONFIG_HIGHPTE
++ if ((page >> PAGE_SHIFT) >= highstart_pfn)
++ return;
++#endif
++ if ((page & 1) && oops_may_print()) {
++ page &= PAGE_MASK;
++ address &= 0x003ff000;
++ page = machine_to_phys(page);
++ page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
++ printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
++ machine_to_phys(page));
++ }
++}
++#endif
++
++static int spurious_fault(struct pt_regs *regs,
++ unsigned long address,
++ unsigned long error_code)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ /* Reserved-bit violation or user access to kernel space? */
++ if (error_code & 0x0c)
++ return 0;
++
++ pgd = init_mm.pgd + pgd_index(address);
++ if (!pgd_present(*pgd))
++ return 0;
++
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return 0;
++
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return 0;
++
++ pte = pte_offset_kernel(pmd, address);
++ if (!pte_present(*pte))
++ return 0;
++ if ((error_code & 0x02) && !pte_write(*pte))
++ return 0;
++#ifdef CONFIG_X86_PAE
++ if ((error_code & 0x10) && (pte_val(*pte) & _PAGE_NX))
++ return 0;
++#endif
++
++ return 1;
++}
++
++static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
++{
++ unsigned index = pgd_index(address);
++ pgd_t *pgd_k;
++ pud_t *pud, *pud_k;
++ pmd_t *pmd, *pmd_k;
++
++ pgd += index;
++ pgd_k = init_mm.pgd + index;
++
++ if (!pgd_present(*pgd_k))
++ return NULL;
++
++ /*
++ * set_pgd(pgd, *pgd_k); here would be useless on PAE
++ * and redundant with the set_pmd() on non-PAE. As would
++ * set_pud.
++ */
++
++ pud = pud_offset(pgd, address);
++ pud_k = pud_offset(pgd_k, address);
++ if (!pud_present(*pud_k))
++ return NULL;
++
++ pmd = pmd_offset(pud, address);
++ pmd_k = pmd_offset(pud_k, address);
++ if (!pmd_present(*pmd_k))
++ return NULL;
++ if (!pmd_present(*pmd))
++#ifndef CONFIG_XEN
++ set_pmd(pmd, *pmd_k);
++#else
++ /*
++ * When running on Xen we must launder *pmd_k through
++ * pmd_val() to ensure that _PAGE_PRESENT is correctly set.
++ */
++ set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
++#endif
++ else
++ BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
++ return pmd_k;
++}
++
++/*
++ * Handle a fault on the vmalloc or module mapping area
++ *
++ * This assumes no large pages in there.
++ */
++static inline int vmalloc_fault(unsigned long address)
++{
++ unsigned long pgd_paddr;
++ pmd_t *pmd_k;
++ pte_t *pte_k;
++ /*
++ * Synchronize this task's top level page-table
++ * with the 'reference' page table.
++ *
++ * Do _not_ use "current" here. We might be inside
++ * an interrupt in the middle of a task switch..
++ */
++ pgd_paddr = read_cr3();
++ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
++ if (!pmd_k)
++ return -1;
++ pte_k = pte_offset_kernel(pmd_k, address);
++ if (!pte_present(*pte_k))
++ return -1;
++ return 0;
++}
++
++/*
++ * This routine handles page faults. It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ *
++ * error_code:
++ * bit 0 == 0 means no page found, 1 means protection fault
++ * bit 1 == 0 means read, 1 means write
++ * bit 2 == 0 means kernel, 1 means user-mode
++ * bit 3 == 1 means use of reserved bit detected
++ * bit 4 == 1 means fault was an instruction fetch
++ */
++fastcall void __kprobes do_page_fault(struct pt_regs *regs,
++ unsigned long error_code)
++{
++ struct task_struct *tsk;
++ struct mm_struct *mm;
++ struct vm_area_struct * vma;
++ unsigned long address;
++ int write, si_code;
++
++ /* get the address */
++ address = read_cr2();
++
++ /* Set the "privileged fault" bit to something sane. */
++ error_code &= ~4;
++ error_code |= (regs->xcs & 2) << 1;
++ if (regs->eflags & X86_EFLAGS_VM)
++ error_code |= 4;
++
++ tsk = current;
++
++ si_code = SEGV_MAPERR;
++
++ /*
++ * We fault-in kernel-space virtual memory on-demand. The
++ * 'reference' page table is init_mm.pgd.
++ *
++ * NOTE! We MUST NOT take any locks for this case. We may
++ * be in an interrupt or a critical region, and should
++ * only copy the information from the master page table,
++ * nothing more.
++ *
++ * This verifies that the fault happens in kernel space
++ * (error_code & 4) == 0, and that the fault was not a
++ * protection error (error_code & 9) == 0.
++ */
++ if (unlikely(address >= TASK_SIZE)) {
++#ifdef CONFIG_XEN
++ /* Faults in hypervisor area can never be patched up. */
++ if (address >= hypervisor_virt_start)
++ goto bad_area_nosemaphore;
++#endif
++ if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
++ return;
++ /* Can take a spurious fault if mapping changes R/O -> R/W. */
++ if (spurious_fault(regs, address, error_code))
++ return;
++ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++ SIGSEGV) == NOTIFY_STOP)
++ return;
++ /*
++ * Don't take the mm semaphore here. If we fixup a prefetch
++ * fault we could otherwise deadlock.
++ */
++ goto bad_area_nosemaphore;
++ }
++
++ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++ SIGSEGV) == NOTIFY_STOP)
++ return;
++
++ /* It's safe to allow irq's after cr2 has been saved and the vmalloc
++ fault has been handled. */
++ if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
++ local_irq_enable();
++
++ mm = tsk->mm;
++
++ /*
++ * If we're in an interrupt, have no user context or are running in an
++ * atomic region then we must not take the fault..
++ */
++ if (in_atomic() || !mm)
++ goto bad_area_nosemaphore;
++
++ /* When running in the kernel we expect faults to occur only to
++ * addresses in user space. All other faults represent errors in the
++ * kernel and should generate an OOPS. Unfortunately, in the case of an
++ * erroneous fault occurring in a code path which already holds mmap_sem
++ * we will deadlock attempting to validate the fault against the
++ * address space. Luckily the kernel only validly references user
++ * space from well defined areas of code, which are listed in the
++ * exceptions table.
++ *
++ * As the vast majority of faults will be valid we will only perform
++ * the source reference check when there is a possibility of a deadlock.
++ * Attempt to lock the address space, if we cannot we then validate the
++ * source. If this is invalid we can skip the address space check,
++ * thus avoiding the deadlock.
++ */
++ if (!down_read_trylock(&mm->mmap_sem)) {
++ if ((error_code & 4) == 0 &&
++ !search_exception_tables(regs->eip))
++ goto bad_area_nosemaphore;
++ down_read(&mm->mmap_sem);
++ }
++
++ vma = find_vma(mm, address);
++ if (!vma)
++ goto bad_area;
++ if (vma->vm_start <= address)
++ goto good_area;
++ if (!(vma->vm_flags & VM_GROWSDOWN))
++ goto bad_area;
++ if (error_code & 4) {
++ /*
++ * Accessing the stack below %esp is always a bug.
++ * The large cushion allows instructions like enter
++ * and pusha to work. ("enter $65535,$31" pushes
++ * 32 pointers and then decrements %esp by 65535.)
++ */
++ if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
++ goto bad_area;
++ }
++ if (expand_stack(vma, address))
++ goto bad_area;
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++ si_code = SEGV_ACCERR;
++ write = 0;
++ switch (error_code & 3) {
++ default: /* 3: write, present */
++#ifdef TEST_VERIFY_AREA
++ if (regs->cs == GET_KERNEL_CS())
++ printk("WP fault at %08lx\n", regs->eip);
++#endif
++ /* fall through */
++ case 2: /* write, not present */
++ if (!(vma->vm_flags & VM_WRITE))
++ goto bad_area;
++ write++;
++ break;
++ case 1: /* read, present */
++ goto bad_area;
++ case 0: /* read, not present */
++ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
++ goto bad_area;
++ }
++
++ survive:
++ /*
++ * If for any reason at all we couldn't handle the fault,
++ * make sure we exit gracefully rather than endlessly redo
++ * the fault.
++ */
++ switch (handle_mm_fault(mm, vma, address, write)) {
++ case VM_FAULT_MINOR:
++ tsk->min_flt++;
++ break;
++ case VM_FAULT_MAJOR:
++ tsk->maj_flt++;
++ break;
++ case VM_FAULT_SIGBUS:
++ goto do_sigbus;
++ case VM_FAULT_OOM:
++ goto out_of_memory;
++ default:
++ BUG();
++ }
++
++ /*
++ * Did it hit the DOS screen memory VA from vm86 mode?
++ */
++ if (regs->eflags & VM_MASK) {
++ unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
++ if (bit < 32)
++ tsk->thread.screen_bitmap |= 1 << bit;
++ }
++ up_read(&mm->mmap_sem);
++ return;
++
++/*
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
++ */
++bad_area:
++ up_read(&mm->mmap_sem);
++
++bad_area_nosemaphore:
++ /* User mode accesses just cause a SIGSEGV */
++ if (error_code & 4) {
++ /*
++ * Valid to do another page fault here because this one came
++ * from user space.
++ */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++ tsk->thread.cr2 = address;
++ /* Kernel addresses are always protection faults */
++ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++ tsk->thread.trap_no = 14;
++ force_sig_info_fault(SIGSEGV, si_code, address, tsk);
++ return;
++ }
++
++#ifdef CONFIG_X86_F00F_BUG
++ /*
++ * Pentium F0 0F C7 C8 bug workaround.
++ */
++ if (boot_cpu_data.f00f_bug) {
++ unsigned long nr;
++
++ nr = (address - idt_descr.address) >> 3;
++
++ if (nr == 6) {
++ do_invalid_op(regs, 0);
++ return;
++ }
++ }
++#endif
++
++no_context:
++ /* Are we prepared to handle this kernel fault? */
++ if (fixup_exception(regs))
++ return;
++
++ /*
++ * Valid to do another page fault here, because if this fault
++ * had been triggered by is_prefetch fixup_exception would have
++ * handled it.
++ */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++/*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ */
++
++ bust_spinlocks(1);
++
++ if (oops_may_print()) {
++ #ifdef CONFIG_X86_PAE
++ if (error_code & 16) {
++ pte_t *pte = lookup_address(address);
++
++ if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
++ printk(KERN_CRIT "kernel tried to execute "
++ "NX-protected page - exploit attempt? "
++ "(uid: %d)\n", current->uid);
++ }
++ #endif
++ if (address < PAGE_SIZE)
++ printk(KERN_ALERT "BUG: unable to handle kernel NULL "
++ "pointer dereference");
++ else
++ printk(KERN_ALERT "BUG: unable to handle kernel paging"
++ " request");
++ printk(" at virtual address %08lx\n",address);
++ printk(KERN_ALERT " printing eip:\n");
++ printk("%08lx\n", regs->eip);
++ }
++ dump_fault_path(address);
++ tsk->thread.cr2 = address;
++ tsk->thread.trap_no = 14;
++ tsk->thread.error_code = error_code;
++ die("Oops", regs, error_code);
++ bust_spinlocks(0);
++ do_exit(SIGKILL);
++
++/*
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
++ */
++out_of_memory:
++ up_read(&mm->mmap_sem);
++ if (tsk->pid == 1) {
++ yield();
++ down_read(&mm->mmap_sem);
++ goto survive;
++ }
++ printk("VM: killing process %s\n", tsk->comm);
++ if (error_code & 4)
++ do_exit(SIGKILL);
++ goto no_context;
++
++do_sigbus:
++ up_read(&mm->mmap_sem);
++
++ /* Kernel mode? Handle exceptions or die */
++ if (!(error_code & 4))
++ goto no_context;
++
++ /* User space => ok to do another page fault */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++ tsk->thread.cr2 = address;
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 14;
++ force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
++}
++
++#if !HAVE_SHARED_KERNEL_PMD
++void vmalloc_sync_all(void)
++{
++ /*
++ * Note that races in the updates of insync and start aren't
++ * problematic: insync can only get set bits added, and updates to
++ * start are only improving performance (without affecting correctness
++ * if undone).
++ */
++ static DECLARE_BITMAP(insync, PTRS_PER_PGD);
++ static unsigned long start = TASK_SIZE;
++ unsigned long address;
++
++ BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
++ for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
++ if (!test_bit(pgd_index(address), insync)) {
++ unsigned long flags;
++ struct page *page;
++
++ spin_lock_irqsave(&pgd_lock, flags);
++ for (page = pgd_list; page; page =
++ (struct page *)page->index)
++ if (!vmalloc_sync_one(page_address(page),
++ address)) {
++ BUG_ON(page != pgd_list);
++ break;
++ }
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ if (!page)
++ set_bit(pgd_index(address), insync);
++ }
++ if (address == start && test_bit(pgd_index(address), insync))
++ start = address + PGDIR_SIZE;
++ }
++}
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/mm/highmem-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,136 @@
++#include <linux/highmem.h>
++#include <linux/module.h>
++
++void *kmap(struct page *page)
++{
++ might_sleep();
++ if (!PageHighMem(page))
++ return page_address(page);
++ return kmap_high(page);
++}
++
++void kunmap(struct page *page)
++{
++ if (in_interrupt())
++ BUG();
++ if (!PageHighMem(page))
++ return;
++ kunmap_high(page);
++}
++
++/*
++ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
++ * no global lock is needed and because the kmap code must perform a global TLB
++ * invalidation when the kmap pool wraps.
++ *
++ * However, when holding an atomic kmap it is not legal to sleep, so atomic
++ * kmaps are appropriate for short, tight code paths only.
++ */
++static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
++{
++ enum fixed_addresses idx;
++ unsigned long vaddr;
++
++ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
++ inc_preempt_count();
++ if (!PageHighMem(page))
++ return page_address(page);
++
++ idx = type + KM_TYPE_NR*smp_processor_id();
++ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++#ifdef CONFIG_DEBUG_HIGHMEM
++ if (!pte_none(*(kmap_pte-idx)))
++ BUG();
++#endif
++ set_pte_at_sync(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
++
++ return (void*) vaddr;
++}
++
++void *kmap_atomic(struct page *page, enum km_type type)
++{
++ return __kmap_atomic(page, type, kmap_prot);
++}
++
++/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
++void *kmap_atomic_pte(struct page *page, enum km_type type)
++{
++ return __kmap_atomic(page, type,
++ test_bit(PG_pinned, &page->flags)
++ ? PAGE_KERNEL_RO : kmap_prot);
++}
++
++void kunmap_atomic(void *kvaddr, enum km_type type)
++{
++#if defined(CONFIG_DEBUG_HIGHMEM) || defined(CONFIG_XEN)
++ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
++ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
++
++ if (vaddr < FIXADDR_START) { // FIXME
++ dec_preempt_count();
++ preempt_check_resched();
++ return;
++ }
++#endif
++
++#if defined(CONFIG_DEBUG_HIGHMEM)
++ if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
++ BUG();
++
++ /*
++ * force other mappings to Oops if they'll try to access
++ * this pte without first remapping it
++ */
++ pte_clear(&init_mm, vaddr, kmap_pte-idx);
++ __flush_tlb_one(vaddr);
++#elif defined(CONFIG_XEN)
++ /*
++ * We must ensure there are no dangling pagetable references when
++ * returning memory to Xen (decrease_reservation).
++ * XXX TODO: We could make this faster by only zapping when
++ * kmap_flush_unused is called but that is trickier and more invasive.
++ */
++ pte_clear(&init_mm, vaddr, kmap_pte-idx);
++#endif
++
++ dec_preempt_count();
++ preempt_check_resched();
++}
++
++/* This is the same as kmap_atomic() but can map memory that doesn't
++ * have a struct page associated with it.
++ */
++void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
++{
++ enum fixed_addresses idx;
++ unsigned long vaddr;
++
++ inc_preempt_count();
++
++ idx = type + KM_TYPE_NR*smp_processor_id();
++ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++ set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
++ __flush_tlb_one(vaddr);
++
++ return (void*) vaddr;
++}
++
++struct page *kmap_atomic_to_page(void *ptr)
++{
++ unsigned long idx, vaddr = (unsigned long)ptr;
++ pte_t *pte;
++
++ if (vaddr < FIXADDR_START)
++ return virt_to_page(ptr);
++
++ idx = virt_to_fix(vaddr);
++ pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
++ return pte_page(*pte);
++}
++
++EXPORT_SYMBOL(kmap);
++EXPORT_SYMBOL(kunmap);
++EXPORT_SYMBOL(kmap_atomic);
++EXPORT_SYMBOL(kmap_atomic_pte);
++EXPORT_SYMBOL(kunmap_atomic);
++EXPORT_SYMBOL(kmap_atomic_to_page);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/mm/hypervisor.c 2007-08-27 14:01:52.000000000 -0400
+@@ -0,0 +1,451 @@
++/******************************************************************************
++ * mm/hypervisor.c
++ *
++ * Update page tables via the hypervisor.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/hypervisor.h>
++#include <xen/balloon.h>
++#include <xen/features.h>
++#include <xen/interface/memory.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++#include <asm/tlbflush.h>
++
++#ifdef CONFIG_X86_64
++#define pmd_val_ma(v) (v).pmd
++#else
++#ifdef CONFIG_X86_PAE
++# define pmd_val_ma(v) ((v).pmd)
++# define pud_val_ma(v) ((v).pgd.pgd)
++#else
++# define pmd_val_ma(v) ((v).pud.pgd.pgd)
++#endif
++#endif
++
++void xen_l1_entry_update(pte_t *ptr, pte_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = pte_val_ma(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = pmd_val_ma(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++
++#ifdef CONFIG_X86_PAE
++void xen_l3_entry_update(pud_t *ptr, pud_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = pud_val_ma(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++#endif
++
++#ifdef CONFIG_X86_64
++void xen_l3_entry_update(pud_t *ptr, pud_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = val.pud;
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = val.pgd;
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++#endif /* CONFIG_X86_64 */
++
++void xen_pt_switch(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_NEW_BASEPTR;
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_new_user_pt(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_NEW_USER_BASEPTR;
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_tlb_flush(void)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL(xen_tlb_flush);
++
++void xen_invlpg(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_INVLPG_LOCAL;
++ op.arg1.linear_addr = ptr & PAGE_MASK;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL(xen_invlpg);
++
++#ifdef CONFIG_SMP
++
++void xen_tlb_flush_all(void)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_TLB_FLUSH_ALL;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_tlb_flush_mask(cpumask_t *mask)
++{
++ struct mmuext_op op;
++ if ( cpus_empty(*mask) )
++ return;
++ op.cmd = MMUEXT_TLB_FLUSH_MULTI;
++ set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_invlpg_all(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_INVLPG_ALL;
++ op.arg1.linear_addr = ptr & PAGE_MASK;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
++{
++ struct mmuext_op op;
++ if ( cpus_empty(*mask) )
++ return;
++ op.cmd = MMUEXT_INVLPG_MULTI;
++ op.arg1.linear_addr = ptr & PAGE_MASK;
++ set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++#endif /* CONFIG_SMP */
++
++void xen_pgd_pin(unsigned long ptr)
++{
++ struct mmuext_op op;
++#ifdef CONFIG_X86_64
++ op.cmd = MMUEXT_PIN_L4_TABLE;
++#elif defined(CONFIG_X86_PAE)
++ op.cmd = MMUEXT_PIN_L3_TABLE;
++#else
++ op.cmd = MMUEXT_PIN_L2_TABLE;
++#endif
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_pgd_unpin(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_UNPIN_TABLE;
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_set_ldt(unsigned long ptr, unsigned long len)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_SET_LDT;
++ op.arg1.linear_addr = ptr;
++ op.arg2.nr_ents = len;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++/*
++ * Bitmap is indexed by page number. If bit is set, the page is part of a
++ * xen_create_contiguous_region() area of memory.
++ */
++unsigned long *contiguous_bitmap;
++
++static void contiguous_bitmap_set(
++ unsigned long first_page, unsigned long nr_pages)
++{
++ unsigned long start_off, end_off, curr_idx, end_idx;
++
++ curr_idx = first_page / BITS_PER_LONG;
++ start_off = first_page & (BITS_PER_LONG-1);
++ end_idx = (first_page + nr_pages) / BITS_PER_LONG;
++ end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
++
++ if (curr_idx == end_idx) {
++ contiguous_bitmap[curr_idx] |=
++ ((1UL<<end_off)-1) & -(1UL<<start_off);
++ } else {
++ contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
++ while ( ++curr_idx < end_idx )
++ contiguous_bitmap[curr_idx] = ~0UL;
++ contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
++ }
++}
++
++static void contiguous_bitmap_clear(
++ unsigned long first_page, unsigned long nr_pages)
++{
++ unsigned long start_off, end_off, curr_idx, end_idx;
++
++ curr_idx = first_page / BITS_PER_LONG;
++ start_off = first_page & (BITS_PER_LONG-1);
++ end_idx = (first_page + nr_pages) / BITS_PER_LONG;
++ end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
++
++ if (curr_idx == end_idx) {
++ contiguous_bitmap[curr_idx] &=
++ -(1UL<<end_off) | ((1UL<<start_off)-1);
++ } else {
++ contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
++ while ( ++curr_idx != end_idx )
++ contiguous_bitmap[curr_idx] = 0;
++ contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
++ }
++}
++
++/* Protected by balloon_lock. */
++#define MAX_CONTIG_ORDER 9 /* 2MB */
++static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
++static multicall_entry_t cr_mcl[1<<MAX_CONTIG_ORDER];
++
++/* Ensure multi-page extents are contiguous in machine memory. */
++int xen_create_contiguous_region(
++ unsigned long vstart, unsigned int order, unsigned int address_bits)
++{
++ unsigned long *in_frames = discontig_frames, out_frame;
++ unsigned long frame, i, flags;
++ long rc;
++ int success;
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .nr_extents = 1UL << order,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .nr_extents = 1,
++ .extent_order = order,
++ .address_bits = address_bits,
++ .domid = DOMID_SELF
++ }
++ };
++
++ /*
++ * Currently an auto-translated guest will not perform I/O, nor will
++ * it require PAE page directories below 4GB. Therefore any calls to
++ * this function are redundant and can be ignored.
++ */
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return 0;
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return -ENOMEM;
++
++ set_xen_guest_handle(exchange.in.extent_start, in_frames);
++ set_xen_guest_handle(exchange.out.extent_start, &out_frame);
++
++ scrub_pages(vstart, 1 << order);
++
++ balloon_lock(flags);
++
++ /* 1. Zap current PTEs, remembering MFNs. */
++ for (i = 0; i < (1UL<<order); i++) {
++ in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i);
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ __pte_ma(0), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
++ INVALID_P2M_ENTRY);
++ }
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ /* 2. Get a new contiguous memory extent. */
++ out_frame = __pa(vstart) >> PAGE_SHIFT;
++ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == (1UL << order));
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++ BUG_ON(success && (rc != 0));
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in) != (1UL << order))
++ BUG();
++ success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out) == 1);
++ if (!success) {
++ /* Couldn't get special memory: fall back to normal. */
++ for (i = 0; i < (1UL<<order); i++)
++ in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i;
++ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.in) != (1UL<<order))
++ BUG();
++ }
++ }
++#endif
++
++ /* 3. Map the new extent in place of old pages. */
++ for (i = 0; i < (1UL<<order); i++) {
++ frame = success ? (out_frame + i) : in_frames[i];
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ pfn_pte_ma(frame, PAGE_KERNEL), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
++ }
++
++ cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
++ ? UVMF_TLB_FLUSH|UVMF_ALL
++ : UVMF_INVLPG|UVMF_ALL;
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ if (success)
++ contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT,
++ 1UL << order);
++
++ balloon_unlock(flags);
++
++ return success ? 0 : -ENOMEM;
++}
++EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
++
++void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
++{
++ unsigned long *out_frames = discontig_frames, in_frame;
++ unsigned long frame, i, flags;
++ long rc;
++ int success;
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .nr_extents = 1,
++ .extent_order = order,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .nr_extents = 1UL << order,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ }
++ };
++
++ if (xen_feature(XENFEAT_auto_translated_physmap) ||
++ !test_bit(__pa(vstart) >> PAGE_SHIFT, contiguous_bitmap))
++ return;
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return;
++
++ set_xen_guest_handle(exchange.in.extent_start, &in_frame);
++ set_xen_guest_handle(exchange.out.extent_start, out_frames);
++
++ scrub_pages(vstart, 1 << order);
++
++ balloon_lock(flags);
++
++ contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
++
++ /* 1. Find start MFN of contiguous extent. */
++ in_frame = pfn_to_mfn(__pa(vstart) >> PAGE_SHIFT);
++
++ /* 2. Zap current PTEs. */
++ for (i = 0; i < (1UL<<order); i++) {
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ __pte_ma(0), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
++ INVALID_P2M_ENTRY);
++ out_frames[i] = (__pa(vstart) >> PAGE_SHIFT) + i;
++ }
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ /* 3. Do the exchange for non-contiguous MFNs. */
++ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == 1);
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++ BUG_ON(success && (rc != 0));
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in) != 1)
++ BUG();
++ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out) != (1UL << order))
++ BUG();
++ success = 1;
++ }
++#endif
++
++ /* 4. Map new pages in place of old pages. */
++ for (i = 0; i < (1UL<<order); i++) {
++ frame = success ? out_frames[i] : (in_frame + i);
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ pfn_pte_ma(frame, PAGE_KERNEL), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
++ }
++
++ cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
++ ? UVMF_TLB_FLUSH|UVMF_ALL
++ : UVMF_INVLPG|UVMF_ALL;
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ balloon_unlock(flags);
++}
++EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
++
++#ifdef __i386__
++int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
++{
++ __u32 *lp = (__u32 *)((char *)ldt + entry * 8);
++ maddr_t mach_lp = arbitrary_virt_to_machine(lp);
++ return HYPERVISOR_update_descriptor(
++ mach_lp, (u64)entry_a | ((u64)entry_b<<32));
++}
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/mm/init-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,850 @@
++/*
++ * linux/arch/i386/mm/init.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ */
++
++#include <linux/module.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/hugetlb.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/poison.h>
++#include <linux/bootmem.h>
++#include <linux/slab.h>
++#include <linux/proc_fs.h>
++#include <linux/efi.h>
++#include <linux/memory_hotplug.h>
++#include <linux/initrd.h>
++#include <linux/cpumask.h>
++#include <linux/dma-mapping.h>
++#include <linux/scatterlist.h>
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/dma.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/apic.h>
++#include <asm/tlb.h>
++#include <asm/tlbflush.h>
++#include <asm/sections.h>
++#include <asm/hypervisor.h>
++#include <asm/swiotlb.h>
++
++extern unsigned long *contiguous_bitmap;
++
++unsigned int __VMALLOC_RESERVE = 128 << 20;
++
++DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
++unsigned long highstart_pfn, highend_pfn;
++
++static int noinline do_test_wp_bit(void);
++
++/*
++ * Creates a middle page table and puts a pointer to it in the
++ * given global directory entry. This only returns the gd entry
++ * in non-PAE compilation mode, since the middle layer is folded.
++ */
++static pmd_t * __init one_md_table_init(pgd_t *pgd)
++{
++ pud_t *pud;
++ pmd_t *pmd_table;
++
++#ifdef CONFIG_X86_PAE
++ pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++ make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
++ set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
++ pud = pud_offset(pgd, 0);
++ if (pmd_table != pmd_offset(pud, 0))
++ BUG();
++#else
++ pud = pud_offset(pgd, 0);
++ pmd_table = pmd_offset(pud, 0);
++#endif
++
++ return pmd_table;
++}
++
++/*
++ * Create a page table and place a pointer to it in a middle page
++ * directory entry.
++ */
++static pte_t * __init one_page_table_init(pmd_t *pmd)
++{
++ if (pmd_none(*pmd)) {
++ pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++ make_lowmem_page_readonly(page_table,
++ XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
++ if (page_table != pte_offset_kernel(pmd, 0))
++ BUG();
++
++ return page_table;
++ }
++
++ return pte_offset_kernel(pmd, 0);
++}
++
++/*
++ * This function initializes a certain range of kernel virtual memory
++ * with new bootmem page tables, everywhere page tables are missing in
++ * the given range.
++ */
++
++/*
++ * NOTE: The pagetables are allocated contiguously in physical space
++ * so we can cache the place of the first one and move around without
++ * checking the pgd every time.
++ */
++static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ int pgd_idx, pmd_idx;
++ unsigned long vaddr;
++
++ vaddr = start;
++ pgd_idx = pgd_index(vaddr);
++ pmd_idx = pmd_index(vaddr);
++ pgd = pgd_base + pgd_idx;
++
++ for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
++ if (pgd_none(*pgd))
++ one_md_table_init(pgd);
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
++ if (vaddr < hypervisor_virt_start && pmd_none(*pmd))
++ one_page_table_init(pmd);
++
++ vaddr += PMD_SIZE;
++ }
++ pmd_idx = 0;
++ }
++}
++
++static inline int is_kernel_text(unsigned long addr)
++{
++ if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
++ return 1;
++ return 0;
++}
++
++/*
++ * This maps the physical memory to kernel virtual address space, a total
++ * of max_low_pfn pages, by creating page tables starting from address
++ * PAGE_OFFSET.
++ */
++static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
++{
++ unsigned long pfn;
++ pgd_t *pgd;
++ pmd_t *pmd;
++ pte_t *pte;
++ int pgd_idx, pmd_idx, pte_ofs;
++
++ unsigned long max_ram_pfn = xen_start_info->nr_pages;
++ if (max_ram_pfn > max_low_pfn)
++ max_ram_pfn = max_low_pfn;
++
++ pgd_idx = pgd_index(PAGE_OFFSET);
++ pgd = pgd_base + pgd_idx;
++ pfn = 0;
++ pmd_idx = pmd_index(PAGE_OFFSET);
++ pte_ofs = pte_index(PAGE_OFFSET);
++
++ for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
++#ifdef CONFIG_XEN
++ /*
++ * Native Linux doesn't have PAE paging enabled yet at this
++ * point. When running as a Xen domain we are already in
++ * PAE mode, so we can't simply hook in an empty
++ * pmd. That would kill the mappings we are currently
++ * using ...
++ */
++ pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
++#else
++ pmd = one_md_table_init(pgd);
++#endif
++ if (pfn >= max_low_pfn)
++ continue;
++ pmd += pmd_idx;
++ for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
++ unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
++ if (address >= hypervisor_virt_start)
++ continue;
++
++ /* Map with big pages if possible, otherwise create normal page tables. */
++ if (cpu_has_pse) {
++ unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
++
++ if (is_kernel_text(address) || is_kernel_text(address2))
++ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
++ else
++ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
++ pfn += PTRS_PER_PTE;
++ } else {
++ pte = one_page_table_init(pmd);
++
++ pte += pte_ofs;
++ for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
++ /* XEN: Only map initial RAM allocation. */
++ if ((pfn >= max_ram_pfn) || pte_present(*pte))
++ continue;
++ if (is_kernel_text(address))
++ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
++ else
++ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
++ }
++ pte_ofs = 0;
++ }
++ }
++ pmd_idx = 0;
++ }
++}
++
++#ifndef CONFIG_XEN
++
++static inline int page_kills_ppro(unsigned long pagenr)
++{
++ if (pagenr >= 0x70000 && pagenr <= 0x7003F)
++ return 1;
++ return 0;
++}
++
++#else
++
++#define page_kills_ppro(p) 0
++
++#endif
++
++extern int is_available_memory(efi_memory_desc_t *);
++
++int page_is_ram(unsigned long pagenr)
++{
++ int i;
++ unsigned long addr, end;
++
++ if (efi_enabled) {
++ efi_memory_desc_t *md;
++ void *p;
++
++ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
++ md = p;
++ if (!is_available_memory(md))
++ continue;
++ addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
++ end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
++
++ if ((pagenr >= addr) && (pagenr < end))
++ return 1;
++ }
++ return 0;
++ }
++
++ for (i = 0; i < e820.nr_map; i++) {
++
++ if (e820.map[i].type != E820_RAM) /* not usable memory */
++ continue;
++ /*
++ * !!!FIXME!!! Some BIOSen report areas as RAM that
++ * are not. Notably the 640->1Mb area. We need a sanity
++ * check here.
++ */
++ addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
++ end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
++ if ((pagenr >= addr) && (pagenr < end))
++ return 1;
++ }
++ return 0;
++}
++
++#ifdef CONFIG_HIGHMEM
++pte_t *kmap_pte;
++pgprot_t kmap_prot;
++
++#define kmap_get_fixmap_pte(vaddr) \
++ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
++
++static void __init kmap_init(void)
++{
++ unsigned long kmap_vstart;
++
++ /* cache the first kmap pte */
++ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
++ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
++
++ kmap_prot = PAGE_KERNEL;
++}
++
++static void __init permanent_kmaps_init(pgd_t *pgd_base)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++ unsigned long vaddr;
++
++ vaddr = PKMAP_BASE;
++ page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
++
++ pgd = swapper_pg_dir + pgd_index(vaddr);
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++ pte = pte_offset_kernel(pmd, vaddr);
++ pkmap_page_table = pte;
++}
++
++static void __meminit free_new_highpage(struct page *page, int pfn)
++{
++ init_page_count(page);
++ if (pfn < xen_start_info->nr_pages)
++ __free_page(page);
++ totalhigh_pages++;
++}
++
++void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
++{
++ if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
++ ClearPageReserved(page);
++ free_new_highpage(page, pfn);
++ } else
++ SetPageReserved(page);
++}
++
++static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
++{
++ free_new_highpage(page, pfn);
++ totalram_pages++;
++#ifdef CONFIG_FLATMEM
++ max_mapnr = max(pfn, max_mapnr);
++#endif
++ num_physpages++;
++ return 0;
++}
++
++/*
++ * Not currently handling the NUMA case.
++ * Assuming a single node, and that all memory
++ * added dynamically and onlined here is in
++ * HIGHMEM.
++ */
++void online_page(struct page *page)
++{
++ ClearPageReserved(page);
++ add_one_highpage_hotplug(page, page_to_pfn(page));
++}
++
++
++#ifdef CONFIG_NUMA
++extern void set_highmem_pages_init(int);
++#else
++static void __init set_highmem_pages_init(int bad_ppro)
++{
++ int pfn;
++ for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
++ add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
++ totalram_pages += totalhigh_pages;
++}
++#endif /* CONFIG_NUMA */
++
++#else
++#define kmap_init() do { } while (0)
++#define permanent_kmaps_init(pgd_base) do { } while (0)
++#define set_highmem_pages_init(bad_ppro) do { } while (0)
++#endif /* CONFIG_HIGHMEM */
++
++unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
++EXPORT_SYMBOL(__PAGE_KERNEL);
++unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
++
++#ifdef CONFIG_NUMA
++extern void __init remap_numa_kva(void);
++#else
++#define remap_numa_kva() do {} while (0)
++#endif
++
++pgd_t *swapper_pg_dir;
++
++static void __init pagetable_init (void)
++{
++ unsigned long vaddr;
++ pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;
++
++ swapper_pg_dir = pgd_base;
++ init_mm.pgd = pgd_base;
++
++ /* Enable PSE if available */
++ if (cpu_has_pse) {
++ set_in_cr4(X86_CR4_PSE);
++ }
++
++ /* Enable PGE if available */
++ if (cpu_has_pge) {
++ set_in_cr4(X86_CR4_PGE);
++ __PAGE_KERNEL |= _PAGE_GLOBAL;
++ __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
++ }
++
++ kernel_physical_mapping_init(pgd_base);
++ remap_numa_kva();
++
++ /*
++ * Fixed mappings, only the page table structure has to be
++ * created - mappings will be set by set_fixmap():
++ */
++ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
++ page_table_range_init(vaddr, hypervisor_virt_start, pgd_base);
++
++ permanent_kmaps_init(pgd_base);
++}
++
++#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
++/*
++ * Swap suspend & friends need this for resume because things like the intel-agp
++ * driver might have split up a kernel 4MB mapping.
++ */
++char __nosavedata swsusp_pg_dir[PAGE_SIZE]
++ __attribute__ ((aligned (PAGE_SIZE)));
++
++static inline void save_pg_dir(void)
++{
++ memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
++}
++#else
++static inline void save_pg_dir(void)
++{
++}
++#endif
++
++void zap_low_mappings (void)
++{
++ int i;
++
++ save_pg_dir();
++
++ /*
++ * Zap initial low-memory mappings.
++ *
++ * Note that "pgd_clear()" doesn't do it for
++ * us, because pgd_clear() is a no-op on i386.
++ */
++ for (i = 0; i < USER_PTRS_PER_PGD; i++)
++#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
++ set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
++#else
++ set_pgd(swapper_pg_dir+i, __pgd(0));
++#endif
++ flush_tlb_all();
++}
++
++static int disable_nx __initdata = 0;
++u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
++EXPORT_SYMBOL(__supported_pte_mask);
++
++/*
++ * noexec = on|off
++ *
++ * Control non executable mappings.
++ *
++ * on Enable
++ * off Disable
++ */
++void __init noexec_setup(const char *str)
++{
++ if (!strncmp(str, "on",2) && cpu_has_nx) {
++ __supported_pte_mask |= _PAGE_NX;
++ disable_nx = 0;
++ } else if (!strncmp(str,"off",3)) {
++ disable_nx = 1;
++ __supported_pte_mask &= ~_PAGE_NX;
++ }
++}
++
++int nx_enabled = 0;
++#ifdef CONFIG_X86_PAE
++
++static void __init set_nx(void)
++{
++ unsigned int v[4], l, h;
++
++ if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
++ cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
++ if ((v[3] & (1 << 20)) && !disable_nx) {
++ rdmsr(MSR_EFER, l, h);
++ l |= EFER_NX;
++ wrmsr(MSR_EFER, l, h);
++ nx_enabled = 1;
++ __supported_pte_mask |= _PAGE_NX;
++ }
++ }
++}
++
++/*
++ * Enables/disables executability of a given kernel page and
++ * returns the previous setting.
++ */
++int __init set_kernel_exec(unsigned long vaddr, int enable)
++{
++ pte_t *pte;
++ int ret = 1;
++
++ if (!nx_enabled)
++ goto out;
++
++ pte = lookup_address(vaddr);
++ BUG_ON(!pte);
++
++ if (!pte_exec_kernel(*pte))
++ ret = 0;
++
++ if (enable)
++ pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
++ else
++ pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
++ __flush_tlb_all();
++out:
++ return ret;
++}
++
++#endif
++
++/*
++ * paging_init() sets up the page tables - note that the first 8MB are
++ * already mapped by head.S.
++ *
++ * This routine also unmaps the page at virtual kernel address 0, so
++ * that we can trap those pesky NULL-reference errors in the kernel.
++ */
++void __init paging_init(void)
++{
++ int i;
++
++#ifdef CONFIG_X86_PAE
++ set_nx();
++ if (nx_enabled)
++ printk("NX (Execute Disable) protection: active\n");
++#endif
++
++ pagetable_init();
++
++#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
++ /*
++ * We will bail out later - printk doesn't work right now so
++ * the user would just see a hanging kernel.
++ * When running as a Xen domain we are already in PAE mode
++ * at this point.
++ */
++ if (cpu_has_pae)
++ set_in_cr4(X86_CR4_PAE);
++#endif
++ __flush_tlb_all();
++
++ kmap_init();
++
++ /* Switch to the real shared_info page, and clear the
++ * dummy page. */
++ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
++ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++ memset(empty_zero_page, 0, sizeof(empty_zero_page));
++
++ /* Setup mapping of lower 1st MB */
++ for (i = 0; i < NR_FIX_ISAMAPS; i++)
++ if (is_initial_xendomain())
++ set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
++ else
++ __set_fixmap(FIX_ISAMAP_BEGIN - i,
++ virt_to_machine(empty_zero_page),
++ PAGE_KERNEL_RO);
++}
++
++/*
++ * Test if the WP bit works in supervisor mode. It isn't supported on 386's
++ * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
++ * used to involve black magic jumps to work around some nasty CPU bugs,
++ * but fortunately the switch to using exceptions got rid of all that.
++ */
++
++static void __init test_wp_bit(void)
++{
++ printk("Checking if this processor honours the WP bit even in supervisor mode... ");
++
++ /* Any page-aligned address will do, the test is non-destructive */
++ __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
++ boot_cpu_data.wp_works_ok = do_test_wp_bit();
++ clear_fixmap(FIX_WP_TEST);
++
++ if (!boot_cpu_data.wp_works_ok) {
++ printk("No.\n");
++#ifdef CONFIG_X86_WP_WORKS_OK
++ panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
++#endif
++ } else {
++ printk("Ok.\n");
++ }
++}
++
++static void __init set_max_mapnr_init(void)
++{
++#ifdef CONFIG_HIGHMEM
++ num_physpages = highend_pfn;
++#else
++ num_physpages = max_low_pfn;
++#endif
++#ifdef CONFIG_FLATMEM
++ max_mapnr = num_physpages;
++#endif
++}
++
++static struct kcore_list kcore_mem, kcore_vmalloc;
++
++void __init mem_init(void)
++{
++ extern int ppro_with_ram_bug(void);
++ int codesize, reservedpages, datasize, initsize;
++ int tmp;
++ int bad_ppro;
++ unsigned long pfn;
++
++ contiguous_bitmap = alloc_bootmem_low_pages(
++ (max_low_pfn + 2*BITS_PER_LONG) >> 3);
++ BUG_ON(!contiguous_bitmap);
++ memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
++
++#if defined(CONFIG_SWIOTLB)
++ swiotlb_init();
++#endif
++
++#ifdef CONFIG_FLATMEM
++ if (!mem_map)
++ BUG();
++#endif
++
++ bad_ppro = ppro_with_ram_bug();
++
++#ifdef CONFIG_HIGHMEM
++ /* check that fixmap and pkmap do not overlap */
++ if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
++ printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
++ printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
++ PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
++ BUG();
++ }
++#endif
++
++ set_max_mapnr_init();
++
++#ifdef CONFIG_HIGHMEM
++ high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
++#else
++ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
++#endif
++ printk("vmalloc area: %lx-%lx, maxmem %lx\n",
++ VMALLOC_START,VMALLOC_END,MAXMEM);
++ BUG_ON(VMALLOC_START > VMALLOC_END);
++
++ /* this will put all low memory onto the freelists */
++ totalram_pages += free_all_bootmem();
++ /* XEN: init and count low-mem pages outside initial allocation. */
++ for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
++ ClearPageReserved(pfn_to_page(pfn));
++ init_page_count(pfn_to_page(pfn));
++ totalram_pages++;
++ }
++
++ reservedpages = 0;
++ for (tmp = 0; tmp < max_low_pfn; tmp++)
++ /*
++ * Only count reserved RAM pages
++ */
++ if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
++ reservedpages++;
++
++ set_highmem_pages_init(bad_ppro);
++
++ codesize = (unsigned long) &_etext - (unsigned long) &_text;
++ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
++ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
++
++ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
++ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
++ VMALLOC_END-VMALLOC_START);
++
++ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
++ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
++ num_physpages << (PAGE_SHIFT-10),
++ codesize >> 10,
++ reservedpages << (PAGE_SHIFT-10),
++ datasize >> 10,
++ initsize >> 10,
++ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
++ );
++
++#ifdef CONFIG_X86_PAE
++ if (!cpu_has_pae)
++ panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
++#endif
++ if (boot_cpu_data.wp_works_ok < 0)
++ test_wp_bit();
++
++ /*
++ * Subtle. SMP is doing its boot stuff late (because it has to
++ * fork idle threads) - but it also needs low mappings for the
++ * protected-mode entry to work. We zap these entries only after
++ * the WP-bit has been tested.
++ */
++#ifndef CONFIG_SMP
++ zap_low_mappings();
++#endif
++
++ set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
++}
++
++/*
++ * This is for the non-NUMA, single-node SMP system case.
++ * Specifically, in the case of x86, we will always add
++ * memory to highmem for now.
++ */
++#ifdef CONFIG_MEMORY_HOTPLUG
++#ifndef CONFIG_NEED_MULTIPLE_NODES
++int arch_add_memory(int nid, u64 start, u64 size)
++{
++ struct pglist_data *pgdata = &contig_page_data;
++ struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
++ unsigned long start_pfn = start >> PAGE_SHIFT;
++ unsigned long nr_pages = size >> PAGE_SHIFT;
++
++ return __add_pages(zone, start_pfn, nr_pages);
++}
++
++int remove_memory(u64 start, u64 size)
++{
++ return -EINVAL;
++}
++#endif
++#endif
++
++kmem_cache_t *pgd_cache;
++kmem_cache_t *pmd_cache;
++
++void __init pgtable_cache_init(void)
++{
++ if (PTRS_PER_PMD > 1) {
++ pmd_cache = kmem_cache_create("pmd",
++ PTRS_PER_PMD*sizeof(pmd_t),
++ PTRS_PER_PMD*sizeof(pmd_t),
++ 0,
++ pmd_ctor,
++ NULL);
++ if (!pmd_cache)
++ panic("pgtable_cache_init(): cannot create pmd cache");
++ }
++ pgd_cache = kmem_cache_create("pgd",
++#ifndef CONFIG_XEN
++ PTRS_PER_PGD*sizeof(pgd_t),
++ PTRS_PER_PGD*sizeof(pgd_t),
++#else
++ PAGE_SIZE,
++ PAGE_SIZE,
++#endif
++ 0,
++ pgd_ctor,
++ PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
++ if (!pgd_cache)
++ panic("pgtable_cache_init(): Cannot create pgd cache");
++}
++
++/*
++ * This function cannot be __init, since exceptions don't work in that
++ * section. Put this after the callers, so that it cannot be inlined.
++ */
++static int noinline do_test_wp_bit(void)
++{
++ char tmp_reg;
++ int flag;
++
++ __asm__ __volatile__(
++ " movb %0,%1 \n"
++ "1: movb %1,%0 \n"
++ " xorl %2,%2 \n"
++ "2: \n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4 \n"
++ " .long 1b,2b \n"
++ ".previous \n"
++ :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
++ "=q" (tmp_reg),
++ "=r" (flag)
++ :"2" (1)
++ :"memory");
++
++ return flag;
++}
++
++#ifdef CONFIG_DEBUG_RODATA
++
++void mark_rodata_ro(void)
++{
++ unsigned long addr = (unsigned long)__start_rodata;
++
++ for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
++ change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
++
++ printk("Write protecting the kernel read-only data: %uk\n",
++ (__end_rodata - __start_rodata) >> 10);
++
++ /*
++ * change_page_attr() requires a global_flush_tlb() call after it.
++ * We do this after the printk so that if something went wrong in the
++ * change, the printk gets out at least to give a better debug hint
++ * of who is the culprit.
++ */
++ global_flush_tlb();
++}
++#endif
++
++void free_init_pages(char *what, unsigned long begin, unsigned long end)
++{
++ unsigned long addr;
++
++ for (addr = begin; addr < end; addr += PAGE_SIZE) {
++ ClearPageReserved(virt_to_page(addr));
++ init_page_count(virt_to_page(addr));
++ memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
++ free_page(addr);
++ totalram_pages++;
++ }
++ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
++}
++
++void free_initmem(void)
++{
++ free_init_pages("unused kernel memory",
++ (unsigned long)(&__init_begin),
++ (unsigned long)(&__init_end));
++}
++
++#ifdef CONFIG_BLK_DEV_INITRD
++void free_initrd_mem(unsigned long start, unsigned long end)
++{
++ free_init_pages("initrd memory", start, end);
++}
++#endif
++
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/mm/ioremap-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,443 @@
++/*
++ * arch/i386/mm/ioremap.c
++ *
++ * Re-map IO memory to kernel address space so that we can access it.
++ * This is needed for high PCI addresses that aren't mapped in the
++ * 640k-1MB IO memory area on PC's
++ *
++ * (C) Copyright 1995 1996 Linus Torvalds
++ */
++
++#include <linux/vmalloc.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <asm/io.h>
++#include <asm/fixmap.h>
++#include <asm/cacheflush.h>
++#include <asm/tlbflush.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++
++#define ISA_START_ADDRESS 0x0
++#define ISA_END_ADDRESS 0x100000
++
++static int direct_remap_area_pte_fn(pte_t *pte,
++ struct page *pmd_page,
++ unsigned long address,
++ void *data)
++{
++ mmu_update_t **v = (mmu_update_t **)data;
++
++ BUG_ON(!pte_none(*pte));
++
++ (*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pmd_page)) <<
++ PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
++ (*v)++;
++
++ return 0;
++}
++
++static int __direct_remap_pfn_range(struct mm_struct *mm,
++ unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid)
++{
++ int rc;
++ unsigned long i, start_address;
++ mmu_update_t *u, *v, *w;
++
++ u = v = w = (mmu_update_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
++ if (u == NULL)
++ return -ENOMEM;
++
++ start_address = address;
++
++ flush_cache_all();
++
++ for (i = 0; i < size; i += PAGE_SIZE) {
++ if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) {
++ /* Flush a full batch after filling in the PTE ptrs. */
++ rc = apply_to_page_range(mm, start_address,
++ address - start_address,
++ direct_remap_area_pte_fn, &w);
++ if (rc)
++ goto out;
++ rc = -EFAULT;
++ if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
++ goto out;
++ v = w = u;
++ start_address = address;
++ }
++
++ /*
++ * Fill in the machine address: PTE ptr is done later by
++ * __direct_remap_area_pages().
++ */
++ v->val = pte_val_ma(pfn_pte_ma(mfn, prot));
++
++ mfn++;
++ address += PAGE_SIZE;
++ v++;
++ }
++
++ if (v != u) {
++ /* Final batch. */
++ rc = apply_to_page_range(mm, start_address,
++ address - start_address,
++ direct_remap_area_pte_fn, &w);
++ if (rc)
++ goto out;
++ rc = -EFAULT;
++ if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
++ goto out;
++ }
++
++ rc = 0;
++
++ out:
++ flush_tlb_all();
++
++ free_page((unsigned long)u);
++
++ return rc;
++}
++
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++ unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return remap_pfn_range(vma, address, mfn, size, prot);
++
++ if (domid == DOMID_SELF)
++ return -EINVAL;
++
++ vma->vm_flags |= VM_IO | VM_RESERVED;
++
++ vma->vm_mm->context.has_foreign_mappings = 1;
++
++ return __direct_remap_pfn_range(
++ vma->vm_mm, address, mfn, size, prot, domid);
++}
++EXPORT_SYMBOL(direct_remap_pfn_range);
++
++int direct_kernel_remap_pfn_range(unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid)
++{
++ return __direct_remap_pfn_range(
++ &init_mm, address, mfn, size, prot, domid);
++}
++EXPORT_SYMBOL(direct_kernel_remap_pfn_range);
++
++static int lookup_pte_fn(
++ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++{
++ uint64_t *ptep = (uint64_t *)data;
++ if (ptep)
++ *ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pmd_page)) <<
++ PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
++ return 0;
++}
++
++int create_lookup_pte_addr(struct mm_struct *mm,
++ unsigned long address,
++ uint64_t *ptep)
++{
++ return apply_to_page_range(mm, address, PAGE_SIZE,
++ lookup_pte_fn, ptep);
++}
++
++EXPORT_SYMBOL(create_lookup_pte_addr);
++
++static int noop_fn(
++ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++{
++ return 0;
++}
++
++int touch_pte_range(struct mm_struct *mm,
++ unsigned long address,
++ unsigned long size)
++{
++ return apply_to_page_range(mm, address, size, noop_fn, NULL);
++}
++
++EXPORT_SYMBOL(touch_pte_range);
++
++/*
++ * Does @address reside within a non-highmem page that is local to this virtual
++ * machine (i.e., not an I/O page, nor a memory page belonging to another VM).
++ * See the comment that accompanies mfn_to_local_pfn() in page.h to understand
++ * why this works.
++ */
++static inline int is_local_lowmem(unsigned long address)
++{
++ extern unsigned long max_low_pfn;
++ return (mfn_to_local_pfn(address >> PAGE_SHIFT) < max_low_pfn);
++}
++
++/*
++ * Generic mapping function (not visible outside):
++ */
++
++/*
++ * Remap an arbitrary physical address space into the kernel virtual
++ * address space. Needed when the kernel wants to access high addresses
++ * directly.
++ *
++ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
++ * have to convert them into an offset in a page-aligned mapping, but the
++ * caller shouldn't need to know that small detail.
++ */
++void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
++{
++ void __iomem * addr;
++ struct vm_struct * area;
++ unsigned long offset, last_addr;
++ domid_t domid = DOMID_IO;
++
++ /* Don't allow wraparound or zero size */
++ last_addr = phys_addr + size - 1;
++ if (!size || last_addr < phys_addr)
++ return NULL;
++
++ /*
++ * Don't remap the low PCI/ISA area, it's always mapped..
++ */
++ if (is_initial_xendomain() &&
++ phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
++ return (void __iomem *) isa_bus_to_virt(phys_addr);
++
++ /*
++ * Don't allow anybody to remap normal RAM that we're using..
++ */
++ if (is_local_lowmem(phys_addr)) {
++ char *t_addr, *t_end;
++ struct page *page;
++
++ t_addr = bus_to_virt(phys_addr);
++ t_end = t_addr + (size - 1);
++
++ for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
++ if(!PageReserved(page))
++ return NULL;
++
++ domid = DOMID_SELF;
++ }
++
++ /*
++ * Mappings have to be page-aligned
++ */
++ offset = phys_addr & ~PAGE_MASK;
++ phys_addr &= PAGE_MASK;
++ size = PAGE_ALIGN(last_addr+1) - phys_addr;
++
++ /*
++ * Ok, go for it..
++ */
++ area = get_vm_area(size, VM_IOREMAP | (flags << 20));
++ if (!area)
++ return NULL;
++ area->phys_addr = phys_addr;
++ addr = (void __iomem *) area->addr;
++ flags |= _KERNPG_TABLE;
++ if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
++ phys_addr>>PAGE_SHIFT,
++ size, __pgprot(flags), domid)) {
++ vunmap((void __force *) addr);
++ return NULL;
++ }
++ return (void __iomem *) (offset + (char __iomem *)addr);
++}
++EXPORT_SYMBOL(__ioremap);
++
++/**
++ * ioremap_nocache - map bus memory into CPU space
++ * @offset: bus address of the memory
++ * @size: size of the resource to map
++ *
++ * ioremap_nocache performs a platform specific sequence of operations to
++ * make bus memory CPU accessible via the readb/readw/readl/writeb/
++ * writew/writel functions and the other mmio helpers. The returned
++ * address is not guaranteed to be usable directly as a virtual
++ * address.
++ *
++ * This version of ioremap ensures that the memory is marked uncachable
++ * on the CPU as well as honouring existing caching rules from things like
++ * the PCI bus. Note that there are other caches and buffers on many
++ * busses. In particular driver authors should read up on PCI writes
++ *
++ * It's useful if some control registers are in such an area and
++ * write combining or read caching is not desirable:
++ *
++ * Must be freed with iounmap.
++ */
++
++void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
++{
++ unsigned long last_addr;
++ void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
++ if (!p)
++ return p;
++
++ /* Guaranteed to be > phys_addr, as per __ioremap() */
++ last_addr = phys_addr + size - 1;
++
++ if (is_local_lowmem(last_addr)) {
++ struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
++ unsigned long npages;
++
++ phys_addr &= PAGE_MASK;
++
++ /* This might overflow and become zero.. */
++ last_addr = PAGE_ALIGN(last_addr);
++
++ /* .. but that's ok, because modulo-2**n arithmetic will make
++ * the page-aligned "last - first" come out right.
++ */
++ npages = (last_addr - phys_addr) >> PAGE_SHIFT;
++
++ if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
++ iounmap(p);
++ p = NULL;
++ }
++ global_flush_tlb();
++ }
++
++ return p;
++}
++EXPORT_SYMBOL(ioremap_nocache);
++
++/**
++ * iounmap - Free a IO remapping
++ * @addr: virtual address from ioremap_*
++ *
++ * Caller must ensure there is only one unmapping for the same pointer.
++ */
++void iounmap(volatile void __iomem *addr)
++{
++ struct vm_struct *p, *o;
++
++ if ((void __force *)addr <= high_memory)
++ return;
++
++ /*
++ * __ioremap special-cases the PCI/ISA range by not instantiating a
++ * vm_area and by simply returning an address into the kernel mapping
++ * of ISA space. So handle that here.
++ */
++ if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
++ return;
++
++ addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
++
++ /* Use the vm area unlocked, assuming the caller
++ ensures there isn't another iounmap for the same address
++ in parallel. Reuse of the virtual address is prevented by
++ leaving it in the global lists until we're done with it.
++ cpa takes care of the direct mappings. */
++ read_lock(&vmlist_lock);
++ for (p = vmlist; p; p = p->next) {
++ if (p->addr == addr)
++ break;
++ }
++ read_unlock(&vmlist_lock);
++
++ if (!p) {
++ printk("iounmap: bad address %p\n", addr);
++ dump_stack();
++ return;
++ }
++
++ /* Reset the direct mapping. Can block */
++ if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
++ /* p->size includes the guard page, but cpa doesn't like that */
++ change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
++ (p->size - PAGE_SIZE) >> PAGE_SHIFT,
++ PAGE_KERNEL);
++ global_flush_tlb();
++ }
++
++ /* Finally remove it */
++ o = remove_vm_area((void *)addr);
++ BUG_ON(p != o || o == NULL);
++ kfree(p);
++}
++EXPORT_SYMBOL(iounmap);
++
++void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
++{
++ unsigned long offset, last_addr;
++ unsigned int nrpages;
++ enum fixed_addresses idx;
++
++ /* Don't allow wraparound or zero size */
++ last_addr = phys_addr + size - 1;
++ if (!size || last_addr < phys_addr)
++ return NULL;
++
++ /*
++ * Don't remap the low PCI/ISA area, it's always mapped..
++ */
++ if (is_initial_xendomain() &&
++ phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
++ return isa_bus_to_virt(phys_addr);
++
++ /*
++ * Mappings have to be page-aligned
++ */
++ offset = phys_addr & ~PAGE_MASK;
++ phys_addr &= PAGE_MASK;
++ size = PAGE_ALIGN(last_addr) - phys_addr;
++
++ /*
++ * Mappings have to fit in the FIX_BTMAP area.
++ */
++ nrpages = size >> PAGE_SHIFT;
++ if (nrpages > NR_FIX_BTMAPS)
++ return NULL;
++
++ /*
++ * Ok, go for it..
++ */
++ idx = FIX_BTMAP_BEGIN;
++ while (nrpages > 0) {
++ set_fixmap(idx, phys_addr);
++ phys_addr += PAGE_SIZE;
++ --idx;
++ --nrpages;
++ }
++ return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
++}
++
++void __init bt_iounmap(void *addr, unsigned long size)
++{
++ unsigned long virt_addr;
++ unsigned long offset;
++ unsigned int nrpages;
++ enum fixed_addresses idx;
++
++ virt_addr = (unsigned long)addr;
++ if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
++ return;
++ if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
++ return;
++ offset = virt_addr & ~PAGE_MASK;
++ nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
++
++ idx = FIX_BTMAP_BEGIN;
++ while (nrpages > 0) {
++ clear_fixmap(idx);
++ --idx;
++ --nrpages;
++ }
++}
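Editorial aside (not part of the patch): for a caller's-eye view of what this file exports, the xenoprof hunk later in this patch maps a hypervisor-provided buffer by pairing alloc_vm_area() with direct_kernel_remap_pfn_range(). A condensed, hedged sketch of that calling pattern, assuming the headers added elsewhere by this patch set; map_machine_frames is a hypothetical name and error handling is trimmed.

#include <linux/vmalloc.h>
#include <xen/driver_util.h>
#include <xen/interface/xen.h>
#include <asm/pgtable.h>

/* Sketch only: map `nr` machine frames starting at `mfn` into kernel
 * virtual address space, the way xenoprof_arch_map_shared_buffer()
 * below does for its shared buffer.  Returns the mapping or NULL. */
static void *map_machine_frames(unsigned long mfn, unsigned int nr)
{
	struct vm_struct *area = alloc_vm_area(nr * PAGE_SIZE);

	if (!area)
		return NULL;

	if (direct_kernel_remap_pfn_range((unsigned long)area->addr, mfn,
					  nr * PAGE_SIZE,
					  __pgprot(_KERNPG_TABLE),
					  DOMID_SELF)) {
		vunmap(area->addr);	/* tears the vm area down again */
		return NULL;
	}
	return area->addr;
}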
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/mm/pgtable-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,727 @@
++/*
++ * linux/arch/i386/mm/pgtable.c
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/highmem.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/spinlock.h>
++#include <linux/module.h>
++
++#include <asm/system.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/tlb.h>
++#include <asm/tlbflush.h>
++#include <asm/io.h>
++#include <asm/mmu_context.h>
++
++#include <xen/features.h>
++#include <asm/hypervisor.h>
++
++static void pgd_test_and_unpin(pgd_t *pgd);
++
++void show_mem(void)
++{
++ int total = 0, reserved = 0;
++ int shared = 0, cached = 0;
++ int highmem = 0;
++ struct page *page;
++ pg_data_t *pgdat;
++ unsigned long i;
++ unsigned long flags;
++
++ printk(KERN_INFO "Mem-info:\n");
++ show_free_areas();
++ printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
++ for_each_online_pgdat(pgdat) {
++ pgdat_resize_lock(pgdat, &flags);
++ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
++ page = pgdat_page_nr(pgdat, i);
++ total++;
++ if (PageHighMem(page))
++ highmem++;
++ if (PageReserved(page))
++ reserved++;
++ else if (PageSwapCache(page))
++ cached++;
++ else if (page_count(page))
++ shared += page_count(page) - 1;
++ }
++ pgdat_resize_unlock(pgdat, &flags);
++ }
++ printk(KERN_INFO "%d pages of RAM\n", total);
++ printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
++ printk(KERN_INFO "%d reserved pages\n", reserved);
++ printk(KERN_INFO "%d pages shared\n", shared);
++ printk(KERN_INFO "%d pages swap cached\n", cached);
++
++ printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
++ printk(KERN_INFO "%lu pages writeback\n",
++ global_page_state(NR_WRITEBACK));
++ printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
++ printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));
++ printk(KERN_INFO "%lu pages pagetables\n",
++ global_page_state(NR_PAGETABLE));
++}
++
++/*
++ * Associate a virtual page frame with a given physical page frame
++ * and protection flags for that frame.
++ */
++static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ pgd = swapper_pg_dir + pgd_index(vaddr);
++ if (pgd_none(*pgd)) {
++ BUG();
++ return;
++ }
++ pud = pud_offset(pgd, vaddr);
++ if (pud_none(*pud)) {
++ BUG();
++ return;
++ }
++ pmd = pmd_offset(pud, vaddr);
++ if (pmd_none(*pmd)) {
++ BUG();
++ return;
++ }
++ pte = pte_offset_kernel(pmd, vaddr);
++ if (pgprot_val(flags))
++ /* <pfn,flags> stored as-is, to permit clearing entries */
++ set_pte(pte, pfn_pte(pfn, flags));
++ else
++ pte_clear(&init_mm, vaddr, pte);
++
++ /*
++ * It's enough to flush this one mapping.
++ * (PGE mappings get flushed as well)
++ */
++ __flush_tlb_one(vaddr);
++}
++
++/*
++ * Associate a virtual page frame with a given physical page frame
++ * and protection flags for that frame.
++ */
++static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
++ pgprot_t flags)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ pgd = swapper_pg_dir + pgd_index(vaddr);
++ if (pgd_none(*pgd)) {
++ BUG();
++ return;
++ }
++ pud = pud_offset(pgd, vaddr);
++ if (pud_none(*pud)) {
++ BUG();
++ return;
++ }
++ pmd = pmd_offset(pud, vaddr);
++ if (pmd_none(*pmd)) {
++ BUG();
++ return;
++ }
++ pte = pte_offset_kernel(pmd, vaddr);
++ if (pgprot_val(flags))
++ /* <pfn,flags> stored as-is, to permit clearing entries */
++ set_pte(pte, pfn_pte_ma(pfn, flags));
++ else
++ pte_clear(&init_mm, vaddr, pte);
++
++ /*
++ * It's enough to flush this one mapping.
++ * (PGE mappings get flushed as well)
++ */
++ __flush_tlb_one(vaddr);
++}
++
++/*
++ * Associate a large virtual page frame with a given physical page frame
++ * and protection flags for that frame. pfn is for the base of the page,
++ * vaddr is what the page gets mapped to - both must be properly aligned.
++ * The pmd must already be instantiated. Assumes PAE mode.
++ */
++void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
++ printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
++ return; /* BUG(); */
++ }
++ if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
++ printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
++ return; /* BUG(); */
++ }
++ pgd = swapper_pg_dir + pgd_index(vaddr);
++ if (pgd_none(*pgd)) {
++ printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
++ return; /* BUG(); */
++ }
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++ set_pmd(pmd, pfn_pmd(pfn, flags));
++ /*
++ * It's enough to flush this one mapping.
++ * (PGE mappings get flushed as well)
++ */
++ __flush_tlb_one(vaddr);
++}
++
++static int nr_fixmaps = 0;
++unsigned long hypervisor_virt_start = HYPERVISOR_VIRT_START;
++unsigned long __FIXADDR_TOP = (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE);
++EXPORT_SYMBOL(__FIXADDR_TOP);
++
++void __init set_fixaddr_top(unsigned long top)
++{
++ BUG_ON(nr_fixmaps > 0);
++ hypervisor_virt_start = top;
++ __FIXADDR_TOP = hypervisor_virt_start - 2 * PAGE_SIZE;
++}
++
++void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
++{
++ unsigned long address = __fix_to_virt(idx);
++
++ if (idx >= __end_of_fixed_addresses) {
++ BUG();
++ return;
++ }
++ switch (idx) {
++ case FIX_WP_TEST:
++#ifdef CONFIG_X86_F00F_BUG
++ case FIX_F00F_IDT:
++#endif
++ case FIX_VDSO:
++ set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
++ break;
++ default:
++ set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
++ break;
++ }
++ nr_fixmaps++;
++}
++
++pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
++{
++ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
++ if (pte)
++ make_lowmem_page_readonly(pte, XENFEAT_writable_page_tables);
++ return pte;
++}
++
++struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
++{
++ struct page *pte;
++
++#ifdef CONFIG_HIGHPTE
++ pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
++#else
++ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
++#endif
++ if (pte) {
++ SetPageForeign(pte, pte_free);
++ init_page_count(pte);
++ }
++ return pte;
++}
++
++void pte_free(struct page *pte)
++{
++ unsigned long pfn = page_to_pfn(pte);
++
++ if (!PageHighMem(pte)) {
++ unsigned long va = (unsigned long)__va(pfn << PAGE_SHIFT);
++
++ if (!pte_write(*virt_to_ptep(va)))
++ if (HYPERVISOR_update_va_mapping(
++ va, pfn_pte(pfn, PAGE_KERNEL), 0))
++ BUG();
++ } else
++ clear_bit(PG_pinned, &pte->flags);
++
++ ClearPageForeign(pte);
++ init_page_count(pte);
++
++ __free_page(pte);
++}
++
++void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
++{
++ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++}
++
++/*
++ * List of all pgd's needed for non-PAE so it can invalidate entries
++ * in both cached and uncached pgd's; not needed for PAE since the
++ * kernel pmd is shared. If PAE were not to share the pmd a similar
++ * tactic would be needed. This is essentially codepath-based locking
++ * against pageattr.c; it is the unique case in which a valid change
++ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
++ * vmalloc faults work because attached pagetables are never freed.
++ * The locking scheme was chosen on the basis of manfred's
++ * recommendations and having no core impact whatsoever.
++ * -- wli
++ */
++DEFINE_SPINLOCK(pgd_lock);
++struct page *pgd_list;
++
++static inline void pgd_list_add(pgd_t *pgd)
++{
++ struct page *page = virt_to_page(pgd);
++ page->index = (unsigned long)pgd_list;
++ if (pgd_list)
++ set_page_private(pgd_list, (unsigned long)&page->index);
++ pgd_list = page;
++ set_page_private(page, (unsigned long)&pgd_list);
++}
++
++static inline void pgd_list_del(pgd_t *pgd)
++{
++ struct page *next, **pprev, *page = virt_to_page(pgd);
++ next = (struct page *)page->index;
++ pprev = (struct page **)page_private(page);
++ *pprev = next;
++ if (next)
++ set_page_private(next, (unsigned long)pprev);
++}
++
++void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
++{
++ unsigned long flags;
++
++ if (PTRS_PER_PMD > 1) {
++ if (HAVE_SHARED_KERNEL_PMD)
++ clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
++ swapper_pg_dir + USER_PTRS_PER_PGD,
++ KERNEL_PGD_PTRS);
++ } else {
++ spin_lock_irqsave(&pgd_lock, flags);
++ clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
++ swapper_pg_dir + USER_PTRS_PER_PGD,
++ KERNEL_PGD_PTRS);
++ memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
++ pgd_list_add(pgd);
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ }
++}
++
++/* never called when PTRS_PER_PMD > 1 */
++void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
++{
++ unsigned long flags; /* can be called from interrupt context */
++
++ spin_lock_irqsave(&pgd_lock, flags);
++ pgd_list_del(pgd);
++ spin_unlock_irqrestore(&pgd_lock, flags);
++
++ pgd_test_and_unpin(pgd);
++}
++
++pgd_t *pgd_alloc(struct mm_struct *mm)
++{
++ int i;
++ pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
++ pmd_t **pmd;
++ unsigned long flags;
++
++ pgd_test_and_unpin(pgd);
++
++ if (PTRS_PER_PMD == 1 || !pgd)
++ return pgd;
++
++ if (HAVE_SHARED_KERNEL_PMD) {
++ for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
++ pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++ if (!pmd)
++ goto out_oom;
++ set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
++ }
++ return pgd;
++ }
++
++ /*
++ * We can race save/restore (if we sleep during a GFP_KERNEL memory
++ * allocation). We therefore store virtual addresses of pmds as they
++ * do not change across save/restore, and poke the machine addresses
++ * into the pgdir under the pgd_lock.
++ */
++ pmd = kmalloc(PTRS_PER_PGD * sizeof(pmd_t *), GFP_KERNEL);
++ if (!pmd) {
++ kmem_cache_free(pgd_cache, pgd);
++ return NULL;
++ }
++
++ /* Allocate pmds, remember virtual addresses. */
++ for (i = 0; i < PTRS_PER_PGD; ++i) {
++ pmd[i] = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++ if (!pmd[i])
++ goto out_oom;
++ }
++
++ spin_lock_irqsave(&pgd_lock, flags);
++
++ /* Protect against save/restore: move below 4GB under pgd_lock. */
++ if (!xen_feature(XENFEAT_pae_pgdir_above_4gb)) {
++ int rc = xen_create_contiguous_region(
++ (unsigned long)pgd, 0, 32);
++ if (rc) {
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ goto out_oom;
++ }
++ }
++
++ /* Copy kernel pmd contents and write-protect the new pmds. */
++ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++ unsigned long v = (unsigned long)i << PGDIR_SHIFT;
++ pgd_t *kpgd = pgd_offset_k(v);
++ pud_t *kpud = pud_offset(kpgd, v);
++ pmd_t *kpmd = pmd_offset(kpud, v);
++ memcpy(pmd[i], kpmd, PAGE_SIZE);
++ make_lowmem_page_readonly(
++ pmd[i], XENFEAT_writable_page_tables);
++ }
++
++ /* It is safe to poke machine addresses of pmds under the pmd_lock. */
++ for (i = 0; i < PTRS_PER_PGD; i++)
++ set_pgd(&pgd[i], __pgd(1 + __pa(pmd[i])));
++
++ /* Ensure this pgd gets picked up and pinned on save/restore. */
++ pgd_list_add(pgd);
++
++ spin_unlock_irqrestore(&pgd_lock, flags);
++
++ kfree(pmd);
++
++ return pgd;
++
++out_oom:
++ if (HAVE_SHARED_KERNEL_PMD) {
++ for (i--; i >= 0; i--)
++ kmem_cache_free(pmd_cache,
++ (void *)__va(pgd_val(pgd[i])-1));
++ } else {
++ for (i--; i >= 0; i--)
++ kmem_cache_free(pmd_cache, pmd[i]);
++ kfree(pmd);
++ }
++ kmem_cache_free(pgd_cache, pgd);
++ return NULL;
++}
++
++void pgd_free(pgd_t *pgd)
++{
++ int i;
++
++ /*
++ * After this the pgd should not be pinned for the duration of this
++ * function's execution. We should never sleep and thus never race:
++ * 1. User pmds will not become write-protected under our feet due
++ * to a concurrent mm_pin_all().
++ * 2. The machine addresses in PGD entries will not become invalid
++ * due to a concurrent save/restore.
++ */
++ pgd_test_and_unpin(pgd);
++
++ /* in the PAE case user pgd entries are overwritten before usage */
++ if (PTRS_PER_PMD > 1) {
++ for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
++ pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++ kmem_cache_free(pmd_cache, pmd);
++ }
++
++ if (!HAVE_SHARED_KERNEL_PMD) {
++ unsigned long flags;
++ spin_lock_irqsave(&pgd_lock, flags);
++ pgd_list_del(pgd);
++ spin_unlock_irqrestore(&pgd_lock, flags);
++
++ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++ pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++ make_lowmem_page_writable(
++ pmd, XENFEAT_writable_page_tables);
++ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++ kmem_cache_free(pmd_cache, pmd);
++ }
++
++ if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
++ xen_destroy_contiguous_region(
++ (unsigned long)pgd, 0);
++ }
++ }
++
++ /* in the non-PAE case, free_pgtables() clears user pgd entries */
++ kmem_cache_free(pgd_cache, pgd);
++}
++
++void make_lowmem_page_readonly(void *va, unsigned int feature)
++{
++ pte_t *pte;
++ int rc;
++
++ if (xen_feature(feature))
++ return;
++
++ pte = virt_to_ptep(va);
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)va, pte_wrprotect(*pte), 0);
++ BUG_ON(rc);
++}
++
++void make_lowmem_page_writable(void *va, unsigned int feature)
++{
++ pte_t *pte;
++ int rc;
++
++ if (xen_feature(feature))
++ return;
++
++ pte = virt_to_ptep(va);
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)va, pte_mkwrite(*pte), 0);
++ BUG_ON(rc);
++}
++
++void make_page_readonly(void *va, unsigned int feature)
++{
++ pte_t *pte;
++ int rc;
++
++ if (xen_feature(feature))
++ return;
++
++ pte = virt_to_ptep(va);
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)va, pte_wrprotect(*pte), 0);
++ if (rc) /* fallback? */
++ xen_l1_entry_update(pte, pte_wrprotect(*pte));
++ if ((unsigned long)va >= (unsigned long)high_memory) {
++ unsigned long pfn = pte_pfn(*pte);
++#ifdef CONFIG_HIGHMEM
++ if (pfn >= highstart_pfn)
++ kmap_flush_unused(); /* flush stale writable kmaps */
++ else
++#endif
++ make_lowmem_page_readonly(
++ phys_to_virt(pfn << PAGE_SHIFT), feature);
++ }
++}
++
++void make_page_writable(void *va, unsigned int feature)
++{
++ pte_t *pte;
++ int rc;
++
++ if (xen_feature(feature))
++ return;
++
++ pte = virt_to_ptep(va);
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)va, pte_mkwrite(*pte), 0);
++ if (rc) /* fallback? */
++ xen_l1_entry_update(pte, pte_mkwrite(*pte));
++ if ((unsigned long)va >= (unsigned long)high_memory) {
++ unsigned long pfn = pte_pfn(*pte);
++#ifdef CONFIG_HIGHMEM
++ if (pfn < highstart_pfn)
++#endif
++ make_lowmem_page_writable(
++ phys_to_virt(pfn << PAGE_SHIFT), feature);
++ }
++}
++
++void make_pages_readonly(void *va, unsigned int nr, unsigned int feature)
++{
++ if (xen_feature(feature))
++ return;
++
++ while (nr-- != 0) {
++ make_page_readonly(va, feature);
++ va = (void *)((unsigned long)va + PAGE_SIZE);
++ }
++}
++
++void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
++{
++ if (xen_feature(feature))
++ return;
++
++ while (nr-- != 0) {
++ make_page_writable(va, feature);
++ va = (void *)((unsigned long)va + PAGE_SIZE);
++ }
++}
++
++static inline void pgd_walk_set_prot(struct page *page, pgprot_t flags)
++{
++ unsigned long pfn = page_to_pfn(page);
++ int rc;
++
++ if (PageHighMem(page)) {
++ if (pgprot_val(flags) & _PAGE_RW)
++ clear_bit(PG_pinned, &page->flags);
++ else
++ set_bit(PG_pinned, &page->flags);
++ } else {
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)__va(pfn << PAGE_SHIFT),
++ pfn_pte(pfn, flags), 0);
++ if (rc)
++ BUG();
++ }
++}
++
++static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
++{
++ pgd_t *pgd = pgd_base;
++ pud_t *pud;
++ pmd_t *pmd;
++ int g, u, m, rc;
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return;
++
++ for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
++ if (pgd_none(*pgd))
++ continue;
++ pud = pud_offset(pgd, 0);
++ if (PTRS_PER_PUD > 1) /* not folded */
++ pgd_walk_set_prot(virt_to_page(pud),flags);
++ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++ if (pud_none(*pud))
++ continue;
++ pmd = pmd_offset(pud, 0);
++ if (PTRS_PER_PMD > 1) /* not folded */
++ pgd_walk_set_prot(virt_to_page(pmd),flags);
++ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++ if (pmd_none(*pmd))
++ continue;
++ pgd_walk_set_prot(pmd_page(*pmd),flags);
++ }
++ }
++ }
++
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)pgd_base,
++ pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
++ UVMF_TLB_FLUSH);
++ if (rc)
++ BUG();
++}
++
++static void __pgd_pin(pgd_t *pgd)
++{
++ pgd_walk(pgd, PAGE_KERNEL_RO);
++ kmap_flush_unused();
++ xen_pgd_pin(__pa(pgd));
++ set_bit(PG_pinned, &virt_to_page(pgd)->flags);
++}
++
++static void __pgd_unpin(pgd_t *pgd)
++{
++ xen_pgd_unpin(__pa(pgd));
++ pgd_walk(pgd, PAGE_KERNEL);
++ clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
++}
++
++static void pgd_test_and_unpin(pgd_t *pgd)
++{
++ if (test_bit(PG_pinned, &virt_to_page(pgd)->flags))
++ __pgd_unpin(pgd);
++}
++
++void mm_pin(struct mm_struct *mm)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++ spin_lock(&mm->page_table_lock);
++ __pgd_pin(mm->pgd);
++ spin_unlock(&mm->page_table_lock);
++}
++
++void mm_unpin(struct mm_struct *mm)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++ spin_lock(&mm->page_table_lock);
++ __pgd_unpin(mm->pgd);
++ spin_unlock(&mm->page_table_lock);
++}
++
++void mm_pin_all(void)
++{
++ struct page *page;
++ unsigned long flags;
++
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++
++ /*
++ * Allow uninterrupted access to the pgd_list. Also protects
++ * __pgd_pin() by disabling preemption.
++ * All other CPUs must be at a safe point (e.g., in stop_machine
++ * or offlined entirely).
++ */
++ spin_lock_irqsave(&pgd_lock, flags);
++ for (page = pgd_list; page; page = (struct page *)page->index) {
++ if (!test_bit(PG_pinned, &page->flags))
++ __pgd_pin((pgd_t *)page_address(page));
++ }
++ spin_unlock_irqrestore(&pgd_lock, flags);
++}
++
++void _arch_dup_mmap(struct mm_struct *mm)
++{
++ if (!test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags))
++ mm_pin(mm);
++}
++
++void _arch_exit_mmap(struct mm_struct *mm)
++{
++ struct task_struct *tsk = current;
++
++ task_lock(tsk);
++
++ /*
++ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
++ * *much* faster this way, as no tlb flushes means bigger wrpt batches.
++ */
++ if (tsk->active_mm == mm) {
++ tsk->active_mm = &init_mm;
++ atomic_inc(&init_mm.mm_count);
++
++ switch_mm(mm, &init_mm, tsk);
++
++ atomic_dec(&mm->mm_count);
++ BUG_ON(atomic_read(&mm->mm_count) == 0);
++ }
++
++ task_unlock(tsk);
++
++ if (test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags) &&
++ (atomic_read(&mm->mm_count) == 1) &&
++ !mm->context.has_foreign_mappings)
++ mm_unpin(mm);
++}
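Editorial aside (not part of the patch): one detail of pgd_alloc() and pgd_free() above that is easy to miss is the entry encoding — in the PAE paths each pgd entry is installed as __pgd(1 + __pa(pmd)), i.e. the pmd page's physical address with the low present bit set, and the pmd's kernel virtual address is later recovered by subtracting that bit before __va(). A hedged one-function sketch of the decode side; pmd_from_pgd_entry is a hypothetical helper, not something this patch adds.

#include <asm/page.h>
#include <asm/pgtable.h>

/* Sketch only: invert the __pgd(1 + __pa(pmd)) encoding used by
 * pgd_alloc()/pgd_free() above to get the pmd's virtual address back. */
static pmd_t *pmd_from_pgd_entry(pgd_t entry)
{
	return (pmd_t *)__va(pgd_val(entry) - 1);
}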
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/oprofile/xenoprof.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,179 @@
++/**
++ * @file xenoprof.c
++ *
++ * @remark Copyright 2002 OProfile authors
++ * @remark Read the file COPYING
++ *
++ * @author John Levon <levon@movementarian.org>
++ *
++ * Modified by Aravind Menon and Jose Renato Santos for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ *
++ * x86-specific part
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ */
++
++#include <linux/init.h>
++#include <linux/oprofile.h>
++#include <linux/sched.h>
++#include <asm/pgtable.h>
++
++#include <xen/driver_util.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/xenoprof.h>
++#include <xen/xenoprof.h>
++#include "op_counter.h"
++
++static unsigned int num_events = 0;
++
++void __init xenoprof_arch_init_counter(struct xenoprof_init *init)
++{
++ num_events = init->num_events;
++ /* just in case - make sure we do not overflow event list
++ (i.e. counter_config list) */
++ if (num_events > OP_MAX_COUNTER) {
++ num_events = OP_MAX_COUNTER;
++ init->num_events = num_events;
++ }
++}
++
++void xenoprof_arch_counter(void)
++{
++ int i;
++ struct xenoprof_counter counter;
++
++ for (i=0; i<num_events; i++) {
++ counter.ind = i;
++ counter.count = (uint64_t)counter_config[i].count;
++ counter.enabled = (uint32_t)counter_config[i].enabled;
++ counter.event = (uint32_t)counter_config[i].event;
++ counter.kernel = (uint32_t)counter_config[i].kernel;
++ counter.user = (uint32_t)counter_config[i].user;
++ counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
++ HYPERVISOR_xenoprof_op(XENOPROF_counter,
++ &counter);
++ }
++}
++
++void xenoprof_arch_start(void)
++{
++ /* nothing */
++}
++
++void xenoprof_arch_stop(void)
++{
++ /* nothing */
++}
++
++void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer * sbuf)
++{
++ if (sbuf->buffer) {
++ vunmap(sbuf->buffer);
++ sbuf->buffer = NULL;
++ }
++}
++
++int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer * get_buffer,
++ struct xenoprof_shared_buffer * sbuf)
++{
++ int npages, ret;
++ struct vm_struct *area;
++
++ sbuf->buffer = NULL;
++ if ( (ret = HYPERVISOR_xenoprof_op(XENOPROF_get_buffer, get_buffer)) )
++ return ret;
++
++ npages = (get_buffer->bufsize * get_buffer->nbuf - 1) / PAGE_SIZE + 1;
++
++ area = alloc_vm_area(npages * PAGE_SIZE);
++ if (area == NULL)
++ return -ENOMEM;
++
++ if ( (ret = direct_kernel_remap_pfn_range(
++ (unsigned long)area->addr,
++ get_buffer->buf_gmaddr >> PAGE_SHIFT,
++ npages * PAGE_SIZE, __pgprot(_KERNPG_TABLE),
++ DOMID_SELF)) ) {
++ vunmap(area->addr);
++ return ret;
++ }
++
++ sbuf->buffer = area->addr;
++ return ret;
++}
++
++int xenoprof_arch_set_passive(struct xenoprof_passive * pdomain,
++ struct xenoprof_shared_buffer * sbuf)
++{
++ int ret;
++ int npages;
++ struct vm_struct *area;
++ pgprot_t prot = __pgprot(_KERNPG_TABLE);
++
++ sbuf->buffer = NULL;
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive, pdomain);
++ if (ret)
++ goto out;
++
++ npages = (pdomain->bufsize * pdomain->nbuf - 1) / PAGE_SIZE + 1;
++
++ area = alloc_vm_area(npages * PAGE_SIZE);
++ if (area == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ ret = direct_kernel_remap_pfn_range(
++ (unsigned long)area->addr,
++ pdomain->buf_gmaddr >> PAGE_SHIFT,
++ npages * PAGE_SIZE, prot, DOMID_SELF);
++ if (ret) {
++ vunmap(area->addr);
++ goto out;
++ }
++ sbuf->buffer = area->addr;
++
++out:
++ return ret;
++}
++
++struct op_counter_config counter_config[OP_MAX_COUNTER];
++
++int xenoprof_create_files(struct super_block * sb, struct dentry * root)
++{
++ unsigned int i;
++
++ for (i = 0; i < num_events; ++i) {
++ struct dentry * dir;
++ char buf[2];
++
++ snprintf(buf, 2, "%d", i);
++ dir = oprofilefs_mkdir(sb, root, buf);
++ oprofilefs_create_ulong(sb, dir, "enabled",
++ &counter_config[i].enabled);
++ oprofilefs_create_ulong(sb, dir, "event",
++ &counter_config[i].event);
++ oprofilefs_create_ulong(sb, dir, "count",
++ &counter_config[i].count);
++ oprofilefs_create_ulong(sb, dir, "unit_mask",
++ &counter_config[i].unit_mask);
++ oprofilefs_create_ulong(sb, dir, "kernel",
++ &counter_config[i].kernel);
++ oprofilefs_create_ulong(sb, dir, "user",
++ &counter_config[i].user);
++ }
++
++ return 0;
++}
++
++int __init oprofile_arch_init(struct oprofile_operations * ops)
++{
++ return xenoprofile_init(ops);
++}
++
++void oprofile_arch_exit(void)
++{
++ xenoprofile_exit();
++}
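Editorial aside (not part of the patch): the sizing expression used twice above, in xenoprof_arch_map_shared_buffer() and xenoprof_arch_set_passive(), is a ceiling division — (bufsize * nbuf - 1) / PAGE_SIZE + 1 gives the number of whole pages needed to hold bufsize * nbuf bytes, assuming the product is non-zero. A hedged, standalone check of that arithmetic:

#include <assert.h>

#define PAGE_SIZE 4096UL

/* Ceiling division exactly as written in the two mapping helpers above. */
static unsigned long pages_needed(unsigned long bufsize, unsigned long nbuf)
{
	return (bufsize * nbuf - 1) / PAGE_SIZE + 1;
}

int main(void)
{
	assert(pages_needed(4096, 1) == 1);	/* one full page            */
	assert(pages_needed(4097, 1) == 2);	/* one byte over -> 2 pages */
	assert(pages_needed(1024, 9) == 3);	/* 9 KiB -> 3 pages         */
	return 0;
}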
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/pci/irq-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,1205 @@
++/*
++ * Low-Level PCI Support for PC -- Routing of Interrupts
++ *
++ * (c) 1999--2000 Martin Mares <mj@ucw.cz>
++ */
++
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/dmi.h>
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/io_apic.h>
++#include <linux/irq.h>
++#include <linux/acpi.h>
++
++#include "pci.h"
++
++#define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
++#define PIRQ_VERSION 0x0100
++
++static int broken_hp_bios_irq9;
++static int acer_tm360_irqrouting;
++
++static struct irq_routing_table *pirq_table;
++
++static int pirq_enable_irq(struct pci_dev *dev);
++
++/*
++ * Never use: 0, 1, 2 (timer, keyboard, and cascade)
++ * Avoid using: 13, 14 and 15 (FP error and IDE).
++ * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
++ */
++unsigned int pcibios_irq_mask = 0xfff8;
++
++static int pirq_penalty[16] = {
++ 1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
++ 0, 0, 0, 0, 1000, 100000, 100000, 100000
++};
++
++struct irq_router {
++ char *name;
++ u16 vendor, device;
++ int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
++ int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new);
++};
++
++struct irq_router_handler {
++ u16 vendor;
++ int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
++};
++
++int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
++void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
++
++/*
++ * Check passed address for the PCI IRQ Routing Table signature
++ * and perform checksum verification.
++ */
++
++static inline struct irq_routing_table * pirq_check_routing_table(u8 *addr)
++{
++ struct irq_routing_table *rt;
++ int i;
++ u8 sum;
++
++ rt = (struct irq_routing_table *) addr;
++ if (rt->signature != PIRQ_SIGNATURE ||
++ rt->version != PIRQ_VERSION ||
++ rt->size % 16 ||
++ rt->size < sizeof(struct irq_routing_table))
++ return NULL;
++ sum = 0;
++ for (i=0; i < rt->size; i++)
++ sum += addr[i];
++ if (!sum) {
++ DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%p\n", rt);
++ return rt;
++ }
++ return NULL;
++}
++
++
++
++/*
++ * Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table.
++ */
++
++static struct irq_routing_table * __init pirq_find_routing_table(void)
++{
++ u8 *addr;
++ struct irq_routing_table *rt;
++
++#ifdef CONFIG_XEN
++ if (!is_initial_xendomain())
++ return NULL;
++#endif
++ if (pirq_table_addr) {
++ rt = pirq_check_routing_table((u8 *) isa_bus_to_virt(pirq_table_addr));
++ if (rt)
++ return rt;
++ printk(KERN_WARNING "PCI: PIRQ table NOT found at pirqaddr\n");
++ }
++ for(addr = (u8 *) isa_bus_to_virt(0xf0000); addr < (u8 *) isa_bus_to_virt(0x100000); addr += 16) {
++ rt = pirq_check_routing_table(addr);
++ if (rt)
++ return rt;
++ }
++ return NULL;
++}
++
++/*
++ * If we have a IRQ routing table, use it to search for peer host
++ * bridges. It's a gross hack, but since there are no other known
++ * ways how to get a list of buses, we have to go this way.
++ */
++
++static void __init pirq_peer_trick(void)
++{
++ struct irq_routing_table *rt = pirq_table;
++ u8 busmap[256];
++ int i;
++ struct irq_info *e;
++
++ memset(busmap, 0, sizeof(busmap));
++ for(i=0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) {
++ e = &rt->slots[i];
++#ifdef DEBUG
++ {
++ int j;
++ DBG(KERN_DEBUG "%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot);
++ for(j=0; j<4; j++)
++ DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap);
++ DBG("\n");
++ }
++#endif
++ busmap[e->bus] = 1;
++ }
++ for(i = 1; i < 256; i++) {
++ if (!busmap[i] || pci_find_bus(0, i))
++ continue;
++ if (pci_scan_bus(i, &pci_root_ops, NULL))
++ printk(KERN_INFO "PCI: Discovered primary peer bus %02x [IRQ]\n", i);
++ }
++ pcibios_last_bus = -1;
++}
++
++/*
++ * Code for querying and setting of IRQ routes on various interrupt routers.
++ */
++
++void eisa_set_level_irq(unsigned int irq)
++{
++ unsigned char mask = 1 << (irq & 7);
++ unsigned int port = 0x4d0 + (irq >> 3);
++ unsigned char val;
++ static u16 eisa_irq_mask;
++
++ if (irq >= 16 || (1 << irq) & eisa_irq_mask)
++ return;
++
++ eisa_irq_mask |= (1 << irq);
++ printk(KERN_DEBUG "PCI: setting IRQ %u as level-triggered\n", irq);
++ val = inb(port);
++ if (!(val & mask)) {
++ DBG(KERN_DEBUG " -> edge");
++ outb(val | mask, port);
++ }
++}
++
++/*
++ * Common IRQ routing practice: nybbles in config space,
++ * offset by some magic constant.
++ */
++static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
++{
++ u8 x;
++ unsigned reg = offset + (nr >> 1);
++
++ pci_read_config_byte(router, reg, &x);
++ return (nr & 1) ? (x >> 4) : (x & 0xf);
++}
++
++static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val)
++{
++ u8 x;
++ unsigned reg = offset + (nr >> 1);
++
++ pci_read_config_byte(router, reg, &x);
++ x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val);
++ pci_write_config_byte(router, reg, x);
++}
++
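Editorial aside (not part of the patch): the two helpers above pack two 4-bit IRQ links into one config-space byte — an even link number lives in the low nibble, an odd one in the high nibble, which is why the register offset is computed as offset + (nr >> 1). A hedged, userspace-runnable sketch of just the bit manipulation (no PCI access), handy when sanity-checking a router's nybble layout:

#include <assert.h>
#include <stdint.h>

/* Same packing rule as read/write_config_nybble() above, on a plain byte. */
static unsigned read_nybble(uint8_t byte, unsigned nr)
{
	return (nr & 1) ? (byte >> 4) : (byte & 0xf);
}

static uint8_t write_nybble(uint8_t byte, unsigned nr, unsigned val)
{
	return (nr & 1) ? ((byte & 0x0f) | (val << 4))
			: ((byte & 0xf0) | (val & 0xf));
}

int main(void)
{
	uint8_t reg = 0;

	reg = write_nybble(reg, 0, 0xb);	/* link 0 -> IRQ 11 */
	reg = write_nybble(reg, 1, 0x5);	/* link 1 -> IRQ 5  */
	assert(read_nybble(reg, 0) == 0xb);
	assert(read_nybble(reg, 1) == 0x5);
	assert(reg == 0x5b);
	return 0;
}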
++/*
++ * ALI pirq entries are damn ugly, and completely undocumented.
++ * This has been figured out from pirq tables, and it's not a pretty
++ * picture.
++ */
++static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
++
++ return irqmap[read_config_nybble(router, 0x48, pirq-1)];
++}
++
++static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
++ unsigned int val = irqmap[irq];
++
++ if (val) {
++ write_config_nybble(router, 0x48, pirq-1, val);
++ return 1;
++ }
++ return 0;
++}
++
++/*
++ * The Intel PIIX4 pirq rules are fairly simple: "pirq" is
++ * just a pointer to the config space.
++ */
++static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ u8 x;
++
++ pci_read_config_byte(router, pirq, &x);
++ return (x < 16) ? x : 0;
++}
++
++static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ pci_write_config_byte(router, pirq, irq);
++ return 1;
++}
++
++/*
++ * The VIA pirq rules are nibble-based, like ALI,
++ * but without the ugly irq number munging.
++ * However, PIRQD is in the upper instead of lower 4 bits.
++ */
++static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ return read_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq);
++}
++
++static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ write_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq, irq);
++ return 1;
++}
++
++/*
++ * The VIA pirq rules are nibble-based, like ALI,
++ * but without the ugly irq number munging.
++ * However, for 82C586, nibble map is different .
++ */
++static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ static const unsigned int pirqmap[4] = { 3, 2, 5, 1 };
++ return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
++}
++
++static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ static const unsigned int pirqmap[4] = { 3, 2, 5, 1 };
++ write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
++ return 1;
++}
++
++/*
++ * ITE 8330G pirq rules are nibble-based
++ * FIXME: pirqmap may be { 1, 0, 3, 2 },
++ * 2+3 are both mapped to irq 9 on my system
++ */
++static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
++ return read_config_nybble(router,0x43, pirqmap[pirq-1]);
++}
++
++static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
++ write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
++ return 1;
++}
++
++/*
++ * OPTI: high four bits are nibble pointer..
++ * I wonder what the low bits do?
++ */
++static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ return read_config_nybble(router, 0xb8, pirq >> 4);
++}
++
++static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ write_config_nybble(router, 0xb8, pirq >> 4, irq);
++ return 1;
++}
++
++/*
++ * Cyrix: nibble offset 0x5C
++ * 0x5C bits 7:4 is INTB bits 3:0 is INTA
++ * 0x5D bits 7:4 is INTD bits 3:0 is INTC
++ */
++static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ return read_config_nybble(router, 0x5C, (pirq-1)^1);
++}
++
++static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ write_config_nybble(router, 0x5C, (pirq-1)^1, irq);
++ return 1;
++}
++
++/*
++ * PIRQ routing for SiS 85C503 router used in several SiS chipsets.
++ * We have to deal with the following issues here:
++ * - vendors have different ideas about the meaning of link values
++ * - some onboard devices (integrated in the chipset) have special
++ * links and are thus routed differently (i.e. not via PCI INTA-INTD)
++ * - different revision of the router have a different layout for
++ * the routing registers, particularly for the onchip devices
++ *
++ * For all routing registers the common thing is we have one byte
++ * per routeable link which is defined as:
++ * bit 7 IRQ mapping enabled (0) or disabled (1)
++ * bits [6:4] reserved (sometimes used for onchip devices)
++ * bits [3:0] IRQ to map to
++ * allowed: 3-7, 9-12, 14-15
++ * reserved: 0, 1, 2, 8, 13
++ *
++ * The config-space registers located at 0x41/0x42/0x43/0x44 are
++ * always used to route the normal PCI INT A/B/C/D respectively.
++ * Apparently there are systems implementing PCI routing table using
++ * link values 0x01-0x04 and others using 0x41-0x44 for PCI INTA..D.
++ * We try our best to handle both link mappings.
++ *
++ * Currently (2003-05-21) it appears most SiS chipsets follow the
++ * definition of routing registers from the SiS-5595 southbridge.
++ * According to the SiS 5595 datasheets the revision id's of the
++ * router (ISA-bridge) should be 0x01 or 0xb0.
++ *
++ * Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1.
++ * Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets.
++ * They seem to work with the current routing code. However there is
++ * some concern because of the two USB-OHCI HCs (original SiS 5595
++ * had only one). YMMV.
++ *
++ * Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1:
++ *
++ * 0x61: IDEIRQ:
++ * bits [6:5] must be written 01
++ * bit 4 channel-select primary (0), secondary (1)
++ *
++ * 0x62: USBIRQ:
++ * bit 6 OHCI function disabled (0), enabled (1)
++ *
++ * 0x6a: ACPI/SCI IRQ: bits 4-6 reserved
++ *
++ * 0x7e: Data Acq. Module IRQ - bits 4-6 reserved
++ *
++ * We support USBIRQ (in addition to INTA-INTD) and keep the
++ * IDE, ACPI and DAQ routing untouched as set by the BIOS.
++ *
++ * Currently the only reported exception is the new SiS 65x chipset
++ * which includes the SiS 69x southbridge. Here we have the 85C503
++ * router revision 0x04 and there are changes in the register layout
++ * mostly related to the different USB HCs with USB 2.0 support.
++ *
++ * Onchip routing for router rev-id 0x04 (try-and-error observation)
++ *
++ * 0x60/0x61/0x62/0x63: 1xEHCI and 3xOHCI (companion) USB-HCs
++ * bit 6-4 are probably unused, not like 5595
++ */
++
++#define PIRQ_SIS_IRQ_MASK 0x0f
++#define PIRQ_SIS_IRQ_DISABLE 0x80
++#define PIRQ_SIS_USB_ENABLE 0x40
++
++static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ u8 x;
++ int reg;
++
++ reg = pirq;
++ if (reg >= 0x01 && reg <= 0x04)
++ reg += 0x40;
++ pci_read_config_byte(router, reg, &x);
++ return (x & PIRQ_SIS_IRQ_DISABLE) ? 0 : (x & PIRQ_SIS_IRQ_MASK);
++}
++
++static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ u8 x;
++ int reg;
++
++ reg = pirq;
++ if (reg >= 0x01 && reg <= 0x04)
++ reg += 0x40;
++ pci_read_config_byte(router, reg, &x);
++ x &= ~(PIRQ_SIS_IRQ_MASK | PIRQ_SIS_IRQ_DISABLE);
++ x |= irq ? irq: PIRQ_SIS_IRQ_DISABLE;
++ pci_write_config_byte(router, reg, x);
++ return 1;
++}
++
++
++/*
++ * VLSI: nibble offset 0x74 - educated guess due to routing table and
++ * config space of VLSI 82C534 PCI-bridge/router (1004:0102)
++ * Tested on HP OmniBook 800 covering PIRQ 1, 2, 4, 8 for onboard
++ * devices, PIRQ 3 for non-pci(!) soundchip and (untested) PIRQ 6
++ * for the busbridge to the docking station.
++ */
++
++static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ if (pirq > 8) {
++ printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
++ return 0;
++ }
++ return read_config_nybble(router, 0x74, pirq-1);
++}
++
++static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ if (pirq > 8) {
++ printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
++ return 0;
++ }
++ write_config_nybble(router, 0x74, pirq-1, irq);
++ return 1;
++}
++
++/*
++ * ServerWorks: PCI interrupts mapped to system IRQ lines through Index
++ * and Redirect I/O registers (0x0c00 and 0x0c01). The Index register
++ * format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a. The Redirect
++ * register is a straight binary coding of desired PIC IRQ (low nibble).
++ *
++ * The 'link' value in the PIRQ table is already in the correct format
++ * for the Index register. There are some special index values:
++ * 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1,
++ * and 0x03 for SMBus.
++ */
++static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ outb_p(pirq, 0xc00);
++ return inb(0xc01) & 0xf;
++}
++
++static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ outb_p(pirq, 0xc00);
++ outb_p(irq, 0xc01);
++ return 1;
++}
++
++/* Support for AMD756 PCI IRQ Routing
++ * Jhon H. Caicedo <jhcaiced@osso.org.co>
++ * Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... (jhcaiced)
++ * Jun/19/2001 Alpha Release 0.1.0 (jhcaiced)
++ * The AMD756 pirq rules are nibble-based
++ * offset 0x56 0-3 PIRQA 4-7 PIRQB
++ * offset 0x57 0-3 PIRQC 4-7 PIRQD
++ */
++static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ u8 irq;
++ irq = 0;
++ if (pirq <= 4)
++ {
++ irq = read_config_nybble(router, 0x56, pirq - 1);
++ }
++ printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n",
++ dev->vendor, dev->device, pirq, irq);
++ return irq;
++}
++
++static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n",
++ dev->vendor, dev->device, pirq, irq);
++ if (pirq <= 4)
++ {
++ write_config_nybble(router, 0x56, pirq - 1, irq);
++ }
++ return 1;
++}
++
++#ifdef CONFIG_PCI_BIOS
++
++static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ struct pci_dev *bridge;
++ int pin = pci_get_interrupt_pin(dev, &bridge);
++ return pcibios_set_irq_routing(bridge, pin, irq);
++}
++
++#endif
++
++static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ static struct pci_device_id __initdata pirq_440gx[] = {
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
++ { },
++ };
++
++ /* 440GX has a proprietary PIRQ router -- don't use it */
++ if (pci_dev_present(pirq_440gx))
++ return 0;
++
++ switch(device)
++ {
++ case PCI_DEVICE_ID_INTEL_82371FB_0:
++ case PCI_DEVICE_ID_INTEL_82371SB_0:
++ case PCI_DEVICE_ID_INTEL_82371AB_0:
++ case PCI_DEVICE_ID_INTEL_82371MX:
++ case PCI_DEVICE_ID_INTEL_82443MX_0:
++ case PCI_DEVICE_ID_INTEL_82801AA_0:
++ case PCI_DEVICE_ID_INTEL_82801AB_0:
++ case PCI_DEVICE_ID_INTEL_82801BA_0:
++ case PCI_DEVICE_ID_INTEL_82801BA_10:
++ case PCI_DEVICE_ID_INTEL_82801CA_0:
++ case PCI_DEVICE_ID_INTEL_82801CA_12:
++ case PCI_DEVICE_ID_INTEL_82801DB_0:
++ case PCI_DEVICE_ID_INTEL_82801E_0:
++ case PCI_DEVICE_ID_INTEL_82801EB_0:
++ case PCI_DEVICE_ID_INTEL_ESB_1:
++ case PCI_DEVICE_ID_INTEL_ICH6_0:
++ case PCI_DEVICE_ID_INTEL_ICH6_1:
++ case PCI_DEVICE_ID_INTEL_ICH7_0:
++ case PCI_DEVICE_ID_INTEL_ICH7_1:
++ case PCI_DEVICE_ID_INTEL_ICH7_30:
++ case PCI_DEVICE_ID_INTEL_ICH7_31:
++ case PCI_DEVICE_ID_INTEL_ESB2_0:
++ case PCI_DEVICE_ID_INTEL_ICH8_0:
++ case PCI_DEVICE_ID_INTEL_ICH8_1:
++ case PCI_DEVICE_ID_INTEL_ICH8_2:
++ case PCI_DEVICE_ID_INTEL_ICH8_3:
++ case PCI_DEVICE_ID_INTEL_ICH8_4:
++ r->name = "PIIX/ICH";
++ r->get = pirq_piix_get;
++ r->set = pirq_piix_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int via_router_probe(struct irq_router *r,
++ struct pci_dev *router, u16 device)
++{
++ /* FIXME: We should move some of the quirk fixup stuff here */
++
++ /*
++ * work arounds for some buggy BIOSes
++ */
++ if (device == PCI_DEVICE_ID_VIA_82C586_0) {
++ switch(router->device) {
++ case PCI_DEVICE_ID_VIA_82C686:
++ /*
++ * Asus k7m bios wrongly reports 82C686A
++ * as 586-compatible
++ */
++ device = PCI_DEVICE_ID_VIA_82C686;
++ break;
++ case PCI_DEVICE_ID_VIA_8235:
++ /**
++ * Asus a7v-x bios wrongly reports 8235
++ * as 586-compatible
++ */
++ device = PCI_DEVICE_ID_VIA_8235;
++ break;
++ }
++ }
++
++ switch(device) {
++ case PCI_DEVICE_ID_VIA_82C586_0:
++ r->name = "VIA";
++ r->get = pirq_via586_get;
++ r->set = pirq_via586_set;
++ return 1;
++ case PCI_DEVICE_ID_VIA_82C596:
++ case PCI_DEVICE_ID_VIA_82C686:
++ case PCI_DEVICE_ID_VIA_8231:
++ case PCI_DEVICE_ID_VIA_8233A:
++ case PCI_DEVICE_ID_VIA_8235:
++ case PCI_DEVICE_ID_VIA_8237:
++ /* FIXME: add new ones for 8233/5 */
++ r->name = "VIA";
++ r->get = pirq_via_get;
++ r->set = pirq_via_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_VLSI_82C534:
++ r->name = "VLSI 82C534";
++ r->get = pirq_vlsi_get;
++ r->set = pirq_vlsi_set;
++ return 1;
++ }
++ return 0;
++}
++
++
++static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_SERVERWORKS_OSB4:
++ case PCI_DEVICE_ID_SERVERWORKS_CSB5:
++ r->name = "ServerWorks";
++ r->get = pirq_serverworks_get;
++ r->set = pirq_serverworks_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int sis_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ if (device != PCI_DEVICE_ID_SI_503)
++ return 0;
++
++ r->name = "SIS";
++ r->get = pirq_sis_get;
++ r->set = pirq_sis_set;
++ return 1;
++}
++
++static __init int cyrix_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_CYRIX_5520:
++ r->name = "NatSemi";
++ r->get = pirq_cyrix_get;
++ r->set = pirq_cyrix_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int opti_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_OPTI_82C700:
++ r->name = "OPTI";
++ r->get = pirq_opti_get;
++ r->set = pirq_opti_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_ITE_IT8330G_0:
++ r->name = "ITE";
++ r->get = pirq_ite_get;
++ r->set = pirq_ite_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_AL_M1533:
++ case PCI_DEVICE_ID_AL_M1563:
++ printk(KERN_DEBUG "PCI: Using ALI IRQ Router\n");
++ r->name = "ALI";
++ r->get = pirq_ali_get;
++ r->set = pirq_ali_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int amd_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_AMD_VIPER_740B:
++ r->name = "AMD756";
++ break;
++ case PCI_DEVICE_ID_AMD_VIPER_7413:
++ r->name = "AMD766";
++ break;
++ case PCI_DEVICE_ID_AMD_VIPER_7443:
++ r->name = "AMD768";
++ break;
++ default:
++ return 0;
++ }
++ r->get = pirq_amd756_get;
++ r->set = pirq_amd756_set;
++ return 1;
++}
++
++static __initdata struct irq_router_handler pirq_routers[] = {
++ { PCI_VENDOR_ID_INTEL, intel_router_probe },
++ { PCI_VENDOR_ID_AL, ali_router_probe },
++ { PCI_VENDOR_ID_ITE, ite_router_probe },
++ { PCI_VENDOR_ID_VIA, via_router_probe },
++ { PCI_VENDOR_ID_OPTI, opti_router_probe },
++ { PCI_VENDOR_ID_SI, sis_router_probe },
++ { PCI_VENDOR_ID_CYRIX, cyrix_router_probe },
++ { PCI_VENDOR_ID_VLSI, vlsi_router_probe },
++ { PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe },
++ { PCI_VENDOR_ID_AMD, amd_router_probe },
++ /* Someone with docs needs to add the ATI Radeon IGP */
++ { 0, NULL }
++};
++static struct irq_router pirq_router;
++static struct pci_dev *pirq_router_dev;
++
++
++/*
++ * FIXME: should we have an option to say "generic for
++ * chipset" ?
++ */
++
++static void __init pirq_find_router(struct irq_router *r)
++{
++ struct irq_routing_table *rt = pirq_table;
++ struct irq_router_handler *h;
++
++#ifdef CONFIG_PCI_BIOS
++ if (!rt->signature) {
++ printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n");
++ r->set = pirq_bios_set;
++ r->name = "BIOS";
++ return;
++ }
++#endif
++
++ /* Default unless a driver reloads it */
++ r->name = "default";
++ r->get = NULL;
++ r->set = NULL;
++
++ DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for %04x:%04x\n",
++ rt->rtr_vendor, rt->rtr_device);
++
++ pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn);
++ if (!pirq_router_dev) {
++ DBG(KERN_DEBUG "PCI: Interrupt router not found at "
++ "%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
++ return;
++ }
++
++ for( h = pirq_routers; h->vendor; h++) {
++ /* First look for a router match */
++ if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device))
++ break;
++ /* Fall back to a device match */
++ if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device))
++ break;
++ }
++ printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n",
++ pirq_router.name,
++ pirq_router_dev->vendor,
++ pirq_router_dev->device,
++ pci_name(pirq_router_dev));
++}
++
++static struct irq_info *pirq_get_info(struct pci_dev *dev)
++{
++ struct irq_routing_table *rt = pirq_table;
++ int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
++ struct irq_info *info;
++
++ for (info = rt->slots; entries--; info++)
++ if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn))
++ return info;
++ return NULL;
++}
++
++static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
++{
++ u8 pin;
++ struct irq_info *info;
++ int i, pirq, newirq;
++ int irq = 0;
++ u32 mask;
++ struct irq_router *r = &pirq_router;
++ struct pci_dev *dev2 = NULL;
++ char *msg = NULL;
++
++ /* Find IRQ pin */
++ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
++ if (!pin) {
++ DBG(KERN_DEBUG " -> no interrupt pin\n");
++ return 0;
++ }
++ pin = pin - 1;
++
++ /* Find IRQ routing entry */
++
++ if (!pirq_table)
++ return 0;
++
++ DBG(KERN_DEBUG "IRQ for %s[%c]", pci_name(dev), 'A' + pin);
++ info = pirq_get_info(dev);
++ if (!info) {
++ DBG(" -> not found in routing table\n" KERN_DEBUG);
++ return 0;
++ }
++ pirq = info->irq[pin].link;
++ mask = info->irq[pin].bitmap;
++ if (!pirq) {
++ DBG(" -> not routed\n" KERN_DEBUG);
++ return 0;
++ }
++ DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs);
++ mask &= pcibios_irq_mask;
++
++ /* Work around broken HP Pavilion Notebooks which assign USB to
++ IRQ 9 even though it is actually wired to IRQ 11 */
++
++ if (broken_hp_bios_irq9 && pirq == 0x59 && dev->irq == 9) {
++ dev->irq = 11;
++ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
++ r->set(pirq_router_dev, dev, pirq, 11);
++ }
++
++ /* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */
++ if (acer_tm360_irqrouting && dev->irq == 11 && dev->vendor == PCI_VENDOR_ID_O2) {
++ pirq = 0x68;
++ mask = 0x400;
++ dev->irq = r->get(pirq_router_dev, dev, pirq);
++ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
++ }
++
++ /*
++ * Find the best IRQ to assign: use the one
++ * reported by the device if possible.
++ */
++ newirq = dev->irq;
++ if (newirq && !((1 << newirq) & mask)) {
++ if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0;
++ else printk("\n" KERN_WARNING
++ "PCI: IRQ %i for device %s doesn't match PIRQ mask "
++ "- try pci=usepirqmask\n" KERN_DEBUG, newirq,
++ pci_name(dev));
++ }
++ if (!newirq && assign) {
++ for (i = 0; i < 16; i++) {
++ if (!(mask & (1 << i)))
++ continue;
++ if (pirq_penalty[i] < pirq_penalty[newirq] && can_request_irq(i, IRQF_SHARED))
++ newirq = i;
++ }
++ }
++ DBG(" -> newirq=%d", newirq);
++
++ /* Check if it is hardcoded */
++ if ((pirq & 0xf0) == 0xf0) {
++ irq = pirq & 0xf;
++ DBG(" -> hardcoded IRQ %d\n", irq);
++ msg = "Hardcoded";
++ } else if ( r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \
++ ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask)) ) {
++ DBG(" -> got IRQ %d\n", irq);
++ msg = "Found";
++ eisa_set_level_irq(irq);
++ } else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
++ DBG(" -> assigning IRQ %d", newirq);
++ if (r->set(pirq_router_dev, dev, pirq, newirq)) {
++ eisa_set_level_irq(newirq);
++ DBG(" ... OK\n");
++ msg = "Assigned";
++ irq = newirq;
++ }
++ }
++
++ if (!irq) {
++ DBG(" ... failed\n");
++ if (newirq && mask == (1 << newirq)) {
++ msg = "Guessed";
++ irq = newirq;
++ } else
++ return 0;
++ }
++ printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, pci_name(dev));
++
++ /* Update IRQ for all devices with the same pirq value */
++ while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) {
++ pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin);
++ if (!pin)
++ continue;
++ pin--;
++ info = pirq_get_info(dev2);
++ if (!info)
++ continue;
++ if (info->irq[pin].link == pirq) {
++ /* We refuse to override the dev->irq information. Give a warning! */
++ if ( dev2->irq && dev2->irq != irq && \
++ (!(pci_probe & PCI_USE_PIRQ_MASK) || \
++ ((1 << dev2->irq) & mask)) ) {
++#ifndef CONFIG_PCI_MSI
++ printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n",
++ pci_name(dev2), dev2->irq, irq);
++#endif
++ continue;
++ }
++ dev2->irq = irq;
++ pirq_penalty[irq]++;
++ if (dev != dev2)
++ printk(KERN_INFO "PCI: Sharing IRQ %d with %s\n", irq, pci_name(dev2));
++ }
++ }
++ return 1;
++}
++
++static void __init pcibios_fixup_irqs(void)
++{
++ struct pci_dev *dev = NULL;
++ u8 pin;
++
++ DBG(KERN_DEBUG "PCI: IRQ fixup\n");
++ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
++ /*
++		 * If the BIOS has set an out-of-range IRQ number, just ignore it.
++		 * Also keep track of which IRQs are already in use.
++ */
++ if (dev->irq >= 16) {
++ DBG(KERN_DEBUG "%s: ignoring bogus IRQ %d\n", pci_name(dev), dev->irq);
++ dev->irq = 0;
++ }
++ /* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */
++ if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000)
++ pirq_penalty[dev->irq] = 0;
++ pirq_penalty[dev->irq]++;
++ }
++
++ dev = NULL;
++ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
++ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
++#ifdef CONFIG_X86_IO_APIC
++ /*
++ * Recalculate IRQ numbers if we use the I/O APIC.
++ */
++ if (io_apic_assign_pci_irqs)
++ {
++ int irq;
++
++ if (pin) {
++ pin--; /* interrupt pins are numbered starting from 1 */
++ irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
++ /*
++ * Busses behind bridges are typically not listed in the MP-table.
++ * In this case we have to look up the IRQ based on the parent bus,
++ * parent slot, and pin number. The SMP code detects such bridged
++ * busses itself so we should get into this branch reliably.
++ */
++ if (irq < 0 && dev->bus->parent) { /* go back to the bridge */
++ struct pci_dev * bridge = dev->bus->self;
++
++ pin = (pin + PCI_SLOT(dev->devfn)) % 4;
++ irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
++ PCI_SLOT(bridge->devfn), pin);
++ if (irq >= 0)
++ printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
++ pci_name(bridge), 'A' + pin, irq);
++ }
++ if (irq >= 0) {
++ if (use_pci_vector() &&
++ !platform_legacy_irq(irq))
++ irq = IO_APIC_VECTOR(irq);
++
++ printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
++ pci_name(dev), 'A' + pin, irq);
++ dev->irq = irq;
++ }
++ }
++ }
++#endif
++ /*
++	 * Still no IRQ? Try to look up one...
++ */
++ if (pin && !dev->irq)
++ pcibios_lookup_irq(dev, 0);
++ }
++}
++
++/*
++ * Work around broken HP Pavilion Notebooks which assign USB to
++ * IRQ 9 even though it is actually wired to IRQ 11
++ */
++static int __init fix_broken_hp_bios_irq9(struct dmi_system_id *d)
++{
++ if (!broken_hp_bios_irq9) {
++ broken_hp_bios_irq9 = 1;
++ printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
++ }
++ return 0;
++}
++
++/*
++ * Work around broken Acer TravelMate 360 Notebooks which assign
++ * Cardbus to IRQ 11 even though it is actually wired to IRQ 10
++ */
++static int __init fix_acer_tm360_irqrouting(struct dmi_system_id *d)
++{
++ if (!acer_tm360_irqrouting) {
++ acer_tm360_irqrouting = 1;
++ printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
++ }
++ return 0;
++}
++
++static struct dmi_system_id __initdata pciirq_dmi_table[] = {
++ {
++ .callback = fix_broken_hp_bios_irq9,
++ .ident = "HP Pavilion N5400 Series Laptop",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook Model GE"),
++ DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
++ },
++ },
++ {
++ .callback = fix_acer_tm360_irqrouting,
++ .ident = "Acer TravelMate 36x Laptop",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
++ },
++ },
++ { }
++};
++
++static int __init pcibios_irq_init(void)
++{
++ DBG(KERN_DEBUG "PCI: IRQ init\n");
++
++ if (pcibios_enable_irq || raw_pci_ops == NULL)
++ return 0;
++
++ dmi_check_system(pciirq_dmi_table);
++
++ pirq_table = pirq_find_routing_table();
++
++#ifdef CONFIG_PCI_BIOS
++ if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
++ pirq_table = pcibios_get_irq_routing_table();
++#endif
++ if (pirq_table) {
++ pirq_peer_trick();
++ pirq_find_router(&pirq_router);
++ if (pirq_table->exclusive_irqs) {
++ int i;
++ for (i=0; i<16; i++)
++ if (!(pirq_table->exclusive_irqs & (1 << i)))
++ pirq_penalty[i] += 100;
++ }
++ /* If we're using the I/O APIC, avoid using the PCI IRQ routing table */
++ if (io_apic_assign_pci_irqs)
++ pirq_table = NULL;
++ }
++
++ pcibios_enable_irq = pirq_enable_irq;
++
++ pcibios_fixup_irqs();
++ return 0;
++}
++
++subsys_initcall(pcibios_irq_init);
++
++
++static void pirq_penalize_isa_irq(int irq, int active)
++{
++ /*
++ * If any ISAPnP device reports an IRQ in its list of possible
++	 * IRQs, we try to avoid assigning it to PCI devices.
++ */
++ if (irq < 16) {
++ if (active)
++ pirq_penalty[irq] += 1000;
++ else
++ pirq_penalty[irq] += 100;
++ }
++}
++
++void pcibios_penalize_isa_irq(int irq, int active)
++{
++#ifdef CONFIG_ACPI
++ if (!acpi_noirq)
++ acpi_penalize_isa_irq(irq, active);
++ else
++#endif
++ pirq_penalize_isa_irq(irq, active);
++}
++
++static int pirq_enable_irq(struct pci_dev *dev)
++{
++ u8 pin;
++ struct pci_dev *temp_dev;
++
++ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
++ if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
++ char *msg = "";
++
++ pin--; /* interrupt pins are numbered starting from 1 */
++
++ if (io_apic_assign_pci_irqs) {
++ int irq;
++
++ irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
++ /*
++ * Busses behind bridges are typically not listed in the MP-table.
++ * In this case we have to look up the IRQ based on the parent bus,
++ * parent slot, and pin number. The SMP code detects such bridged
++ * busses itself so we should get into this branch reliably.
++ */
++ temp_dev = dev;
++ while (irq < 0 && dev->bus->parent) { /* go back to the bridge */
++ struct pci_dev * bridge = dev->bus->self;
++
++ pin = (pin + PCI_SLOT(dev->devfn)) % 4;
++ irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
++ PCI_SLOT(bridge->devfn), pin);
++ if (irq >= 0)
++ printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
++ pci_name(bridge), 'A' + pin, irq);
++ dev = bridge;
++ }
++ dev = temp_dev;
++ if (irq >= 0) {
++#ifdef CONFIG_PCI_MSI
++ if (!platform_legacy_irq(irq))
++ irq = IO_APIC_VECTOR(irq);
++#endif
++ printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
++ pci_name(dev), 'A' + pin, irq);
++ dev->irq = irq;
++ return 0;
++ } else
++ msg = " Probably buggy MP table.";
++ } else if (pci_probe & PCI_BIOS_IRQ_SCAN)
++ msg = "";
++ else
++ msg = " Please try using pci=biosirq.";
++
++		/* With IDE legacy devices, the IRQ lookup failure is not a problem. */
++ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5))
++ return 0;
++
++ printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
++ 'A' + pin, pci_name(dev), msg);
++ }
++ return 0;
++}
++
++int pci_vector_resources(int last, int nr_released)
++{
++ int count = nr_released;
++
++ int next = last;
++ int offset = (last % 8);
++
++ while (next < FIRST_SYSTEM_VECTOR) {
++ next += 8;
++#ifdef CONFIG_X86_64
++ if (next == IA32_SYSCALL_VECTOR)
++ continue;
++#else
++ if (next == SYSCALL_VECTOR)
++ continue;
++#endif
++ count++;
++ if (next >= FIRST_SYSTEM_VECTOR) {
++ if (offset%8) {
++ next = FIRST_DEVICE_VECTOR + offset;
++ offset++;
++ continue;
++ }
++ count--;
++ }
++ }
++
++ return count;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/i386/pci/pcifront.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,55 @@
++/*
++ * PCI Frontend Stub - puts some "dummy" functions into the Linux x86 PCI core
++ * to support the Xen PCI Frontend's operation
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <asm/acpi.h>
++#include "pci.h"
++
++static int pcifront_enable_irq(struct pci_dev *dev)
++{
++ u8 irq;
++ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
++ dev->irq = irq;
++
++ return 0;
++}
++
++extern u8 pci_cache_line_size;
++
++static int __init pcifront_x86_stub_init(void)
++{
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++
++ /* Only install our method if we haven't found real hardware already */
++ if (raw_pci_ops)
++ return 0;
++
++ printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");
++
++ /* Copied from arch/i386/pci/common.c */
++ pci_cache_line_size = 32 >> 2;
++ if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
++ pci_cache_line_size = 64 >> 2; /* K7 & K8 */
++ else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
++ pci_cache_line_size = 128 >> 2; /* P4 */
++
++ /* On x86, we need to disable the normal IRQ routing table and
++ * just ask the backend
++ */
++ pcibios_enable_irq = pcifront_enable_irq;
++ pcibios_disable_irq = NULL;
++
++#ifdef CONFIG_ACPI
++ /* Keep ACPI out of the picture */
++ acpi_noirq = 1;
++#endif
++
++ return 0;
++}
++
++arch_initcall(pcifront_x86_stub_init);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/ia32/ia32entry-xen.S 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,743 @@
++/*
++ * Compatibility mode system call entry point for x86-64.
++ *
++ * Copyright 2000-2002 Andi Kleen, SuSE Labs.
++ */
++
++#include <asm/dwarf2.h>
++#include <asm/calling.h>
++#include <asm/asm-offsets.h>
++#include <asm/current.h>
++#include <asm/errno.h>
++#include <asm/ia32_unistd.h>
++#include <asm/thread_info.h>
++#include <asm/segment.h>
++#include <asm/vsyscall32.h>
++#include <asm/irqflags.h>
++#include <linux/linkage.h>
++
++#define __XEN_X86_64 1
++
++#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
++
++ .macro IA32_ARG_FIXUP noebp=0
++ movl %edi,%r8d
++ .if \noebp
++ .else
++ movl %ebp,%r9d
++ .endif
++ xchg %ecx,%esi
++ movl %ebx,%edi
++ movl %edx,%edx /* zero extension */
++ .endm
++
++ /* clobbers %eax */
++ .macro CLEAR_RREGS
++ xorl %eax,%eax
++ movq %rax,R11(%rsp)
++ movq %rax,R10(%rsp)
++ movq %rax,R9(%rsp)
++ movq %rax,R8(%rsp)
++ .endm
++
++#if defined (__XEN_X86_64)
++#include "../kernel/xen_entry.S"
++
++#define __swapgs
++#define __cli
++#define __sti
++#else
++/*
++ * Use the native instructions
++ */
++#define __swapgs swapgs
++#define __cli cli
++#define __sti sti
++#endif
++
++ .macro CFI_STARTPROC32 simple
++ CFI_STARTPROC \simple
++ CFI_UNDEFINED r8
++ CFI_UNDEFINED r9
++ CFI_UNDEFINED r10
++ CFI_UNDEFINED r11
++ CFI_UNDEFINED r12
++ CFI_UNDEFINED r13
++ CFI_UNDEFINED r14
++ CFI_UNDEFINED r15
++ .endm
++
++/*
++ * 32bit SYSENTER instruction entry.
++ *
++ * Arguments:
++ * %eax System call number.
++ * %ebx Arg1
++ * %ecx Arg2
++ * %edx Arg3
++ * %esi Arg4
++ * %edi Arg5
++ * %ebp user stack
++ * 0(%ebp) Arg6
++ *
++ * Interrupts off.
++ *
++ * This is purely a fast path. For anything complicated we use the int 0x80
++ * path below. Set up a complete hardware stack frame to share code
++ * with the int 0x80 path.
++ */
++ENTRY(ia32_sysenter_target)
++ CFI_STARTPROC32 simple
++ CFI_DEF_CFA rsp,0
++ CFI_REGISTER rsp,rbp
++ __swapgs
++ movq %gs:pda_kernelstack, %rsp
++ addq $(PDA_STACKOFFSET),%rsp
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs, here we enable it straight after entry:
++ */
++ XEN_UNBLOCK_EVENTS(%r11)
++ __sti
++ movl %ebp,%ebp /* zero extension */
++ pushq $__USER32_DS
++ CFI_ADJUST_CFA_OFFSET 8
++ /*CFI_REL_OFFSET ss,0*/
++ pushq %rbp
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rsp,0
++ pushfq
++ CFI_ADJUST_CFA_OFFSET 8
++ /*CFI_REL_OFFSET rflags,0*/
++ movl $VSYSCALL32_SYSEXIT, %r10d
++ CFI_REGISTER rip,r10
++ pushq $__USER32_CS
++ CFI_ADJUST_CFA_OFFSET 8
++ /*CFI_REL_OFFSET cs,0*/
++ movl %eax, %eax
++ pushq %r10
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rip,0
++ pushq %rax
++ CFI_ADJUST_CFA_OFFSET 8
++ cld
++ SAVE_ARGS 0,0,0
++ /* no need to do an access_ok check here because rbp has been
++ 32bit zero extended */
++1: movl (%rbp),%r9d
++ .section __ex_table,"a"
++ .quad 1b,ia32_badarg
++ .previous
++ GET_THREAD_INFO(%r10)
++ orl $TS_COMPAT,threadinfo_status(%r10)
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
++ CFI_REMEMBER_STATE
++ jnz sysenter_tracesys
++sysenter_do_call:
++ cmpl $(IA32_NR_syscalls-1),%eax
++ ja ia32_badsys
++ IA32_ARG_FIXUP 1
++ call *ia32_sys_call_table(,%rax,8)
++ movq %rax,RAX-ARGOFFSET(%rsp)
++ GET_THREAD_INFO(%r10)
++ XEN_BLOCK_EVENTS(%r11)
++ __cli
++ TRACE_IRQS_OFF
++ testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
++ jnz int_ret_from_sys_call
++ andl $~TS_COMPAT,threadinfo_status(%r10)
++	/* clear IF so that popfq doesn't enable interrupts early */
++ andl $~0x200,EFLAGS-R11(%rsp)
++ RESTORE_ARGS 1,24,1,1,1,1
++ popfq
++ CFI_ADJUST_CFA_OFFSET -8
++ /*CFI_RESTORE rflags*/
++ popq %rcx /* User %esp */
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_REGISTER rsp,rcx
++ movl $VSYSCALL32_SYSEXIT,%edx /* User %eip */
++ CFI_REGISTER rip,rdx
++ TRACE_IRQS_ON
++ __swapgs
++ XEN_UNBLOCK_EVENTS(%r11)
++ __sti /* sti only takes effect after the next instruction */
++ /* sysexit */
++ .byte 0xf, 0x35 /* TBD */
++
++sysenter_tracesys:
++ CFI_RESTORE_STATE
++ SAVE_REST
++ CLEAR_RREGS
++ movq $-ENOSYS,RAX(%rsp) /* really needed? */
++ movq %rsp,%rdi /* &pt_regs -> arg1 */
++ call syscall_trace_enter
++ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
++ RESTORE_REST
++ movl %ebp, %ebp
++ /* no need to do an access_ok check here because rbp has been
++ 32bit zero extended */
++1: movl (%rbp),%r9d
++ .section __ex_table,"a"
++ .quad 1b,ia32_badarg
++ .previous
++ jmp sysenter_do_call
++ CFI_ENDPROC
++ENDPROC(ia32_sysenter_target)
++
++/*
++ * 32bit SYSCALL instruction entry.
++ *
++ * Arguments:
++ * %eax System call number.
++ * %ebx Arg1
++ * %ecx return EIP
++ * %edx Arg3
++ * %esi Arg4
++ * %edi Arg5
++ * %ebp Arg2 [note: not saved in the stack frame, should not be touched]
++ * %esp user stack
++ * 0(%esp) Arg6
++ *
++ * Interrupts off.
++ *
++ * This is purely a fast path. For anything complicated we use the int 0x80
++ * path below. Set up a complete hardware stack frame to share code
++ * with the int 0x80 path.
++ */
++ENTRY(ia32_cstar_target)
++ CFI_STARTPROC32 simple
++ CFI_DEF_CFA rsp,PDA_STACKOFFSET
++ CFI_REGISTER rip,rcx
++ /*CFI_REGISTER rflags,r11*/
++ __swapgs
++ movl %esp,%r8d
++ CFI_REGISTER rsp,r8
++ movq %gs:pda_kernelstack,%rsp
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs and here we enable it straight after entry:
++ */
++ XEN_UNBLOCK_EVENTS(%r11)
++ __sti
++ SAVE_ARGS 8,1,1
++ movl %eax,%eax /* zero extension */
++ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
++ movq %rcx,RIP-ARGOFFSET(%rsp)
++ CFI_REL_OFFSET rip,RIP-ARGOFFSET
++ movq %rbp,RCX-ARGOFFSET(%rsp) /* this lies slightly to ptrace */
++ movl %ebp,%ecx
++ movq $__USER32_CS,CS-ARGOFFSET(%rsp)
++ movq $__USER32_DS,SS-ARGOFFSET(%rsp)
++ movq %r11,EFLAGS-ARGOFFSET(%rsp)
++ /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
++ movq %r8,RSP-ARGOFFSET(%rsp)
++ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
++ /* no need to do an access_ok check here because r8 has been
++ 32bit zero extended */
++ /* hardware stack frame is complete now */
++1: movl (%r8),%r9d
++ .section __ex_table,"a"
++ .quad 1b,ia32_badarg
++ .previous
++ GET_THREAD_INFO(%r10)
++ orl $TS_COMPAT,threadinfo_status(%r10)
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
++ CFI_REMEMBER_STATE
++ jnz cstar_tracesys
++cstar_do_call:
++ cmpl $IA32_NR_syscalls-1,%eax
++ ja ia32_badsys
++ IA32_ARG_FIXUP 1
++ call *ia32_sys_call_table(,%rax,8)
++ movq %rax,RAX-ARGOFFSET(%rsp)
++ GET_THREAD_INFO(%r10)
++ XEN_BLOCK_EVENTS(%r11)
++ __cli
++ TRACE_IRQS_OFF
++ testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
++ jnz int_ret_from_sys_call
++ andl $~TS_COMPAT,threadinfo_status(%r10)
++ RESTORE_ARGS 1,-ARG_SKIP,1,1,1
++ movl RIP-ARGOFFSET(%rsp),%ecx
++ CFI_REGISTER rip,rcx
++ movl EFLAGS-ARGOFFSET(%rsp),%r11d
++ /*CFI_REGISTER rflags,r11*/
++ TRACE_IRQS_ON
++ movl RSP-ARGOFFSET(%rsp),%esp
++ CFI_RESTORE rsp
++ __swapgs
++ sysretl /* TBD */
++
++cstar_tracesys:
++ CFI_RESTORE_STATE
++ SAVE_REST
++ CLEAR_RREGS
++ movq $-ENOSYS,RAX(%rsp) /* really needed? */
++ movq %rsp,%rdi /* &pt_regs -> arg1 */
++ call syscall_trace_enter
++ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
++ RESTORE_REST
++ movl RSP-ARGOFFSET(%rsp), %r8d
++ /* no need to do an access_ok check here because r8 has been
++ 32bit zero extended */
++1: movl (%r8),%r9d
++ .section __ex_table,"a"
++ .quad 1b,ia32_badarg
++ .previous
++ jmp cstar_do_call
++END(ia32_cstar_target)
++
++ia32_badarg:
++ movq $-EFAULT,%rax
++ jmp ia32_sysret
++ CFI_ENDPROC
++
++/*
++ * Emulated IA32 system calls via int 0x80.
++ *
++ * Arguments:
++ * %eax System call number.
++ * %ebx Arg1
++ * %ecx Arg2
++ * %edx Arg3
++ * %esi Arg4
++ * %edi Arg5
++ * %ebp Arg6 [note: not saved in the stack frame, should not be touched]
++ *
++ * Notes:
++ * Uses the same stack frame as the x86-64 version.
++ * All registers except %eax must be saved (but ptrace may violate that)
++ * Arguments are zero extended. For system calls that want sign extension and
++ * take long arguments a wrapper is needed. Most calls can just be called
++ * directly.
++ * Assumes it is only called from user space and entered with interrupts off.
++ */
++
++ENTRY(ia32_syscall)
++ CFI_STARTPROC simple
++ CFI_DEF_CFA rsp,SS+8-RIP
++ /*CFI_REL_OFFSET ss,SS-RIP*/
++ CFI_REL_OFFSET rsp,RSP-RIP
++ /*CFI_REL_OFFSET rflags,EFLAGS-RIP*/
++ /*CFI_REL_OFFSET cs,CS-RIP*/
++ CFI_REL_OFFSET rip,RIP-RIP
++ __swapgs
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs and here we enable it straight after entry:
++ */
++ XEN_UNBLOCK_EVENTS(%r11)
++ __sti
++ movq (%rsp),%rcx
++ movq 8(%rsp),%r11
++ addq $0x10,%rsp /* skip rcx and r11 */
++ movl %eax,%eax
++ pushq %rax
++ CFI_ADJUST_CFA_OFFSET 8
++ cld
++/* 1: jmp 1b */
++	/* note: the registers are not zero-extended to the stack frame;
++	   this could be a problem. */
++ SAVE_ARGS 0,0,1
++ GET_THREAD_INFO(%r10)
++ orl $TS_COMPAT,threadinfo_status(%r10)
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
++ jnz ia32_tracesys
++ia32_do_syscall:
++ cmpl $(IA32_NR_syscalls-1),%eax
++ ja ia32_badsys
++ IA32_ARG_FIXUP
++ call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
++ia32_sysret:
++ movq %rax,RAX-ARGOFFSET(%rsp)
++ jmp int_ret_from_sys_call
++
++ia32_tracesys:
++ SAVE_REST
++ movq $-ENOSYS,RAX(%rsp) /* really needed? */
++ movq %rsp,%rdi /* &pt_regs -> arg1 */
++ call syscall_trace_enter
++ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
++ RESTORE_REST
++ jmp ia32_do_syscall
++END(ia32_syscall)
++
++ia32_badsys:
++ movq $0,ORIG_RAX-ARGOFFSET(%rsp)
++ movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
++ jmp int_ret_from_sys_call
++
++quiet_ni_syscall:
++ movq $-ENOSYS,%rax
++ ret
++ CFI_ENDPROC
++
++ .macro PTREGSCALL label, func, arg
++ .globl \label
++\label:
++ leaq \func(%rip),%rax
++ leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
++ jmp ia32_ptregs_common
++ .endm
++
++ CFI_STARTPROC32
++
++ PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi
++ PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi
++ PTREGSCALL stub32_sigaltstack, sys32_sigaltstack, %rdx
++ PTREGSCALL stub32_sigsuspend, sys32_sigsuspend, %rcx
++ PTREGSCALL stub32_execve, sys32_execve, %rcx
++ PTREGSCALL stub32_fork, sys_fork, %rdi
++ PTREGSCALL stub32_clone, sys32_clone, %rdx
++ PTREGSCALL stub32_vfork, sys_vfork, %rdi
++ PTREGSCALL stub32_iopl, sys_iopl, %rsi
++ PTREGSCALL stub32_rt_sigsuspend, sys_rt_sigsuspend, %rdx
++
++ENTRY(ia32_ptregs_common)
++ popq %r11
++ CFI_ENDPROC
++ CFI_STARTPROC32 simple
++ CFI_DEF_CFA rsp,SS+8-ARGOFFSET
++ CFI_REL_OFFSET rax,RAX-ARGOFFSET
++ CFI_REL_OFFSET rcx,RCX-ARGOFFSET
++ CFI_REL_OFFSET rdx,RDX-ARGOFFSET
++ CFI_REL_OFFSET rsi,RSI-ARGOFFSET
++ CFI_REL_OFFSET rdi,RDI-ARGOFFSET
++ CFI_REL_OFFSET rip,RIP-ARGOFFSET
++/* CFI_REL_OFFSET cs,CS-ARGOFFSET*/
++/* CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
++ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
++/* CFI_REL_OFFSET ss,SS-ARGOFFSET*/
++ SAVE_REST
++ call *%rax
++ RESTORE_REST
++ jmp ia32_sysret /* misbalances the return cache */
++ CFI_ENDPROC
++END(ia32_ptregs_common)
++
++ .section .rodata,"a"
++ .align 8
++ia32_sys_call_table:
++ .quad sys_restart_syscall
++ .quad sys_exit
++ .quad stub32_fork
++ .quad sys_read
++ .quad sys_write
++ .quad compat_sys_open /* 5 */
++ .quad sys_close
++ .quad sys32_waitpid
++ .quad sys_creat
++ .quad sys_link
++ .quad sys_unlink /* 10 */
++ .quad stub32_execve
++ .quad sys_chdir
++ .quad compat_sys_time
++ .quad sys_mknod
++ .quad sys_chmod /* 15 */
++ .quad sys_lchown16
++ .quad quiet_ni_syscall /* old break syscall holder */
++ .quad sys_stat
++ .quad sys32_lseek
++ .quad sys_getpid /* 20 */
++ .quad compat_sys_mount /* mount */
++ .quad sys_oldumount /* old_umount */
++ .quad sys_setuid16
++ .quad sys_getuid16
++ .quad compat_sys_stime /* stime */ /* 25 */
++ .quad sys32_ptrace /* ptrace */
++ .quad sys_alarm
++ .quad sys_fstat /* (old)fstat */
++ .quad sys_pause
++ .quad compat_sys_utime /* 30 */
++ .quad quiet_ni_syscall /* old stty syscall holder */
++ .quad quiet_ni_syscall /* old gtty syscall holder */
++ .quad sys_access
++ .quad sys_nice
++ .quad quiet_ni_syscall /* 35 */ /* old ftime syscall holder */
++ .quad sys_sync
++ .quad sys32_kill
++ .quad sys_rename
++ .quad sys_mkdir
++ .quad sys_rmdir /* 40 */
++ .quad sys_dup
++ .quad sys32_pipe
++ .quad compat_sys_times
++ .quad quiet_ni_syscall /* old prof syscall holder */
++ .quad sys_brk /* 45 */
++ .quad sys_setgid16
++ .quad sys_getgid16
++ .quad sys_signal
++ .quad sys_geteuid16
++ .quad sys_getegid16 /* 50 */
++ .quad sys_acct
++ .quad sys_umount /* new_umount */
++ .quad quiet_ni_syscall /* old lock syscall holder */
++ .quad compat_sys_ioctl
++ .quad compat_sys_fcntl64 /* 55 */
++ .quad quiet_ni_syscall /* old mpx syscall holder */
++ .quad sys_setpgid
++ .quad quiet_ni_syscall /* old ulimit syscall holder */
++ .quad sys32_olduname
++ .quad sys_umask /* 60 */
++ .quad sys_chroot
++ .quad sys32_ustat
++ .quad sys_dup2
++ .quad sys_getppid
++ .quad sys_getpgrp /* 65 */
++ .quad sys_setsid
++ .quad sys32_sigaction
++ .quad sys_sgetmask
++ .quad sys_ssetmask
++ .quad sys_setreuid16 /* 70 */
++ .quad sys_setregid16
++ .quad stub32_sigsuspend
++ .quad compat_sys_sigpending
++ .quad sys_sethostname
++ .quad compat_sys_setrlimit /* 75 */
++ .quad compat_sys_old_getrlimit /* old_getrlimit */
++ .quad compat_sys_getrusage
++ .quad sys32_gettimeofday
++ .quad sys32_settimeofday
++ .quad sys_getgroups16 /* 80 */
++ .quad sys_setgroups16
++ .quad sys32_old_select
++ .quad sys_symlink
++ .quad sys_lstat
++ .quad sys_readlink /* 85 */
++#ifdef CONFIG_IA32_AOUT
++ .quad sys_uselib
++#else
++ .quad quiet_ni_syscall
++#endif
++ .quad sys_swapon
++ .quad sys_reboot
++ .quad compat_sys_old_readdir
++ .quad sys32_mmap /* 90 */
++ .quad sys_munmap
++ .quad sys_truncate
++ .quad sys_ftruncate
++ .quad sys_fchmod
++ .quad sys_fchown16 /* 95 */
++ .quad sys_getpriority
++ .quad sys_setpriority
++ .quad quiet_ni_syscall /* old profil syscall holder */
++ .quad compat_sys_statfs
++ .quad compat_sys_fstatfs /* 100 */
++ .quad sys_ioperm
++ .quad compat_sys_socketcall
++ .quad sys_syslog
++ .quad compat_sys_setitimer
++ .quad compat_sys_getitimer /* 105 */
++ .quad compat_sys_newstat
++ .quad compat_sys_newlstat
++ .quad compat_sys_newfstat
++ .quad sys32_uname
++ .quad stub32_iopl /* 110 */
++ .quad sys_vhangup
++ .quad quiet_ni_syscall /* old "idle" system call */
++ .quad sys32_vm86_warning /* vm86old */
++ .quad compat_sys_wait4
++ .quad sys_swapoff /* 115 */
++ .quad sys32_sysinfo
++ .quad sys32_ipc
++ .quad sys_fsync
++ .quad stub32_sigreturn
++ .quad stub32_clone /* 120 */
++ .quad sys_setdomainname
++ .quad sys_uname
++ .quad sys_modify_ldt
++ .quad compat_sys_adjtimex
++ .quad sys32_mprotect /* 125 */
++ .quad compat_sys_sigprocmask
++ .quad quiet_ni_syscall /* create_module */
++ .quad sys_init_module
++ .quad sys_delete_module
++ .quad quiet_ni_syscall /* 130 get_kernel_syms */
++ .quad sys_quotactl
++ .quad sys_getpgid
++ .quad sys_fchdir
++ .quad quiet_ni_syscall /* bdflush */
++ .quad sys_sysfs /* 135 */
++ .quad sys_personality
++ .quad quiet_ni_syscall /* for afs_syscall */
++ .quad sys_setfsuid16
++ .quad sys_setfsgid16
++ .quad sys_llseek /* 140 */
++ .quad compat_sys_getdents
++ .quad compat_sys_select
++ .quad sys_flock
++ .quad sys_msync
++ .quad compat_sys_readv /* 145 */
++ .quad compat_sys_writev
++ .quad sys_getsid
++ .quad sys_fdatasync
++ .quad sys32_sysctl /* sysctl */
++ .quad sys_mlock /* 150 */
++ .quad sys_munlock
++ .quad sys_mlockall
++ .quad sys_munlockall
++ .quad sys_sched_setparam
++ .quad sys_sched_getparam /* 155 */
++ .quad sys_sched_setscheduler
++ .quad sys_sched_getscheduler
++ .quad sys_sched_yield
++ .quad sys_sched_get_priority_max
++ .quad sys_sched_get_priority_min /* 160 */
++ .quad sys_sched_rr_get_interval
++ .quad compat_sys_nanosleep
++ .quad sys_mremap
++ .quad sys_setresuid16
++ .quad sys_getresuid16 /* 165 */
++ .quad sys32_vm86_warning /* vm86 */
++ .quad quiet_ni_syscall /* query_module */
++ .quad sys_poll
++ .quad compat_sys_nfsservctl
++ .quad sys_setresgid16 /* 170 */
++ .quad sys_getresgid16
++ .quad sys_prctl
++ .quad stub32_rt_sigreturn
++ .quad sys32_rt_sigaction
++ .quad sys32_rt_sigprocmask /* 175 */
++ .quad sys32_rt_sigpending
++ .quad compat_sys_rt_sigtimedwait
++ .quad sys32_rt_sigqueueinfo
++ .quad stub32_rt_sigsuspend
++ .quad sys32_pread /* 180 */
++ .quad sys32_pwrite
++ .quad sys_chown16
++ .quad sys_getcwd
++ .quad sys_capget
++ .quad sys_capset
++ .quad stub32_sigaltstack
++ .quad sys32_sendfile
++ .quad quiet_ni_syscall /* streams1 */
++ .quad quiet_ni_syscall /* streams2 */
++ .quad stub32_vfork /* 190 */
++ .quad compat_sys_getrlimit
++ .quad sys32_mmap2
++ .quad sys32_truncate64
++ .quad sys32_ftruncate64
++ .quad sys32_stat64 /* 195 */
++ .quad sys32_lstat64
++ .quad sys32_fstat64
++ .quad sys_lchown
++ .quad sys_getuid
++ .quad sys_getgid /* 200 */
++ .quad sys_geteuid
++ .quad sys_getegid
++ .quad sys_setreuid
++ .quad sys_setregid
++ .quad sys_getgroups /* 205 */
++ .quad sys_setgroups
++ .quad sys_fchown
++ .quad sys_setresuid
++ .quad sys_getresuid
++ .quad sys_setresgid /* 210 */
++ .quad sys_getresgid
++ .quad sys_chown
++ .quad sys_setuid
++ .quad sys_setgid
++ .quad sys_setfsuid /* 215 */
++ .quad sys_setfsgid
++ .quad sys_pivot_root
++ .quad sys_mincore
++ .quad sys_madvise
++ .quad compat_sys_getdents64 /* 220 getdents64 */
++ .quad compat_sys_fcntl64
++ .quad quiet_ni_syscall /* tux */
++ .quad quiet_ni_syscall /* security */
++ .quad sys_gettid
++ .quad sys_readahead /* 225 */
++ .quad sys_setxattr
++ .quad sys_lsetxattr
++ .quad sys_fsetxattr
++ .quad sys_getxattr
++ .quad sys_lgetxattr /* 230 */
++ .quad sys_fgetxattr
++ .quad sys_listxattr
++ .quad sys_llistxattr
++ .quad sys_flistxattr
++ .quad sys_removexattr /* 235 */
++ .quad sys_lremovexattr
++ .quad sys_fremovexattr
++ .quad sys_tkill
++ .quad sys_sendfile64
++ .quad compat_sys_futex /* 240 */
++ .quad compat_sys_sched_setaffinity
++ .quad compat_sys_sched_getaffinity
++ .quad sys32_set_thread_area
++ .quad sys32_get_thread_area
++ .quad compat_sys_io_setup /* 245 */
++ .quad sys_io_destroy
++ .quad compat_sys_io_getevents
++ .quad compat_sys_io_submit
++ .quad sys_io_cancel
++ .quad sys_fadvise64 /* 250 */
++ .quad quiet_ni_syscall /* free_huge_pages */
++ .quad sys_exit_group
++ .quad sys32_lookup_dcookie
++ .quad sys_epoll_create
++ .quad sys_epoll_ctl /* 255 */
++ .quad sys_epoll_wait
++ .quad sys_remap_file_pages
++ .quad sys_set_tid_address
++ .quad compat_sys_timer_create
++ .quad compat_sys_timer_settime /* 260 */
++ .quad compat_sys_timer_gettime
++ .quad sys_timer_getoverrun
++ .quad sys_timer_delete
++ .quad compat_sys_clock_settime
++ .quad compat_sys_clock_gettime /* 265 */
++ .quad compat_sys_clock_getres
++ .quad compat_sys_clock_nanosleep
++ .quad compat_sys_statfs64
++ .quad compat_sys_fstatfs64
++ .quad sys_tgkill /* 270 */
++ .quad compat_sys_utimes
++ .quad sys32_fadvise64_64
++ .quad quiet_ni_syscall /* sys_vserver */
++ .quad sys_mbind
++ .quad compat_sys_get_mempolicy /* 275 */
++ .quad sys_set_mempolicy
++ .quad compat_sys_mq_open
++ .quad sys_mq_unlink
++ .quad compat_sys_mq_timedsend
++ .quad compat_sys_mq_timedreceive /* 280 */
++ .quad compat_sys_mq_notify
++ .quad compat_sys_mq_getsetattr
++ .quad compat_sys_kexec_load /* reserved for kexec */
++ .quad compat_sys_waitid
++ .quad quiet_ni_syscall /* 285: sys_altroot */
++ .quad sys_add_key
++ .quad sys_request_key
++ .quad sys_keyctl
++ .quad sys_ioprio_set
++ .quad sys_ioprio_get /* 290 */
++ .quad sys_inotify_init
++ .quad sys_inotify_add_watch
++ .quad sys_inotify_rm_watch
++ .quad sys_migrate_pages
++ .quad compat_sys_openat /* 295 */
++ .quad sys_mkdirat
++ .quad sys_mknodat
++ .quad sys_fchownat
++ .quad compat_sys_futimesat
++ .quad sys32_fstatat /* 300 */
++ .quad sys_unlinkat
++ .quad sys_renameat
++ .quad sys_linkat
++ .quad sys_symlinkat
++ .quad sys_readlinkat /* 305 */
++ .quad sys_fchmodat
++ .quad sys_faccessat
++ .quad quiet_ni_syscall /* pselect6 for now */
++ .quad quiet_ni_syscall /* ppoll for now */
++ .quad sys_unshare /* 310 */
++ .quad compat_sys_set_robust_list
++ .quad compat_sys_get_robust_list
++ .quad sys_splice
++ .quad sys_sync_file_range
++ .quad sys_tee
++ .quad compat_sys_vmsplice
++ .quad compat_sys_move_pages
++ia32_syscall_end:
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/ia32/syscall32-xen.c 2007-08-27 14:02:01.000000000 -0400
+@@ -0,0 +1,128 @@
++/* Copyright 2002,2003 Andi Kleen, SuSE Labs */
++
++/* vsyscall handling for 32bit processes. Map a stub page into it
++ on demand because 32bit cannot reach the kernel's fixmaps */
++
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/gfp.h>
++#include <linux/init.h>
++#include <linux/stringify.h>
++#include <linux/security.h>
++#include <asm/proto.h>
++#include <asm/tlbflush.h>
++#include <asm/ia32_unistd.h>
++
++#ifdef USE_INT80
++extern unsigned char syscall32_int80[], syscall32_int80_end[];
++#endif
++extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
++extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
++extern int sysctl_vsyscall32;
++
++char *syscall32_page;
++#ifndef USE_INT80
++static int use_sysenter = -1;
++#endif
++
++static struct page *
++syscall32_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
++{
++ struct page *p = virt_to_page(adr - vma->vm_start + syscall32_page);
++ get_page(p);
++ return p;
++}
++
++/* Prevent VMA merging */
++static void syscall32_vma_close(struct vm_area_struct *vma)
++{
++}
++
++static struct vm_operations_struct syscall32_vm_ops = {
++ .close = syscall32_vma_close,
++ .nopage = syscall32_nopage,
++};
++
++struct linux_binprm;
++
++/* Setup a VMA at program startup for the vsyscall page */
++int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
++{
++ int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
++ struct vm_area_struct *vma;
++ struct mm_struct *mm = current->mm;
++ int ret;
++
++ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++ if (!vma)
++ return -ENOMEM;
++
++ memset(vma, 0, sizeof(struct vm_area_struct));
++ /* Could randomize here */
++ vma->vm_start = VSYSCALL32_BASE;
++ vma->vm_end = VSYSCALL32_END;
++ /* MAYWRITE to allow gdb to COW and set breakpoints */
++ vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
++ vma->vm_flags |= mm->def_flags;
++ vma->vm_page_prot = protection_map[vma->vm_flags & 7];
++ vma->vm_ops = &syscall32_vm_ops;
++ vma->vm_mm = mm;
++
++ down_write(&mm->mmap_sem);
++ if ((ret = insert_vm_struct(mm, vma))) {
++ up_write(&mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return ret;
++ }
++ mm->total_vm += npages;
++ up_write(&mm->mmap_sem);
++ return 0;
++}
++
++static int __init init_syscall32(void)
++{
++ syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
++ if (!syscall32_page)
++ panic("Cannot allocate syscall32 page");
++
++#ifdef USE_INT80
++ /*
++ * At this point we use int 0x80.
++ */
++ memcpy(syscall32_page, syscall32_int80,
++ syscall32_int80_end - syscall32_int80);
++#else
++ if (use_sysenter > 0) {
++ memcpy(syscall32_page, syscall32_sysenter,
++ syscall32_sysenter_end - syscall32_sysenter);
++ } else {
++ memcpy(syscall32_page, syscall32_syscall,
++ syscall32_syscall_end - syscall32_syscall);
++ }
++#endif
++ return 0;
++}
++
++/*
++ * This must be done early in case we have an initrd containing 32-bit
++ * binaries (e.g., hotplug). This could be pushed upstream to arch/x86_64.
++ */
++core_initcall(init_syscall32);
++
++/* May not be __init: called during resume */
++void syscall32_cpu_init(void)
++{
++#ifndef USE_INT80
++ if (use_sysenter < 0)
++ use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
++
++ /* Load these always in case some future AMD CPU supports
++ SYSENTER from compat mode too. */
++ checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
++ checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
++ checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
++
++ wrmsrl(MSR_CSTAR, ia32_cstar_target);
++#endif
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/ia32/syscall32_syscall-xen.S 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,28 @@
++/* 32bit VDSOs mapped into user space. */
++
++ .section ".init.data","aw"
++
++#ifdef USE_INT80
++
++ .globl syscall32_int80
++ .globl syscall32_int80_end
++
++syscall32_int80:
++ .incbin "arch/x86_64/ia32/vsyscall-int80.so"
++syscall32_int80_end:
++
++#endif
++
++ .globl syscall32_syscall
++ .globl syscall32_syscall_end
++
++syscall32_syscall:
++ .incbin "arch/x86_64/ia32/vsyscall-syscall.so"
++syscall32_syscall_end:
++
++ .globl syscall32_sysenter
++ .globl syscall32_sysenter_end
++
++syscall32_sysenter:
++ .incbin "arch/x86_64/ia32/vsyscall-sysenter.so"
++syscall32_sysenter_end:
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/ia32/vsyscall-int80.S 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,58 @@
++/*
++ * Code for the vsyscall page. This version uses the old int $0x80 method.
++ *
++ * NOTE:
++ * 1) __kernel_vsyscall _must_ be first in this page.
++ * 2) there are alignment constraints on this stub, see vsyscall-sigreturn.S
++ * for details.
++ */
++#include <asm/ia32_unistd.h>
++#include <asm/asm-offsets.h>
++
++ .code32
++ .text
++ .section .text.vsyscall,"ax"
++ .globl __kernel_vsyscall
++ .type __kernel_vsyscall,@function
++__kernel_vsyscall:
++.LSTART_vsyscall:
++ int $0x80
++ ret
++.LEND_vsyscall:
++ .size __kernel_vsyscall,.-.LSTART_vsyscall
++ .previous
++
++ .section .eh_frame,"a",@progbits
++.LSTARTFRAME:
++ .long .LENDCIE-.LSTARTCIE
++.LSTARTCIE:
++ .long 0 /* CIE ID */
++ .byte 1 /* Version number */
++ .string "zR" /* NUL-terminated augmentation string */
++ .uleb128 1 /* Code alignment factor */
++ .sleb128 -4 /* Data alignment factor */
++ .byte 8 /* Return address register column */
++ .uleb128 1 /* Augmentation value length */
++ .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
++ .byte 0x0c /* DW_CFA_def_cfa */
++ .uleb128 4
++ .uleb128 4
++ .byte 0x88 /* DW_CFA_offset, column 0x8 */
++ .uleb128 1
++ .align 4
++.LENDCIE:
++
++ .long .LENDFDE1-.LSTARTFDE1 /* Length FDE */
++.LSTARTFDE1:
++ .long .LSTARTFDE1-.LSTARTFRAME /* CIE pointer */
++ .long .LSTART_vsyscall-. /* PC-relative start address */
++ .long .LEND_vsyscall-.LSTART_vsyscall
++ .uleb128 0 /* Augmentation length */
++ .align 4
++.LENDFDE1:
++
++/*
++ * Get the common code for the sigreturn entry points.
++ */
++#define SYSCALL_ENTER_KERNEL int $0x80
++#include "vsyscall-sigreturn.S"
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/apic-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,197 @@
++/*
++ * Local APIC handling, local APIC timers
++ *
++ * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
++ * thanks to Eric Gilmore
++ * and Rolf G. Tews
++ * for testing these extensively.
++ * Maciej W. Rozycki : Various updates and fixes.
++ * Mikael Pettersson : Power Management for UP-APIC.
++ * Pavel Machek and
++ * Mikael Pettersson : PM converted to driver model.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/mc146818rtc.h>
++#include <linux/kernel_stat.h>
++#include <linux/sysdev.h>
++#include <linux/module.h>
++
++#include <asm/atomic.h>
++#include <asm/smp.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/hpet.h>
++#include <asm/idle.h>
++
++int apic_verbosity;
++
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * Each architecture has to answer this itself.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++ printk("unexpected IRQ trap at vector %02x\n", irq);
++ /*
++ * Currently unexpected vectors happen only on SMP and APIC.
++ * We _must_ ack these because every local APIC has only N
++ * irq slots per priority level, and a 'hanging, unacked' IRQ
++ * holds up an irq slot - in excessive cases (when multiple
++ * unexpected vectors occur) that might lock up the APIC
++ * completely.
++ * But don't ack when the APIC is disabled. -AK
++ */
++ if (!disable_apic)
++ ack_APIC_irq();
++}
++
++int setup_profiling_timer(unsigned int multiplier)
++{
++ return -EINVAL;
++}
++
++void smp_local_timer_interrupt(struct pt_regs *regs)
++{
++ profile_tick(CPU_PROFILING, regs);
++#ifndef CONFIG_XEN
++#ifdef CONFIG_SMP
++ update_process_times(user_mode(regs));
++#endif
++#endif
++ /*
++ * We take the 'long' return path, and there every subsystem
++	 * grabs the appropriate locks (kernel lock / irq lock).
++	 *
++	 * We might want to decouple profiling from the 'long path',
++	 * and do the profiling totally in assembly.
++ *
++ * Currently this isn't too much of an issue (performance wise),
++ * we can take more than 100K local irqs per second on a 100 MHz P5.
++ */
++}
++
++/*
++ * Local APIC timer interrupt. This is the most natural way for doing
++ * local interrupts, but local timer interrupts can be emulated by
++ * broadcast interrupts too. [in case the hw doesn't support APIC timers]
++ *
++ * [ if a single-CPU system runs an SMP kernel then we call the local
++ * interrupt as well. Thus we cannot inline the local irq ... ]
++ */
++void smp_apic_timer_interrupt(struct pt_regs *regs)
++{
++ /*
++ * the NMI deadlock-detector uses this.
++ */
++ add_pda(apic_timer_irqs, 1);
++
++ /*
++ * NOTE! We'd better ACK the irq immediately,
++ * because timer handling can be slow.
++ */
++ ack_APIC_irq();
++ /*
++ * update_process_times() expects us to have done irq_enter().
++	 * Besides, if we don't, timer interrupts ignore the global
++ * interrupt lock, which is the WrongThing (tm) to do.
++ */
++ exit_idle();
++ irq_enter();
++ smp_local_timer_interrupt(regs);
++ irq_exit();
++}
++
++/*
++ * This interrupt should _never_ happen with our APIC/SMP architecture
++ */
++asmlinkage void smp_spurious_interrupt(void)
++{
++ unsigned int v;
++ exit_idle();
++ irq_enter();
++ /*
++ * Check if this really is a spurious interrupt and ACK it
++ * if it is a vectored one. Just in case...
++ * Spurious interrupts should not be ACKed.
++ */
++ v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
++ if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
++ ack_APIC_irq();
++
++#if 0
++ static unsigned long last_warning;
++ static unsigned long skipped;
++
++ /* see sw-dev-man vol 3, chapter 7.4.13.5 */
++ if (time_before(last_warning+30*HZ,jiffies)) {
++ printk(KERN_INFO "spurious APIC interrupt on CPU#%d, %ld skipped.\n",
++ smp_processor_id(), skipped);
++ last_warning = jiffies;
++ skipped = 0;
++ } else {
++ skipped++;
++ }
++#endif
++ irq_exit();
++}
++
++/*
++ * This interrupt should never happen with our APIC/SMP architecture
++ */
++
++asmlinkage void smp_error_interrupt(void)
++{
++ unsigned int v, v1;
++
++ exit_idle();
++ irq_enter();
++ /* First tickle the hardware, only then report what went on. -- REW */
++ v = apic_read(APIC_ESR);
++ apic_write(APIC_ESR, 0);
++ v1 = apic_read(APIC_ESR);
++ ack_APIC_irq();
++ atomic_inc(&irq_err_count);
++
++ /* Here is what the APIC error bits mean:
++ 0: Send CS error
++ 1: Receive CS error
++ 2: Send accept error
++ 3: Receive accept error
++ 4: Reserved
++ 5: Send illegal vector
++ 6: Received illegal vector
++ 7: Illegal register address
++ */
++ printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
++ smp_processor_id(), v , v1);
++ irq_exit();
++}
++
++int disable_apic;
++
++/*
++ * This initializes the IO-APIC and APIC hardware if this is
++ * a UP kernel.
++ */
++int __init APIC_init_uniprocessor (void)
++{
++#ifdef CONFIG_X86_IO_APIC
++ if (smp_found_config)
++ if (!skip_ioapic_setup && nr_ioapics)
++ setup_IO_APIC();
++#endif
++
++ return 1;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/e820-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,774 @@
++/*
++ * Handle the memory map.
++ * The functions here do the job until bootmem takes over.
++ *
++ * Getting sanitize_e820_map() in sync with i386 version by applying change:
++ * - Provisions for empty E820 memory regions (reported by certain BIOSes).
++ * Alex Achenbach <xela@slit.de>, December 2002.
++ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
++ *
++ */
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/bootmem.h>
++#include <linux/ioport.h>
++#include <linux/string.h>
++#include <linux/kexec.h>
++#include <linux/module.h>
++
++#include <asm/pgtable.h>
++#include <asm/page.h>
++#include <asm/e820.h>
++#include <asm/proto.h>
++#include <asm/bootsetup.h>
++#include <asm/sections.h>
++#include <xen/interface/memory.h>
++
++/*
++ * PFN of last memory page.
++ */
++unsigned long end_pfn;
++EXPORT_SYMBOL(end_pfn);
++
++/*
++ * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
++ * The direct mapping extends to end_pfn_map, so that we can directly access
++ * apertures, ACPI and other tables without having to play with fixmaps.
++ */
++unsigned long end_pfn_map;
++
++/*
++ * Last pfn which the user wants to use.
++ */
++unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;
++
++extern struct resource code_resource, data_resource;
++
++/* Check for some hardcoded bad areas that early boot is not allowed to touch */
++static inline int bad_addr(unsigned long *addrp, unsigned long size)
++{
++ unsigned long addr = *addrp, last = addr + size;
++
++#ifndef CONFIG_XEN
++ /* various gunk below that needed for SMP startup */
++ if (addr < 0x8000) {
++ *addrp = 0x8000;
++ return 1;
++ }
++
++ /* direct mapping tables of the kernel */
++ if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
++ *addrp = table_end << PAGE_SHIFT;
++ return 1;
++ }
++
++ /* initrd */
++#ifdef CONFIG_BLK_DEV_INITRD
++ if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
++ addr < INITRD_START+INITRD_SIZE) {
++ *addrp = INITRD_START + INITRD_SIZE;
++ return 1;
++ }
++#endif
++ /* kernel code + 640k memory hole (later should not be needed, but
++ be paranoid for now) */
++ if (last >= 640*1024 && addr < 1024*1024) {
++ *addrp = 1024*1024;
++ return 1;
++ }
++ if (last >= __pa_symbol(&_text) && last < __pa_symbol(&_end)) {
++ *addrp = __pa_symbol(&_end);
++ return 1;
++ }
++
++ if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
++ *addrp = ebda_addr + ebda_size;
++ return 1;
++ }
++
++ /* XXX ramdisk image here? */
++#else
++ if (last < (table_end<<PAGE_SHIFT)) {
++ *addrp = table_end << PAGE_SHIFT;
++ return 1;
++ }
++#endif
++ return 0;
++}
++
++#ifndef CONFIG_XEN
++/*
++ * This function checks if any part of the range <start,end> is mapped
++ * with type.
++ */
++int __meminit
++e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
++{
++ int i;
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++ if (type && ei->type != type)
++ continue;
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++ return 1;
++ }
++ return 0;
++}
++#endif
++
++/*
++ * This function checks if the entire range <start,end> is mapped with type.
++ *
++ * Note: this function only works correctly if the e820 table is sorted and
++ * non-overlapping, which is the case.
++ */
++int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
++{
++ int i;
++
++#ifndef CONFIG_XEN
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++#else
++ extern struct e820map machine_e820;
++
++ if (!is_initial_xendomain())
++ return 0;
++ for (i = 0; i < machine_e820.nr_map; i++) {
++ const struct e820entry *ei = &machine_e820.map[i];
++#endif
++
++ if (type && ei->type != type)
++ continue;
++		/* does the region (or part of it) overlap the current region? */
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++
++		/* if the region is at the beginning of <start,end>, we move
++		 * start to the end of the region, since the range is covered
++		 * up to that point
++		 */
++ if (ei->addr <= start)
++ start = ei->addr + ei->size;
++ /* if start is now at or beyond end, we're done, full coverage */
++ if (start >= end)
++ return 1; /* we're done */
++ }
++ return 0;
++}
++
++/*
++ * Find a free area in a specific range.
++ */
++unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size)
++{
++ int i;
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++ unsigned long addr = ei->addr, last;
++ if (ei->type != E820_RAM)
++ continue;
++ if (addr < start)
++ addr = start;
++ if (addr > ei->addr + ei->size)
++ continue;
++ while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
++ ;
++ last = addr + size;
++ if (last > ei->addr + ei->size)
++ continue;
++ if (last > end)
++ continue;
++ return addr;
++ }
++ return -1UL;
++}
++
++/*
++ * Free bootmem based on the e820 table for a node.
++ */
++void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
++{
++ int i;
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++ unsigned long last, addr;
++
++ if (ei->type != E820_RAM ||
++ ei->addr+ei->size <= start ||
++ ei->addr >= end)
++ continue;
++
++ addr = round_up(ei->addr, PAGE_SIZE);
++ if (addr < start)
++ addr = start;
++
++ last = round_down(ei->addr + ei->size, PAGE_SIZE);
++ if (last >= end)
++ last = end;
++
++ if (last > addr && last-addr >= PAGE_SIZE)
++ free_bootmem_node(pgdat, addr, last-addr);
++ }
++}
++
++/*
++ * Find the highest page frame number we have available
++ */
++unsigned long __init e820_end_of_ram(void)
++{
++ int i;
++ unsigned long end_pfn = 0;
++
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++ unsigned long start, end;
++
++ start = round_up(ei->addr, PAGE_SIZE);
++ end = round_down(ei->addr + ei->size, PAGE_SIZE);
++ if (start >= end)
++ continue;
++ if (ei->type == E820_RAM) {
++ if (end > end_pfn<<PAGE_SHIFT)
++ end_pfn = end>>PAGE_SHIFT;
++ } else {
++ if (end > end_pfn_map<<PAGE_SHIFT)
++ end_pfn_map = end>>PAGE_SHIFT;
++ }
++ }
++
++ if (end_pfn > end_pfn_map)
++ end_pfn_map = end_pfn;
++ if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
++ end_pfn_map = MAXMEM>>PAGE_SHIFT;
++ if (end_pfn > end_user_pfn)
++ end_pfn = end_user_pfn;
++ if (end_pfn > end_pfn_map)
++ end_pfn = end_pfn_map;
++
++ return end_pfn;
++}
++
++/*
++ * Compute how much memory is missing in a range.
++ * Unlike the other functions in this file the arguments are in page numbers.
++ */
++unsigned long __init
++e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
++{
++ unsigned long ram = 0;
++ unsigned long start = start_pfn << PAGE_SHIFT;
++ unsigned long end = end_pfn << PAGE_SHIFT;
++ int i;
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++ unsigned long last, addr;
++
++ if (ei->type != E820_RAM ||
++ ei->addr+ei->size <= start ||
++ ei->addr >= end)
++ continue;
++
++ addr = round_up(ei->addr, PAGE_SIZE);
++ if (addr < start)
++ addr = start;
++
++ last = round_down(ei->addr + ei->size, PAGE_SIZE);
++ if (last >= end)
++ last = end;
++
++ if (last > addr)
++ ram += last - addr;
++ }
++ return ((end - start) - ram) >> PAGE_SHIFT;
++}
++
++/*
++ * Mark e820 reserved areas as busy for the resource manager.
++ */
++void __init e820_reserve_resources(struct e820entry *e820, int nr_map)
++{
++ int i;
++ for (i = 0; i < nr_map; i++) {
++ struct resource *res;
++ res = alloc_bootmem_low(sizeof(struct resource));
++ switch (e820[i].type) {
++ case E820_RAM: res->name = "System RAM"; break;
++ case E820_ACPI: res->name = "ACPI Tables"; break;
++ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
++ default: res->name = "reserved";
++ }
++ res->start = e820[i].addr;
++ res->end = res->start + e820[i].size - 1;
++ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++ request_resource(&iomem_resource, res);
++ if (e820[i].type == E820_RAM) {
++ /*
++ * We don't know which RAM region contains kernel data,
++ * so we try it repeatedly and let the resource manager
++ * test it.
++ */
++#ifndef CONFIG_XEN
++ request_resource(res, &code_resource);
++ request_resource(res, &data_resource);
++#endif
++#ifdef CONFIG_KEXEC
++ if (crashk_res.start != crashk_res.end)
++ request_resource(res, &crashk_res);
++#ifdef CONFIG_XEN
++ xen_machine_kexec_register_resources(res);
++#endif
++#endif
++ }
++ }
++}
++
++/*
++ * Add a memory region to the kernel e820 map.
++ */
++void __init add_memory_region(unsigned long start, unsigned long size, int type)
++{
++ int x = e820.nr_map;
++
++ if (x == E820MAX) {
++ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
++ return;
++ }
++
++ e820.map[x].addr = start;
++ e820.map[x].size = size;
++ e820.map[x].type = type;
++ e820.nr_map++;
++}
++
++void __init e820_print_map(char *who)
++{
++ int i;
++
++ for (i = 0; i < e820.nr_map; i++) {
++ printk(" %s: %016Lx - %016Lx ", who,
++ (unsigned long long) e820.map[i].addr,
++ (unsigned long long) (e820.map[i].addr + e820.map[i].size));
++ switch (e820.map[i].type) {
++ case E820_RAM: printk("(usable)\n");
++ break;
++ case E820_RESERVED:
++ printk("(reserved)\n");
++ break;
++ case E820_ACPI:
++ printk("(ACPI data)\n");
++ break;
++ case E820_NVS:
++ printk("(ACPI NVS)\n");
++ break;
++ default: printk("type %u\n", e820.map[i].type);
++ break;
++ }
++ }
++}
++
++/*
++ * Sanitize the BIOS e820 map.
++ *
++ * Some e820 responses include overlapping entries. The following
++ * replaces the original e820 map with a new one, removing overlaps.
++ *
++ */
++static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
++{
++ struct change_member {
++ struct e820entry *pbios; /* pointer to original bios entry */
++ unsigned long long addr; /* address for this change point */
++ };
++ static struct change_member change_point_list[2*E820MAX] __initdata;
++ static struct change_member *change_point[2*E820MAX] __initdata;
++ static struct e820entry *overlap_list[E820MAX] __initdata;
++ static struct e820entry new_bios[E820MAX] __initdata;
++ struct change_member *change_tmp;
++ unsigned long current_type, last_type;
++ unsigned long long last_addr;
++ int chgidx, still_changing;
++ int overlap_entries;
++ int new_bios_entry;
++ int old_nr, new_nr, chg_nr;
++ int i;
++
++ /*
++ Visually we're performing the following (1,2,3,4 = memory types)...
++
++ Sample memory map (w/overlaps):
++ ____22__________________
++ ______________________4_
++ ____1111________________
++ _44_____________________
++ 11111111________________
++ ____________________33__
++ ___________44___________
++ __________33333_________
++ ______________22________
++ ___________________2222_
++ _________111111111______
++ _____________________11_
++ _________________4______
++
++ Sanitized equivalent (no overlap):
++ 1_______________________
++ _44_____________________
++ ___1____________________
++ ____22__________________
++ ______11________________
++ _________1______________
++ __________3_____________
++ ___________44___________
++ _____________33_________
++ _______________2________
++ ________________1_______
++ _________________4______
++ ___________________2____
++ ____________________33__
++ ______________________4_
++ */
++
++ /* if there's only one memory region, don't bother */
++ if (*pnr_map < 2)
++ return -1;
++
++ old_nr = *pnr_map;
++
++ /* bail out if we find any unreasonable addresses in bios map */
++ for (i=0; i<old_nr; i++)
++ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
++ return -1;
++
++ /* create pointers for initial change-point information (for sorting) */
++ for (i=0; i < 2*old_nr; i++)
++ change_point[i] = &change_point_list[i];
++
++ /* record all known change-points (starting and ending addresses),
++ omitting those that are for empty memory regions */
++ chgidx = 0;
++ for (i=0; i < old_nr; i++) {
++ if (biosmap[i].size != 0) {
++ change_point[chgidx]->addr = biosmap[i].addr;
++ change_point[chgidx++]->pbios = &biosmap[i];
++ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
++ change_point[chgidx++]->pbios = &biosmap[i];
++ }
++ }
++ chg_nr = chgidx;
++
++ /* sort change-point list by memory addresses (low -> high) */
++ still_changing = 1;
++ while (still_changing) {
++ still_changing = 0;
++ for (i=1; i < chg_nr; i++) {
++ /* if <current_addr> > <last_addr>, swap */
++ /* or, if current=<start_addr> & last=<end_addr>, swap */
++ if ((change_point[i]->addr < change_point[i-1]->addr) ||
++ ((change_point[i]->addr == change_point[i-1]->addr) &&
++ (change_point[i]->addr == change_point[i]->pbios->addr) &&
++ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
++ )
++ {
++ change_tmp = change_point[i];
++ change_point[i] = change_point[i-1];
++ change_point[i-1] = change_tmp;
++ still_changing=1;
++ }
++ }
++ }
++
++ /* create a new bios memory map, removing overlaps */
++ overlap_entries=0; /* number of entries in the overlap table */
++ new_bios_entry=0; /* index for creating new bios map entries */
++ last_type = 0; /* start with undefined memory type */
++ last_addr = 0; /* start with 0 as last starting address */
++	/* loop through change-points, determining effect on the new bios map */
++ for (chgidx=0; chgidx < chg_nr; chgidx++)
++ {
++ /* keep track of all overlapping bios entries */
++ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
++ {
++ /* add map entry to overlap list (> 1 entry implies an overlap) */
++ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
++ }
++ else
++ {
++ /* remove entry from list (order independent, so swap with last) */
++ for (i=0; i<overlap_entries; i++)
++ {
++ if (overlap_list[i] == change_point[chgidx]->pbios)
++ overlap_list[i] = overlap_list[overlap_entries-1];
++ }
++ overlap_entries--;
++ }
++ /* if there are overlapping entries, decide which "type" to use */
++ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
++ current_type = 0;
++ for (i=0; i<overlap_entries; i++)
++ if (overlap_list[i]->type > current_type)
++ current_type = overlap_list[i]->type;
++ /* continue building up new bios map based on this information */
++ if (current_type != last_type) {
++ if (last_type != 0) {
++ new_bios[new_bios_entry].size =
++ change_point[chgidx]->addr - last_addr;
++ /* move forward only if the new size was non-zero */
++ if (new_bios[new_bios_entry].size != 0)
++ if (++new_bios_entry >= E820MAX)
++ break; /* no more space left for new bios entries */
++ }
++ if (current_type != 0) {
++ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
++ new_bios[new_bios_entry].type = current_type;
++ last_addr=change_point[chgidx]->addr;
++ }
++ last_type = current_type;
++ }
++ }
++ new_nr = new_bios_entry; /* retain count for new bios entries */
++
++ /* copy new bios mapping into original location */
++ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
++ *pnr_map = new_nr;
++
++ return 0;
++}
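
The sanitizer above works by turning every BIOS entry into two change points (its start and its end), sorting them, and sweeping the sorted list while tracking which entries are currently open; at each point the highest open type wins. The following standalone sketch (plain userspace C with hypothetical toy values, not part of the patch) shows the same sweep on a RAM region 0-8 overlapped by a reserved region 4-6.

	#include <stdio.h>

	int main(void)
	{
		/* change points for: RAM (type 1) at 0-8, reserved (type 2) at 4-6 */
		struct { unsigned long long addr; int is_start; unsigned type; } pt[] = {
			{ 0, 1, 1 }, { 4, 1, 2 }, { 6, 0, 2 }, { 8, 0, 1 },
		};
		int open[3] = { 0 };	/* currently open regions, per type */
		unsigned cur, last = 0;
		unsigned long long last_addr = 0;
		int i;

		for (i = 0; i < 4; i++) {
			open[pt[i].type] += pt[i].is_start ? 1 : -1;
			cur = open[2] ? 2 : (open[1] ? 1 : 0);	/* highest open type wins */
			if (cur != last) {
				if (last)
					printf("%llu-%llu type %u\n",
					       last_addr, pt[i].addr, last);
				last_addr = pt[i].addr;
				last = cur;
			}
		}
		return 0;	/* prints: 0-4 type 1, 4-6 type 2, 6-8 type 1 */
	}

Note the tie-break built into the real sort above: when a start point and an end point share an address, the start is ordered first, so the sweep never sees a spurious empty stretch at that address.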
++
++/*
++ * Copy the BIOS e820 map into a safe place.
++ *
++ * Sanity-check it while we're at it..
++ *
++ * If we're lucky and live on a modern system, the setup code
++ * will have given us a memory map that we can use to properly
++ * set up memory. If we aren't, we'll fake a memory map.
++ *
++ * We check to see that the memory map contains at least 2 elements
++ * before we'll use it, because the detection code in setup.S may
++ * not be perfect and most every PC known to man has two memory
++ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
++ * thinkpad 560x, for example, does not cooperate with the memory
++ * detection code.)
++ */
++static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
++{
++#ifndef CONFIG_XEN
++ /* Only one memory region (or negative)? Ignore it */
++ if (nr_map < 2)
++ return -1;
++#else
++ BUG_ON(nr_map < 1);
++#endif
++
++ do {
++ unsigned long start = biosmap->addr;
++ unsigned long size = biosmap->size;
++ unsigned long end = start + size;
++ unsigned long type = biosmap->type;
++
++ /* Overflow in 64 bits? Ignore the memory map. */
++ if (start > end)
++ return -1;
++
++#ifndef CONFIG_XEN
++ /*
++ * Some BIOSes claim RAM in the 640k - 1M region.
++ * Not right. Fix it up.
++ *
++ * This should be removed on Hammer which is supposed to not
++ * have non e820 covered ISA mappings there, but I had some strange
++ * problems so it stays for now. -AK
++ */
++ if (type == E820_RAM) {
++ if (start < 0x100000ULL && end > 0xA0000ULL) {
++ if (start < 0xA0000ULL)
++ add_memory_region(start, 0xA0000ULL-start, type);
++ if (end <= 0x100000ULL)
++ continue;
++ start = 0x100000ULL;
++ size = end - start;
++ }
++ }
++#endif
++
++ add_memory_region(start, size, type);
++ } while (biosmap++,--nr_map);
++ return 0;
++}
++
++#ifndef CONFIG_XEN
++void __init setup_memory_region(void)
++{
++ char *who = "BIOS-e820";
++
++ /*
++ * Try to copy the BIOS-supplied E820-map.
++ *
++ * Otherwise fake a memory map; one section from 0k->640k,
++ * the next section from 1mb->appropriate_mem_k
++ */
++ sanitize_e820_map(E820_MAP, &E820_MAP_NR);
++ if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
++ unsigned long mem_size;
++
++ /* compare results from other methods and take the greater */
++ if (ALT_MEM_K < EXT_MEM_K) {
++ mem_size = EXT_MEM_K;
++ who = "BIOS-88";
++ } else {
++ mem_size = ALT_MEM_K;
++ who = "BIOS-e801";
++ }
++
++ e820.nr_map = 0;
++ add_memory_region(0, LOWMEMSIZE(), E820_RAM);
++ add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
++ }
++ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++ e820_print_map(who);
++}
++
++#else /* CONFIG_XEN */
++
++void __init setup_memory_region(void)
++{
++ int rc;
++ struct xen_memory_map memmap;
++ /*
++ * This is rather large for a stack variable but this early in
++	 * the boot process we know we have plenty of slack space.
++ */
++ struct e820entry map[E820MAX];
++
++ memmap.nr_entries = E820MAX;
++ set_xen_guest_handle(memmap.buffer, map);
++
++ rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
++ if ( rc == -ENOSYS ) {
++ memmap.nr_entries = 1;
++ map[0].addr = 0ULL;
++ map[0].size = xen_start_info->nr_pages << PAGE_SHIFT;
++ /* 8MB slack (to balance backend allocations). */
++ map[0].size += 8 << 20;
++ map[0].type = E820_RAM;
++ rc = 0;
++ }
++ BUG_ON(rc);
++
++ sanitize_e820_map(map, (char *)&memmap.nr_entries);
++
++ BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
++
++ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++ e820_print_map("Xen");
++}
++#endif
++
++void __init parse_memopt(char *p, char **from)
++{
++ int i;
++ unsigned long current_end;
++ unsigned long end;
++
++ end_user_pfn = memparse(p, from);
++ end_user_pfn >>= PAGE_SHIFT;
++
++ end = end_user_pfn<<PAGE_SHIFT;
++ i = e820.nr_map-1;
++ current_end = e820.map[i].addr + e820.map[i].size;
++
++ if (current_end < end) {
++ /*
++ * The e820 map ends before our requested size so
++ * extend the final entry to the requested address.
++ */
++ if (e820.map[i].type == E820_RAM)
++ e820.map[i].size = end - e820.map[i].addr;
++ else
++ add_memory_region(current_end, end - current_end, E820_RAM);
++ }
++}
++
++void __init parse_memmapopt(char *p, char **from)
++{
++ unsigned long long start_at, mem_size;
++
++ mem_size = memparse(p, from);
++ p = *from;
++ if (*p == '@') {
++ start_at = memparse(p+1, from);
++ add_memory_region(start_at, mem_size, E820_RAM);
++ } else if (*p == '#') {
++ start_at = memparse(p+1, from);
++ add_memory_region(start_at, mem_size, E820_ACPI);
++ } else if (*p == '$') {
++ start_at = memparse(p+1, from);
++ add_memory_region(start_at, mem_size, E820_RESERVED);
++ } else {
++ end_user_pfn = (mem_size >> PAGE_SHIFT);
++ }
++ p = *from;
++}
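
For reference, and with addresses picked purely for illustration, the option strings handled above map to regions roughly as follows (the suffix character selects the e820 type; a bare size only caps end_user_pfn):

	memmap=64M@0x100000000   ->  add_memory_region(0x100000000, 64M, E820_RAM)
	memmap=16M#0x80000000    ->  add_memory_region(0x80000000,  16M, E820_ACPI)
	memmap=1M$0xfec00000     ->  add_memory_region(0xfec00000,   1M, E820_RESERVED)
	memmap=512M              ->  end_user_pfn = (512M >> PAGE_SHIFT)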
++
++unsigned long pci_mem_start = 0xaeedbabe;
++EXPORT_SYMBOL(pci_mem_start);
++
++/*
++ * Search for the biggest gap in the low 32 bits of the e820
++ * memory space, which we pass to PCI to assign MMIO resources
++ * for hotplug or unconfigured devices.
++ * Hopefully the BIOS left enough space.
++ */
++__init void e820_setup_gap(struct e820entry *e820, int nr_map)
++{
++ unsigned long gapstart, gapsize, round;
++ unsigned long last;
++ int i;
++ int found = 0;
++
++ last = 0x100000000ull;
++ gapstart = 0x10000000;
++ gapsize = 0x400000;
++ i = nr_map;
++ while (--i >= 0) {
++ unsigned long long start = e820[i].addr;
++ unsigned long long end = start + e820[i].size;
++
++ /*
++ * Since "last" is at most 4GB, we know we'll
++ * fit in 32 bits if this condition is true
++ */
++ if (last > end) {
++ unsigned long gap = last - end;
++
++ if (gap > gapsize) {
++ gapsize = gap;
++ gapstart = end;
++ found = 1;
++ }
++ }
++ if (start < last)
++ last = start;
++ }
++
++ if (!found) {
++ gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
++ printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
++ KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
++ }
++
++ /*
++ * See how much we want to round up: start off with
++ * rounding to the next 1MB area.
++ */
++ round = 0x100000;
++ while ((gapsize >> 4) > round)
++ round += round;
++ /* Fun with two's complement */
++ pci_mem_start = (gapstart + round) & -round;
++
++ printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
++ pci_mem_start, gapstart, gapsize);
++}
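
A quick standalone illustration (userspace C, made-up gap values, not part of the patch) of the final rounding step above: round grows by doubling until it is at least 1/16th of the gap, and (gapstart + round) & -round then aligns the start up to that power of two.

	#include <stdio.h>

	int main(void)
	{
		unsigned long gapstart = 0xcfe00000UL;	/* assumed gap start */
		unsigned long gapsize  = 0x20000000UL;	/* assumed 512 MB gap */
		unsigned long round = 0x100000;		/* start at 1 MB */

		while ((gapsize >> 4) > round)
			round += round;			/* 1M -> 2M -> ... -> 32M */

		/* two's-complement alignment trick, as in the code above */
		printf("start at %#lx\n", (gapstart + round) & -round);
		/* prints: start at 0xd0000000 */
		return 0;
	}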
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/early_printk-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,302 @@
++#include <linux/console.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/string.h>
++#include <linux/screen_info.h>
++#include <asm/io.h>
++#include <asm/processor.h>
++#include <asm/fcntl.h>
++
++/* Simple VGA output */
++
++#ifdef __i386__
++#include <asm/setup.h>
++#define VGABASE (__ISA_IO_base + 0xb8000)
++#else
++#include <asm/bootsetup.h>
++#define VGABASE ((void __iomem *)0xffffffff800b8000UL)
++#endif
++
++#ifndef CONFIG_XEN
++static int max_ypos = 25, max_xpos = 80;
++static int current_ypos = 25, current_xpos = 0;
++
++static void early_vga_write(struct console *con, const char *str, unsigned n)
++{
++ char c;
++ int i, k, j;
++
++ while ((c = *str++) != '\0' && n-- > 0) {
++ if (current_ypos >= max_ypos) {
++ /* scroll 1 line up */
++ for (k = 1, j = 0; k < max_ypos; k++, j++) {
++ for (i = 0; i < max_xpos; i++) {
++ writew(readw(VGABASE+2*(max_xpos*k+i)),
++ VGABASE + 2*(max_xpos*j + i));
++ }
++ }
++ for (i = 0; i < max_xpos; i++)
++ writew(0x720, VGABASE + 2*(max_xpos*j + i));
++ current_ypos = max_ypos-1;
++ }
++ if (c == '\n') {
++ current_xpos = 0;
++ current_ypos++;
++ } else if (c != '\r') {
++ writew(((0x7 << 8) | (unsigned short) c),
++ VGABASE + 2*(max_xpos*current_ypos +
++ current_xpos++));
++ if (current_xpos >= max_xpos) {
++ current_xpos = 0;
++ current_ypos++;
++ }
++ }
++ }
++}
++
++static struct console early_vga_console = {
++ .name = "earlyvga",
++ .write = early_vga_write,
++ .flags = CON_PRINTBUFFER,
++ .index = -1,
++};
++
++/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
++
++static int early_serial_base = 0x3f8; /* ttyS0 */
++
++#define XMTRDY 0x20
++
++#define DLAB 0x80
++
++#define TXR 0 /* Transmit register (WRITE) */
++#define RXR 0 /* Receive register (READ) */
++#define IER 1 /* Interrupt Enable */
++#define IIR 2 /* Interrupt ID */
++#define FCR 2 /* FIFO control */
++#define LCR 3 /* Line control */
++#define MCR 4 /* Modem control */
++#define LSR 5 /* Line Status */
++#define MSR 6 /* Modem Status */
++#define DLL 0 /* Divisor Latch Low */
++#define DLH 1 /* Divisor latch High */
++
++static int early_serial_putc(unsigned char ch)
++{
++ unsigned timeout = 0xffff;
++ while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
++ cpu_relax();
++ outb(ch, early_serial_base + TXR);
++ return timeout ? 0 : -1;
++}
++
++static void early_serial_write(struct console *con, const char *s, unsigned n)
++{
++ while (*s && n-- > 0) {
++ early_serial_putc(*s);
++ if (*s == '\n')
++ early_serial_putc('\r');
++ s++;
++ }
++}
++
++#define DEFAULT_BAUD 9600
++
++static __init void early_serial_init(char *s)
++{
++ unsigned char c;
++ unsigned divisor;
++ unsigned baud = DEFAULT_BAUD;
++ char *e;
++
++ if (*s == ',')
++ ++s;
++
++ if (*s) {
++ unsigned port;
++ if (!strncmp(s,"0x",2)) {
++ early_serial_base = simple_strtoul(s, &e, 16);
++ } else {
++ static int bases[] = { 0x3f8, 0x2f8 };
++
++ if (!strncmp(s,"ttyS",4))
++ s += 4;
++ port = simple_strtoul(s, &e, 10);
++ if (port > 1 || s == e)
++ port = 0;
++ early_serial_base = bases[port];
++ }
++ s += strcspn(s, ",");
++ if (*s == ',')
++ s++;
++ }
++
++ outb(0x3, early_serial_base + LCR); /* 8n1 */
++ outb(0, early_serial_base + IER); /* no interrupt */
++ outb(0, early_serial_base + FCR); /* no fifo */
++ outb(0x3, early_serial_base + MCR); /* DTR + RTS */
++
++ if (*s) {
++ baud = simple_strtoul(s, &e, 0);
++ if (baud == 0 || s == e)
++ baud = DEFAULT_BAUD;
++ }
++
++ divisor = 115200 / baud;
++ c = inb(early_serial_base + LCR);
++ outb(c | DLAB, early_serial_base + LCR);
++ outb(divisor & 0xff, early_serial_base + DLL);
++ outb((divisor >> 8) & 0xff, early_serial_base + DLH);
++ outb(c & ~DLAB, early_serial_base + LCR);
++}
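
As a rough userspace sketch (hypothetical option string, not part of the patch), this is how a typical earlyprintk=serial,ttyS0,115200 argument is consumed by the parser above: the ttyS index picks the legacy I/O base, and the UART divisor comes from the fixed 115200 baud clock.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		const char *s = "ttyS0,115200";	/* after the leading "serial," */
		static const int bases[] = { 0x3f8, 0x2f8 };
		unsigned port, baud, divisor;

		s += 4;				/* skip "ttyS" */
		port = strtoul(s, NULL, 10);	/* 0 -> ttyS0 */
		s = strchr(s, ',') + 1;
		baud = strtoul(s, NULL, 0);	/* 115200 */
		divisor = 115200 / baud;	/* 1; 9600 would give 12 */

		printf("base %#x divisor %u\n", bases[port], divisor);
		return 0;
	}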
++
++#else /* CONFIG_XEN */
++
++static void
++early_serial_write(struct console *con, const char *s, unsigned count)
++{
++ int n;
++
++ while (count > 0) {
++ n = HYPERVISOR_console_io(CONSOLEIO_write, count, (char *)s);
++ if (n <= 0)
++ break;
++ count -= n;
++ s += n;
++ }
++}
++
++static __init void early_serial_init(char *s)
++{
++}
++
++/*
++ * No early VGA console on Xen, as we do not have convenient ISA-space
++ * mappings. Someone should fix this for domain 0. For now, use fake serial.
++ */
++#define early_vga_console early_serial_console
++
++#endif
++
++static struct console early_serial_console = {
++ .name = "earlyser",
++ .write = early_serial_write,
++ .flags = CON_PRINTBUFFER,
++ .index = -1,
++};
++
++/* Console interface to a host file on AMD's SimNow! */
++
++static int simnow_fd;
++
++enum {
++ MAGIC1 = 0xBACCD00A,
++ MAGIC2 = 0xCA110000,
++ XOPEN = 5,
++ XWRITE = 4,
++};
++
++static noinline long simnow(long cmd, long a, long b, long c)
++{
++ long ret;
++ asm volatile("cpuid" :
++ "=a" (ret) :
++ "b" (a), "c" (b), "d" (c), "0" (MAGIC1), "D" (cmd + MAGIC2));
++ return ret;
++}
++
++void __init simnow_init(char *str)
++{
++ char *fn = "klog";
++ if (*str == '=')
++ fn = ++str;
++ /* error ignored */
++ simnow_fd = simnow(XOPEN, (unsigned long)fn, O_WRONLY|O_APPEND|O_CREAT, 0644);
++}
++
++static void simnow_write(struct console *con, const char *s, unsigned n)
++{
++ simnow(XWRITE, simnow_fd, (unsigned long)s, n);
++}
++
++static struct console simnow_console = {
++ .name = "simnow",
++ .write = simnow_write,
++ .flags = CON_PRINTBUFFER,
++ .index = -1,
++};
++
++/* Direct interface for emergencies */
++struct console *early_console = &early_vga_console;
++static int early_console_initialized = 0;
++
++void early_printk(const char *fmt, ...)
++{
++ char buf[512];
++ int n;
++ va_list ap;
++
++ va_start(ap,fmt);
++ n = vscnprintf(buf,512,fmt,ap);
++ early_console->write(early_console,buf,n);
++ va_end(ap);
++}
++
++static int __initdata keep_early;
++
++int __init setup_early_printk(char *opt)
++{
++ char *space;
++ char buf[256];
++
++ if (early_console_initialized)
++ return 1;
++
++ strlcpy(buf,opt,sizeof(buf));
++ space = strchr(buf, ' ');
++ if (space)
++ *space = 0;
++
++ if (strstr(buf,"keep"))
++ keep_early = 1;
++
++ if (!strncmp(buf, "serial", 6)) {
++ early_serial_init(buf + 6);
++ early_console = &early_serial_console;
++ } else if (!strncmp(buf, "ttyS", 4)) {
++ early_serial_init(buf);
++ early_console = &early_serial_console;
++ } else if (!strncmp(buf, "vga", 3)
++#ifndef CONFIG_XEN
++ && SCREEN_INFO.orig_video_isVGA == 1) {
++ max_xpos = SCREEN_INFO.orig_video_cols;
++ max_ypos = SCREEN_INFO.orig_video_lines;
++ current_ypos = SCREEN_INFO.orig_y;
++#else
++ || !strncmp(buf, "xen", 3)) {
++#endif
++ early_console = &early_vga_console;
++ } else if (!strncmp(buf, "simnow", 6)) {
++ simnow_init(buf + 6);
++ early_console = &simnow_console;
++ keep_early = 1;
++ }
++ early_console_initialized = 1;
++ register_console(early_console);
++ return 0;
++}
++
++void __init disable_early_printk(void)
++{
++ if (!early_console_initialized || !early_console)
++ return;
++ if (!keep_early) {
++ printk("disabling early console\n");
++ unregister_console(early_console);
++ early_console_initialized = 0;
++ } else {
++ printk("keeping early console\n");
++ }
++}
++
++__setup("earlyprintk=", setup_early_printk);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/entry-xen.S 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,1325 @@
++/*
++ * linux/arch/x86_64/entry.S
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
++ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
++ *
++ * $Id$
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Asit Mallick <asit.k.mallick@intel.com>
++ * Modified for Xen
++ */
++
++/*
++ * entry.S contains the system-call and fault low-level handling routines.
++ *
++ * NOTE: This code handles signal-recognition, which happens every time
++ * after an interrupt and after each system call.
++ *
++ * Normal syscalls and interrupts don't save a full stack frame; this is
++ * only done for syscall tracing, signals or fork/exec et al.
++ *
++ * A note on terminology:
++ * - top of stack: Architecture defined interrupt frame from SS to RIP
++ * at the top of the kernel process stack.
++ * - partial stack frame: partially saved registers up to R11.
++ * - full stack frame: Like partial stack frame, but all registers saved.
++ *
++ * TODO:
++ * - schedule it carefully for the final hardware.
++ */
++
++#define ASSEMBLY 1
++#include <linux/linkage.h>
++#include <asm/segment.h>
++#include <asm/smp.h>
++#include <asm/cache.h>
++#include <asm/errno.h>
++#include <asm/dwarf2.h>
++#include <asm/calling.h>
++#include <asm/asm-offsets.h>
++#include <asm/msr.h>
++#include <asm/unistd.h>
++#include <asm/thread_info.h>
++#include <asm/hw_irq.h>
++#include <asm/page.h>
++#include <asm/irqflags.h>
++#include <asm/errno.h>
++#include <xen/interface/arch-x86_64.h>
++#include <xen/interface/features.h>
++
++#include "irq_vectors.h"
++
++#include "xen_entry.S"
++
++ .code64
++
++#ifndef CONFIG_PREEMPT
++#define retint_kernel retint_restore_args
++#endif
++
++
++.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
++#ifdef CONFIG_TRACE_IRQFLAGS
++ bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
++ jnc 1f
++ TRACE_IRQS_ON
++1:
++#endif
++.endm
++
++NMI_MASK = 0x80000000
++
++/*
++ * C code is not supposed to know about undefined top of stack. Every time
++ * a C function with a pt_regs argument is called from the SYSCALL based
++ * fast path FIXUP_TOP_OF_STACK is needed.
++ * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
++ * manipulation.
++ */
++
++ /* %rsp:at FRAMEEND */
++ .macro FIXUP_TOP_OF_STACK tmp
++ movq $__USER_CS,CS(%rsp)
++ movq $-1,RCX(%rsp)
++ .endm
++
++ .macro RESTORE_TOP_OF_STACK tmp,offset=0
++ .endm
++
++ .macro FAKE_STACK_FRAME child_rip
++ /* push in order ss, rsp, eflags, cs, rip */
++ xorl %eax, %eax
++ pushq %rax /* ss */
++ CFI_ADJUST_CFA_OFFSET 8
++ /*CFI_REL_OFFSET ss,0*/
++ pushq %rax /* rsp */
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rsp,0
++ pushq $(1<<9) /* eflags - interrupts on */
++ CFI_ADJUST_CFA_OFFSET 8
++ /*CFI_REL_OFFSET rflags,0*/
++ pushq $__KERNEL_CS /* cs */
++ CFI_ADJUST_CFA_OFFSET 8
++ /*CFI_REL_OFFSET cs,0*/
++ pushq \child_rip /* rip */
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rip,0
++ pushq %rax /* orig rax */
++ CFI_ADJUST_CFA_OFFSET 8
++ .endm
++
++ .macro UNFAKE_STACK_FRAME
++ addq $8*6, %rsp
++ CFI_ADJUST_CFA_OFFSET -(6*8)
++ .endm
++
++ .macro CFI_DEFAULT_STACK start=1,adj=0
++ .if \start
++ CFI_STARTPROC simple
++ CFI_DEF_CFA rsp,SS+8-(\adj*ARGOFFSET)
++ .else
++ CFI_DEF_CFA_OFFSET SS+8-(\adj*ARGOFFSET)
++ .endif
++ .if \adj == 0
++ CFI_REL_OFFSET r15,R15
++ CFI_REL_OFFSET r14,R14
++ CFI_REL_OFFSET r13,R13
++ CFI_REL_OFFSET r12,R12
++ CFI_REL_OFFSET rbp,RBP
++ CFI_REL_OFFSET rbx,RBX
++ .endif
++ CFI_REL_OFFSET r11,R11
++ CFI_REL_OFFSET r10,R10
++ CFI_REL_OFFSET r9,R9
++ CFI_REL_OFFSET r8,R8
++ CFI_REL_OFFSET rax,RAX
++ CFI_REL_OFFSET rcx,RCX
++ CFI_REL_OFFSET rdx,RDX
++ CFI_REL_OFFSET rsi,RSI
++ CFI_REL_OFFSET rdi,RDI
++ CFI_REL_OFFSET rip,RIP
++ /*CFI_REL_OFFSET cs,CS*/
++ /*CFI_REL_OFFSET rflags,EFLAGS*/
++ CFI_REL_OFFSET rsp,RSP
++ /*CFI_REL_OFFSET ss,SS*/
++ .endm
++
++ /*
++ * Must be consistent with the definition in arch-x86/xen-x86_64.h:
++ * struct iret_context {
++ * u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
++ * };
++ * with rax, r11, and rcx being taken care of in the hypercall stub.
++ */
++ .macro HYPERVISOR_IRET flag
++ testb $3,1*8(%rsp)
++ jnz 2f
++ testl $NMI_MASK,2*8(%rsp)
++ jnz 2f
++
++ cmpb $0,(xen_features+XENFEAT_supervisor_mode_kernel)(%rip)
++ jne 1f
++
++ /* Direct iret to kernel space. Correct CS and SS. */
++ orl $3,1*8(%rsp)
++ orl $3,4*8(%rsp)
++1: iretq
++
++2: /* Slow iret via hypervisor. */
++ andl $~NMI_MASK, 2*8(%rsp)
++ pushq $\flag
++ jmp hypercall_page + (__HYPERVISOR_iret * 32)
++ .endm
++
++/*
++ * A newly forked process directly context switches into this.
++ */
++/* rdi: prev */
++ENTRY(ret_from_fork)
++ CFI_DEFAULT_STACK
++ call schedule_tail
++ GET_THREAD_INFO(%rcx)
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
++ jnz rff_trace
++rff_action:
++ RESTORE_REST
++ testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
++ je int_ret_from_sys_call
++ testl $_TIF_IA32,threadinfo_flags(%rcx)
++ jnz int_ret_from_sys_call
++ RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
++ jmp ret_from_sys_call
++rff_trace:
++ movq %rsp,%rdi
++ call syscall_trace_leave
++ GET_THREAD_INFO(%rcx)
++ jmp rff_action
++ CFI_ENDPROC
++END(ret_from_fork)
++
++/*
++ * initial frame state for interrupts and exceptions
++ */
++ .macro _frame ref
++ CFI_STARTPROC simple
++ CFI_DEF_CFA rsp,SS+8-\ref
++ /*CFI_REL_OFFSET ss,SS-\ref*/
++ CFI_REL_OFFSET rsp,RSP-\ref
++ /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
++ /*CFI_REL_OFFSET cs,CS-\ref*/
++ CFI_REL_OFFSET rip,RIP-\ref
++ .endm
++
++/*
++ * System call entry. Up to 6 arguments in registers are supported.
++ *
++ * SYSCALL does not save anything on the stack and does not change the
++ * stack pointer.
++ */
++
++/*
++ * Register setup:
++ * rax system call number
++ * rdi arg0
++ * rcx return address for syscall/sysret, C arg3
++ * rsi arg1
++ * rdx arg2
++ * r10 arg3 (--> moved to rcx for C)
++ * r8 arg4
++ * r9 arg5
++ * r11 eflags for syscall/sysret, temporary for C
++ * r12-r15,rbp,rbx saved by C code, not touched.
++ *
++ * Interrupts are off on entry.
++ * Only called from user space.
++ *
++ * XXX if we had a free scratch register we could save the RSP into the stack frame
++ * and report it properly in ps. Unfortunately we haven't.
++ *
++ * When the user can change the frame, always force IRET. That is because
++ * it deals with uncanonical addresses better. SYSRET has trouble
++ * with them due to bugs in both AMD and Intel CPUs.
++ */
++
++ENTRY(system_call)
++ _frame (RIP-0x10)
++ SAVE_ARGS -8,0
++ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
++ /*
++ * No need to follow this irqs off/on section - it's straight
++ * and short:
++ */
++ XEN_UNBLOCK_EVENTS(%r11)
++ GET_THREAD_INFO(%rcx)
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
++ CFI_REMEMBER_STATE
++ jnz tracesys
++ cmpq $__NR_syscall_max,%rax
++ ja badsys
++ movq %r10,%rcx
++ call *sys_call_table(,%rax,8) # XXX: rip relative
++ movq %rax,RAX-ARGOFFSET(%rsp)
++/*
++ * Syscall return path ending with SYSRET (fast path)
++ * Has incomplete stack frame and undefined top of stack.
++ */
++ .globl ret_from_sys_call
++ret_from_sys_call:
++ movl $_TIF_ALLWORK_MASK,%edi
++ /* edi: flagmask */
++sysret_check:
++ GET_THREAD_INFO(%rcx)
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ movl threadinfo_flags(%rcx),%edx
++ andl %edi,%edx
++ CFI_REMEMBER_STATE
++ jnz sysret_careful
++ /*
++ * sysretq will re-enable interrupts:
++ */
++ TRACE_IRQS_ON
++ XEN_UNBLOCK_EVENTS(%rsi)
++ RESTORE_ARGS 0,8,0
++ HYPERVISOR_IRET VGCF_IN_SYSCALL
++
++ /* Handle reschedules */
++ /* edx: work, edi: workmask */
++sysret_careful:
++ CFI_RESTORE_STATE
++ bt $TIF_NEED_RESCHED,%edx
++ jnc sysret_signal
++ TRACE_IRQS_ON
++ XEN_UNBLOCK_EVENTS(%rsi)
++ pushq %rdi
++ CFI_ADJUST_CFA_OFFSET 8
++ call schedule
++ popq %rdi
++ CFI_ADJUST_CFA_OFFSET -8
++ jmp sysret_check
++
++ /* Handle a signal */
++sysret_signal:
++ TRACE_IRQS_ON
++/* sti */
++ XEN_UNBLOCK_EVENTS(%rsi)
++ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
++ jz 1f
++
++ /* Really a signal */
++ /* edx: work flags (arg3) */
++ leaq do_notify_resume(%rip),%rax
++ leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
++ xorl %esi,%esi # oldset -> arg2
++ call ptregscall_common
++1: movl $_TIF_NEED_RESCHED,%edi
++ /* Use IRET because user could have changed frame. This
++ works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ jmp int_with_check
++
++badsys:
++ movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
++ jmp ret_from_sys_call
++
++ /* Do syscall tracing */
++tracesys:
++ CFI_RESTORE_STATE
++ SAVE_REST
++ movq $-ENOSYS,RAX(%rsp)
++ FIXUP_TOP_OF_STACK %rdi
++ movq %rsp,%rdi
++ call syscall_trace_enter
++ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
++ RESTORE_REST
++ cmpq $__NR_syscall_max,%rax
++ ja 1f
++ movq %r10,%rcx /* fixup for C */
++ call *sys_call_table(,%rax,8)
++1: movq %rax,RAX-ARGOFFSET(%rsp)
++ /* Use IRET because user could have changed frame */
++ jmp int_ret_from_sys_call
++ CFI_ENDPROC
++END(system_call)
++
++/*
++ * Syscall return path ending with IRET.
++ * Has correct top of stack, but partial stack frame.
++ */
++ENTRY(int_ret_from_sys_call)
++ CFI_STARTPROC simple
++ CFI_DEF_CFA rsp,SS+8-ARGOFFSET
++ /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
++ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
++ /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
++ /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
++ CFI_REL_OFFSET rip,RIP-ARGOFFSET
++ CFI_REL_OFFSET rdx,RDX-ARGOFFSET
++ CFI_REL_OFFSET rcx,RCX-ARGOFFSET
++ CFI_REL_OFFSET rax,RAX-ARGOFFSET
++ CFI_REL_OFFSET rdi,RDI-ARGOFFSET
++ CFI_REL_OFFSET rsi,RSI-ARGOFFSET
++ CFI_REL_OFFSET r8,R8-ARGOFFSET
++ CFI_REL_OFFSET r9,R9-ARGOFFSET
++ CFI_REL_OFFSET r10,R10-ARGOFFSET
++ CFI_REL_OFFSET r11,R11-ARGOFFSET
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ testb $3,CS-ARGOFFSET(%rsp)
++ jnz 1f
++ /* Need to set the proper %ss (not NULL) for ring 3 iretq */
++ movl $__KERNEL_DS,SS-ARGOFFSET(%rsp)
++	jmp retint_restore_args	# return from ring3 kernel
++1:
++ movl $_TIF_ALLWORK_MASK,%edi
++ /* edi: mask to check */
++int_with_check:
++ GET_THREAD_INFO(%rcx)
++ movl threadinfo_flags(%rcx),%edx
++ andl %edi,%edx
++ jnz int_careful
++ andl $~TS_COMPAT,threadinfo_status(%rcx)
++ jmp retint_restore_args
++
++ /* Either reschedule or signal or syscall exit tracking needed. */
++ /* First do a reschedule test. */
++ /* edx: work, edi: workmask */
++int_careful:
++ bt $TIF_NEED_RESCHED,%edx
++ jnc int_very_careful
++ TRACE_IRQS_ON
++/* sti */
++ XEN_UNBLOCK_EVENTS(%rsi)
++ pushq %rdi
++ CFI_ADJUST_CFA_OFFSET 8
++ call schedule
++ popq %rdi
++ CFI_ADJUST_CFA_OFFSET -8
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ jmp int_with_check
++
++ /* handle signals and tracing -- both require a full stack frame */
++int_very_careful:
++ TRACE_IRQS_ON
++/* sti */
++ XEN_UNBLOCK_EVENTS(%rsi)
++ SAVE_REST
++ /* Check for syscall exit trace */
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
++ jz int_signal
++ pushq %rdi
++ CFI_ADJUST_CFA_OFFSET 8
++ leaq 8(%rsp),%rdi # &ptregs -> arg1
++ call syscall_trace_leave
++ popq %rdi
++ CFI_ADJUST_CFA_OFFSET -8
++ andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ jmp int_restore_rest
++
++int_signal:
++ testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
++ jz 1f
++ movq %rsp,%rdi # &ptregs -> arg1
++ xorl %esi,%esi # oldset -> arg2
++ call do_notify_resume
++1: movl $_TIF_NEED_RESCHED,%edi
++int_restore_rest:
++ RESTORE_REST
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ jmp int_with_check
++ CFI_ENDPROC
++END(int_ret_from_sys_call)
++
++/*
++ * Certain special system calls that need to save a complete full stack frame.
++ */
++
++ .macro PTREGSCALL label,func,arg
++ .globl \label
++\label:
++ leaq \func(%rip),%rax
++ leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
++ jmp ptregscall_common
++END(\label)
++ .endm
++
++ CFI_STARTPROC
++
++ PTREGSCALL stub_clone, sys_clone, %r8
++ PTREGSCALL stub_fork, sys_fork, %rdi
++ PTREGSCALL stub_vfork, sys_vfork, %rdi
++ PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
++ PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
++ PTREGSCALL stub_iopl, sys_iopl, %rsi
++
++ENTRY(ptregscall_common)
++ popq %r11
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_REGISTER rip, r11
++ SAVE_REST
++ movq %r11, %r15
++ CFI_REGISTER rip, r15
++ FIXUP_TOP_OF_STACK %r11
++ call *%rax
++ RESTORE_TOP_OF_STACK %r11
++ movq %r15, %r11
++ CFI_REGISTER rip, r11
++ RESTORE_REST
++ pushq %r11
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rip, 0
++ ret
++ CFI_ENDPROC
++END(ptregscall_common)
++
++ENTRY(stub_execve)
++ CFI_STARTPROC
++ popq %r11
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_REGISTER rip, r11
++ SAVE_REST
++ FIXUP_TOP_OF_STACK %r11
++ call sys_execve
++ RESTORE_TOP_OF_STACK %r11
++ movq %rax,RAX(%rsp)
++ RESTORE_REST
++ jmp int_ret_from_sys_call
++ CFI_ENDPROC
++END(stub_execve)
++
++/*
++ * sigreturn is special because it needs to restore all registers on return.
++ * This cannot be done with SYSRET, so use the IRET return path instead.
++ */
++ENTRY(stub_rt_sigreturn)
++ CFI_STARTPROC
++ addq $8, %rsp
++ CFI_ADJUST_CFA_OFFSET -8
++ SAVE_REST
++ movq %rsp,%rdi
++ FIXUP_TOP_OF_STACK %r11
++ call sys_rt_sigreturn
++ movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
++ RESTORE_REST
++ jmp int_ret_from_sys_call
++ CFI_ENDPROC
++END(stub_rt_sigreturn)
++
++/* initial frame state for interrupts (and exceptions without error code) */
++#define INTR_FRAME _frame (RIP-0x10); \
++ CFI_REL_OFFSET rcx,0; \
++ CFI_REL_OFFSET r11,8
++
++/* initial frame state for exceptions with error code (and interrupts with
++ vector already pushed) */
++#define XCPT_FRAME _frame (RIP-0x18); \
++ CFI_REL_OFFSET rcx,0; \
++ CFI_REL_OFFSET r11,8
++
++/*
++ * Interrupt exit.
++ *
++ */
++
++retint_check:
++ CFI_DEFAULT_STACK adj=1
++ movl threadinfo_flags(%rcx),%edx
++ andl %edi,%edx
++ CFI_REMEMBER_STATE
++ jnz retint_careful
++retint_restore_args:
++ movl EFLAGS-REST_SKIP(%rsp), %eax
++ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
++ XEN_GET_VCPU_INFO(%rsi)
++ andb evtchn_upcall_mask(%rsi),%al
++ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
++ jnz restore_all_enable_events # != 0 => enable event delivery
++ XEN_PUT_VCPU_INFO(%rsi)
++
++ RESTORE_ARGS 0,8,0
++ HYPERVISOR_IRET 0
++
++ /* edi: workmask, edx: work */
++retint_careful:
++ CFI_RESTORE_STATE
++ bt $TIF_NEED_RESCHED,%edx
++ jnc retint_signal
++ TRACE_IRQS_ON
++ XEN_UNBLOCK_EVENTS(%rsi)
++/* sti */
++ pushq %rdi
++ CFI_ADJUST_CFA_OFFSET 8
++ call schedule
++ popq %rdi
++ CFI_ADJUST_CFA_OFFSET -8
++ GET_THREAD_INFO(%rcx)
++ XEN_BLOCK_EVENTS(%rsi)
++/* cli */
++ TRACE_IRQS_OFF
++ jmp retint_check
++
++retint_signal:
++ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
++ jz retint_restore_args
++ TRACE_IRQS_ON
++ XEN_UNBLOCK_EVENTS(%rsi)
++ SAVE_REST
++ movq $-1,ORIG_RAX(%rsp)
++ xorl %esi,%esi # oldset
++ movq %rsp,%rdi # &pt_regs
++ call do_notify_resume
++ RESTORE_REST
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ movl $_TIF_NEED_RESCHED,%edi
++ GET_THREAD_INFO(%rcx)
++ jmp retint_check
++
++#ifdef CONFIG_PREEMPT
++ /* Returning to kernel space. Check if we need preemption */
++ /* rcx: threadinfo. interrupts off. */
++ .p2align
++retint_kernel:
++ cmpl $0,threadinfo_preempt_count(%rcx)
++ jnz retint_restore_args
++ bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
++ jnc retint_restore_args
++ bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
++ jnc retint_restore_args
++ call preempt_schedule_irq
++ jmp retint_kernel /* check again */
++#endif
++
++ CFI_ENDPROC
++END(retint_check)
++
++#ifndef CONFIG_XEN
++/*
++ * APIC interrupts.
++ */
++ .macro apicinterrupt num,func
++ INTR_FRAME
++ pushq $~(\num)
++ CFI_ADJUST_CFA_OFFSET 8
++ interrupt \func
++ jmp error_entry
++ CFI_ENDPROC
++ .endm
++
++ENTRY(thermal_interrupt)
++ apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
++END(thermal_interrupt)
++
++ENTRY(threshold_interrupt)
++ apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
++END(threshold_interrupt)
++
++#ifdef CONFIG_SMP
++ENTRY(reschedule_interrupt)
++ apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
++END(reschedule_interrupt)
++
++ .macro INVALIDATE_ENTRY num
++ENTRY(invalidate_interrupt\num)
++ apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
++END(invalidate_interrupt\num)
++ .endm
++
++ INVALIDATE_ENTRY 0
++ INVALIDATE_ENTRY 1
++ INVALIDATE_ENTRY 2
++ INVALIDATE_ENTRY 3
++ INVALIDATE_ENTRY 4
++ INVALIDATE_ENTRY 5
++ INVALIDATE_ENTRY 6
++ INVALIDATE_ENTRY 7
++
++ENTRY(call_function_interrupt)
++ apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
++END(call_function_interrupt)
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++ENTRY(apic_timer_interrupt)
++ apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
++END(apic_timer_interrupt)
++
++ENTRY(error_interrupt)
++ apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
++END(error_interrupt)
++
++ENTRY(spurious_interrupt)
++ apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
++END(spurious_interrupt)
++#endif
++#endif /* !CONFIG_XEN */
++
++/*
++ * Exception entry points.
++ */
++ .macro zeroentry sym
++ INTR_FRAME
++ movq (%rsp),%rcx
++ CFI_RESTORE rcx
++ movq 8(%rsp),%r11
++ CFI_RESTORE r11
++ addq $0x10,%rsp /* skip rcx and r11 */
++ CFI_ADJUST_CFA_OFFSET -0x10
++ pushq $0 /* push error code/oldrax */
++ CFI_ADJUST_CFA_OFFSET 8
++ pushq %rax /* push real oldrax to the rdi slot */
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rax,0
++ leaq \sym(%rip),%rax
++ jmp error_entry
++ CFI_ENDPROC
++ .endm
++
++ .macro errorentry sym
++ XCPT_FRAME
++ movq (%rsp),%rcx
++ CFI_RESTORE rcx
++ movq 8(%rsp),%r11
++ CFI_RESTORE r11
++ addq $0x10,%rsp /* rsp points to the error code */
++ CFI_ADJUST_CFA_OFFSET -0x10
++ pushq %rax
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rax,0
++ leaq \sym(%rip),%rax
++ jmp error_entry
++ CFI_ENDPROC
++ .endm
++
++#if 0 /* not XEN */
++ /* error code is on the stack already */
++ /* handle NMI like exceptions that can happen everywhere */
++ .macro paranoidentry sym, ist=0, irqtrace=1
++ movq (%rsp),%rcx
++ movq 8(%rsp),%r11
++ addq $0x10,%rsp /* skip rcx and r11 */
++ SAVE_ALL
++ cld
++#if 0 /* not XEN */
++ movl $1,%ebx
++ movl $MSR_GS_BASE,%ecx
++ rdmsr
++ testl %edx,%edx
++ js 1f
++ swapgs
++ xorl %ebx,%ebx
++1:
++#endif
++ .if \ist
++ movq %gs:pda_data_offset, %rbp
++ .endif
++ movq %rsp,%rdi
++ movq ORIG_RAX(%rsp),%rsi
++ movq $-1,ORIG_RAX(%rsp)
++ .if \ist
++ subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
++ .endif
++ call \sym
++ .if \ist
++ addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
++ .endif
++/* cli */
++ XEN_BLOCK_EVENTS(%rsi)
++ .if \irqtrace
++ TRACE_IRQS_OFF
++ .endif
++ .endm
++
++ /*
++ * "Paranoid" exit path from exception stack.
++ * Paranoid because this is used by NMIs and cannot take
++ * any kernel state for granted.
++ * We don't do kernel preemption checks here, because only
++ * NMI should be common and it does not enable IRQs and
++ * cannot get reschedule ticks.
++ *
++ * "trace" is 0 for the NMI handler only, because irq-tracing
++ * is fundamentally NMI-unsafe. (we cannot change the soft and
++ * hard flags at once, atomically)
++ */
++ .macro paranoidexit trace=1
++ /* ebx: no swapgs flag */
++paranoid_exit\trace:
++ testl %ebx,%ebx /* swapgs needed? */
++ jnz paranoid_restore\trace
++ testl $3,CS(%rsp)
++ jnz paranoid_userspace\trace
++paranoid_swapgs\trace:
++ TRACE_IRQS_IRETQ 0
++ swapgs
++paranoid_restore\trace:
++ RESTORE_ALL 8
++ iretq
++paranoid_userspace\trace:
++ GET_THREAD_INFO(%rcx)
++ movl threadinfo_flags(%rcx),%ebx
++ andl $_TIF_WORK_MASK,%ebx
++ jz paranoid_swapgs\trace
++ movq %rsp,%rdi /* &pt_regs */
++ call sync_regs
++ movq %rax,%rsp /* switch stack for scheduling */
++ testl $_TIF_NEED_RESCHED,%ebx
++ jnz paranoid_schedule\trace
++ movl %ebx,%edx /* arg3: thread flags */
++ .if \trace
++ TRACE_IRQS_ON
++ .endif
++ sti
++ xorl %esi,%esi /* arg2: oldset */
++ movq %rsp,%rdi /* arg1: &pt_regs */
++ call do_notify_resume
++ cli
++ .if \trace
++ TRACE_IRQS_OFF
++ .endif
++ jmp paranoid_userspace\trace
++paranoid_schedule\trace:
++ .if \trace
++ TRACE_IRQS_ON
++ .endif
++ sti
++ call schedule
++ cli
++ .if \trace
++ TRACE_IRQS_OFF
++ .endif
++ jmp paranoid_userspace\trace
++ CFI_ENDPROC
++ .endm
++#endif
++
++/*
++ * Exception entry point. This expects an error code/orig_rax on the stack
++ * and the exception handler in %rax.
++ */
++ENTRY(error_entry)
++ _frame RDI
++ CFI_REL_OFFSET rax,0
++ /* rdi slot contains rax, oldrax contains error code */
++ cld
++ subq $14*8,%rsp
++ CFI_ADJUST_CFA_OFFSET (14*8)
++ movq %rsi,13*8(%rsp)
++ CFI_REL_OFFSET rsi,RSI
++ movq 14*8(%rsp),%rsi /* load rax from rdi slot */
++ CFI_REGISTER rax,rsi
++ movq %rdx,12*8(%rsp)
++ CFI_REL_OFFSET rdx,RDX
++ movq %rcx,11*8(%rsp)
++ CFI_REL_OFFSET rcx,RCX
++ movq %rsi,10*8(%rsp) /* store rax */
++ CFI_REL_OFFSET rax,RAX
++ movq %r8, 9*8(%rsp)
++ CFI_REL_OFFSET r8,R8
++ movq %r9, 8*8(%rsp)
++ CFI_REL_OFFSET r9,R9
++ movq %r10,7*8(%rsp)
++ CFI_REL_OFFSET r10,R10
++ movq %r11,6*8(%rsp)
++ CFI_REL_OFFSET r11,R11
++ movq %rbx,5*8(%rsp)
++ CFI_REL_OFFSET rbx,RBX
++ movq %rbp,4*8(%rsp)
++ CFI_REL_OFFSET rbp,RBP
++ movq %r12,3*8(%rsp)
++ CFI_REL_OFFSET r12,R12
++ movq %r13,2*8(%rsp)
++ CFI_REL_OFFSET r13,R13
++ movq %r14,1*8(%rsp)
++ CFI_REL_OFFSET r14,R14
++ movq %r15,(%rsp)
++ CFI_REL_OFFSET r15,R15
++#if 0
++ cmpl $__KERNEL_CS,CS(%rsp)
++ CFI_REMEMBER_STATE
++ je error_kernelspace
++#endif
++error_call_handler:
++ movq %rdi, RDI(%rsp)
++ CFI_REL_OFFSET rdi,RDI
++ movq %rsp,%rdi
++ movq ORIG_RAX(%rsp),%rsi # get error code
++ movq $-1,ORIG_RAX(%rsp)
++ call *%rax
++error_exit:
++ RESTORE_REST
++/* cli */
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ GET_THREAD_INFO(%rcx)
++ testb $3,CS-ARGOFFSET(%rsp)
++ jz retint_kernel
++ movl threadinfo_flags(%rcx),%edx
++ movl $_TIF_WORK_MASK,%edi
++ andl %edi,%edx
++ jnz retint_careful
++ /*
++ * The iret might restore flags:
++ */
++ TRACE_IRQS_IRETQ
++ jmp retint_restore_args
++
++#if 0
++ /*
++	 * We need to re-write the logic here because we don't do iretq
++ * to return to user mode. It's still possible that we get trap/fault
++ * in the kernel (when accessing buffers pointed to by system calls,
++ * for example).
++ *
++ */
++ CFI_RESTORE_STATE
++error_kernelspace:
++ incl %ebx
++ /* There are two places in the kernel that can potentially fault with
++ usergs. Handle them here. The exception handlers after
++ iret run with kernel gs again, so don't set the user space flag.
++	   B stepping K8s sometimes report a truncated RIP for IRET
++ exceptions returning to compat mode. Check for these here too. */
++ leaq iret_label(%rip),%rbp
++ cmpq %rbp,RIP(%rsp)
++ je error_swapgs
++ movl %ebp,%ebp /* zero extend */
++ cmpq %rbp,RIP(%rsp)
++ je error_swapgs
++ cmpq $gs_change,RIP(%rsp)
++ je error_swapgs
++ jmp error_sti
++#endif
++ CFI_ENDPROC
++END(error_entry)
++
++ENTRY(hypervisor_callback)
++ zeroentry do_hypervisor_callback
++END(hypervisor_callback)
++
++/*
++ * Copied from arch/xen/i386/kernel/entry.S
++ */
++# A note on the "critical region" in our callback handler.
++# We want to avoid stacking callback handlers due to events occurring
++# during handling of the last event. To do this, we keep events disabled
++# until we've done all processing. HOWEVER, we must enable events before
++# popping the stack frame (can't be done atomically) and so it would still
++# be possible to get enough handler activations to overflow the stack.
++# Although unlikely, bugs of that kind are hard to track down, so we'd
++# like to avoid the possibility.
++# So, on entry to the handler we detect whether we interrupted an
++# existing activation in its critical region -- if so, we pop the current
++# activation and restart the handler using the previous one.
++ENTRY(do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
++ CFI_STARTPROC
++# Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
++# see the correct pointer to the pt_regs
++ movq %rdi, %rsp # we don't return, adjust the stack frame
++ CFI_ENDPROC
++ CFI_DEFAULT_STACK
++11: incl %gs:pda_irqcount
++ movq %rsp,%rbp
++ CFI_DEF_CFA_REGISTER rbp
++ cmovzq %gs:pda_irqstackptr,%rsp
++ pushq %rbp # backlink for old unwinder
++ call evtchn_do_upcall
++ popq %rsp
++ CFI_DEF_CFA_REGISTER rsp
++ decl %gs:pda_irqcount
++ jmp error_exit
++ CFI_ENDPROC
++END(do_hypervisor_callback)
++
++#ifdef CONFIG_X86_LOCAL_APIC
++KPROBE_ENTRY(nmi)
++ zeroentry do_nmi_callback
++ENTRY(do_nmi_callback)
++ CFI_STARTPROC
++ addq $8, %rsp
++ CFI_ENDPROC
++ CFI_DEFAULT_STACK
++ call do_nmi
++ orl $NMI_MASK,EFLAGS(%rsp)
++ RESTORE_REST
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ GET_THREAD_INFO(%rcx)
++ jmp retint_restore_args
++ CFI_ENDPROC
++ .previous .text
++END(nmi)
++#endif
++
++ ALIGN
++restore_all_enable_events:
++ CFI_DEFAULT_STACK adj=1
++ TRACE_IRQS_ON
++ XEN_UNBLOCK_EVENTS(%rsi) # %rsi is already set up...
++
++scrit: /**** START OF CRITICAL REGION ****/
++ XEN_TEST_PENDING(%rsi)
++ CFI_REMEMBER_STATE
++ jnz 14f # process more events if necessary...
++ XEN_PUT_VCPU_INFO(%rsi)
++ RESTORE_ARGS 0,8,0
++ HYPERVISOR_IRET 0
++
++ CFI_RESTORE_STATE
++14: XEN_LOCKED_BLOCK_EVENTS(%rsi)
++ XEN_PUT_VCPU_INFO(%rsi)
++ SAVE_REST
++ movq %rsp,%rdi # set the argument again
++ jmp 11b
++ CFI_ENDPROC
++ecrit: /**** END OF CRITICAL REGION ****/
++# At this point, unlike on x86-32, we don't do the fixup to simplify the
++# code and the stack frame is more complex on x86-64.
++# When the kernel is interrupted in the critical section, it simply does an
++# IRET and everything is restored at that point,
++# i.e. it resumes from the next interrupted instruction with the same context.
++
++# Hypervisor uses this for application faults while it executes.
++# We get here for two reasons:
++# 1. Fault while reloading DS, ES, FS or GS
++# 2. Fault while executing IRET
++# Category 1 we do not need to fix up as Xen has already reloaded all segment
++# registers that could be reloaded and zeroed the others.
++# Category 2 we fix up by killing the current process. We cannot use the
++# normal Linux return path in this case because if we use the IRET hypercall
++# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
++# We distinguish between categories by comparing each saved segment register
++# with its current contents: any discrepancy means we are in category 1.
++ENTRY(failsafe_callback)
++ _frame (RIP-0x30)
++ CFI_REL_OFFSET rcx, 0
++ CFI_REL_OFFSET r11, 8
++ movw %ds,%cx
++ cmpw %cx,0x10(%rsp)
++ CFI_REMEMBER_STATE
++ jne 1f
++ movw %es,%cx
++ cmpw %cx,0x18(%rsp)
++ jne 1f
++ movw %fs,%cx
++ cmpw %cx,0x20(%rsp)
++ jne 1f
++ movw %gs,%cx
++ cmpw %cx,0x28(%rsp)
++ jne 1f
++ /* All segments match their saved values => Category 2 (Bad IRET). */
++ movq (%rsp),%rcx
++ CFI_RESTORE rcx
++ movq 8(%rsp),%r11
++ CFI_RESTORE r11
++ addq $0x30,%rsp
++ CFI_ADJUST_CFA_OFFSET -0x30
++ movq $11,%rdi /* SIGSEGV */
++ jmp do_exit
++ CFI_RESTORE_STATE
++1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
++ movq (%rsp),%rcx
++ CFI_RESTORE rcx
++ movq 8(%rsp),%r11
++ CFI_RESTORE r11
++ addq $0x30,%rsp
++ CFI_ADJUST_CFA_OFFSET -0x30
++ pushq $0
++ CFI_ADJUST_CFA_OFFSET 8
++ SAVE_ALL
++ jmp error_exit
++ CFI_ENDPROC
++#if 0
++ .section __ex_table,"a"
++ .align 8
++ .quad gs_change,bad_gs
++ .previous
++ .section .fixup,"ax"
++ /* running with kernelgs */
++bad_gs:
++/* swapgs */ /* switch back to user gs */
++ xorl %eax,%eax
++ movl %eax,%gs
++ jmp 2b
++ .previous
++#endif
++
++/*
++ * Create a kernel thread.
++ *
++ * C extern interface:
++ * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
++ *
++ * asm input arguments:
++ * rdi: fn, rsi: arg, rdx: flags
++ */
++ENTRY(kernel_thread)
++ CFI_STARTPROC
++ FAKE_STACK_FRAME $child_rip
++ SAVE_ALL
++
++ # rdi: flags, rsi: usp, rdx: will be &pt_regs
++ movq %rdx,%rdi
++ orq kernel_thread_flags(%rip),%rdi
++ movq $-1, %rsi
++ movq %rsp, %rdx
++
++ xorl %r8d,%r8d
++ xorl %r9d,%r9d
++
++ # clone now
++ call do_fork
++ movq %rax,RAX(%rsp)
++ xorl %edi,%edi
++
++ /*
++	 * It isn't worth checking for a reschedule here, so within the
++	 * x86_64 port you can rely on kernel_thread() not rescheduling the
++	 * child before returning; this avoids the need for hacks, for
++	 * example to fork off the per-CPU idle tasks.
++ * [Hopefully no generic code relies on the reschedule -AK]
++ */
++ RESTORE_ALL
++ UNFAKE_STACK_FRAME
++ ret
++ CFI_ENDPROC
++ENDPROC(kernel_thread)
++
++child_rip:
++ pushq $0 # fake return address
++ CFI_STARTPROC
++ /*
++ * Here we are in the child and the registers are set as they were
++ * at kernel_thread() invocation in the parent.
++ */
++ movq %rdi, %rax
++ movq %rsi, %rdi
++ call *%rax
++ # exit
++ xorl %edi, %edi
++ call do_exit
++ CFI_ENDPROC
++ENDPROC(child_rip)
++
++/*
++ * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
++ *
++ * C extern interface:
++ * extern long execve(char *name, char **argv, char **envp)
++ *
++ * asm input arguments:
++ * rdi: name, rsi: argv, rdx: envp
++ *
++ * We want to fall back into:
++ * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
++ *
++ * do_sys_execve asm fallback arguments:
++ * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
++ */
++ENTRY(execve)
++ CFI_STARTPROC
++ FAKE_STACK_FRAME $0
++ SAVE_ALL
++ call sys_execve
++ movq %rax, RAX(%rsp)
++ RESTORE_REST
++ testq %rax,%rax
++ jne 1f
++ jmp int_ret_from_sys_call
++1: RESTORE_ARGS
++ UNFAKE_STACK_FRAME
++ ret
++ CFI_ENDPROC
++ENDPROC(execve)
++
++KPROBE_ENTRY(page_fault)
++ errorentry do_page_fault
++END(page_fault)
++ .previous .text
++
++ENTRY(coprocessor_error)
++ zeroentry do_coprocessor_error
++END(coprocessor_error)
++
++ENTRY(simd_coprocessor_error)
++ zeroentry do_simd_coprocessor_error
++END(simd_coprocessor_error)
++
++ENTRY(device_not_available)
++ zeroentry math_state_restore
++END(device_not_available)
++
++ /* runs on exception stack */
++KPROBE_ENTRY(debug)
++/* INTR_FRAME
++ pushq $0
++ CFI_ADJUST_CFA_OFFSET 8 */
++ zeroentry do_debug
++/* paranoidexit
++ CFI_ENDPROC */
++END(debug)
++ .previous .text
++
++#if 0
++ /* runs on exception stack */
++KPROBE_ENTRY(nmi)
++ INTR_FRAME
++ pushq $-1
++ CFI_ADJUST_CFA_OFFSET 8
++ paranoidentry do_nmi, 0, 0
++#ifdef CONFIG_TRACE_IRQFLAGS
++ paranoidexit 0
++#else
++ jmp paranoid_exit1
++ CFI_ENDPROC
++#endif
++END(nmi)
++ .previous .text
++#endif
++
++KPROBE_ENTRY(int3)
++/* INTR_FRAME
++ pushq $0
++ CFI_ADJUST_CFA_OFFSET 8 */
++ zeroentry do_int3
++/* jmp paranoid_exit1
++ CFI_ENDPROC */
++END(int3)
++ .previous .text
++
++ENTRY(overflow)
++ zeroentry do_overflow
++END(overflow)
++
++ENTRY(bounds)
++ zeroentry do_bounds
++END(bounds)
++
++ENTRY(invalid_op)
++ zeroentry do_invalid_op
++END(invalid_op)
++
++ENTRY(coprocessor_segment_overrun)
++ zeroentry do_coprocessor_segment_overrun
++END(coprocessor_segment_overrun)
++
++ENTRY(reserved)
++ zeroentry do_reserved
++END(reserved)
++
++#if 0
++ /* runs on exception stack */
++ENTRY(double_fault)
++ XCPT_FRAME
++ paranoidentry do_double_fault
++ jmp paranoid_exit1
++ CFI_ENDPROC
++END(double_fault)
++#endif
++
++ENTRY(invalid_TSS)
++ errorentry do_invalid_TSS
++END(invalid_TSS)
++
++ENTRY(segment_not_present)
++ errorentry do_segment_not_present
++END(segment_not_present)
++
++ /* runs on exception stack */
++ENTRY(stack_segment)
++/* XCPT_FRAME
++ paranoidentry do_stack_segment */
++ errorentry do_stack_segment
++/* jmp paranoid_exit1
++ CFI_ENDPROC */
++END(stack_segment)
++
++KPROBE_ENTRY(general_protection)
++ errorentry do_general_protection
++END(general_protection)
++ .previous .text
++
++ENTRY(alignment_check)
++ errorentry do_alignment_check
++END(alignment_check)
++
++ENTRY(divide_error)
++ zeroentry do_divide_error
++END(divide_error)
++
++ENTRY(spurious_interrupt_bug)
++ zeroentry do_spurious_interrupt_bug
++END(spurious_interrupt_bug)
++
++#ifdef CONFIG_X86_MCE
++ /* runs on exception stack */
++ENTRY(machine_check)
++ INTR_FRAME
++ pushq $0
++ CFI_ADJUST_CFA_OFFSET 8
++ paranoidentry do_machine_check
++ jmp paranoid_exit1
++ CFI_ENDPROC
++END(machine_check)
++#endif
++
++/* Call softirq on interrupt stack. Interrupts are off. */
++ENTRY(call_softirq)
++ CFI_STARTPROC
++ push %rbp
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rbp,0
++ mov %rsp,%rbp
++ CFI_DEF_CFA_REGISTER rbp
++ incl %gs:pda_irqcount
++ cmove %gs:pda_irqstackptr,%rsp
++ push %rbp # backlink for old unwinder
++ call __do_softirq
++ leaveq
++ CFI_DEF_CFA_REGISTER rsp
++ CFI_ADJUST_CFA_OFFSET -8
++ decl %gs:pda_irqcount
++ ret
++ CFI_ENDPROC
++ENDPROC(call_softirq)
++
++#ifdef CONFIG_STACK_UNWIND
++ENTRY(arch_unwind_init_running)
++ CFI_STARTPROC
++ movq %r15, R15(%rdi)
++ movq %r14, R14(%rdi)
++ xchgq %rsi, %rdx
++ movq %r13, R13(%rdi)
++ movq %r12, R12(%rdi)
++ xorl %eax, %eax
++ movq %rbp, RBP(%rdi)
++ movq %rbx, RBX(%rdi)
++ movq (%rsp), %rcx
++ movq %rax, R11(%rdi)
++ movq %rax, R10(%rdi)
++ movq %rax, R9(%rdi)
++ movq %rax, R8(%rdi)
++ movq %rax, RAX(%rdi)
++ movq %rax, RCX(%rdi)
++ movq %rax, RDX(%rdi)
++ movq %rax, RSI(%rdi)
++ movq %rax, RDI(%rdi)
++ movq %rax, ORIG_RAX(%rdi)
++ movq %rcx, RIP(%rdi)
++ leaq 8(%rsp), %rcx
++ movq $__KERNEL_CS, CS(%rdi)
++ movq %rax, EFLAGS(%rdi)
++ movq %rcx, RSP(%rdi)
++ movq $__KERNEL_DS, SS(%rdi)
++ jmpq *%rdx
++ CFI_ENDPROC
++ENDPROC(arch_unwind_init_running)
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/genapic-xen.c 2007-08-27 14:02:01.000000000 -0400
+@@ -0,0 +1,143 @@
++/*
++ * Copyright 2004 James Cleverdon, IBM.
++ * Subject to the GNU Public License, v.2
++ *
++ * Generic APIC sub-arch probe layer.
++ *
++ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
++ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
++ * James Cleverdon.
++ */
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#include <linux/module.h>
++
++#include <asm/smp.h>
++#include <asm/ipi.h>
++
++#if defined(CONFIG_ACPI)
++#include <acpi/acpi_bus.h>
++#endif
++
++/* which logical CPU number maps to which CPU (physical APIC ID) */
++u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
++EXPORT_SYMBOL(x86_cpu_to_apicid);
++u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++extern struct genapic apic_cluster;
++extern struct genapic apic_flat;
++extern struct genapic apic_physflat;
++
++#ifndef CONFIG_XEN
++struct genapic *genapic = &apic_flat;
++#else
++extern struct genapic apic_xen;
++struct genapic *genapic = &apic_xen;
++#endif
++
++
++/*
++ * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
++ */
++void __init clustered_apic_check(void)
++{
++#ifndef CONFIG_XEN
++ long i;
++ u8 clusters, max_cluster;
++ u8 id;
++ u8 cluster_cnt[NUM_APIC_CLUSTERS];
++ int max_apic = 0;
++
++#if defined(CONFIG_ACPI)
++ /*
++ * Some x86_64 machines use physical APIC mode regardless of how many
++ * procs/clusters are present (x86_64 ES7000 is an example).
++ */
++ if (acpi_fadt.revision > FADT2_REVISION_ID)
++ if (acpi_fadt.force_apic_physical_destination_mode) {
++ genapic = &apic_cluster;
++ goto print;
++ }
++#endif
++
++ memset(cluster_cnt, 0, sizeof(cluster_cnt));
++ for (i = 0; i < NR_CPUS; i++) {
++ id = bios_cpu_apicid[i];
++ if (id == BAD_APICID)
++ continue;
++ if (id > max_apic)
++ max_apic = id;
++ cluster_cnt[APIC_CLUSTERID(id)]++;
++ }
++
++ /* Don't use clustered mode on AMD platforms. */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
++ genapic = &apic_physflat;
++#ifndef CONFIG_HOTPLUG_CPU
++ /* In the CPU hotplug case we cannot use broadcast mode
++ because that opens a race when a CPU is removed.
++ Stay at physflat mode in this case.
++ It is bad to do this unconditionally though. Once
++ we have ACPI platform support for CPU hotplug
++	   we should detect hotplug capability from ACPI tables and
++ only do this when really needed. -AK */
++ if (max_apic <= 8)
++ genapic = &apic_flat;
++#endif
++ goto print;
++ }
++
++ clusters = 0;
++ max_cluster = 0;
++
++ for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
++ if (cluster_cnt[i] > 0) {
++ ++clusters;
++ if (cluster_cnt[i] > max_cluster)
++ max_cluster = cluster_cnt[i];
++ }
++ }
++
++ /*
++ * If we have clusters <= 1 and CPUs <= 8 in cluster 0, then flat mode,
++ * else if max_cluster <= 4 and cluster_cnt[15] == 0, clustered logical
++ * else physical mode.
++ * (We don't use lowest priority delivery + HW APIC IRQ steering, so
++ * can ignore the clustered logical case and go straight to physical.)
++ */
++ if (clusters <= 1 && max_cluster <= 8 && cluster_cnt[0] == max_cluster) {
++#ifdef CONFIG_HOTPLUG_CPU
++ /* Don't use APIC shortcuts in CPU hotplug to avoid races */
++ genapic = &apic_physflat;
++#else
++ genapic = &apic_flat;
++#endif
++ } else
++ genapic = &apic_cluster;
++
++print:
++#else
++ /* hardcode to xen apic functions */
++ genapic = &apic_xen;
++#endif
++ printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
++}
++
++/* Same for both flat and clustered. */
++
++#ifdef CONFIG_XEN
++extern void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest);
++#endif
++
++void send_IPI_self(int vector)
++{
++#ifndef CONFIG_XEN
++ __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
++#else
++ xen_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
++#endif
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/genapic_xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,161 @@
++/*
++ * Copyright 2004 James Cleverdon, IBM.
++ * Subject to the GNU Public License, v.2
++ *
++ * Xen APIC subarch code. Maximum 8 CPUs, logical delivery.
++ *
++ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
++ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
++ * James Cleverdon.
++ *
++ * Hacked to pieces for Xen by Chris Wright.
++ */
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++#include <asm/smp.h>
++#include <asm/ipi.h>
++#else
++#include <asm/apic.h>
++#include <asm/apicdef.h>
++#include <asm/genapic.h>
++#endif
++#include <xen/evtchn.h>
++
++DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++
++static inline void __send_IPI_one(unsigned int cpu, int vector)
++{
++ int irq = per_cpu(ipi_to_irq, cpu)[vector];
++ BUG_ON(irq < 0);
++ notify_remote_via_irq(irq);
++}
++
++void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
++{
++ int cpu;
++
++ switch (shortcut) {
++ case APIC_DEST_SELF:
++ __send_IPI_one(smp_processor_id(), vector);
++ break;
++ case APIC_DEST_ALLBUT:
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu == smp_processor_id())
++ continue;
++ if (cpu_isset(cpu, cpu_online_map)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++ break;
++ case APIC_DEST_ALLINC:
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu_isset(cpu, cpu_online_map)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++ break;
++ default:
++ printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
++ vector);
++ break;
++ }
++}
++
++static cpumask_t xen_target_cpus(void)
++{
++ return cpu_online_map;
++}
++
++/*
++ * Set up the logical destination ID.
++ * Do nothing, not called now.
++ */
++static void xen_init_apic_ldr(void)
++{
++ Dprintk("%s\n", __FUNCTION__);
++ return;
++}
++
++static void xen_send_IPI_allbutself(int vector)
++{
++ /*
++	 * If there are no other CPUs in the system then
++	 * we get an APIC send error if we try to broadcast,
++	 * so we have to avoid sending IPIs in this case.
++ */
++ Dprintk("%s\n", __FUNCTION__);
++ if (num_online_cpus() > 1)
++ xen_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
++}
++
++static void xen_send_IPI_all(int vector)
++{
++ Dprintk("%s\n", __FUNCTION__);
++ xen_send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
++}
++
++static void xen_send_IPI_mask(cpumask_t cpumask, int vector)
++{
++ unsigned long mask = cpus_addr(cpumask)[0];
++ unsigned int cpu;
++ unsigned long flags;
++
++ Dprintk("%s\n", __FUNCTION__);
++ local_irq_save(flags);
++ WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
++
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu_isset(cpu, cpumask)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++ local_irq_restore(flags);
++}
++
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++static int xen_apic_id_registered(void)
++{
++ /* better be set */
++ Dprintk("%s\n", __FUNCTION__);
++ return physid_isset(smp_processor_id(), phys_cpu_present_map);
++}
++#endif
++
++static unsigned int xen_cpu_mask_to_apicid(cpumask_t cpumask)
++{
++ Dprintk("%s\n", __FUNCTION__);
++ return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
++}
++
++static unsigned int phys_pkg_id(int index_msb)
++{
++ u32 ebx;
++
++ Dprintk("%s\n", __FUNCTION__);
++ ebx = cpuid_ebx(1);
++ return ((ebx >> 24) & 0xFF) >> index_msb;
++}
++
++struct genapic apic_xen = {
++ .name = "xen",
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++ .int_delivery_mode = dest_LowestPrio,
++#endif
++ .int_dest_mode = (APIC_DEST_LOGICAL != 0),
++ .int_delivery_dest = APIC_DEST_LOGICAL | APIC_DM_LOWEST,
++ .target_cpus = xen_target_cpus,
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++ .apic_id_registered = xen_apic_id_registered,
++#endif
++ .init_apic_ldr = xen_init_apic_ldr,
++ .send_IPI_all = xen_send_IPI_all,
++ .send_IPI_allbutself = xen_send_IPI_allbutself,
++ .send_IPI_mask = xen_send_IPI_mask,
++ .cpu_mask_to_apicid = xen_cpu_mask_to_apicid,
++ .phys_pkg_id = phys_pkg_id,
++};
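++
++/*
++ * apic_xen is presumably installed as the active genapic by the arch
++ * setup code elsewhere in this patch when running as a Xen guest, so the
++ * generic SMP IPI paths end up in the event-channel based helpers above.
++ */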
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/head-xen.S 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,203 @@
++/*
++ * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
++ *
++ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
++ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
++ * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
++ * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
++ *
++ * $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Modified for Xen
++ */
++
++
++#include <linux/linkage.h>
++#include <linux/threads.h>
++#include <linux/init.h>
++#include <linux/elfnote.h>
++#include <asm/desc.h>
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/msr.h>
++#include <asm/cache.h>
++#include <asm/dwarf2.h>
++#include <xen/interface/elfnote.h>
++
++ .section .bootstrap.text, "ax", @progbits
++ .code64
++#define VIRT_ENTRY_OFFSET 0x0
++.org VIRT_ENTRY_OFFSET
++ .globl startup_64
++startup_64:
++ENTRY(_start)
++ movq $(init_thread_union+THREAD_SIZE-8),%rsp
++
++ /* rsi is pointer to startup info structure.
++ pass it to C */
++ movq %rsi,%rdi
++ pushq $0 # fake return address
++ jmp x86_64_start_kernel
++
++ENTRY(stext)
++ENTRY(_stext)
++
++ $page = 0
++#define NEXT_PAGE(name) \
++ $page = $page + 1; \
++ .org $page * 0x1000; \
++ phys_##name = $page * 0x1000 + __PHYSICAL_START; \
++ENTRY(name)
++
++NEXT_PAGE(init_level4_pgt)
++ /* This gets initialized in x86_64_start_kernel */
++ .fill 512,8,0
++
++ /*
++ * We update two pgd entries to make kernel and user pgd consistent
++ * at pgd_populate(). It can be used for kernel modules. So we place
++ * this page here for those cases to avoid memory corruption.
++ * We also use this page to establish the initial mapping for the
++ * vsyscall area.
++ */
++NEXT_PAGE(init_level4_user_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(level3_kernel_pgt)
++ .fill 512,8,0
++
++ /*
++ * This is used for vsyscall area mapping as we have a different
++ * level4 page table for user.
++ */
++NEXT_PAGE(level3_user_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(level2_kernel_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(hypercall_page)
++ CFI_STARTPROC
++ .rept 0x1000 / 0x20
++ .skip 1 /* push %rcx */
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rcx,0
++ .skip 2 /* push %r11 */
++ CFI_ADJUST_CFA_OFFSET 8
++	CFI_REL_OFFSET	r11,0
++ .skip 5 /* mov $#,%eax */
++ .skip 2 /* syscall */
++ .skip 2 /* pop %r11 */
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_RESTORE r11
++ .skip 1 /* pop %rcx */
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_RESTORE rcx
++ .align 0x20,0 /* ret */
++ .endr
++ CFI_ENDPROC
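++
++/*
++ * The hypervisor fills hypercall_page at boot with one 32-byte stub per
++ * hypercall; the .skip/.align pattern above only reserves the slots, and
++ * the CFI annotations describe the stack usage of the stub that gets
++ * written there (push %rcx / push %r11 / mov $nr,%eax / syscall /
++ * pop %r11 / pop %rcx / ret) so the unwinder can step through hypercalls.
++ */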
++
++#undef NEXT_PAGE
++
++ .data
++
++ .align 16
++ .globl cpu_gdt_descr
++cpu_gdt_descr:
++ .word gdt_end-cpu_gdt_table-1
++gdt:
++ .quad cpu_gdt_table
++#ifdef CONFIG_SMP
++ .rept NR_CPUS-1
++ .word 0
++ .quad 0
++ .endr
++#endif
++
++/* We need valid kernel segments for data and code in long mode too
++ * IRET will check the segment types kkeil 2000/10/28
++ * Also sysret mandates a special GDT layout
++ */
++
++ .section .data.page_aligned, "aw"
++ .align PAGE_SIZE
++
++/* The TLS descriptors are currently at a different place compared to i386.
++ Hopefully nobody expects them at a fixed place (Wine?) */
++
++ENTRY(cpu_gdt_table)
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x0 /* unused */
++ .quad 0x00af9a000000ffff /* __KERNEL_CS */
++ .quad 0x00cf92000000ffff /* __KERNEL_DS */
++ .quad 0x00cffa000000ffff /* __USER32_CS */
++ .quad 0x00cff2000000ffff /* __USER_DS, __USER32_DS */
++ .quad 0x00affa000000ffff /* __USER_CS */
++ .quad 0x00cf9a000000ffff /* __KERNEL32_CS */
++ .quad 0,0 /* TSS */
++ .quad 0,0 /* LDT */
++ .quad 0,0,0 /* three TLS descriptors */
++ .quad 0 /* unused */
++gdt_end:
++ /* asm/segment.h:GDT_ENTRIES must match this */
++ /* This should be a multiple of the cache line size */
++ /* GDTs of other CPUs are now dynamically allocated */
++
++ /* zero the remaining page */
++ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
++
++ .section .bss.page_aligned, "aw", @nobits
++ .align PAGE_SIZE
++ENTRY(empty_zero_page)
++ .skip PAGE_SIZE
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++/*
++ * __xen_guest information
++ */
++.macro utoh value
++ .if (\value) < 0 || (\value) >= 0x10
++ utoh (((\value)>>4)&0x0fffffffffffffff)
++ .endif
++ .if ((\value) & 0xf) < 10
++ .byte '0' + ((\value) & 0xf)
++ .else
++ .byte 'A' + ((\value) & 0xf) - 10
++ .endif
++.endm
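++
++/*
++ * utoh recursively emits the hexadecimal ASCII representation of an
++ * assemble-time constant (most significant digit first, no "0x" prefix).
++ * It is only used to build the legacy __xen_guest string below, kept for
++ * compatibility with hypervisors that predate the ELF note interface.
++ */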
++
++.section __xen_guest
++ .ascii "GUEST_OS=linux,GUEST_VER=2.6"
++ .ascii ",XEN_VER=xen-3.0"
++ .ascii ",VIRT_BASE=0x"
++ utoh __START_KERNEL_map
++ .ascii ",ELF_PADDR_OFFSET=0x"
++ utoh __START_KERNEL_map
++ .ascii ",VIRT_ENTRY=0x"
++ utoh (__START_KERNEL_map + __PHYSICAL_START + VIRT_ENTRY_OFFSET)
++ .ascii ",HYPERCALL_PAGE=0x"
++ utoh (phys_hypercall_page >> PAGE_SHIFT)
++ .ascii ",FEATURES=writable_page_tables"
++ .ascii "|writable_descriptor_tables"
++ .ascii "|auto_translated_physmap"
++ .ascii "|supervisor_mode_kernel"
++ .ascii ",LOADER=generic"
++ .byte 0
++#endif /* CONFIG_XEN_COMPAT <= 0x030002 */
++
++ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "linux")
++ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "2.6")
++ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
++ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .quad, __START_KERNEL_map)
++#if CONFIG_XEN_COMPAT <= 0x030002
++ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .quad, __START_KERNEL_map)
++#else
++ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .quad, 0)
++#endif
++ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .quad, startup_64)
++ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad, hypercall_page)
++ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT)
++ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
++ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
++ ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1)
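++
++/*
++ * The ELF notes above are what newer Xen domain builders read to find the
++ * guest entry point, hypercall page location and feature flags; the
++ * __xen_guest string is the older equivalent and is only emitted when
++ * compatibility with 3.0.2-and-earlier hypervisors is configured.
++ */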
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/head64-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,162 @@
++/*
++ * linux/arch/x86_64/kernel/head64.c -- prepare to run common code
++ *
++ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Modified for Xen.
++ */
++
++#include <linux/init.h>
++#include <linux/linkage.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/percpu.h>
++#include <linux/module.h>
++
++#include <asm/processor.h>
++#include <asm/proto.h>
++#include <asm/smp.h>
++#include <asm/bootsetup.h>
++#include <asm/setup.h>
++#include <asm/desc.h>
++#include <asm/pgtable.h>
++#include <asm/sections.h>
++
++unsigned long start_pfn;
++
++/* Don't add a printk here: printk relies on the PDA, which is not initialized
++   yet. */
++#if 0
++static void __init clear_bss(void)
++{
++ memset(__bss_start, 0,
++ (unsigned long) __bss_stop - (unsigned long) __bss_start);
++}
++#endif
++
++#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
++#define OLD_CL_MAGIC_ADDR 0x90020
++#define OLD_CL_MAGIC 0xA33F
++#define OLD_CL_BASE_ADDR 0x90000
++#define OLD_CL_OFFSET 0x90022
++
++extern char saved_command_line[];
++
++static void __init copy_bootdata(char *real_mode_data)
++{
++#ifndef CONFIG_XEN
++ int new_data;
++ char * command_line;
++
++ memcpy(x86_boot_params, real_mode_data, BOOT_PARAM_SIZE);
++ new_data = *(int *) (x86_boot_params + NEW_CL_POINTER);
++ if (!new_data) {
++ if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) {
++ printk("so old bootloader that it does not support commandline?!\n");
++ return;
++ }
++ new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
++ printk("old bootloader convention, maybe loadlin?\n");
++ }
++ command_line = (char *) ((u64)(new_data));
++ memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
++#else
++ int max_cmdline;
++
++ if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
++ max_cmdline = COMMAND_LINE_SIZE;
++ memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
++ saved_command_line[max_cmdline-1] = '\0';
++#endif
++ printk("Bootdata ok (command line is %s)\n", saved_command_line);
++}
++
++static void __init setup_boot_cpu_data(void)
++{
++ unsigned int dummy, eax;
++
++ /* get vendor info */
++ cpuid(0, (unsigned int *)&boot_cpu_data.cpuid_level,
++ (unsigned int *)&boot_cpu_data.x86_vendor_id[0],
++ (unsigned int *)&boot_cpu_data.x86_vendor_id[8],
++ (unsigned int *)&boot_cpu_data.x86_vendor_id[4]);
++
++ /* get cpu type */
++ cpuid(1, &eax, &dummy, &dummy,
++ (unsigned int *) &boot_cpu_data.x86_capability);
++ boot_cpu_data.x86 = (eax >> 8) & 0xf;
++ boot_cpu_data.x86_model = (eax >> 4) & 0xf;
++ boot_cpu_data.x86_mask = eax & 0xf;
++}
++
++#include <xen/interface/memory.h>
++unsigned long *machine_to_phys_mapping;
++EXPORT_SYMBOL(machine_to_phys_mapping);
++unsigned int machine_to_phys_order;
++EXPORT_SYMBOL(machine_to_phys_order);
++
++void __init x86_64_start_kernel(char * real_mode_data)
++{
++ struct xen_machphys_mapping mapping;
++ unsigned long machine_to_phys_nr_ents;
++ char *s;
++ int i;
++
++ setup_xen_features();
++
++ xen_start_info = (struct start_info *)real_mode_data;
++ if (!xen_feature(XENFEAT_auto_translated_physmap))
++ phys_to_machine_mapping =
++ (unsigned long *)xen_start_info->mfn_list;
++ start_pfn = (__pa(xen_start_info->pt_base) >> PAGE_SHIFT) +
++ xen_start_info->nr_pt_frames;
++
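++	/*
++	 * Locate the machine-to-physical table: default to the fixed
++	 * MACH2PHYS_VIRT_START window, but use the range the hypervisor
++	 * reports via XENMEM_machphys_mapping when that call succeeds.
++	 * machine_to_phys_order ends up as ceil(log2(#entries)).
++	 */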
++ machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
++ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
++ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
++ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
++ machine_to_phys_nr_ents = mapping.max_mfn + 1;
++ }
++ while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
++ machine_to_phys_order++;
++
++#if 0
++ for (i = 0; i < 256; i++)
++ set_intr_gate(i, early_idt_handler);
++ asm volatile("lidt %0" :: "m" (idt_descr));
++#endif
++
++ /*
++ * This must be called really, really early:
++ */
++ lockdep_init();
++
++ for (i = 0; i < NR_CPUS; i++)
++ cpu_pda(i) = &boot_cpu_pda[i];
++
++ pda_init(0);
++ copy_bootdata(real_mode_data);
++#ifdef CONFIG_SMP
++ cpu_set(0, cpu_online_map);
++#endif
++ s = strstr(saved_command_line, "earlyprintk=");
++ if (s != NULL)
++ setup_early_printk(strchr(s, '=') + 1);
++#ifdef CONFIG_NUMA
++ s = strstr(saved_command_line, "numa=");
++ if (s != NULL)
++ numa_setup(s+5);
++#endif
++#ifdef CONFIG_X86_IO_APIC
++ if (strstr(saved_command_line, "disableapic"))
++ disable_apic = 1;
++#endif
++ /* You need early console to see that */
++ if (__pa_symbol(&_end) >= KERNEL_TEXT_SIZE)
++ panic("Kernel too big for kernel mapping\n");
++
++ setup_boot_cpu_data();
++ start_kernel();
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/io_apic-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,2269 @@
++/*
++ * Intel IO-APIC support for multi-Pentium hosts.
++ *
++ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
++ *
++ * Many thanks to Stig Venaas for trying out countless experimental
++ * patches and reporting/debugging problems patiently!
++ *
++ * (c) 1999, Multiple IO-APIC support, developed by
++ * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
++ * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
++ * further tested and cleaned up by Zach Brown <zab@redhat.com>
++ * and Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
++ * thanks to Eric Gilmore
++ * and Rolf G. Tews
++ * for testing these extensively
++ * Paul Diefenbaugh : Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/smp_lock.h>
++#include <linux/mc146818rtc.h>
++#include <linux/acpi.h>
++#include <linux/sysdev.h>
++#ifdef CONFIG_ACPI
++#include <acpi/acpi_bus.h>
++#endif
++
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/desc.h>
++#include <asm/proto.h>
++#include <asm/mach_apic.h>
++#include <asm/acpi.h>
++#include <asm/dma.h>
++#include <asm/nmi.h>
++
++#define __apicdebuginit __init
++
++int sis_apic_bug; /* not actually supported, dummy for compile */
++
++static int no_timer_check;
++
++int disable_timer_pin_1 __initdata;
++
++#ifndef CONFIG_XEN
++int timer_over_8254 __initdata = 0;
++
++/* Where if anywhere is the i8259 connect in external int mode */
++static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
++#endif
++
++static DEFINE_SPINLOCK(ioapic_lock);
++static DEFINE_SPINLOCK(vector_lock);
++
++/*
++ * # of IRQ routing registers
++ */
++int nr_ioapic_registers[MAX_IO_APICS];
++
++/*
++ * Rough estimation of how many shared IRQs there are, can
++ * be changed anytime.
++ */
++#define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
++#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
++
++/*
++ * This is performance-critical, we want to do it O(1)
++ *
++ * the indexing order of this array favors 1:1 mappings
++ * between pins and IRQs.
++ */
++
++static struct irq_pin_list {
++ short apic, pin, next;
++} irq_2_pin[PIN_MAP_SIZE];
++
++int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
++#ifdef CONFIG_PCI_MSI
++#define vector_to_irq(vector) \
++ (platform_legacy_irq(vector) ? vector : vector_irq[vector])
++#else
++#define vector_to_irq(vector) (vector)
++#endif
++
++#ifdef CONFIG_XEN
++
++#include <xen/interface/xen.h>
++#include <xen/interface/physdev.h>
++
++/* Fake i8259 */
++#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
++#define disable_8259A_irq(_irq) ((void)0)
++#define i8259A_irq_pending(_irq) (0)
++
++unsigned long io_apic_irqs;
++
++static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
++{
++ struct physdev_apic apic_op;
++ int ret;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
++ if (ret)
++ return ret;
++ return apic_op.value;
++}
++
++static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
++{
++ struct physdev_apic apic_op;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ apic_op.value = value;
++ HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
++}
++
++#define io_apic_read(a,r) xen_io_apic_read(a,r)
++#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
++
++#define clear_IO_APIC() ((void)0)
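++
++/*
++ * In the Xen case the IO-APIC belongs to the hypervisor: register accesses
++ * are proxied through PHYSDEVOP_apic_read/PHYSDEVOP_apic_write, the i8259
++ * helpers above are stubbed out, and clear_IO_APIC() is reduced to a no-op.
++ */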
++
++#else
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
++{
++ unsigned long flags;
++ unsigned int dest;
++ cpumask_t tmp;
++
++ cpus_and(tmp, mask, cpu_online_map);
++ if (cpus_empty(tmp))
++ tmp = TARGET_CPUS;
++
++ cpus_and(mask, tmp, CPU_MASK_ALL);
++
++ dest = cpu_mask_to_apicid(mask);
++
++ /*
++ * Only the high 8 bits are valid.
++ */
++ dest = SET_APIC_LOGICAL_ID(dest);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __DO_ACTION(1, = dest, )
++ set_irq_info(irq, mask);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++#endif
++
++#endif /* !CONFIG_XEN */
++
++/*
++ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
++ * shared ISA-space IRQs, so we have to support them. We are super
++ * fast in the common case, and fast for shared ISA-space IRQs.
++ */
++static void add_pin_to_irq(unsigned int irq, int apic, int pin)
++{
++ static int first_free_entry = NR_IRQS;
++ struct irq_pin_list *entry = irq_2_pin + irq;
++
++ BUG_ON(irq >= NR_IRQS);
++ while (entry->next)
++ entry = irq_2_pin + entry->next;
++
++ if (entry->pin != -1) {
++ entry->next = first_free_entry;
++ entry = irq_2_pin + entry->next;
++ if (++first_free_entry >= PIN_MAP_SIZE)
++ panic("io_apic.c: ran out of irq_2_pin entries!");
++ }
++ entry->apic = apic;
++ entry->pin = pin;
++}
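++
++/*
++ * irq_2_pin is a statically sized chained array: the first NR_IRQS slots
++ * are per-IRQ list heads and the remaining slots (up to PIN_MAP_SIZE)
++ * hold overflow entries for IRQs that are routed to more than one pin.
++ */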
++
++#ifndef CONFIG_XEN
++#define __DO_ACTION(R, ACTION, FINAL) \
++ \
++{ \
++ int pin; \
++ struct irq_pin_list *entry = irq_2_pin + irq; \
++ \
++ BUG_ON(irq >= NR_IRQS); \
++ for (;;) { \
++ unsigned int reg; \
++ pin = entry->pin; \
++ if (pin == -1) \
++ break; \
++ reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
++ reg ACTION; \
++ io_apic_modify(entry->apic, reg); \
++ if (!entry->next) \
++ break; \
++ entry = irq_2_pin + entry->next; \
++ } \
++ FINAL; \
++}
++
++#define DO_ACTION(name,R,ACTION, FINAL) \
++ \
++ static void name##_IO_APIC_irq (unsigned int irq) \
++ __DO_ACTION(R, ACTION, FINAL)
++
++DO_ACTION( __mask, 0, |= 0x00010000, io_apic_sync(entry->apic) )
++ /* mask = 1 */
++DO_ACTION( __unmask, 0, &= 0xfffeffff, )
++ /* mask = 0 */
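++
++/*
++ * __DO_ACTION walks the irq_2_pin chain for an IRQ and applies ACTION to
++ * the low dword of each routing entry (register 0x10 + R + 2*pin); bit 16
++ * is the mask bit, so __mask sets it and __unmask clears it.
++ */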
++
++static void mask_IO_APIC_irq (unsigned int irq)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __mask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void unmask_IO_APIC_irq (unsigned int irq)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __unmask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ /* Check delivery_mode to be sure we're not clearing an SMI pin */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ if (entry.delivery_mode == dest_SMI)
++ return;
++ /*
++ * Disable it in the IO-APIC irq-routing table:
++ */
++ memset(&entry, 0, sizeof(entry));
++ entry.mask = 1;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC (void)
++{
++ int apic, pin;
++
++ for (apic = 0; apic < nr_ioapics; apic++)
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
++ clear_IO_APIC_pin(apic, pin);
++}
++
++#endif /* !CONFIG_XEN */
++
++static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
++
++/*
++ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
++ * specific CPU-side IRQs.
++ */
++
++#define MAX_PIRQS 8
++static int pirq_entries [MAX_PIRQS];
++static int pirqs_enabled;
++int skip_ioapic_setup;
++int ioapic_force;
++
++/* dummy parsing: see setup.c */
++
++static int __init disable_ioapic_setup(char *str)
++{
++ skip_ioapic_setup = 1;
++ return 1;
++}
++
++static int __init enable_ioapic_setup(char *str)
++{
++ ioapic_force = 1;
++ skip_ioapic_setup = 0;
++ return 1;
++}
++
++__setup("noapic", disable_ioapic_setup);
++__setup("apic", enable_ioapic_setup);
++
++#ifndef CONFIG_XEN
++static int __init setup_disable_8254_timer(char *s)
++{
++ timer_over_8254 = -1;
++ return 1;
++}
++static int __init setup_enable_8254_timer(char *s)
++{
++ timer_over_8254 = 2;
++ return 1;
++}
++
++__setup("disable_8254_timer", setup_disable_8254_timer);
++__setup("enable_8254_timer", setup_enable_8254_timer);
++#endif /* !CONFIG_XEN */
++
++#include <asm/pci-direct.h>
++#include <linux/pci_ids.h>
++#include <linux/pci.h>
++
++
++#ifdef CONFIG_ACPI
++
++static int nvidia_hpet_detected __initdata;
++
++static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
++{
++ nvidia_hpet_detected = 1;
++ return 0;
++}
++#endif
++
++/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
++ off. Check for an Nvidia or VIA PCI bridge and turn it off.
++ Use pci direct infrastructure because this runs before the PCI subsystem.
++
++ Can be overwritten with "apic"
++
++ And another hack to disable the IOMMU on VIA chipsets.
++
++ ... and others. Really should move this somewhere else.
++
++ Kludge-O-Rama. */
++void __init check_ioapic(void)
++{
++ int num,slot,func;
++ /* Poor man's PCI discovery */
++ for (num = 0; num < 32; num++) {
++ for (slot = 0; slot < 32; slot++) {
++ for (func = 0; func < 8; func++) {
++ u32 class;
++ u32 vendor;
++ u8 type;
++ class = read_pci_config(num,slot,func,
++ PCI_CLASS_REVISION);
++ if (class == 0xffffffff)
++ break;
++
++ if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
++ continue;
++
++ vendor = read_pci_config(num, slot, func,
++ PCI_VENDOR_ID);
++ vendor &= 0xffff;
++ switch (vendor) {
++ case PCI_VENDOR_ID_VIA:
++#ifdef CONFIG_IOMMU
++ if ((end_pfn > MAX_DMA32_PFN ||
++ force_iommu) &&
++ !iommu_aperture_allowed) {
++ printk(KERN_INFO
++ "Looks like a VIA chipset. Disabling IOMMU. Override with \"iommu=allowed\"\n");
++ iommu_aperture_disabled = 1;
++ }
++#endif
++ return;
++ case PCI_VENDOR_ID_NVIDIA:
++#ifdef CONFIG_ACPI
++ /*
++ * All timer overrides on Nvidia are
++ * wrong unless HPET is enabled.
++ */
++ nvidia_hpet_detected = 0;
++ acpi_table_parse(ACPI_HPET,
++ nvidia_hpet_check);
++ if (nvidia_hpet_detected == 0) {
++ acpi_skip_timer_override = 1;
++ printk(KERN_INFO "Nvidia board "
++ "detected. Ignoring ACPI "
++ "timer override.\n");
++ }
++#endif
++ /* RED-PEN skip them on mptables too? */
++ return;
++ case PCI_VENDOR_ID_ATI:
++
++				/* This should actually be the default, but
++ for 2.6.16 let's do it for ATI only where
++ it's really needed. */
++#ifndef CONFIG_XEN
++ if (timer_over_8254 == 1) {
++ timer_over_8254 = 0;
++ printk(KERN_INFO
++ "ATI board detected. Disabling timer routing over 8254.\n");
++ }
++#endif
++ return;
++ }
++
++
++ /* No multi-function device? */
++ type = read_pci_config_byte(num,slot,func,
++ PCI_HEADER_TYPE);
++ if (!(type & 0x80))
++ break;
++ }
++ }
++ }
++}
++
++static int __init ioapic_pirq_setup(char *str)
++{
++ int i, max;
++ int ints[MAX_PIRQS+1];
++
++ get_options(str, ARRAY_SIZE(ints), ints);
++
++ for (i = 0; i < MAX_PIRQS; i++)
++ pirq_entries[i] = -1;
++
++ pirqs_enabled = 1;
++ apic_printk(APIC_VERBOSE, "PIRQ redirection, working around broken MP-BIOS.\n");
++ max = MAX_PIRQS;
++ if (ints[0] < MAX_PIRQS)
++ max = ints[0];
++
++ for (i = 0; i < max; i++) {
++ apic_printk(APIC_VERBOSE, "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
++ /*
++ * PIRQs are mapped upside down, usually.
++ */
++ pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
++ }
++ return 1;
++}
++
++__setup("pirq=", ioapic_pirq_setup);
++
++/*
++ * Find the IRQ entry number of a certain pin.
++ */
++static int find_irq_entry(int apic, int pin, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++)
++ if (mp_irqs[i].mpc_irqtype == type &&
++ (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
++ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
++ mp_irqs[i].mpc_dstirq == pin)
++ return i;
++
++ return -1;
++}
++
++#ifndef CONFIG_XEN
++/*
++ * Find the pin to which IRQ[irq] (ISA) is connected
++ */
++static int __init find_isa_irq_pin(int irq, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
++ (mp_irqs[i].mpc_irqtype == type) &&
++ (mp_irqs[i].mpc_srcbusirq == irq))
++
++ return mp_irqs[i].mpc_dstirq;
++ }
++ return -1;
++}
++
++static int __init find_isa_irq_apic(int irq, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
++ (mp_irqs[i].mpc_irqtype == type) &&
++ (mp_irqs[i].mpc_srcbusirq == irq))
++ break;
++ }
++ if (i < mp_irq_entries) {
++ int apic;
++ for(apic = 0; apic < nr_ioapics; apic++) {
++ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
++ return apic;
++ }
++ }
++
++ return -1;
++}
++#endif
++
++/*
++ * Find a specific PCI IRQ entry.
++ * Not an __init, possibly needed by modules
++ */
++static int pin_2_irq(int idx, int apic, int pin);
++
++int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
++{
++ int apic, i, best_guess = -1;
++
++ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
++ bus, slot, pin);
++ if (mp_bus_id_to_pci_bus[bus] == -1) {
++ apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
++ return -1;
++ }
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ for (apic = 0; apic < nr_ioapics; apic++)
++ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
++ mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
++ break;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
++ !mp_irqs[i].mpc_irqtype &&
++ (bus == lbus) &&
++ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
++ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
++
++ if (!(apic || IO_APIC_IRQ(irq)))
++ continue;
++
++ if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
++ return irq;
++ /*
++ * Use the first all-but-pin matching entry as a
++ * best-guess fuzzy result for broken mptables.
++ */
++ if (best_guess < 0)
++ best_guess = irq;
++ }
++ }
++ BUG_ON(best_guess >= NR_IRQS);
++ return best_guess;
++}
++
++/*
++ * EISA Edge/Level control register, ELCR
++ */
++static int EISA_ELCR(unsigned int irq)
++{
++ if (irq < 16) {
++ unsigned int port = 0x4d0 + (irq >> 3);
++ return (inb(port) >> (irq & 7)) & 1;
++ }
++ apic_printk(APIC_VERBOSE, "Broken MPtable reports ISA irq %d\n", irq);
++ return 0;
++}
++
++/* EISA interrupts are always polarity zero and can be edge or level
++ * trigger depending on the ELCR value. If an interrupt is listed as
++ * EISA conforming in the MP table, that means its trigger type must
++ * be read in from the ELCR */
++
++#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
++#define default_EISA_polarity(idx) (0)
++
++/* ISA interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_ISA_trigger(idx) (0)
++#define default_ISA_polarity(idx) (0)
++
++/* PCI interrupts are always polarity one level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_PCI_trigger(idx) (1)
++#define default_PCI_polarity(idx) (1)
++
++/* MCA interrupts are always polarity zero level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_MCA_trigger(idx) (1)
++#define default_MCA_polarity(idx) (0)
++
++static int __init MPBIOS_polarity(int idx)
++{
++ int bus = mp_irqs[idx].mpc_srcbus;
++ int polarity;
++
++ /*
++ * Determine IRQ line polarity (high active or low active):
++ */
++ switch (mp_irqs[idx].mpc_irqflag & 3)
++ {
++ case 0: /* conforms, ie. bus-type dependent polarity */
++ {
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ {
++ polarity = default_ISA_polarity(idx);
++ break;
++ }
++ case MP_BUS_EISA: /* EISA pin */
++ {
++ polarity = default_EISA_polarity(idx);
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ polarity = default_PCI_polarity(idx);
++ break;
++ }
++ case MP_BUS_MCA: /* MCA pin */
++ {
++ polarity = default_MCA_polarity(idx);
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ }
++ break;
++ }
++ case 1: /* high active */
++ {
++ polarity = 0;
++ break;
++ }
++ case 2: /* reserved */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ case 3: /* low active */
++ {
++ polarity = 1;
++ break;
++ }
++ default: /* invalid */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ }
++ return polarity;
++}
++
++static int MPBIOS_trigger(int idx)
++{
++ int bus = mp_irqs[idx].mpc_srcbus;
++ int trigger;
++
++ /*
++ * Determine IRQ trigger mode (edge or level sensitive):
++ */
++ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
++ {
++ case 0: /* conforms, ie. bus-type dependent */
++ {
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ {
++ trigger = default_ISA_trigger(idx);
++ break;
++ }
++ case MP_BUS_EISA: /* EISA pin */
++ {
++ trigger = default_EISA_trigger(idx);
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ trigger = default_PCI_trigger(idx);
++ break;
++ }
++ case MP_BUS_MCA: /* MCA pin */
++ {
++ trigger = default_MCA_trigger(idx);
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 1;
++ break;
++ }
++ }
++ break;
++ }
++ case 1: /* edge */
++ {
++ trigger = 0;
++ break;
++ }
++ case 2: /* reserved */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 1;
++ break;
++ }
++ case 3: /* level */
++ {
++ trigger = 1;
++ break;
++ }
++ default: /* invalid */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 0;
++ break;
++ }
++ }
++ return trigger;
++}
++
++static inline int irq_polarity(int idx)
++{
++ return MPBIOS_polarity(idx);
++}
++
++static inline int irq_trigger(int idx)
++{
++ return MPBIOS_trigger(idx);
++}
++
++static int next_irq = 16;
++
++/*
++ * gsi_irq_sharing -- Name overload! "irq" can be either a legacy IRQ
++ * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
++ * from ACPI, which can reach 800 in large boxen.
++ *
++ * Compact the sparse GSI space into a sequential IRQ series and reuse
++ * vectors if possible.
++ */
++int gsi_irq_sharing(int gsi)
++{
++ int i, tries, vector;
++
++ BUG_ON(gsi >= NR_IRQ_VECTORS);
++
++ if (platform_legacy_irq(gsi))
++ return gsi;
++
++ if (gsi_2_irq[gsi] != 0xFF)
++ return (int)gsi_2_irq[gsi];
++
++ tries = NR_IRQS;
++ try_again:
++ vector = assign_irq_vector(gsi);
++
++ /*
++ * Sharing vectors means sharing IRQs, so scan irq_vectors for previous
++ * use of vector and if found, return that IRQ. However, we never want
++ * to share legacy IRQs, which usually have a different trigger mode
++ * than PCI.
++ */
++ for (i = 0; i < NR_IRQS; i++)
++ if (IO_APIC_VECTOR(i) == vector)
++ break;
++ if (platform_legacy_irq(i)) {
++ if (--tries >= 0) {
++ IO_APIC_VECTOR(i) = 0;
++ goto try_again;
++ }
++ panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
++ }
++ if (i < NR_IRQS) {
++ gsi_2_irq[gsi] = i;
++ printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
++ gsi, vector, i);
++ return i;
++ }
++
++ i = next_irq++;
++ BUG_ON(i >= NR_IRQS);
++ gsi_2_irq[gsi] = i;
++ IO_APIC_VECTOR(i) = vector;
++ printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
++ gsi, vector, i);
++ return i;
++}
++
++static int pin_2_irq(int idx, int apic, int pin)
++{
++ int irq, i;
++ int bus = mp_irqs[idx].mpc_srcbus;
++
++ /*
++ * Debugging check, we are in big trouble if this message pops up!
++ */
++ if (mp_irqs[idx].mpc_dstirq != pin)
++ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
++
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ case MP_BUS_EISA:
++ case MP_BUS_MCA:
++ {
++ irq = mp_irqs[idx].mpc_srcbusirq;
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ /*
++ * PCI IRQs are mapped in order
++ */
++ i = irq = 0;
++ while (i < apic)
++ irq += nr_ioapic_registers[i++];
++ irq += pin;
++ irq = gsi_irq_sharing(irq);
++ break;
++ }
++ default:
++ {
++ printk(KERN_ERR "unknown bus type %d.\n",bus);
++ irq = 0;
++ break;
++ }
++ }
++ BUG_ON(irq >= NR_IRQS);
++
++ /*
++ * PCI IRQ command line redirection. Yes, limits are hardcoded.
++ */
++ if ((pin >= 16) && (pin <= 23)) {
++ if (pirq_entries[pin-16] != -1) {
++ if (!pirq_entries[pin-16]) {
++ apic_printk(APIC_VERBOSE, "disabling PIRQ%d\n", pin-16);
++ } else {
++ irq = pirq_entries[pin-16];
++ apic_printk(APIC_VERBOSE, "using PIRQ%d -> IRQ %d\n",
++ pin-16, irq);
++ }
++ }
++ }
++ BUG_ON(irq >= NR_IRQS);
++ return irq;
++}
++
++static inline int IO_APIC_irq_trigger(int irq)
++{
++ int apic, idx, pin;
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ idx = find_irq_entry(apic,pin,mp_INT);
++ if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
++ return irq_trigger(idx);
++ }
++ }
++ /*
++ * nonexistent IRQs are edge default
++ */
++ return 0;
++}
++
++/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
++u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
++
++int assign_irq_vector(int irq)
++{
++ unsigned long flags;
++ int vector;
++ struct physdev_irq irq_op;
++
++ BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
++
++ spin_lock_irqsave(&vector_lock, flags);
++
++ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
++ spin_unlock_irqrestore(&vector_lock, flags);
++ return IO_APIC_VECTOR(irq);
++ }
++
++ irq_op.irq = irq;
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
++ spin_unlock_irqrestore(&vector_lock, flags);
++ return -ENOSPC;
++ }
++
++ vector = irq_op.vector;
++ vector_irq[vector] = irq;
++ if (irq != AUTO_ASSIGN)
++ IO_APIC_VECTOR(irq) = vector;
++
++ spin_unlock_irqrestore(&vector_lock, flags);
++
++ return vector;
++}
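++
++/*
++ * Unlike the native code, vectors are not picked from a local table here:
++ * the hypervisor hands one out via PHYSDEVOP_alloc_irq_vector, and the
++ * vector<->irq mappings are only cached in vector_irq[]/IO_APIC_VECTOR().
++ */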
++
++extern void (*interrupt[NR_IRQS])(void);
++#ifndef CONFIG_XEN
++static struct hw_interrupt_type ioapic_level_type;
++static struct hw_interrupt_type ioapic_edge_type;
++
++#define IOAPIC_AUTO -1
++#define IOAPIC_EDGE 0
++#define IOAPIC_LEVEL 1
++
++static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
++{
++ unsigned idx;
++
++ idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
++
++ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
++ trigger == IOAPIC_LEVEL)
++ irq_desc[idx].chip = &ioapic_level_type;
++ else
++ irq_desc[idx].chip = &ioapic_edge_type;
++ set_intr_gate(vector, interrupt[idx]);
++}
++#else
++#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
++#endif /* !CONFIG_XEN */
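++
++/*
++ * Under Xen no native edge/level irq chip is installed for IO-APIC pins
++ * (ioapic_register_intr() above is a no-op); interrupts are expected to
++ * be delivered to the guest through Xen event channels instead.
++ */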
++
++static void __init setup_IO_APIC_irqs(void)
++{
++ struct IO_APIC_route_entry entry;
++ int apic, pin, idx, irq, first_notcon = 1, vector;
++ unsigned long flags;
++
++ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++
++ /*
++ * add it to the IO-APIC irq-routing table:
++ */
++ memset(&entry,0,sizeof(entry));
++
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* enable IRQ */
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++
++ idx = find_irq_entry(apic,pin,mp_INT);
++ if (idx == -1) {
++ if (first_notcon) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
++ first_notcon = 0;
++ } else
++ apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
++ continue;
++ }
++
++ entry.trigger = irq_trigger(idx);
++ entry.polarity = irq_polarity(idx);
++
++ if (irq_trigger(idx)) {
++ entry.trigger = 1;
++ entry.mask = 1;
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ }
++
++ irq = pin_2_irq(idx, apic, pin);
++ add_pin_to_irq(irq, apic, pin);
++
++ if (/* !apic && */ !IO_APIC_IRQ(irq))
++ continue;
++
++ if (IO_APIC_IRQ(irq)) {
++ vector = assign_irq_vector(irq);
++ entry.vector = vector;
++
++ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
++ if (!apic && (irq < 16))
++ disable_8259A_irq(irq);
++ }
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++ set_native_irq_info(irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ }
++ }
++
++ if (!first_notcon)
++ apic_printk(APIC_VERBOSE," not connected.\n");
++}
++
++#ifndef CONFIG_XEN
++/*
++ * Set up the 8259A-master output pin as broadcast to all
++ * CPUs.
++ */
++static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ memset(&entry,0,sizeof(entry));
++
++ disable_8259A_irq(0);
++
++ /* mask LVT0 */
++ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++
++ /*
++ * We use logical delivery to get the timer IRQ
++ * to the first CPU.
++ */
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* unmask IRQ now */
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.polarity = 0;
++ entry.trigger = 0;
++ entry.vector = vector;
++
++ /*
++ * The timer IRQ doesn't have to know that behind the
++ * scene we have a 8259A-master in AEOI mode ...
++ */
++ irq_desc[0].chip = &ioapic_edge_type;
++
++ /*
++ * Add it to the IO-APIC irq-routing table:
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ enable_8259A_irq(0);
++}
++
++void __init UNEXPECTED_IO_APIC(void)
++{
++}
++
++void __apicdebuginit print_IO_APIC(void)
++{
++ int apic, i;
++ union IO_APIC_reg_00 reg_00;
++ union IO_APIC_reg_01 reg_01;
++ union IO_APIC_reg_02 reg_02;
++ unsigned long flags;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
++ for (i = 0; i < nr_ioapics; i++)
++ printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
++ mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
++
++ /*
++ * We are a bit conservative about what we expect. We have to
++ * know about every hardware change ASAP.
++ */
++ printk(KERN_INFO "testing the IO APIC.......................\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ reg_01.raw = io_apic_read(apic, 1);
++ if (reg_01.bits.version >= 0x10)
++ reg_02.raw = io_apic_read(apic, 2);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ printk("\n");
++ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
++ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
++ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
++ if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++
++ printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
++ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
++ if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
++ (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
++ (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
++ (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
++ (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
++ (reg_01.bits.entries != 0x2E) &&
++ (reg_01.bits.entries != 0x3F) &&
++ (reg_01.bits.entries != 0x03)
++ )
++ UNEXPECTED_IO_APIC();
++
++ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
++ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
++ if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
++ (reg_01.bits.version != 0x02) && /* 82801BA IO-APICs (ICH2) */
++ (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
++ (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
++ (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
++ (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
++ )
++ UNEXPECTED_IO_APIC();
++ if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++
++ if (reg_01.bits.version >= 0x10) {
++ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
++ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
++ if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++ }
++
++ printk(KERN_DEBUG ".... IRQ redirection table:\n");
++
++ printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
++ " Stat Dest Deli Vect: \n");
++
++ for (i = 0; i <= reg_01.bits.entries; i++) {
++ struct IO_APIC_route_entry entry;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
++ *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ printk(KERN_DEBUG " %02x %03X %02X ",
++ i,
++ entry.dest.logical.logical_dest,
++ entry.dest.physical.physical_dest
++ );
++
++ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
++ entry.mask,
++ entry.trigger,
++ entry.irr,
++ entry.polarity,
++ entry.delivery_status,
++ entry.dest_mode,
++ entry.delivery_mode,
++ entry.vector
++ );
++ }
++ }
++ if (use_pci_vector())
++ printk(KERN_INFO "Using vector-based indexing\n");
++ printk(KERN_DEBUG "IRQ to pin mappings:\n");
++ for (i = 0; i < NR_IRQS; i++) {
++ struct irq_pin_list *entry = irq_2_pin + i;
++ if (entry->pin < 0)
++ continue;
++ if (use_pci_vector() && !platform_legacy_irq(i))
++ printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
++ else
++ printk(KERN_DEBUG "IRQ%d ", i);
++ for (;;) {
++ printk("-> %d:%d", entry->apic, entry->pin);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++ printk("\n");
++ }
++
++ printk(KERN_INFO ".................................... done.\n");
++
++ return;
++}
++
++#if 0
++
++static __apicdebuginit void print_APIC_bitfield (int base)
++{
++ unsigned int v;
++ int i, j;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
++ for (i = 0; i < 8; i++) {
++ v = apic_read(base + i*0x10);
++ for (j = 0; j < 32; j++) {
++ if (v & (1<<j))
++ printk("1");
++ else
++ printk("0");
++ }
++ printk("\n");
++ }
++}
++
++void __apicdebuginit print_local_APIC(void * dummy)
++{
++ unsigned int v, ver, maxlvt;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
++ smp_processor_id(), hard_smp_processor_id());
++ v = apic_read(APIC_ID);
++ printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
++ v = apic_read(APIC_LVR);
++ printk(KERN_INFO "... APIC VERSION: %08x\n", v);
++ ver = GET_APIC_VERSION(v);
++ maxlvt = get_maxlvt();
++
++ v = apic_read(APIC_TASKPRI);
++ printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
++
++ v = apic_read(APIC_ARBPRI);
++ printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
++ v & APIC_ARBPRI_MASK);
++ v = apic_read(APIC_PROCPRI);
++ printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
++
++ v = apic_read(APIC_EOI);
++ printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
++ v = apic_read(APIC_RRR);
++ printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
++ v = apic_read(APIC_LDR);
++ printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
++ v = apic_read(APIC_DFR);
++ printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
++ v = apic_read(APIC_SPIV);
++ printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
++
++ printk(KERN_DEBUG "... APIC ISR field:\n");
++ print_APIC_bitfield(APIC_ISR);
++ printk(KERN_DEBUG "... APIC TMR field:\n");
++ print_APIC_bitfield(APIC_TMR);
++ printk(KERN_DEBUG "... APIC IRR field:\n");
++ print_APIC_bitfield(APIC_IRR);
++
++ v = apic_read(APIC_ESR);
++ printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
++
++ v = apic_read(APIC_ICR);
++ printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
++ v = apic_read(APIC_ICR2);
++ printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
++
++ v = apic_read(APIC_LVTT);
++ printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
++
++ if (maxlvt > 3) { /* PC is LVT#4. */
++ v = apic_read(APIC_LVTPC);
++ printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
++ }
++ v = apic_read(APIC_LVT0);
++ printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
++ v = apic_read(APIC_LVT1);
++ printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
++
++ if (maxlvt > 2) { /* ERR is LVT#3. */
++ v = apic_read(APIC_LVTERR);
++ printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
++ }
++
++ v = apic_read(APIC_TMICT);
++ printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
++ v = apic_read(APIC_TMCCT);
++ printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
++ v = apic_read(APIC_TDCR);
++ printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
++ printk("\n");
++}
++
++void print_all_local_APICs (void)
++{
++ on_each_cpu(print_local_APIC, NULL, 1, 1);
++}
++
++void __apicdebuginit print_PIC(void)
++{
++ unsigned int v;
++ unsigned long flags;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "\nprinting PIC contents\n");
++
++ spin_lock_irqsave(&i8259A_lock, flags);
++
++ v = inb(0xa1) << 8 | inb(0x21);
++ printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
++
++ v = inb(0xa0) << 8 | inb(0x20);
++ printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
++
++ outb(0x0b,0xa0);
++ outb(0x0b,0x20);
++ v = inb(0xa0) << 8 | inb(0x20);
++ outb(0x0a,0xa0);
++ outb(0x0a,0x20);
++
++ spin_unlock_irqrestore(&i8259A_lock, flags);
++
++ printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
++
++ v = inb(0x4d1) << 8 | inb(0x4d0);
++ printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
++}
++
++#endif /* 0 */
++
++#else
++void __init print_IO_APIC(void) { }
++#endif /* !CONFIG_XEN */
++
++static void __init enable_IO_APIC(void)
++{
++ union IO_APIC_reg_01 reg_01;
++#ifndef CONFIG_XEN
++ int i8259_apic, i8259_pin;
++#endif
++ int i, apic;
++ unsigned long flags;
++
++ for (i = 0; i < PIN_MAP_SIZE; i++) {
++ irq_2_pin[i].pin = -1;
++ irq_2_pin[i].next = 0;
++ }
++ if (!pirqs_enabled)
++ for (i = 0; i < MAX_PIRQS; i++)
++ pirq_entries[i] = -1;
++
++ /*
++ * The number of IO-APIC IRQ registers (== #pins):
++ */
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(apic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
++ }
++#ifndef CONFIG_XEN
++ for(apic = 0; apic < nr_ioapics; apic++) {
++ int pin;
++ /* See if any of the pins is in ExtINT mode */
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ struct IO_APIC_route_entry entry;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++
++ /* If the interrupt line is enabled and in ExtInt mode
++ * I have found the pin where the i8259 is connected.
++ */
++ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
++ ioapic_i8259.apic = apic;
++ ioapic_i8259.pin = pin;
++ goto found_i8259;
++ }
++ }
++ }
++ found_i8259:
++	/* Look to see if the MP table has reported the ExtINT */
++ i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
++ i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
++	/* Trust the MP table if nothing is set up in the hardware */
++ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
++ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
++ ioapic_i8259.pin = i8259_pin;
++ ioapic_i8259.apic = i8259_apic;
++ }
++ /* Complain if the MP table and the hardware disagree */
++ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
++ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
++ {
++ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
++ }
++#endif
++
++ /*
++ * Do not trust the IO-APIC being empty at bootup
++ */
++ clear_IO_APIC();
++}
++
++/*
++ * Not an __init, needed by the reboot code
++ */
++void disable_IO_APIC(void)
++{
++ /*
++ * Clear the IO-APIC before rebooting:
++ */
++ clear_IO_APIC();
++
++#ifndef CONFIG_XEN
++ /*
++ * If the i8259 is routed through an IOAPIC
++ * Put that IOAPIC in virtual wire mode
++ * so legacy interrupts can be delivered.
++ */
++ if (ioapic_i8259.pin != -1) {
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ memset(&entry, 0, sizeof(entry));
++ entry.mask = 0; /* Enabled */
++ entry.trigger = 0; /* Edge */
++ entry.irr = 0;
++ entry.polarity = 0; /* High */
++ entry.delivery_status = 0;
++ entry.dest_mode = 0; /* Physical */
++ entry.delivery_mode = dest_ExtINT; /* ExtInt */
++ entry.vector = 0;
++ entry.dest.physical.physical_dest =
++ GET_APIC_ID(apic_read(APIC_ID));
++
++ /*
++ * Add it to the IO-APIC irq-routing table:
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
++ *(((int *)&entry)+1));
++ io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
++ *(((int *)&entry)+0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ }
++
++ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
++#endif
++}
++
++/*
++ * function to set the IO-APIC physical IDs based on the
++ * values stored in the MPC table.
++ *
++ * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
++ */
++
++#ifndef CONFIG_XEN
++static void __init setup_ioapic_ids_from_mpc (void)
++{
++ union IO_APIC_reg_00 reg_00;
++ int apic;
++ int i;
++ unsigned char old_id;
++ unsigned long flags;
++
++ /*
++ * Set the IOAPIC ID to the value stored in the MPC table.
++ */
++ for (apic = 0; apic < nr_ioapics; apic++) {
++
++ /* Read the register 0 value */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ old_id = mp_ioapics[apic].mpc_apicid;
++
++
++ printk(KERN_INFO "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
++
++
++ /*
++ * We need to adjust the IRQ routing table
++ * if the ID changed.
++ */
++ if (old_id != mp_ioapics[apic].mpc_apicid)
++ for (i = 0; i < mp_irq_entries; i++)
++ if (mp_irqs[i].mpc_dstapic == old_id)
++ mp_irqs[i].mpc_dstapic
++ = mp_ioapics[apic].mpc_apicid;
++
++ /*
++ * Read the right value from the MPC table and
++ * write it into the ID register.
++ */
++ apic_printk(APIC_VERBOSE,KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
++ mp_ioapics[apic].mpc_apicid);
++
++ reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0, reg_00.raw);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ /*
++ * Sanity check
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
++ printk("could not set ID!\n");
++ else
++ apic_printk(APIC_VERBOSE," ok.\n");
++ }
++}
++#else
++static void __init setup_ioapic_ids_from_mpc(void) { }
++#endif
++
++/*
++ * There is a nasty bug in some older SMP boards: their mptable lies
++ * about the timer IRQ. We do the following to work around the situation:
++ *
++ * - timer IRQ defaults to IO-APIC IRQ
++ * - if this function detects that timer IRQs are defunct, then we fall
++ * back to ISA timer IRQs
++ */
++#ifndef CONFIG_XEN
++static int __init timer_irq_works(void)
++{
++ unsigned long t1 = jiffies;
++
++ local_irq_enable();
++ /* Let ten ticks pass... */
++ mdelay((10 * 1000) / HZ);
++
++ /*
++ * Expect a few ticks at least, to be sure some possible
++ * glue logic does not lock up after the first one or
++ * two ticks in a non-ExtINT mode.  Also the local APIC
++ * might have cached one ExtINT interrupt. Finally, at
++ * least one tick may be lost due to delays.
++ */
++
++ /* jiffies wrap? */
++ if (jiffies - t1 > 4)
++ return 1;
++ return 0;
++}
++
++/*
++ * In the SMP+IOAPIC case it might happen that there are an unspecified
++ * number of pending IRQ events unhandled. These cases are very rare,
++ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
++ * better to do it this way because then we do not have to be aware of
++ * 'pending' interrupts in the IRQ path, except at this point.
++ */
++/*
++ * Edge triggered needs to resend any interrupt
++ * that was delayed but this is now handled in the device
++ * independent code.
++ */
++
++/*
++ * Starting up an edge-triggered IO-APIC interrupt is
++ * nasty - we need to make sure that we get the edge.
++ * If it is already asserted for some reason, we need to
++ * return 1 to indicate that it was pending.
++ *
++ * This is not complete - we should be able to fake
++ * an edge even if it isn't on the 8259A...
++ */
++
++static unsigned int startup_edge_ioapic_irq(unsigned int irq)
++{
++ int was_pending = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ if (irq < 16) {
++ disable_8259A_irq(irq);
++ if (i8259A_irq_pending(irq))
++ was_pending = 1;
++ }
++ __unmask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return was_pending;
++}
++
++/*
++ * Once we have recorded IRQ_PENDING already, we can mask the
++ * interrupt for real. This prevents IRQ storms from unhandled
++ * devices.
++ */
++static void ack_edge_ioapic_irq(unsigned int irq)
++{
++ move_irq(irq);
++ if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
++ == (IRQ_PENDING | IRQ_DISABLED))
++ mask_IO_APIC_irq(irq);
++ ack_APIC_irq();
++}
++
++/*
++ * Level triggered interrupts can just be masked,
++ * and shutting down and starting up the interrupt
++ * is the same as enabling and disabling them -- except
++ * that startup needs to return a "was pending" value.
++ *
++ * Level triggered interrupts are special because we
++ * do not touch any IO-APIC register while handling
++ * them. We ack the APIC in the end-IRQ handler, not
++ * in the start-IRQ-handler. Protection against reentrance
++ * from the same interrupt is still provided, both by the
++ * generic IRQ layer and by the fact that an unacked local
++ * APIC does not accept IRQs.
++ */
++static unsigned int startup_level_ioapic_irq (unsigned int irq)
++{
++ unmask_IO_APIC_irq(irq);
++
++ return 0; /* don't check for pending */
++}
++
++static void end_level_ioapic_irq (unsigned int irq)
++{
++ move_irq(irq);
++ ack_APIC_irq();
++}
++
++#ifdef CONFIG_PCI_MSI
++static unsigned int startup_edge_ioapic_vector(unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ return startup_edge_ioapic_irq(irq);
++}
++
++static void ack_edge_ioapic_vector(unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ move_native_irq(vector);
++ ack_edge_ioapic_irq(irq);
++}
++
++static unsigned int startup_level_ioapic_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ return startup_level_ioapic_irq (irq);
++}
++
++static void end_level_ioapic_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ move_native_irq(vector);
++ end_level_ioapic_irq(irq);
++}
++
++static void mask_IO_APIC_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ mask_IO_APIC_irq(irq);
++}
++
++static void unmask_IO_APIC_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ unmask_IO_APIC_irq(irq);
++}
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_vector (unsigned int vector,
++ cpumask_t cpu_mask)
++{
++ int irq = vector_to_irq(vector);
++
++ set_native_irq_info(vector, cpu_mask);
++ set_ioapic_affinity_irq(irq, cpu_mask);
++}
++#endif // CONFIG_SMP
++#endif // CONFIG_PCI_MSI
++
++static int ioapic_retrigger(unsigned int irq)
++{
++ send_IPI_self(IO_APIC_VECTOR(irq));
++
++ return 1;
++}
++
++/*
++ * Level and edge triggered IO-APIC interrupts need different handling,
++ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
++ * handled with the level-triggered descriptor, but that one has slightly
++ * more overhead. Level-triggered interrupts cannot be handled with the
++ * edge-triggered handler, without risking IRQ storms and other ugly
++ * races.
++ */
++
++static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
++ .typename = "IO-APIC-edge",
++ .startup = startup_edge_ioapic,
++ .shutdown = shutdown_edge_ioapic,
++ .enable = enable_edge_ioapic,
++ .disable = disable_edge_ioapic,
++ .ack = ack_edge_ioapic,
++ .end = end_edge_ioapic,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ioapic_affinity,
++#endif
++ .retrigger = ioapic_retrigger,
++};
++
++static struct hw_interrupt_type ioapic_level_type __read_mostly = {
++ .typename = "IO-APIC-level",
++ .startup = startup_level_ioapic,
++ .shutdown = shutdown_level_ioapic,
++ .enable = enable_level_ioapic,
++ .disable = disable_level_ioapic,
++ .ack = mask_and_ack_level_ioapic,
++ .end = end_level_ioapic,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ioapic_affinity,
++#endif
++ .retrigger = ioapic_retrigger,
++};
++#endif /* !CONFIG_XEN */
++
++static inline void init_IO_APIC_traps(void)
++{
++ int irq;
++
++ /*
++ * NOTE! The local APIC isn't very good at handling
++ * multiple interrupts at the same interrupt level.
++ * As the interrupt level is determined by taking the
++ * vector number and shifting that right by 4, we
++ * want to spread these out a bit so that they don't
++ * all fall in the same interrupt level.
++ *
++ * Also, we've got to be careful not to trash gate
++ * 0x80, because int 0x80 is hm, kind of importantish. ;)
++ */
++ for (irq = 0; irq < NR_IRQS ; irq++) {
++ int tmp = irq;
++ if (use_pci_vector()) {
++ if (!platform_legacy_irq(tmp))
++ if ((tmp = vector_to_irq(tmp)) == -1)
++ continue;
++ }
++ if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
++ /*
++ * Hmm.. We don't have an entry for this,
++ * so default to an old-fashioned 8259
++ * interrupt if we can..
++ */
++ if (irq < 16)
++ make_8259A_irq(irq);
++#ifndef CONFIG_XEN
++ else
++ /* Strange. Oh, well.. */
++ irq_desc[irq].chip = &no_irq_type;
++#endif
++ }
++ }
++}
++
++#ifndef CONFIG_XEN
++static void enable_lapic_irq (unsigned int irq)
++{
++ unsigned long v;
++
++ v = apic_read(APIC_LVT0);
++ apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
++}
++
++static void disable_lapic_irq (unsigned int irq)
++{
++ unsigned long v;
++
++ v = apic_read(APIC_LVT0);
++ apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
++}
++
++static void ack_lapic_irq (unsigned int irq)
++{
++ ack_APIC_irq();
++}
++
++static void end_lapic_irq (unsigned int i) { /* nothing */ }
++
++static struct hw_interrupt_type lapic_irq_type __read_mostly = {
++ .typename = "local-APIC-edge",
++ .startup = NULL, /* startup_irq() not used for IRQ0 */
++ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
++ .enable = enable_lapic_irq,
++ .disable = disable_lapic_irq,
++ .ack = ack_lapic_irq,
++ .end = end_lapic_irq,
++};
++
++static void setup_nmi (void)
++{
++ /*
++ * Dirty trick to enable the NMI watchdog ...
++ * We put the 8259A master into AEOI mode and
++ * unmask on all local APICs LVT0 as NMI.
++ *
++ * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
++ * is from Maciej W. Rozycki - so we do not have to EOI from
++ * the NMI handler or the timer interrupt.
++ */
++ printk(KERN_INFO "activating NMI Watchdog ...");
++
++ enable_NMI_through_LVT0(NULL);
++
++ printk(" done.\n");
++}
++
++/*
++ * This looks a bit hackish but it's about the only way of sending
++ * a few INTA cycles to 8259As and any associated glue logic. ICR does
++ * not support the ExtINT mode, unfortunately. We need to send these
++ * cycles as some i82489DX-based boards have glue logic that keeps the
++ * 8259A interrupt line asserted until INTA. --macro
++ */
++static inline void unlock_ExtINT_logic(void)
++{
++ int apic, pin, i;
++ struct IO_APIC_route_entry entry0, entry1;
++ unsigned char save_control, save_freq_select;
++ unsigned long flags;
++
++ pin = find_isa_irq_pin(8, mp_INT);
++ apic = find_isa_irq_apic(8, mp_INT);
++ if (pin == -1)
++ return;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ clear_IO_APIC_pin(apic, pin);
++
++ memset(&entry1, 0, sizeof(entry1));
++
++ entry1.dest_mode = 0; /* physical delivery */
++ entry1.mask = 0; /* unmask IRQ now */
++ entry1.dest.physical.physical_dest = hard_smp_processor_id();
++ entry1.delivery_mode = dest_ExtINT;
++ entry1.polarity = entry0.polarity;
++ entry1.trigger = 0;
++ entry1.vector = 0;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ save_control = CMOS_READ(RTC_CONTROL);
++ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
++ CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
++ RTC_FREQ_SELECT);
++ CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
++
++ i = 100;
++ while (i-- > 0) {
++ mdelay(10);
++ if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
++ i -= 10;
++ }
++
++ CMOS_WRITE(save_control, RTC_CONTROL);
++ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
++ clear_IO_APIC_pin(apic, pin);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++int timer_uses_ioapic_pin_0;
++
++/*
++ * This code may look a bit paranoid, but it's supposed to cooperate with
++ * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
++ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
++ * fanatically on his truly buggy board.
++ *
++ * FIXME: really need to revamp this for modern platforms only.
++ */
++static inline void check_timer(void)
++{
++ int apic1, pin1, apic2, pin2;
++ int vector;
++
++ /*
++ * get/set the timer IRQ vector:
++ */
++ disable_8259A_irq(0);
++ vector = assign_irq_vector(0);
++ set_intr_gate(vector, interrupt[0]);
++
++ /*
++ * Subtle, code in do_timer_interrupt() expects an AEOI
++ * mode for the 8259A whenever interrupts are routed
++ * through I/O APICs. Also IRQ0 has to be enabled in
++ * the 8259A which implies the virtual wire has to be
++ * disabled in the local APIC.
++ */
++ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++ init_8259A(1);
++ if (timer_over_8254 > 0)
++ enable_8259A_irq(0);
++
++ pin1 = find_isa_irq_pin(0, mp_INT);
++ apic1 = find_isa_irq_apic(0, mp_INT);
++ pin2 = ioapic_i8259.pin;
++ apic2 = ioapic_i8259.apic;
++
++ if (pin1 == 0)
++ timer_uses_ioapic_pin_0 = 1;
++
++ apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
++ vector, apic1, pin1, apic2, pin2);
++
++ if (pin1 != -1) {
++ /*
++ * Ok, does IRQ0 through the IOAPIC work?
++ */
++ unmask_IO_APIC_irq(0);
++ if (!no_timer_check && timer_irq_works()) {
++ nmi_watchdog_default();
++ if (nmi_watchdog == NMI_IO_APIC) {
++ disable_8259A_irq(0);
++ setup_nmi();
++ enable_8259A_irq(0);
++ }
++ if (disable_timer_pin_1 > 0)
++ clear_IO_APIC_pin(0, pin1);
++ return;
++ }
++ clear_IO_APIC_pin(apic1, pin1);
++ apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
++ "connected to IO-APIC\n");
++ }
++
++ apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
++ "through the 8259A ... ");
++ if (pin2 != -1) {
++ apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
++ apic2, pin2);
++ /*
++ * legacy devices should be connected to IO APIC #0
++ */
++ setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
++ if (timer_irq_works()) {
++ apic_printk(APIC_VERBOSE," works.\n");
++ nmi_watchdog_default();
++ if (nmi_watchdog == NMI_IO_APIC) {
++ setup_nmi();
++ }
++ return;
++ }
++ /*
++ * Cleanup, just in case ...
++ */
++ clear_IO_APIC_pin(apic2, pin2);
++ }
++ apic_printk(APIC_VERBOSE," failed.\n");
++
++ if (nmi_watchdog == NMI_IO_APIC) {
++ printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
++ nmi_watchdog = 0;
++ }
++
++ apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
++
++ disable_8259A_irq(0);
++ irq_desc[0].chip = &lapic_irq_type;
++ apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
++ enable_8259A_irq(0);
++
++ if (timer_irq_works()) {
++ apic_printk(APIC_VERBOSE," works.\n");
++ return;
++ }
++ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
++ apic_printk(APIC_VERBOSE," failed.\n");
++
++ apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
++
++ init_8259A(0);
++ make_8259A_irq(0);
++ apic_write(APIC_LVT0, APIC_DM_EXTINT);
++
++ unlock_ExtINT_logic();
++
++ if (timer_irq_works()) {
++ apic_printk(APIC_VERBOSE," works.\n");
++ return;
++ }
++ apic_printk(APIC_VERBOSE," failed :(.\n");
++ panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
++}
++#else
++#define check_timer() ((void)0)
++int timer_uses_ioapic_pin_0 = 0;
++#endif /* !CONFIG_XEN */
++
++static int __init notimercheck(char *s)
++{
++ no_timer_check = 1;
++ return 1;
++}
++__setup("no_timer_check", notimercheck);
++
++/*
++ *
++ * IRQ's that are handled by the PIC in the MPS IOAPIC case.
++ * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ.
++ * Linux doesn't really care, as it's not actually used
++ * for any interrupt handling anyway.
++ */
++#define PIC_IRQS (1<<2)
++
++void __init setup_IO_APIC(void)
++{
++ enable_IO_APIC();
++
++ if (acpi_ioapic)
++ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
++ else
++ io_apic_irqs = ~PIC_IRQS;
++
++ apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
++
++ /*
++ * Set up the IO-APIC IRQ routing table.
++ */
++ if (!acpi_ioapic)
++ setup_ioapic_ids_from_mpc();
++#ifndef CONFIG_XEN
++ sync_Arb_IDs();
++#endif /* !CONFIG_XEN */
++ setup_IO_APIC_irqs();
++ init_IO_APIC_traps();
++ check_timer();
++ if (!acpi_ioapic)
++ print_IO_APIC();
++}
++
++struct sysfs_ioapic_data {
++ struct sys_device dev;
++ struct IO_APIC_route_entry entry[0];
++};
++static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
++
++static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
++{
++ struct IO_APIC_route_entry *entry;
++ struct sysfs_ioapic_data *data;
++ unsigned long flags;
++ int i;
++
++ data = container_of(dev, struct sysfs_ioapic_data, dev);
++ entry = data->entry;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++ *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
++ *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
++ }
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++static int ioapic_resume(struct sys_device *dev)
++{
++ struct IO_APIC_route_entry *entry;
++ struct sysfs_ioapic_data *data;
++ unsigned long flags;
++ union IO_APIC_reg_00 reg_00;
++ int i;
++
++ data = container_of(dev, struct sysfs_ioapic_data, dev);
++ entry = data->entry;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(dev->id, 0);
++ if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
++ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
++ io_apic_write(dev->id, 0, reg_00.raw);
++ }
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++ io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
++ io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
++ }
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++static struct sysdev_class ioapic_sysdev_class = {
++ set_kset_name("ioapic"),
++ .suspend = ioapic_suspend,
++ .resume = ioapic_resume,
++};
++
++static int __init ioapic_init_sysfs(void)
++{
++ struct sys_device * dev;
++ int i, size, error = 0;
++
++ error = sysdev_class_register(&ioapic_sysdev_class);
++ if (error)
++ return error;
++
++ for (i = 0; i < nr_ioapics; i++ ) {
++ size = sizeof(struct sys_device) + nr_ioapic_registers[i]
++ * sizeof(struct IO_APIC_route_entry);
++ mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
++ if (!mp_ioapic_data[i]) {
++ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++ continue;
++ }
++ memset(mp_ioapic_data[i], 0, size);
++ dev = &mp_ioapic_data[i]->dev;
++ dev->id = i;
++ dev->cls = &ioapic_sysdev_class;
++ error = sysdev_register(dev);
++ if (error) {
++ kfree(mp_ioapic_data[i]);
++ mp_ioapic_data[i] = NULL;
++ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++ continue;
++ }
++ }
++
++ return 0;
++}
++
++device_initcall(ioapic_init_sysfs);
++
++/* --------------------------------------------------------------------------
++ ACPI-based IOAPIC Configuration
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++#define IO_APIC_MAX_ID 0xFE
++
++int __init io_apic_get_version (int ioapic)
++{
++ union IO_APIC_reg_01 reg_01;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(ioapic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return reg_01.bits.version;
++}
++
++
++int __init io_apic_get_redir_entries (int ioapic)
++{
++ union IO_APIC_reg_01 reg_01;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(ioapic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return reg_01.bits.entries;
++}
++
++
++int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ if (!IO_APIC_IRQ(irq)) {
++ apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
++ ioapic);
++ return -EINVAL;
++ }
++
++ /*
++ * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
++ * Note that we mask (disable) IRQs now -- these get enabled when the
++ * corresponding device driver registers for this IRQ.
++ */
++
++ memset(&entry,0,sizeof(entry));
++
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.trigger = edge_level;
++ entry.polarity = active_high_low;
++ entry.mask = 1; /* Disabled (masked) */
++
++ irq = gsi_irq_sharing(irq);
++ /*
++ * IRQs < 16 are already in the irq_2_pin[] map
++ */
++ if (irq >= 16)
++ add_pin_to_irq(irq, ioapic, pin);
++
++ entry.vector = assign_irq_vector(irq);
++
++ apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
++ "IRQ %d Mode:%i Active:%i)\n", ioapic,
++ mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
++ edge_level, active_high_low);
++
++ ioapic_register_intr(irq, entry.vector, edge_level);
++
++ if (!ioapic && (irq < 16))
++ disable_8259A_irq(irq);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
++ set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++#endif /* CONFIG_ACPI */
++
++
++#ifndef CONFIG_XEN
++/*
++ * This function is currently only a helper for the i386 SMP boot process, where
++ * we need to reprogram the ioredtbls to cater for the CPUs which have come online,
++ * so the mask in all cases should simply be TARGET_CPUS.
++ */
++#ifdef CONFIG_SMP
++void __init setup_ioapic_dest(void)
++{
++ int pin, ioapic, irq, irq_entry;
++
++ if (skip_ioapic_setup == 1)
++ return;
++
++ for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
++ for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
++ irq_entry = find_irq_entry(ioapic, pin, mp_INT);
++ if (irq_entry == -1)
++ continue;
++ irq = pin_2_irq(irq_entry, ioapic, pin);
++ set_ioapic_affinity_irq(irq, TARGET_CPUS);
++ }
++
++ }
++}
++#endif
++#endif /* !CONFIG_XEN */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/ioport-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,99 @@
++/*
++ * linux/arch/x86_64/kernel/ioport.c
++ *
++ * This contains the io-permission bitmap code - written by obz, with changes
++ * by Linus.
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/ioport.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/thread_info.h>
++#include <xen/interface/physdev.h>
++
++/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
++static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
++{
++ int i;
++
++ if (new_value)
++ for (i = base; i < base + extent; i++)
++ __set_bit(i, bitmap);
++ else
++ for (i = base; i < base + extent; i++)
++ clear_bit(i, bitmap);
++}
++
++/*
++ * this changes the io permissions bitmap in the current task.
++ */
++asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
++{
++ struct thread_struct * t = &current->thread;
++ unsigned long *bitmap;
++ struct physdev_set_iobitmap set_iobitmap;
++
++ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
++ return -EINVAL;
++ if (turn_on && !capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
++ /*
++ * If it's the first ioperm() call in this thread's lifetime, set the
++ * IO bitmap up. ioperm() is much less timing critical than clone(),
++ * this is why we delay this operation until now:
++ */
++ if (!t->io_bitmap_ptr) {
++ bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++ if (!bitmap)
++ return -ENOMEM;
++
++ memset(bitmap, 0xff, IO_BITMAP_BYTES);
++ t->io_bitmap_ptr = bitmap;
++
++ set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
++ set_iobitmap.nr_ports = IO_BITMAP_BITS;
++ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
++ }
++
++ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
++
++ return 0;
++}
++
++/*
++ * sys_iopl has to be used when you want to access the IO ports
++ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
++ * you'd need 8kB of bitmaps/process, which is a bit excessive.
++ *
++ */
++
++asmlinkage long sys_iopl(unsigned int new_iopl, struct pt_regs *regs)
++{
++ unsigned int old_iopl = current->thread.iopl;
++ struct physdev_set_iopl set_iopl;
++
++ if (new_iopl > 3)
++ return -EINVAL;
++
++ /* Need "raw I/O" privileges for direct port access. */
++ if ((new_iopl > old_iopl) && !capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
++ /* Change our version of the privilege levels. */
++ current->thread.iopl = new_iopl;
++
++ /* Force the change at ring 0. */
++ set_iopl.iopl = (new_iopl == 0) ? 1 : new_iopl;
++ HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
++
++ return 0;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/irq-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,197 @@
++/*
++ * linux/arch/x86_64/kernel/irq.c
++ *
++ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
++ *
++ * This file contains the lowest level x86_64-specific interrupt
++ * entry and irq statistics code. All the remaining irq logic is
++ * done by the generic kernel/irq/ code and in the
++ * x86_64-specific irq controller code. (e.g. i8259.c and
++ * io_apic.c.)
++ */
++
++#include <linux/kernel_stat.h>
++#include <linux/interrupt.h>
++#include <linux/seq_file.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <asm/uaccess.h>
++#include <asm/io_apic.h>
++#include <asm/idle.h>
++
++atomic_t irq_err_count;
++#ifdef CONFIG_X86_IO_APIC
++#ifdef APIC_MISMATCH_DEBUG
++atomic_t irq_mis_count;
++#endif
++#endif
++
++#ifdef CONFIG_DEBUG_STACKOVERFLOW
++/*
++ * Probabilistic stack overflow check:
++ *
++ * Only check the stack in process context, because everything else
++ * runs on the big interrupt stacks. Checking reliably is too expensive,
++ * so we just check from interrupts.
++ */
++static inline void stack_overflow_check(struct pt_regs *regs)
++{
++ u64 curbase = (u64) current->thread_info;
++ static unsigned long warned = -60*HZ;
++
++ if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
++ regs->rsp < curbase + sizeof(struct thread_info) + 128 &&
++ time_after(jiffies, warned + 60*HZ)) {
++ printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
++ current->comm, curbase, regs->rsp);
++ show_stack(NULL,NULL);
++ warned = jiffies;
++ }
++}
++#endif
++
++/*
++ * Generic, controller-independent functions:
++ */
++
++int show_interrupts(struct seq_file *p, void *v)
++{
++ int i = *(loff_t *) v, j;
++ struct irqaction * action;
++ unsigned long flags;
++
++ if (i == 0) {
++ seq_printf(p, " ");
++ for_each_online_cpu(j)
++ seq_printf(p, "CPU%-8d",j);
++ seq_putc(p, '\n');
++ }
++
++ if (i < NR_IRQS) {
++ spin_lock_irqsave(&irq_desc[i].lock, flags);
++ action = irq_desc[i].action;
++ if (!action)
++ goto skip;
++ seq_printf(p, "%3d: ",i);
++#ifndef CONFIG_SMP
++ seq_printf(p, "%10u ", kstat_irqs(i));
++#else
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++#endif
++ seq_printf(p, " %14s", irq_desc[i].chip->typename);
++
++ seq_printf(p, " %s", action->name);
++ for (action=action->next; action; action = action->next)
++ seq_printf(p, ", %s", action->name);
++ seq_putc(p, '\n');
++skip:
++ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
++ } else if (i == NR_IRQS) {
++ seq_printf(p, "NMI: ");
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
++ seq_putc(p, '\n');
++#ifdef CONFIG_X86_LOCAL_APIC
++ seq_printf(p, "LOC: ");
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
++ seq_putc(p, '\n');
++#endif
++ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
++#ifdef CONFIG_X86_IO_APIC
++#ifdef APIC_MISMATCH_DEBUG
++ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
++#endif
++#endif
++ }
++ return 0;
++}
++
++/*
++ * do_IRQ handles all normal device IRQ's (the special
++ * SMP cross-CPU interrupts have their own specific
++ * handlers).
++ */
++asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
++{
++ /* high bit used in ret_from_ code */
++ unsigned irq = ~regs->orig_rax;
++
++ if (unlikely(irq >= NR_IRQS)) {
++ printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
++ __FUNCTION__, irq);
++ BUG();
++ }
++
++ exit_idle();
++ irq_enter();
++#ifdef CONFIG_DEBUG_STACKOVERFLOW
++ stack_overflow_check(regs);
++#endif
++ __do_IRQ(irq, regs);
++ irq_exit();
++
++ return 1;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++void fixup_irqs(cpumask_t map)
++{
++ unsigned int irq;
++ static int warned;
++
++ for (irq = 0; irq < NR_IRQS; irq++) {
++ cpumask_t mask;
++ if (irq == 2)
++ continue;
++
++ cpus_and(mask, irq_desc[irq].affinity, map);
++ if (any_online_cpu(mask) == NR_CPUS) {
++ printk("Breaking affinity for irq %i\n", irq);
++ mask = map;
++ }
++ if (irq_desc[irq].chip->set_affinity)
++ irq_desc[irq].chip->set_affinity(irq, mask);
++ else if (irq_desc[irq].action && !(warned++))
++ printk("Cannot set affinity for irq %i\n", irq);
++ }
++
++ /* That doesn't seem sufficient. Give it 1ms. */
++ local_irq_enable();
++ mdelay(1);
++ local_irq_disable();
++}
++#endif
++
++extern void call_softirq(void);
++
++asmlinkage void do_softirq(void)
++{
++ __u32 pending;
++ unsigned long flags;
++
++ if (in_interrupt())
++ return;
++
++ local_irq_save(flags);
++ pending = local_softirq_pending();
++ /* Switch to interrupt stack */
++ if (pending) {
++ call_softirq();
++ WARN_ON_ONCE(softirq_count());
++ }
++ local_irq_restore(flags);
++}
++EXPORT_SYMBOL(do_softirq);
++
++#ifndef CONFIG_X86_LOCAL_APIC
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * each architecture has to answer this themselves.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++ printk("unexpected IRQ trap at vector %02x\n", irq);
++}
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/ldt-xen.c 2007-08-27 14:01:58.000000000 -0400
+@@ -0,0 +1,282 @@
++/*
++ * linux/arch/x86_64/kernel/ldt.c
++ *
++ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
++ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
++ * Copyright (C) 2002 Andi Kleen
++ *
++ * This handles calls from both 32bit and 64bit mode.
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
++
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/ldt.h>
++#include <asm/desc.h>
++#include <asm/proto.h>
++#include <asm/pgalloc.h>
++
++#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
++static void flush_ldt(void *null)
++{
++ if (current->active_mm)
++ load_LDT(&current->active_mm->context);
++}
++#endif
++
++static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
++{
++ void *oldldt;
++ void *newldt;
++ unsigned oldsize;
++
++ if (mincount <= (unsigned)pc->size)
++ return 0;
++ oldsize = pc->size;
++ mincount = (mincount+511)&(~511);
++ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
++ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
++ else
++ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
++
++ if (!newldt)
++ return -ENOMEM;
++
++ if (oldsize)
++ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
++ oldldt = pc->ldt;
++ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
++ wmb();
++ pc->ldt = newldt;
++ wmb();
++ pc->size = mincount;
++ wmb();
++ if (reload) {
++#ifdef CONFIG_SMP
++ cpumask_t mask;
++
++ preempt_disable();
++#endif
++ make_pages_readonly(
++ pc->ldt,
++ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ load_LDT(pc);
++#ifdef CONFIG_SMP
++ mask = cpumask_of_cpu(smp_processor_id());
++ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
++ smp_call_function(flush_ldt, NULL, 1, 1);
++ preempt_enable();
++#endif
++ }
++ if (oldsize) {
++ make_pages_writable(
++ oldldt,
++ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(oldldt);
++ else
++ kfree(oldldt);
++ }
++ return 0;
++}
++
++static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++{
++ int err = alloc_ldt(new, old->size, 0);
++ if (err < 0)
++ return err;
++ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
++ make_pages_readonly(
++ new->ldt,
++ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ return 0;
++}
++
++/*
++ * we do not have to muck with descriptors here, that is
++ * done in switch_mm() as needed.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
++{
++ struct mm_struct * old_mm;
++ int retval = 0;
++
++ memset(&mm->context, 0, sizeof(mm->context));
++ init_MUTEX(&mm->context.sem);
++ old_mm = current->mm;
++ if (old_mm && old_mm->context.size > 0) {
++ down(&old_mm->context.sem);
++ retval = copy_ldt(&mm->context, &old_mm->context);
++ up(&old_mm->context.sem);
++ }
++ if (retval == 0) {
++ spin_lock(&mm_unpinned_lock);
++ list_add(&mm->context.unpinned, &mm_unpinned);
++ spin_unlock(&mm_unpinned_lock);
++ }
++ return retval;
++}
++
++/*
++ *
++ * Don't touch the LDT register - we're already in the next thread.
++ */
++void destroy_context(struct mm_struct *mm)
++{
++ if (mm->context.size) {
++ if (mm == current->active_mm)
++ clear_LDT();
++ make_pages_writable(
++ mm->context.ldt,
++ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(mm->context.ldt);
++ else
++ kfree(mm->context.ldt);
++ mm->context.size = 0;
++ }
++ if (!mm->context.pinned) {
++ spin_lock(&mm_unpinned_lock);
++ list_del(&mm->context.unpinned);
++ spin_unlock(&mm_unpinned_lock);
++ }
++}
++
++static int read_ldt(void __user * ptr, unsigned long bytecount)
++{
++ int err;
++ unsigned long size;
++ struct mm_struct * mm = current->mm;
++
++ if (!mm->context.size)
++ return 0;
++ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
++ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
++
++ down(&mm->context.sem);
++ size = mm->context.size*LDT_ENTRY_SIZE;
++ if (size > bytecount)
++ size = bytecount;
++
++ err = 0;
++ if (copy_to_user(ptr, mm->context.ldt, size))
++ err = -EFAULT;
++ up(&mm->context.sem);
++ if (err < 0)
++ goto error_return;
++ if (size != bytecount) {
++ /* zero-fill the rest */
++ if (clear_user(ptr+size, bytecount-size) != 0) {
++ err = -EFAULT;
++ goto error_return;
++ }
++ }
++ return bytecount;
++error_return:
++ return err;
++}
++
++static int read_default_ldt(void __user * ptr, unsigned long bytecount)
++{
++ /* Arbitrary number */
++ /* x86-64 default LDT is all zeros */
++ if (bytecount > 128)
++ bytecount = 128;
++ if (clear_user(ptr, bytecount))
++ return -EFAULT;
++ return bytecount;
++}
++
++static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
++{
++ struct task_struct *me = current;
++ struct mm_struct * mm = me->mm;
++ __u32 entry_1, entry_2, *lp;
++ unsigned long mach_lp;
++ int error;
++ struct user_desc ldt_info;
++
++ error = -EINVAL;
++
++ if (bytecount != sizeof(ldt_info))
++ goto out;
++ error = -EFAULT;
++ if (copy_from_user(&ldt_info, ptr, bytecount))
++ goto out;
++
++ error = -EINVAL;
++ if (ldt_info.entry_number >= LDT_ENTRIES)
++ goto out;
++ if (ldt_info.contents == 3) {
++ if (oldmode)
++ goto out;
++ if (ldt_info.seg_not_present == 0)
++ goto out;
++ }
++
++ down(&mm->context.sem);
++ if (ldt_info.entry_number >= (unsigned)mm->context.size) {
++ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
++ if (error < 0)
++ goto out_unlock;
++ }
++
++ lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
++ mach_lp = arbitrary_virt_to_machine(lp);
++
++ /* Allow LDTs to be cleared by the user. */
++ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
++ if (oldmode || LDT_empty(&ldt_info)) {
++ entry_1 = 0;
++ entry_2 = 0;
++ goto install;
++ }
++ }
++
++ entry_1 = LDT_entry_a(&ldt_info);
++ entry_2 = LDT_entry_b(&ldt_info);
++ if (oldmode)
++ entry_2 &= ~(1 << 20);
++
++ /* Install the new entry ... */
++install:
++ error = HYPERVISOR_update_descriptor(mach_lp, (unsigned long)((entry_1 | (unsigned long) entry_2 << 32)));
++
++out_unlock:
++ up(&mm->context.sem);
++out:
++ return error;
++}
++
++asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
++{
++ int ret = -ENOSYS;
++
++ switch (func) {
++ case 0:
++ ret = read_ldt(ptr, bytecount);
++ break;
++ case 1:
++ ret = write_ldt(ptr, bytecount, 1);
++ break;
++ case 2:
++ ret = read_default_ldt(ptr, bytecount);
++ break;
++ case 0x11:
++ ret = write_ldt(ptr, bytecount, 0);
++ break;
++ }
++ return ret;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/mpparse-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,1011 @@
++/*
++ * Intel Multiprocessor Specification 1.1 and 1.4
++ * compliant MP-table parsing routines.
++ *
++ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
++ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Erich Boleyn : MP v1.4 and additional changes.
++ * Alan Cox : Added EBDA scanning
++ * Ingo Molnar : various cleanups and rewrites
++ * Maciej W. Rozycki: Bits for default MP configurations
++ * Paul Diefenbaugh: Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/acpi.h>
++#include <linux/module.h>
++
++#include <asm/smp.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/pgalloc.h>
++#include <asm/io_apic.h>
++#include <asm/proto.h>
++#include <asm/acpi.h>
++
++/* Have we found an MP table */
++int smp_found_config;
++unsigned int __initdata maxcpus = NR_CPUS;
++
++int acpi_found_madt;
++
++/*
++ * Various Linux-internal data structures created from the
++ * MP-table.
++ */
++unsigned char apic_version [MAX_APICS];
++unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++
++static int mp_current_pci_id = 0;
++/* I/O APIC entries */
++struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
++
++/* # of MP IRQ source entries */
++struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
++
++/* MP IRQ source entries */
++int mp_irq_entries;
++
++int nr_ioapics;
++int pic_mode;
++unsigned long mp_lapic_addr = 0;
++
++
++
++/* Processor that is doing the boot up */
++unsigned int boot_cpu_id = -1U;
++/* Internal processor count */
++unsigned int num_processors __initdata = 0;
++
++unsigned disabled_cpus __initdata;
++
++/* Bitmask of physically existing CPUs */
++physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
++
++/* ACPI MADT entry parsing functions */
++#ifdef CONFIG_ACPI
++extern struct acpi_boot_flags acpi_boot;
++#ifdef CONFIG_X86_LOCAL_APIC
++extern int acpi_parse_lapic (acpi_table_entry_header *header);
++extern int acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header);
++extern int acpi_parse_lapic_nmi (acpi_table_entry_header *header);
++#endif /*CONFIG_X86_LOCAL_APIC*/
++#ifdef CONFIG_X86_IO_APIC
++extern int acpi_parse_ioapic (acpi_table_entry_header *header);
++#endif /*CONFIG_X86_IO_APIC*/
++#endif /*CONFIG_ACPI*/
++
++u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++
++/*
++ * Intel MP BIOS table parsing routines:
++ */
++
++/*
++ * Checksum an MP configuration block.
++ */
++
++static int __init mpf_checksum(unsigned char *mp, int len)
++{
++ int sum = 0;
++
++ while (len--)
++ sum += *mp++;
++
++ return sum & 0xFF;
++}
++
++#ifndef CONFIG_XEN
++static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
++{
++ int cpu;
++ unsigned char ver;
++ cpumask_t tmp_map;
++
++ if (!(m->mpc_cpuflag & CPU_ENABLED)) {
++ disabled_cpus++;
++ return;
++ }
++
++ printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n",
++ m->mpc_apicid,
++ (m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
++ (m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
++ m->mpc_apicver);
++
++ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
++ Dprintk(" Bootup CPU\n");
++ boot_cpu_id = m->mpc_apicid;
++ }
++ if (num_processors >= NR_CPUS) {
++ printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
++ " Processor ignored.\n", NR_CPUS);
++ return;
++ }
++
++ num_processors++;
++ cpus_complement(tmp_map, cpu_present_map);
++ cpu = first_cpu(tmp_map);
++
++#if MAX_APICS < 255
++ if ((int)m->mpc_apicid > MAX_APICS) {
++ printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
++ m->mpc_apicid, MAX_APICS);
++ return;
++ }
++#endif
++ ver = m->mpc_apicver;
++
++ physid_set(m->mpc_apicid, phys_cpu_present_map);
++ /*
++ * Validate version
++ */
++ if (ver == 0x0) {
++ printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
++ ver = 0x10;
++ }
++ apic_version[m->mpc_apicid] = ver;
++ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
++ /*
++ * bios_cpu_apicid is required to have processors listed
++ * in same order as logical cpu numbers. Hence the first
++ * entry is BSP, and so on.
++ */
++ cpu = 0;
++ }
++ bios_cpu_apicid[cpu] = m->mpc_apicid;
++ x86_cpu_to_apicid[cpu] = m->mpc_apicid;
++
++ cpu_set(cpu, cpu_possible_map);
++ cpu_set(cpu, cpu_present_map);
++}
++#else
++static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
++{
++ num_processors++;
++}
++#endif /* CONFIG_XEN */
++
++static void __init MP_bus_info (struct mpc_config_bus *m)
++{
++ char str[7];
++
++ memcpy(str, m->mpc_bustype, 6);
++ str[6] = 0;
++ Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
++
++ if (strncmp(str, "ISA", 3) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
++ } else if (strncmp(str, "EISA", 4) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
++ } else if (strncmp(str, "PCI", 3) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
++ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
++ mp_current_pci_id++;
++ } else if (strncmp(str, "MCA", 3) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
++ } else {
++ printk(KERN_ERR "Unknown bustype %s\n", str);
++ }
++}
++
++static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
++{
++ if (!(m->mpc_flags & MPC_APIC_USABLE))
++ return;
++
++ printk("I/O APIC #%d Version %d at 0x%X.\n",
++ m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
++ if (nr_ioapics >= MAX_IO_APICS) {
++ printk(KERN_ERR "Max # of I/O APICs (%d) exceeded (found %d).\n",
++ MAX_IO_APICS, nr_ioapics);
++ panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
++ }
++ if (!m->mpc_apicaddr) {
++ printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
++ " found in MP table, skipping!\n");
++ return;
++ }
++ mp_ioapics[nr_ioapics] = *m;
++ nr_ioapics++;
++}
++
++static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
++{
++ mp_irqs [mp_irq_entries] = *m;
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
++ " IRQ %02x, APIC ID %x, APIC INT %02x\n",
++ m->mpc_irqtype, m->mpc_irqflag & 3,
++ (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
++ m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
++ if (++mp_irq_entries >= MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!!\n");
++}
++
++static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
++{
++ Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
++ " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
++ m->mpc_irqtype, m->mpc_irqflag & 3,
++ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
++ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
++ /*
++ * Well it seems all SMP boards in existence
++ * use ExtINT/LVT1 == LINT0 and
++ * NMI/LVT2 == LINT1 - the following check
++ * will show us if these assumptions are false.
++ * Until then we do not have to add baggage.
++ */
++ if ((m->mpc_irqtype == mp_ExtINT) &&
++ (m->mpc_destapiclint != 0))
++ BUG();
++ if ((m->mpc_irqtype == mp_NMI) &&
++ (m->mpc_destapiclint != 1))
++ BUG();
++}
++
++/*
++ * Read/parse the MPC
++ */
++
++static int __init smp_read_mpc(struct mp_config_table *mpc)
++{
++ char str[16];
++ int count=sizeof(*mpc);
++ unsigned char *mpt=((unsigned char *)mpc)+count;
++
++ if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
++ printk("SMP mptable: bad signature [%c%c%c%c]!\n",
++ mpc->mpc_signature[0],
++ mpc->mpc_signature[1],
++ mpc->mpc_signature[2],
++ mpc->mpc_signature[3]);
++ return 0;
++ }
++ if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
++ printk("SMP mptable: checksum error!\n");
++ return 0;
++ }
++ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
++ printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
++ mpc->mpc_spec);
++ return 0;
++ }
++ if (!mpc->mpc_lapic) {
++ printk(KERN_ERR "SMP mptable: null local APIC address!\n");
++ return 0;
++ }
++ memcpy(str,mpc->mpc_oem,8);
++ str[8]=0;
++ printk(KERN_INFO "OEM ID: %s ",str);
++
++ memcpy(str,mpc->mpc_productid,12);
++ str[12]=0;
++ printk("Product ID: %s ",str);
++
++ printk("APIC at: 0x%X\n",mpc->mpc_lapic);
++
++ /* save the local APIC address, it might be non-default */
++ if (!acpi_lapic)
++ mp_lapic_addr = mpc->mpc_lapic;
++
++ /*
++ * Now process the configuration blocks.
++ */
++ while (count < mpc->mpc_length) {
++ switch(*mpt) {
++ case MP_PROCESSOR:
++ {
++ struct mpc_config_processor *m=
++ (struct mpc_config_processor *)mpt;
++ if (!acpi_lapic)
++ MP_processor_info(m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
++ break;
++ }
++ case MP_BUS:
++ {
++ struct mpc_config_bus *m=
++ (struct mpc_config_bus *)mpt;
++ MP_bus_info(m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
++ break;
++ }
++ case MP_IOAPIC:
++ {
++ struct mpc_config_ioapic *m=
++ (struct mpc_config_ioapic *)mpt;
++ MP_ioapic_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ case MP_INTSRC:
++ {
++ struct mpc_config_intsrc *m=
++ (struct mpc_config_intsrc *)mpt;
++
++ MP_intsrc_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ case MP_LINTSRC:
++ {
++ struct mpc_config_lintsrc *m=
++ (struct mpc_config_lintsrc *)mpt;
++ MP_lintsrc_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ }
++ }
++ clustered_apic_check();
++ if (!num_processors)
++ printk(KERN_ERR "SMP mptable: no processors registered!\n");
++ return num_processors;
++}
++
++static int __init ELCR_trigger(unsigned int irq)
++{
++ unsigned int port;
++
++ port = 0x4d0 + (irq >> 3);
++ return (inb(port) >> (irq & 7)) & 1;
++}
++
++static void __init construct_default_ioirq_mptable(int mpc_default_type)
++{
++ struct mpc_config_intsrc intsrc;
++ int i;
++ int ELCR_fallback = 0;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqflag = 0; /* conforming */
++ intsrc.mpc_srcbus = 0;
++ intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
++
++ intsrc.mpc_irqtype = mp_INT;
++
++ /*
++ * If true, we have an ISA/PCI system with no IRQ entries
++ * in the MP table. To prevent the PCI interrupts from being set up
++ * incorrectly, we try to use the ELCR. The sanity check to see if
++ * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
++ * never be level sensitive, so we simply see if the ELCR agrees.
++ * If it does, we assume it's valid.
++ */
++ if (mpc_default_type == 5) {
++ printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
++
++ if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
++ printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n");
++ else {
++ printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
++ ELCR_fallback = 1;
++ }
++ }
++
++ for (i = 0; i < 16; i++) {
++ switch (mpc_default_type) {
++ case 2:
++ if (i == 0 || i == 13)
++ continue; /* IRQ0 & IRQ13 not connected */
++ /* fall through */
++ default:
++ if (i == 2)
++ continue; /* IRQ2 is never connected */
++ }
++
++ if (ELCR_fallback) {
++ /*
++ * If the ELCR indicates a level-sensitive interrupt, we
++ * copy that information over to the MP table in the
++ * irqflag field (level sensitive, active high polarity).
++ */
++ if (ELCR_trigger(i))
++ intsrc.mpc_irqflag = 13;
++ else
++ intsrc.mpc_irqflag = 0;
++ }
++
++ intsrc.mpc_srcbusirq = i;
++ intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
++ MP_intsrc_info(&intsrc);
++ }
++
++ intsrc.mpc_irqtype = mp_ExtINT;
++ intsrc.mpc_srcbusirq = 0;
++ intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
++ MP_intsrc_info(&intsrc);
++}
++
++static inline void __init construct_default_ISA_mptable(int mpc_default_type)
++{
++ struct mpc_config_processor processor;
++ struct mpc_config_bus bus;
++ struct mpc_config_ioapic ioapic;
++ struct mpc_config_lintsrc lintsrc;
++ int linttypes[2] = { mp_ExtINT, mp_NMI };
++ int i;
++
++ /*
++ * local APIC has default address
++ */
++ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
++
++ /*
++ * 2 CPUs, numbered 0 & 1.
++ */
++ processor.mpc_type = MP_PROCESSOR;
++ /* Either an integrated APIC or a discrete 82489DX. */
++ processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++ processor.mpc_cpuflag = CPU_ENABLED;
++ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++ (boot_cpu_data.x86_model << 4) |
++ boot_cpu_data.x86_mask;
++ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++ processor.mpc_reserved[0] = 0;
++ processor.mpc_reserved[1] = 0;
++ for (i = 0; i < 2; i++) {
++ processor.mpc_apicid = i;
++ MP_processor_info(&processor);
++ }
++
++ bus.mpc_type = MP_BUS;
++ bus.mpc_busid = 0;
++ switch (mpc_default_type) {
++ default:
++ printk(KERN_ERR "???\nUnknown standard configuration %d\n",
++ mpc_default_type);
++ /* fall through */
++ case 1:
++ case 5:
++ memcpy(bus.mpc_bustype, "ISA ", 6);
++ break;
++ case 2:
++ case 6:
++ case 3:
++ memcpy(bus.mpc_bustype, "EISA ", 6);
++ break;
++ case 4:
++ case 7:
++ memcpy(bus.mpc_bustype, "MCA ", 6);
++ }
++ MP_bus_info(&bus);
++ if (mpc_default_type > 4) {
++ bus.mpc_busid = 1;
++ memcpy(bus.mpc_bustype, "PCI ", 6);
++ MP_bus_info(&bus);
++ }
++
++ ioapic.mpc_type = MP_IOAPIC;
++ ioapic.mpc_apicid = 2;
++ ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++ ioapic.mpc_flags = MPC_APIC_USABLE;
++ ioapic.mpc_apicaddr = 0xFEC00000;
++ MP_ioapic_info(&ioapic);
++
++ /*
++ * We set up most of the low 16 IO-APIC pins according to MPS rules.
++ */
++ construct_default_ioirq_mptable(mpc_default_type);
++
++ lintsrc.mpc_type = MP_LINTSRC;
++ lintsrc.mpc_irqflag = 0; /* conforming */
++ lintsrc.mpc_srcbusid = 0;
++ lintsrc.mpc_srcbusirq = 0;
++ lintsrc.mpc_destapic = MP_APIC_ALL;
++ for (i = 0; i < 2; i++) {
++ lintsrc.mpc_irqtype = linttypes[i];
++ lintsrc.mpc_destapiclint = i;
++ MP_lintsrc_info(&lintsrc);
++ }
++}
++
++static struct intel_mp_floating *mpf_found;
++
++/*
++ * Scan the memory blocks for an SMP configuration block.
++ */
++void __init get_smp_config (void)
++{
++ struct intel_mp_floating *mpf = mpf_found;
++
++ /*
++ * ACPI supports both logical (e.g. Hyper-Threading) and physical
++ * processors, where MPS only supports physical.
++ */
++ if (acpi_lapic && acpi_ioapic) {
++ printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
++ return;
++ }
++ else if (acpi_lapic)
++ printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
++
++ printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
++ if (mpf->mpf_feature2 & (1<<7)) {
++ printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
++ pic_mode = 1;
++ } else {
++ printk(KERN_INFO " Virtual Wire compatibility mode.\n");
++ pic_mode = 0;
++ }
++
++ /*
++ * Now see if we need to read further.
++ */
++ if (mpf->mpf_feature1 != 0) {
++
++ printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
++ construct_default_ISA_mptable(mpf->mpf_feature1);
++
++ } else if (mpf->mpf_physptr) {
++
++ /*
++ * Read the physical hardware table. Anything here will
++ * override the defaults.
++ */
++ if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
++ smp_found_config = 0;
++ printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
++ printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
++ return;
++ }
++ /*
++ * If there are no explicit MP IRQ entries, then we are
++ * broken. We set up most of the low 16 IO-APIC pins to
++ * ISA defaults and hope it will work.
++ */
++ if (!mp_irq_entries) {
++ struct mpc_config_bus bus;
++
++ printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
++
++ bus.mpc_type = MP_BUS;
++ bus.mpc_busid = 0;
++ memcpy(bus.mpc_bustype, "ISA ", 6);
++ MP_bus_info(&bus);
++
++ construct_default_ioirq_mptable(0);
++ }
++
++ } else
++ BUG();
++
++ printk(KERN_INFO "Processors: %d\n", num_processors);
++ /*
++ * Only use the first configuration found.
++ */
++}
++
++static int __init smp_scan_config (unsigned long base, unsigned long length)
++{
++ extern void __bad_mpf_size(void);
++ unsigned int *bp = isa_bus_to_virt(base);
++ struct intel_mp_floating *mpf;
++
++ Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
++ if (sizeof(*mpf) != 16)
++ __bad_mpf_size();
++
++ while (length > 0) {
++ mpf = (struct intel_mp_floating *)bp;
++ if ((*bp == SMP_MAGIC_IDENT) &&
++ (mpf->mpf_length == 1) &&
++ !mpf_checksum((unsigned char *)bp, 16) &&
++ ((mpf->mpf_specification == 1)
++ || (mpf->mpf_specification == 4)) ) {
++
++ smp_found_config = 1;
++ mpf_found = mpf;
++ return 1;
++ }
++ bp += 4;
++ length -= 16;
++ }
++ return 0;
++}
++
++void __init find_intel_smp (void)
++{
++ unsigned int address;
++
++ /*
++ * FIXME: Linux assumes you have 640K of base ram..
++ * this continues the error...
++ *
++ * 1) Scan the bottom 1K for a signature
++ * 2) Scan the top 1K of base RAM
++ * 3) Scan the 64K of bios
++ */
++ if (smp_scan_config(0x0,0x400) ||
++ smp_scan_config(639*0x400,0x400) ||
++ smp_scan_config(0xF0000,0x10000))
++ return;
++ /*
++ * If it is an SMP machine we should know now, unless the
++ * configuration is in an EISA/MCA bus machine with an
++ * extended bios data area.
++ *
++ * there is a real-mode segmented pointer pointing to the
++ * 4K EBDA area at 0x40E, calculate and scan it here.
++ *
++ * NOTE! There are Linux loaders that will corrupt the EBDA
++ * area, and as such this kind of SMP config may be less
++ * trustworthy, simply because the SMP table may have been
++ * stomped on during early boot. These loaders are buggy and
++ * should be fixed.
++ */
++
++ address = *(unsigned short *)phys_to_virt(0x40E);
++ address <<= 4;
++ if (smp_scan_config(address, 0x1000))
++ return;
++
++ /* If we have come this far, we did not find an MP table */
++ printk(KERN_INFO "No mptable found.\n");
++}
++
++/*
++ * - Intel MP Configuration Table
++ */
++void __init find_smp_config (void)
++{
++#ifdef CONFIG_X86_LOCAL_APIC
++ find_intel_smp();
++#endif
++}
++
++
++/* --------------------------------------------------------------------------
++ ACPI-based MP Configuration
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++void __init mp_register_lapic_address (
++ u64 address)
++{
++#ifndef CONFIG_XEN
++ mp_lapic_addr = (unsigned long) address;
++
++ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
++
++ if (boot_cpu_id == -1U)
++ boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
++
++ Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
++#endif
++}
++
++
++void __cpuinit mp_register_lapic (
++ u8 id,
++ u8 enabled)
++{
++ struct mpc_config_processor processor;
++ int boot_cpu = 0;
++
++ if (id >= MAX_APICS) {
++ printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
++ id, MAX_APICS);
++ return;
++ }
++
++ if (id == boot_cpu_physical_apicid)
++ boot_cpu = 1;
++
++#ifndef CONFIG_XEN
++ processor.mpc_type = MP_PROCESSOR;
++ processor.mpc_apicid = id;
++ processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
++ processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
++ processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
++ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
++ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++ processor.mpc_reserved[0] = 0;
++ processor.mpc_reserved[1] = 0;
++#endif
++
++ MP_processor_info(&processor);
++}
++
++#ifdef CONFIG_X86_IO_APIC
++
++#define MP_ISA_BUS 0
++#define MP_MAX_IOAPIC_PIN 127
++
++static struct mp_ioapic_routing {
++ int apic_id;
++ int gsi_start;
++ int gsi_end;
++ u32 pin_programmed[4];
++} mp_ioapic_routing[MAX_IO_APICS];
++
++
++static int mp_find_ioapic (
++ int gsi)
++{
++ int i = 0;
++
++ /* Find the IOAPIC that manages this GSI. */
++ for (i = 0; i < nr_ioapics; i++) {
++ if ((gsi >= mp_ioapic_routing[i].gsi_start)
++ && (gsi <= mp_ioapic_routing[i].gsi_end))
++ return i;
++ }
++
++ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
++
++ return -1;
++}
++
++
++void __init mp_register_ioapic (
++ u8 id,
++ u32 address,
++ u32 gsi_base)
++{
++ int idx = 0;
++
++ if (nr_ioapics >= MAX_IO_APICS) {
++ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
++ "(found %d)\n", MAX_IO_APICS, nr_ioapics);
++ panic("Recompile kernel with bigger MAX_IO_APICS!\n");
++ }
++ if (!address) {
++ printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
++ " found in MADT table, skipping!\n");
++ return;
++ }
++
++ idx = nr_ioapics++;
++
++ mp_ioapics[idx].mpc_type = MP_IOAPIC;
++ mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
++ mp_ioapics[idx].mpc_apicaddr = address;
++
++#ifndef CONFIG_XEN
++ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
++#endif
++ mp_ioapics[idx].mpc_apicid = id;
++ mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
++
++ /*
++ * Build basic IRQ lookup table to facilitate gsi->io_apic lookups
++ * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
++ */
++ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
++ mp_ioapic_routing[idx].gsi_start = gsi_base;
++ mp_ioapic_routing[idx].gsi_end = gsi_base +
++ io_apic_get_redir_entries(idx);
++
++ printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
++ "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
++ mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
++ mp_ioapic_routing[idx].gsi_start,
++ mp_ioapic_routing[idx].gsi_end);
++
++ return;
++}
++
++
++void __init mp_override_legacy_irq (
++ u8 bus_irq,
++ u8 polarity,
++ u8 trigger,
++ u32 gsi)
++{
++ struct mpc_config_intsrc intsrc;
++ int ioapic = -1;
++ int pin = -1;
++
++ /*
++ * Convert 'gsi' to 'ioapic.pin'.
++ */
++ ioapic = mp_find_ioapic(gsi);
++ if (ioapic < 0)
++ return;
++ pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
++
++ /*
++ * TBD: This check is for faulty timer entries, where the override
++ * erroneously sets the trigger to level, resulting in a HUGE
++ * increase of timer interrupts!
++ */
++ if ((bus_irq == 0) && (trigger == 3))
++ trigger = 1;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqtype = mp_INT;
++ intsrc.mpc_irqflag = (trigger << 2) | polarity;
++ intsrc.mpc_srcbus = MP_ISA_BUS;
++ intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
++ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
++ intsrc.mpc_dstirq = pin; /* INTIN# */
++
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
++ intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
++ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
++ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
++
++ mp_irqs[mp_irq_entries] = intsrc;
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!\n");
++
++ return;
++}
++
++
++void __init mp_config_acpi_legacy_irqs (void)
++{
++ struct mpc_config_intsrc intsrc;
++ int i = 0;
++ int ioapic = -1;
++
++ /*
++ * Fabricate the legacy ISA bus (bus #31).
++ */
++ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
++ Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
++
++ /*
++ * Locate the IOAPIC that manages the ISA IRQs (0-15).
++ */
++ ioapic = mp_find_ioapic(0);
++ if (ioapic < 0)
++ return;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqflag = 0; /* Conforming */
++ intsrc.mpc_srcbus = MP_ISA_BUS;
++ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
++
++ /*
++ * Use the default configuration for the IRQs 0-15, unless
++ * overridden by (MADT) interrupt source override entries.
++ */
++ for (i = 0; i < 16; i++) {
++ int idx;
++
++ for (idx = 0; idx < mp_irq_entries; idx++) {
++ struct mpc_config_intsrc *irq = mp_irqs + idx;
++
++ /* Do we already have a mapping for this ISA IRQ? */
++ if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
++ break;
++
++ /* Do we already have a mapping for this IOAPIC pin */
++ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
++ (irq->mpc_dstirq == i))
++ break;
++ }
++
++ if (idx != mp_irq_entries) {
++ printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
++ continue; /* IRQ already used */
++ }
++
++ intsrc.mpc_irqtype = mp_INT;
++ intsrc.mpc_srcbusirq = i; /* Identity mapped */
++ intsrc.mpc_dstirq = i;
++
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
++ "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
++ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
++ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
++ intsrc.mpc_dstirq);
++
++ mp_irqs[mp_irq_entries] = intsrc;
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!\n");
++ }
++
++ return;
++}
++
++#define MAX_GSI_NUM 4096
++
++int mp_register_gsi(u32 gsi, int triggering, int polarity)
++{
++ int ioapic = -1;
++ int ioapic_pin = 0;
++ int idx, bit = 0;
++ static int pci_irq = 16;
++ /*
++ * Mapping between Global System Interrupts, which
++ * represent all possible interrupts, to the IRQs
++ * assigned to actual devices.
++ */
++ static int gsi_to_irq[MAX_GSI_NUM];
++
++ if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
++ return gsi;
++
++ /* Don't set up the ACPI SCI because it's already set up */
++ if (acpi_fadt.sci_int == gsi)
++ return gsi;
++
++ ioapic = mp_find_ioapic(gsi);
++ if (ioapic < 0) {
++ printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
++ return gsi;
++ }
++
++ ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
++
++ /*
++ * Avoid pin reprogramming. PRTs typically include entries
++ * with redundant pin->gsi mappings (but unique PCI devices);
++ * we only program the IOAPIC on the first.
++ */
++ bit = ioapic_pin % 32;
++ idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
++ if (idx > 3) {
++ printk(KERN_ERR "Invalid reference to IOAPIC pin "
++ "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
++ ioapic_pin);
++ return gsi;
++ }
++ if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
++ Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
++ mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
++ return gsi_to_irq[gsi];
++ }
++
++ mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
++
++ if (triggering == ACPI_LEVEL_SENSITIVE) {
++ /*
++ * For PCI devices assign IRQs in order, avoiding gaps
++ * due to unused I/O APIC pins.
++ */
++ int irq = gsi;
++ if (gsi < MAX_GSI_NUM) {
++ /*
++ * Retain the VIA chipset work-around (gsi > 15), but
++ * avoid a problem where the 8254 timer (IRQ0) is setup
++ * via an override (so it's not on pin 0 of the ioapic),
++ * and at the same time, the pin 0 interrupt is a PCI
++ * type. The gsi > 15 test could cause these two pins
++ * to be shared as IRQ0, and they are not shareable.
++ * So test for this condition, and if necessary, avoid
++ * the pin collision.
++ */
++ if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
++ gsi = pci_irq++;
++ /*
++ * Don't assign IRQ used by ACPI SCI
++ */
++ if (gsi == acpi_fadt.sci_int)
++ gsi = pci_irq++;
++ gsi_to_irq[irq] = gsi;
++ } else {
++ printk(KERN_ERR "GSI %u is too high\n", gsi);
++ return gsi;
++ }
++ }
++
++ io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
++ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
++ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
++ return gsi;
++}
++
++#endif /*CONFIG_X86_IO_APIC*/
++#endif /*CONFIG_ACPI*/
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/pci-swiotlb-xen.c 2007-08-27 14:02:04.000000000 -0400
+@@ -0,0 +1,55 @@
++/* Glue code to lib/swiotlb.c */
++
++#include <linux/pci.h>
++#include <linux/cache.h>
++#include <linux/module.h>
++#include <asm/dma-mapping.h>
++#include <asm/proto.h>
++#include <asm/swiotlb.h>
++#include <asm/dma.h>
++
++#if 0
++int swiotlb __read_mostly;
++EXPORT_SYMBOL(swiotlb);
++#endif
++
++struct dma_mapping_ops swiotlb_dma_ops = {
++#if 0
++ .mapping_error = swiotlb_dma_mapping_error,
++ .alloc_coherent = swiotlb_alloc_coherent,
++ .free_coherent = swiotlb_free_coherent,
++ .map_single = swiotlb_map_single,
++ .unmap_single = swiotlb_unmap_single,
++ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
++ .sync_single_for_device = swiotlb_sync_single_for_device,
++ .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
++ .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
++ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
++ .sync_sg_for_device = swiotlb_sync_sg_for_device,
++ .map_sg = swiotlb_map_sg,
++ .unmap_sg = swiotlb_unmap_sg,
++ .dma_supported = NULL,
++#endif
++};
++
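++/*
++ * Xen variant: the native auto-detection above is compiled out; instead
++ * swiotlb_init() (the Xen implementation) decides whether bounce buffering
++ * is needed, and swiotlb_dma_ops is only installed if it enabled swiotlb.
++ */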
++void pci_swiotlb_init(void)
++{
++#if 0
++ /* don't initialize swiotlb if iommu=off (no_iommu=1) */
++ if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
++ swiotlb = 1;
++ if (swiotlb_force)
++ swiotlb = 1;
++ if (swiotlb) {
++ printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
++ swiotlb_init();
++ dma_ops = &swiotlb_dma_ops;
++ }
++#else
++ swiotlb_init();
++ if (swiotlb) {
++ printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
++ dma_ops = &swiotlb_dma_ops;
++ }
++#endif
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/process-xen.c 2007-08-27 14:02:04.000000000 -0400
+@@ -0,0 +1,829 @@
++/*
++ * linux/arch/x86-64/kernel/process.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Pentium III FXSR, SSE support
++ * Gareth Hughes <gareth@valinux.com>, May 2000
++ *
++ * X86-64 port
++ * Andi Kleen.
++ *
++ * CPU hotplug support - ashok.raj@intel.com
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Modified for Xen
++ */
++
++/*
++ * This file handles the architecture-dependent parts of process handling..
++ */
++
++#include <stdarg.h>
++
++#include <linux/cpu.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/elfcore.h>
++#include <linux/smp.h>
++#include <linux/slab.h>
++#include <linux/user.h>
++#include <linux/module.h>
++#include <linux/a.out.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/ptrace.h>
++#include <linux/utsname.h>
++#include <linux/random.h>
++#include <linux/notifier.h>
++#include <linux/kprobes.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/mmu_context.h>
++#include <asm/pda.h>
++#include <asm/prctl.h>
++#include <asm/kdebug.h>
++#include <xen/interface/platform.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/vcpu.h>
++#include <asm/desc.h>
++#include <asm/proto.h>
++#include <asm/hardirq.h>
++#include <asm/ia32.h>
++#include <asm/idle.h>
++
++#include <xen/cpu_hotplug.h>
++
++asmlinkage extern void ret_from_fork(void);
++
++unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
++
++unsigned long boot_option_idle_override = 0;
++EXPORT_SYMBOL(boot_option_idle_override);
++
++/*
++ * Power management idle function, if any.
++ */
++void (*pm_idle)(void);
++EXPORT_SYMBOL(pm_idle);
++static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
++
++static ATOMIC_NOTIFIER_HEAD(idle_notifier);
++
++void idle_notifier_register(struct notifier_block *n)
++{
++ atomic_notifier_chain_register(&idle_notifier, n);
++}
++EXPORT_SYMBOL_GPL(idle_notifier_register);
++
++void idle_notifier_unregister(struct notifier_block *n)
++{
++ atomic_notifier_chain_unregister(&idle_notifier, n);
++}
++EXPORT_SYMBOL(idle_notifier_unregister);
++
++enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
++static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
++
++void enter_idle(void)
++{
++ __get_cpu_var(idle_state) = CPU_IDLE;
++ atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
++}
++
++static void __exit_idle(void)
++{
++ __get_cpu_var(idle_state) = CPU_NOT_IDLE;
++ atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
++}
++
++/* Called from interrupts to signify idle end */
++void exit_idle(void)
++{
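++	/*
++	 * Bitwise OR saves a branch: only the idle task (pid 0), and only
++	 * when not in a nested interrupt, actually leaves the idle state.
++	 */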
++ if (current->pid | read_pda(irqcount))
++ return;
++ __exit_idle();
++}
++
++/*
++ * On SMP it's slightly faster (but much more power-consuming!)
++ * to poll the ->need_resched flag instead of waiting for the
++ * cross-CPU IPI to arrive. Use this option with caution.
++ */
++static void poll_idle (void)
++{
++ local_irq_enable();
++
++ asm volatile(
++ "2:"
++ "testl %0,%1;"
++ "rep; nop;"
++ "je 2b;"
++ : :
++ "i" (_TIF_NEED_RESCHED),
++ "m" (current_thread_info()->flags));
++}
++
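++/*
++ * Xen idle routine: drop TS_POLLING so a remote CPU wanting to reschedule
++ * us sends an interrupt rather than relying on the polled flag, then halt
++ * until the next event unless work became pending in the meantime.
++ */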
++static void xen_idle(void)
++{
++ local_irq_disable();
++
++ if (need_resched())
++ local_irq_enable();
++ else {
++ current_thread_info()->status &= ~TS_POLLING;
++ smp_mb__after_clear_bit();
++ safe_halt();
++ current_thread_info()->status |= TS_POLLING;
++ }
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static inline void play_dead(void)
++{
++ idle_task_exit();
++ local_irq_disable();
++ cpu_clear(smp_processor_id(), cpu_initialized);
++ preempt_enable_no_resched();
++ HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
++ cpu_bringup();
++}
++#else
++static inline void play_dead(void)
++{
++ BUG();
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++/*
++ * The idle thread. There's no useful work to be
++ * done, so just try to conserve power and have a
++ * low exit latency (ie sit in a loop waiting for
++ * somebody to say that they'd like to reschedule)
++ */
++void cpu_idle (void)
++{
++ current_thread_info()->status |= TS_POLLING;
++ /* endless idle loop with no priority at all */
++ while (1) {
++ while (!need_resched()) {
++ void (*idle)(void);
++
++ if (__get_cpu_var(cpu_idle_state))
++ __get_cpu_var(cpu_idle_state) = 0;
++ rmb();
++ idle = xen_idle; /* no alternatives */
++ if (cpu_is_offline(smp_processor_id()))
++ play_dead();
++ enter_idle();
++ idle();
++ __exit_idle();
++ }
++
++ preempt_enable_no_resched();
++ schedule();
++ preempt_disable();
++ }
++}
++
++void cpu_idle_wait(void)
++{
++ unsigned int cpu, this_cpu = get_cpu();
++ cpumask_t map;
++
++ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
++ put_cpu();
++
++ cpus_clear(map);
++ for_each_online_cpu(cpu) {
++ per_cpu(cpu_idle_state, cpu) = 1;
++ cpu_set(cpu, map);
++ }
++
++ __get_cpu_var(cpu_idle_state) = 0;
++
++ wmb();
++ do {
++ ssleep(1);
++ for_each_online_cpu(cpu) {
++ if (cpu_isset(cpu, map) &&
++ !per_cpu(cpu_idle_state, cpu))
++ cpu_clear(cpu, map);
++ }
++ cpus_and(map, map, cpu_online_map);
++ } while (!cpus_empty(map));
++}
++EXPORT_SYMBOL_GPL(cpu_idle_wait);
++
++void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
++{
++}
++
++static int __init idle_setup (char *str)
++{
++ if (!strncmp(str, "poll", 4)) {
++ printk("using polling idle threads.\n");
++ pm_idle = poll_idle;
++ }
++
++ boot_option_idle_override = 1;
++ return 1;
++}
++
++__setup("idle=", idle_setup);
++
++/* Prints also some state that isn't saved in the pt_regs */
++void __show_regs(struct pt_regs * regs)
++{
++ unsigned long fs, gs, shadowgs;
++ unsigned int fsindex,gsindex;
++ unsigned int ds,cs,es;
++
++ printk("\n");
++ print_modules();
++ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
++ current->pid, current->comm, print_tainted(),
++ system_utsname.release,
++ (int)strcspn(system_utsname.version, " "),
++ system_utsname.version);
++ printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
++ printk_address(regs->rip);
++ printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
++ regs->eflags);
++ printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
++ regs->rax, regs->rbx, regs->rcx);
++ printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
++ regs->rdx, regs->rsi, regs->rdi);
++ printk("RBP: %016lx R08: %016lx R09: %016lx\n",
++ regs->rbp, regs->r8, regs->r9);
++ printk("R10: %016lx R11: %016lx R12: %016lx\n",
++ regs->r10, regs->r11, regs->r12);
++ printk("R13: %016lx R14: %016lx R15: %016lx\n",
++ regs->r13, regs->r14, regs->r15);
++
++ asm("mov %%ds,%0" : "=r" (ds));
++ asm("mov %%cs,%0" : "=r" (cs));
++ asm("mov %%es,%0" : "=r" (es));
++ asm("mov %%fs,%0" : "=r" (fsindex));
++ asm("mov %%gs,%0" : "=r" (gsindex));
++
++ rdmsrl(MSR_FS_BASE, fs);
++ rdmsrl(MSR_GS_BASE, gs);
++ rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
++
++ printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
++ fs,fsindex,gs,gsindex,shadowgs);
++ printk("CS: %04x DS: %04x ES: %04x\n", cs, ds, es);
++
++}
++
++void show_regs(struct pt_regs *regs)
++{
++ printk("CPU %d:", smp_processor_id());
++ __show_regs(regs);
++ show_trace(NULL, regs, (void *)(regs + 1));
++}
++
++/*
++ * Free current thread data structures etc..
++ */
++void exit_thread(void)
++{
++ struct task_struct *me = current;
++ struct thread_struct *t = &me->thread;
++
++ if (me->thread.io_bitmap_ptr) {
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
++#endif
++#ifdef CONFIG_XEN
++ struct physdev_set_iobitmap iobmp_op;
++ memset(&iobmp_op, 0, sizeof(iobmp_op));
++#endif
++
++ kfree(t->io_bitmap_ptr);
++ t->io_bitmap_ptr = NULL;
++ /*
++ * Careful, clear this in the TSS too:
++ */
++#ifndef CONFIG_X86_NO_TSS
++ memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
++ put_cpu();
++#endif
++#ifdef CONFIG_XEN
++ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobmp_op);
++#endif
++ t->io_bitmap_max = 0;
++ }
++}
++
++void load_gs_index(unsigned gs)
++{
++ HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs);
++}
++
++void flush_thread(void)
++{
++ struct task_struct *tsk = current;
++ struct thread_info *t = current_thread_info();
++
++ if (t->flags & _TIF_ABI_PENDING) {
++ t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
++ if (t->flags & _TIF_IA32)
++ current_thread_info()->status |= TS_COMPAT;
++ }
++
++ tsk->thread.debugreg0 = 0;
++ tsk->thread.debugreg1 = 0;
++ tsk->thread.debugreg2 = 0;
++ tsk->thread.debugreg3 = 0;
++ tsk->thread.debugreg6 = 0;
++ tsk->thread.debugreg7 = 0;
++ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
++ /*
++ * Forget coprocessor state..
++ */
++ clear_fpu(tsk);
++ clear_used_math();
++}
++
++void release_thread(struct task_struct *dead_task)
++{
++ if (dead_task->mm) {
++ if (dead_task->mm->context.size) {
++ printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
++ dead_task->comm,
++ dead_task->mm->context.ldt,
++ dead_task->mm->context.size);
++ BUG();
++ }
++ }
++}
++
++static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
++{
++ struct user_desc ud = {
++ .base_addr = addr,
++ .limit = 0xfffff,
++ .seg_32bit = 1,
++ .limit_in_pages = 1,
++ .useable = 1,
++ };
++ struct n_desc_struct *desc = (void *)t->thread.tls_array;
++ desc += tls;
++ desc->a = LDT_entry_a(&ud);
++ desc->b = LDT_entry_b(&ud);
++}
++
++static inline u32 read_32bit_tls(struct task_struct *t, int tls)
++{
++ struct desc_struct *desc = (void *)t->thread.tls_array;
++ desc += tls;
++ return desc->base0 |
++ (((u32)desc->base1) << 16) |
++ (((u32)desc->base2) << 24);
++}
++
++/*
++ * This gets called before we allocate a new thread and copy
++ * the current task into it.
++ */
++void prepare_to_copy(struct task_struct *tsk)
++{
++ unlazy_fpu(tsk);
++}
++
++int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
++ unsigned long unused,
++ struct task_struct * p, struct pt_regs * regs)
++{
++ int err;
++ struct pt_regs * childregs;
++ struct task_struct *me = current;
++
++ childregs = ((struct pt_regs *)
++ (THREAD_SIZE + task_stack_page(p))) - 1;
++ *childregs = *regs;
++
++ childregs->rax = 0;
++ childregs->rsp = rsp;
++ if (rsp == ~0UL)
++ childregs->rsp = (unsigned long)childregs;
++
++ p->thread.rsp = (unsigned long) childregs;
++ p->thread.rsp0 = (unsigned long) (childregs+1);
++ p->thread.userrsp = me->thread.userrsp;
++
++ set_tsk_thread_flag(p, TIF_FORK);
++
++ p->thread.fs = me->thread.fs;
++ p->thread.gs = me->thread.gs;
++
++ asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
++ asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
++ asm("mov %%es,%0" : "=m" (p->thread.es));
++ asm("mov %%ds,%0" : "=m" (p->thread.ds));
++
++ if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
++ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++ if (!p->thread.io_bitmap_ptr) {
++ p->thread.io_bitmap_max = 0;
++ return -ENOMEM;
++ }
++ memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
++ IO_BITMAP_BYTES);
++ }
++
++ /*
++ * Set a new TLS for the child thread?
++ */
++ if (clone_flags & CLONE_SETTLS) {
++#ifdef CONFIG_IA32_EMULATION
++ if (test_thread_flag(TIF_IA32))
++ err = ia32_child_tls(p, childregs);
++ else
++#endif
++ err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
++ if (err)
++ goto out;
++ }
++ p->thread.iopl = current->thread.iopl;
++
++ err = 0;
++out:
++ if (err && p->thread.io_bitmap_ptr) {
++ kfree(p->thread.io_bitmap_ptr);
++ p->thread.io_bitmap_max = 0;
++ }
++ return err;
++}
++
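++/*
++ * Save the FPU state with a 64-bit fxsave, clear pending x87 exceptions,
++ * and mark the FPU as no longer live for this task.
++ */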
++static inline void __save_init_fpu( struct task_struct *tsk )
++{
++ asm volatile( "rex64 ; fxsave %0 ; fnclex"
++ : "=m" (tsk->thread.i387.fxsave));
++ tsk->thread_info->status &= ~TS_USEDFPU;
++}
++
++/*
++ * switch_to(x,y) should switch tasks from x to y.
++ *
++ * This could still be optimized:
++ * - fold all the options into a flag word and test it with a single test.
++ * - could test fs/gs bitsliced
++ *
++ * Kprobes not supported here. Set the probe on schedule instead.
++ */
++__kprobes struct task_struct *
++__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ struct thread_struct *prev = &prev_p->thread,
++ *next = &next_p->thread;
++ int cpu = smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct *tss = &per_cpu(init_tss, cpu);
++#endif
++ struct physdev_set_iopl iopl_op;
++ struct physdev_set_iobitmap iobmp_op;
++ multicall_entry_t _mcl[8], *mcl = _mcl;
++
++ /*
++ * This is basically '__unlazy_fpu', except that we queue a
++ * multicall to indicate FPU task switch, rather than
++ * synchronously trapping to Xen.
++ * The AMD workaround requires it to be after DS reload, or
++ * after DS has been cleared, which we do in __prepare_arch_switch.
++ */
++ if (prev_p->thread_info->status & TS_USEDFPU) {
++ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
++ mcl->op = __HYPERVISOR_fpu_taskswitch;
++ mcl->args[0] = 1;
++ mcl++;
++ }
++
++ /*
++ * Reload esp0, LDT and the page table pointer:
++ */
++ mcl->op = __HYPERVISOR_stack_switch;
++ mcl->args[0] = __KERNEL_DS;
++ mcl->args[1] = next->rsp0;
++ mcl++;
++
++ /*
++ * Load the per-thread Thread-Local Storage descriptor.
++ * This is load_TLS(next, cpu) with multicalls.
++ */
++#define C(i) do { \
++ if (unlikely(next->tls_array[i] != prev->tls_array[i])) { \
++ mcl->op = __HYPERVISOR_update_descriptor; \
++ mcl->args[0] = virt_to_machine( \
++ &cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]); \
++ mcl->args[1] = next->tls_array[i]; \
++ mcl++; \
++ } \
++} while (0)
++ C(0); C(1); C(2);
++#undef C
++
++ if (unlikely(prev->iopl != next->iopl)) {
++ iopl_op.iopl = (next->iopl == 0) ? 1 : next->iopl;
++ mcl->op = __HYPERVISOR_physdev_op;
++ mcl->args[0] = PHYSDEVOP_set_iopl;
++ mcl->args[1] = (unsigned long)&iopl_op;
++ mcl++;
++ }
++
++ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
++ set_xen_guest_handle(iobmp_op.bitmap,
++ (char *)next->io_bitmap_ptr);
++ iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
++ mcl->op = __HYPERVISOR_physdev_op;
++ mcl->args[0] = PHYSDEVOP_set_iobitmap;
++ mcl->args[1] = (unsigned long)&iobmp_op;
++ mcl++;
++ }
++
++ (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
++ /*
++ * Switch DS and ES.
++ * This won't pick up thread selector changes, but I guess that is ok.
++ */
++ if (unlikely(next->es))
++ loadsegment(es, next->es);
++
++ if (unlikely(next->ds))
++ loadsegment(ds, next->ds);
++
++ /*
++ * Switch FS and GS.
++ */
++ if (unlikely(next->fsindex))
++ loadsegment(fs, next->fsindex);
++
++ if (next->fs)
++ HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs);
++
++ if (unlikely(next->gsindex))
++ load_gs_index(next->gsindex);
++
++ if (next->gs)
++ HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs);
++
++ /*
++ * Switch the PDA context.
++ */
++ prev->userrsp = read_pda(oldrsp);
++ write_pda(oldrsp, next->userrsp);
++ write_pda(pcurrent, next_p);
++ write_pda(kernelstack,
++ task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
++
++ /*
++ * Now maybe reload the debug registers
++ */
++ if (unlikely(next->debugreg7)) {
++ set_debugreg(next->debugreg0, 0);
++ set_debugreg(next->debugreg1, 1);
++ set_debugreg(next->debugreg2, 2);
++ set_debugreg(next->debugreg3, 3);
++ /* no 4 and 5 */
++ set_debugreg(next->debugreg6, 6);
++ set_debugreg(next->debugreg7, 7);
++ }
++
++ return prev_p;
++}
++
++/*
++ * sys_execve() executes a new program.
++ */
++asmlinkage
++long sys_execve(char __user *name, char __user * __user *argv,
++ char __user * __user *envp, struct pt_regs regs)
++{
++ long error;
++ char * filename;
++
++ filename = getname(name);
++ error = PTR_ERR(filename);
++ if (IS_ERR(filename))
++ return error;
++ error = do_execve(filename, argv, envp, &regs);
++ if (error == 0) {
++ task_lock(current);
++ current->ptrace &= ~PT_DTRACE;
++ task_unlock(current);
++ }
++ putname(filename);
++ return error;
++}
++
++void set_personality_64bit(void)
++{
++ /* inherit personality from parent */
++
++ /* Make sure to be in 64bit mode */
++ clear_thread_flag(TIF_IA32);
++
++ /* TBD: overwrites user setup. Should have two bits.
++ But 64bit processes have always behaved this way,
++ so it's not too bad. The main problem is just that
++	   32-bit children are affected again. */
++ current->personality &= ~READ_IMPLIES_EXEC;
++}
++
++asmlinkage long sys_fork(struct pt_regs *regs)
++{
++ return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
++}
++
++asmlinkage long
++sys_clone(unsigned long clone_flags, unsigned long newsp,
++ void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
++{
++ if (!newsp)
++ newsp = regs->rsp;
++ return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
++}
++
++/*
++ * This is trivial, and on the face of it looks like it
++ * could equally well be done in user mode.
++ *
++ * Not so, for quite unobvious reasons - register pressure.
++ * In user mode vfork() cannot have a stack frame, and if
++ * done by calling the "clone()" system call directly, you
++ * do not have enough call-clobbered registers to hold all
++ * the information you need.
++ */
++asmlinkage long sys_vfork(struct pt_regs *regs)
++{
++ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
++ NULL, NULL);
++}
++
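++/*
++ * Walk the sleeping task's saved frame pointers and return the first
++ * return address outside the scheduler, i.e. where the task is blocked.
++ */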
++unsigned long get_wchan(struct task_struct *p)
++{
++ unsigned long stack;
++ u64 fp,rip;
++ int count = 0;
++
++ if (!p || p == current || p->state==TASK_RUNNING)
++ return 0;
++ stack = (unsigned long)task_stack_page(p);
++ if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
++ return 0;
++ fp = *(u64 *)(p->thread.rsp);
++ do {
++ if (fp < (unsigned long)stack ||
++ fp > (unsigned long)stack+THREAD_SIZE)
++ return 0;
++ rip = *(u64 *)(fp+8);
++ if (!in_sched_functions(rip))
++ return rip;
++ fp = *(u64 *)fp;
++ } while (count++ < 16);
++ return 0;
++}
++
++long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
++{
++ int ret = 0;
++ int doit = task == current;
++ int cpu;
++
++ switch (code) {
++ case ARCH_SET_GS:
++ if (addr >= TASK_SIZE_OF(task))
++ return -EPERM;
++ cpu = get_cpu();
++ /* handle small bases via the GDT because that's faster to
++ switch. */
++ if (addr <= 0xffffffff) {
++ set_32bit_tls(task, GS_TLS, addr);
++ if (doit) {
++ load_TLS(&task->thread, cpu);
++ load_gs_index(GS_TLS_SEL);
++ }
++ task->thread.gsindex = GS_TLS_SEL;
++ task->thread.gs = 0;
++ } else {
++ task->thread.gsindex = 0;
++ task->thread.gs = addr;
++ if (doit) {
++ load_gs_index(0);
++ ret = HYPERVISOR_set_segment_base(
++ SEGBASE_GS_USER, addr);
++ }
++ }
++ put_cpu();
++ break;
++ case ARCH_SET_FS:
++ /* Not strictly needed for fs, but do it for symmetry
++ with gs */
++ if (addr >= TASK_SIZE_OF(task))
++ return -EPERM;
++ cpu = get_cpu();
++ /* handle small bases via the GDT because that's faster to
++ switch. */
++ if (addr <= 0xffffffff) {
++ set_32bit_tls(task, FS_TLS, addr);
++ if (doit) {
++ load_TLS(&task->thread, cpu);
++ asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
++ }
++ task->thread.fsindex = FS_TLS_SEL;
++ task->thread.fs = 0;
++ } else {
++ task->thread.fsindex = 0;
++ task->thread.fs = addr;
++ if (doit) {
++ /* set the selector to 0 to not confuse
++ __switch_to */
++ asm volatile("movl %0,%%fs" :: "r" (0));
++ ret = HYPERVISOR_set_segment_base(SEGBASE_FS,
++ addr);
++ }
++ }
++ put_cpu();
++ break;
++ case ARCH_GET_FS: {
++ unsigned long base;
++ if (task->thread.fsindex == FS_TLS_SEL)
++ base = read_32bit_tls(task, FS_TLS);
++ else if (doit)
++ rdmsrl(MSR_FS_BASE, base);
++ else
++ base = task->thread.fs;
++ ret = put_user(base, (unsigned long __user *)addr);
++ break;
++ }
++ case ARCH_GET_GS: {
++ unsigned long base;
++ unsigned gsindex;
++ if (task->thread.gsindex == GS_TLS_SEL)
++ base = read_32bit_tls(task, GS_TLS);
++ else if (doit) {
++ asm("movl %%gs,%0" : "=r" (gsindex));
++ if (gsindex)
++ rdmsrl(MSR_KERNEL_GS_BASE, base);
++ else
++ base = task->thread.gs;
++ }
++ else
++ base = task->thread.gs;
++ ret = put_user(base, (unsigned long __user *)addr);
++ break;
++ }
++
++ default:
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++long sys_arch_prctl(int code, unsigned long addr)
++{
++ return do_arch_prctl(current, code, addr);
++}
++
++/*
++ * Capture the user space registers if the task is not running (in user space)
++ */
++int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
++{
++ struct pt_regs *pp, ptregs;
++
++ pp = task_pt_regs(tsk);
++
++ ptregs = *pp;
++ ptregs.cs &= 0xffff;
++ ptregs.ss &= 0xffff;
++
++ elf_core_copy_regs(regs, &ptregs);
++
++ boot_option_idle_override = 1;
++ return 1;
++}
++
++unsigned long arch_align_stack(unsigned long sp)
++{
++ if (randomize_va_space)
++ sp -= get_random_int() % 8192;
++ return sp & ~0xf;
++}
++
++#ifndef CONFIG_SMP
++void _restore_vcpu(void)
++{
++}
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/setup-xen.c 2007-08-27 14:02:09.000000000 -0400
+@@ -0,0 +1,1650 @@
++/*
++ * linux/arch/x86-64/kernel/setup.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Nov 2001 Dave Jones <davej@suse.de>
++ * Forked from i386 setup code.
++ */
++
++/*
++ * This file handles the architecture-dependent parts of initialization
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/stddef.h>
++#include <linux/unistd.h>
++#include <linux/ptrace.h>
++#include <linux/slab.h>
++#include <linux/user.h>
++#include <linux/a.out.h>
++#include <linux/screen_info.h>
++#include <linux/ioport.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/initrd.h>
++#include <linux/highmem.h>
++#include <linux/bootmem.h>
++#include <linux/module.h>
++#include <asm/processor.h>
++#include <linux/console.h>
++#include <linux/seq_file.h>
++#include <linux/crash_dump.h>
++#include <linux/root_dev.h>
++#include <linux/pci.h>
++#include <linux/acpi.h>
++#include <linux/kallsyms.h>
++#include <linux/edd.h>
++#include <linux/mmzone.h>
++#include <linux/kexec.h>
++#include <linux/cpufreq.h>
++#include <linux/dmi.h>
++#include <linux/dma-mapping.h>
++#include <linux/ctype.h>
++
++#include <asm/mtrr.h>
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/msr.h>
++#include <asm/desc.h>
++#include <video/edid.h>
++#include <asm/e820.h>
++#include <asm/dma.h>
++#include <asm/mpspec.h>
++#include <asm/mmu_context.h>
++#include <asm/bootsetup.h>
++#include <asm/proto.h>
++#include <asm/setup.h>
++#include <asm/mach_apic.h>
++#include <asm/numa.h>
++#include <asm/sections.h>
++#include <asm/dmi.h>
++#ifdef CONFIG_XEN
++#include <linux/percpu.h>
++#include <xen/interface/physdev.h>
++#include "setup_arch_pre.h"
++#include <asm/hypervisor.h>
++#include <xen/interface/nmi.h>
++#include <xen/features.h>
++#include <xen/xencons.h>
++#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
++#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
++#include <asm/mach-xen/setup_arch_post.h>
++#include <xen/interface/memory.h>
++
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#endif
++
++extern unsigned long start_pfn;
++extern struct edid_info edid_info;
++
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
++
++extern char hypercall_page[PAGE_SIZE];
++EXPORT_SYMBOL(hypercall_page);
++
++static int xen_panic_event(struct notifier_block *, unsigned long, void *);
++static struct notifier_block xen_panic_block = {
++ xen_panic_event, NULL, 0 /* try to go last */
++};
++
++unsigned long *phys_to_machine_mapping;
++unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
++
++EXPORT_SYMBOL(phys_to_machine_mapping);
++
++DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
++DEFINE_PER_CPU(int, nr_multicall_ents);
++
++/* Raw start-of-day parameters from the hypervisor. */
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
++#endif
++
++/*
++ * Machine setup..
++ */
++
++struct cpuinfo_x86 boot_cpu_data __read_mostly;
++EXPORT_SYMBOL(boot_cpu_data);
++
++unsigned long mmu_cr4_features;
++
++int acpi_disabled;
++EXPORT_SYMBOL(acpi_disabled);
++#ifdef CONFIG_ACPI
++extern int __initdata acpi_ht;
++extern acpi_interrupt_flags acpi_sci_flags;
++int __initdata acpi_force = 0;
++#endif
++
++int acpi_numa __initdata;
++
++/* Boot loader ID as an integer, for the benefit of proc_dointvec */
++int bootloader_type;
++
++unsigned long saved_video_mode;
++
++/*
++ * Early DMI memory
++ */
++int dmi_alloc_index;
++char dmi_alloc_data[DMI_MAX_DATA];
++
++/*
++ * Setup options
++ */
++struct screen_info screen_info;
++EXPORT_SYMBOL(screen_info);
++struct sys_desc_table_struct {
++ unsigned short length;
++ unsigned char table[0];
++};
++
++struct edid_info edid_info;
++EXPORT_SYMBOL_GPL(edid_info);
++struct e820map e820;
++#ifdef CONFIG_XEN
++struct e820map machine_e820;
++#endif
++
++extern int root_mountflags;
++
++char command_line[COMMAND_LINE_SIZE];
++
++struct resource standard_io_resources[] = {
++ { .name = "dma1", .start = 0x00, .end = 0x1f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "pic1", .start = 0x20, .end = 0x21,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "timer0", .start = 0x40, .end = 0x43,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "timer1", .start = 0x50, .end = 0x53,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "keyboard", .start = 0x60, .end = 0x6f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "dma page reg", .start = 0x80, .end = 0x8f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "pic2", .start = 0xa0, .end = 0xa1,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "dma2", .start = 0xc0, .end = 0xdf,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "fpu", .start = 0xf0, .end = 0xff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO }
++};
++
++#define STANDARD_IO_RESOURCES \
++ (sizeof standard_io_resources / sizeof standard_io_resources[0])
++
++#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
++
++struct resource data_resource = {
++ .name = "Kernel data",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_RAM,
++};
++struct resource code_resource = {
++ .name = "Kernel code",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_RAM,
++};
++
++#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
++
++static struct resource system_rom_resource = {
++ .name = "System ROM",
++ .start = 0xf0000,
++ .end = 0xfffff,
++ .flags = IORESOURCE_ROM,
++};
++
++static struct resource extension_rom_resource = {
++ .name = "Extension ROM",
++ .start = 0xe0000,
++ .end = 0xeffff,
++ .flags = IORESOURCE_ROM,
++};
++
++static struct resource adapter_rom_resources[] = {
++ { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
++ .flags = IORESOURCE_ROM },
++ { .name = "Adapter ROM", .start = 0, .end = 0,
++ .flags = IORESOURCE_ROM },
++ { .name = "Adapter ROM", .start = 0, .end = 0,
++ .flags = IORESOURCE_ROM },
++ { .name = "Adapter ROM", .start = 0, .end = 0,
++ .flags = IORESOURCE_ROM },
++ { .name = "Adapter ROM", .start = 0, .end = 0,
++ .flags = IORESOURCE_ROM },
++ { .name = "Adapter ROM", .start = 0, .end = 0,
++ .flags = IORESOURCE_ROM }
++};
++
++#define ADAPTER_ROM_RESOURCES \
++ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
++
++static struct resource video_rom_resource = {
++ .name = "Video ROM",
++ .start = 0xc0000,
++ .end = 0xc7fff,
++ .flags = IORESOURCE_ROM,
++};
++
++static struct resource video_ram_resource = {
++ .name = "Video RAM area",
++ .start = 0xa0000,
++ .end = 0xbffff,
++ .flags = IORESOURCE_RAM,
++};
++
++#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
++
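++/* A ROM image is valid when all of its bytes sum to zero modulo 256. */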
++static int __init romchecksum(unsigned char *rom, unsigned long length)
++{
++ unsigned char *p, sum = 0;
++
++ for (p = rom; p < rom + length; p++)
++ sum += *p;
++ return sum == 0;
++}
++
++static void __init probe_roms(void)
++{
++ unsigned long start, length, upper;
++ unsigned char *rom;
++ int i;
++
++#ifdef CONFIG_XEN
++ /* Nothing to do if not running in dom0. */
++ if (!is_initial_xendomain())
++ return;
++#endif
++
++ /* video rom */
++ upper = adapter_rom_resources[0].start;
++ for (start = video_rom_resource.start; start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ video_rom_resource.start = start;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = rom[2] * 512;
++
++ /* if checksum okay, trust length byte */
++ if (length && romchecksum(rom, length))
++ video_rom_resource.end = start + length - 1;
++
++ request_resource(&iomem_resource, &video_rom_resource);
++ break;
++ }
++
++ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
++ if (start < upper)
++ start = upper;
++
++ /* system rom */
++ request_resource(&iomem_resource, &system_rom_resource);
++ upper = system_rom_resource.start;
++
++ /* check for extension rom (ignore length byte!) */
++ rom = isa_bus_to_virt(extension_rom_resource.start);
++ if (romsignature(rom)) {
++ length = extension_rom_resource.end - extension_rom_resource.start + 1;
++ if (romchecksum(rom, length)) {
++ request_resource(&iomem_resource, &extension_rom_resource);
++ upper = extension_rom_resource.start;
++ }
++ }
++
++ /* check for adapter roms on 2k boundaries */
++ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = rom[2] * 512;
++
++ /* but accept any length that fits if checksum okay */
++ if (!length || start + length > upper || !romchecksum(rom, length))
++ continue;
++
++ adapter_rom_resources[i].start = start;
++ adapter_rom_resources[i].end = start + length - 1;
++ request_resource(&iomem_resource, &adapter_rom_resources[i]);
++
++ start = adapter_rom_resources[i++].end & ~2047UL;
++ }
++}
++
++/* Check for full argument with no trailing characters */
++static int fullarg(char *p, char *arg)
++{
++ int l = strlen(arg);
++ return !memcmp(p, arg, l) && (p[l] == 0 || isspace(p[l]));
++}
++
++static __init void parse_cmdline_early (char ** cmdline_p)
++{
++ char c = ' ', *to = command_line, *from = COMMAND_LINE;
++ int len = 0;
++ int userdef = 0;
++
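++	/*
++	 * Scan the raw boot command line: options are only recognised at the
++	 * start of a word (the previous character was a space), and the line
++	 * is copied into command_line as it is parsed.
++	 */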
++ for (;;) {
++ if (c != ' ')
++ goto next_char;
++
++#ifdef CONFIG_SMP
++ /*
++ * If the BIOS enumerates physical processors before logical,
++ * maxcpus=N at enumeration-time can be used to disable HT.
++ */
++ else if (!memcmp(from, "maxcpus=", 8)) {
++ extern unsigned int maxcpus;
++
++ maxcpus = simple_strtoul(from + 8, NULL, 0);
++ }
++#endif
++#ifdef CONFIG_ACPI
++ /* "acpi=off" disables both ACPI table parsing and interpreter init */
++ if (fullarg(from,"acpi=off"))
++ disable_acpi();
++
++ if (fullarg(from, "acpi=force")) {
++ /* add later when we do DMI horrors: */
++ acpi_force = 1;
++ acpi_disabled = 0;
++ }
++
++ /* acpi=ht just means: do ACPI MADT parsing
++ at bootup, but don't enable the full ACPI interpreter */
++ if (fullarg(from, "acpi=ht")) {
++ if (!acpi_force)
++ disable_acpi();
++ acpi_ht = 1;
++ }
++ else if (fullarg(from, "pci=noacpi"))
++ acpi_disable_pci();
++ else if (fullarg(from, "acpi=noirq"))
++ acpi_noirq_set();
++
++ else if (fullarg(from, "acpi_sci=edge"))
++ acpi_sci_flags.trigger = 1;
++ else if (fullarg(from, "acpi_sci=level"))
++ acpi_sci_flags.trigger = 3;
++ else if (fullarg(from, "acpi_sci=high"))
++ acpi_sci_flags.polarity = 1;
++ else if (fullarg(from, "acpi_sci=low"))
++ acpi_sci_flags.polarity = 3;
++
++ /* acpi=strict disables out-of-spec workarounds */
++ else if (fullarg(from, "acpi=strict")) {
++ acpi_strict = 1;
++ }
++#ifdef CONFIG_X86_IO_APIC
++ else if (fullarg(from, "acpi_skip_timer_override"))
++ acpi_skip_timer_override = 1;
++#endif
++#endif
++
++#ifndef CONFIG_XEN
++ if (fullarg(from, "nolapic") || fullarg(from, "disableapic")) {
++ clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
++ disable_apic = 1;
++ }
++
++ if (fullarg(from, "noapic"))
++ skip_ioapic_setup = 1;
++
++ if (fullarg(from,"apic")) {
++ skip_ioapic_setup = 0;
++ ioapic_force = 1;
++ }
++#endif
++
++ if (!memcmp(from, "mem=", 4))
++ parse_memopt(from+4, &from);
++
++ if (!memcmp(from, "memmap=", 7)) {
++			/* exactmap option is for user-defined memory */
++ if (!memcmp(from+7, "exactmap", 8)) {
++#ifdef CONFIG_CRASH_DUMP
++ /* If we are doing a crash dump, we
++ * still need to know the real mem
++ * size before original memory map is
++ * reset.
++ */
++ saved_max_pfn = e820_end_of_ram();
++#endif
++ from += 8+7;
++ end_pfn_map = 0;
++ e820.nr_map = 0;
++ userdef = 1;
++ }
++ else {
++ parse_memmapopt(from+7, &from);
++ userdef = 1;
++ }
++ }
++
++#ifdef CONFIG_NUMA
++ if (!memcmp(from, "numa=", 5))
++ numa_setup(from+5);
++#endif
++
++ if (!memcmp(from,"iommu=",6)) {
++ iommu_setup(from+6);
++ }
++
++ if (fullarg(from,"oops=panic"))
++ panic_on_oops = 1;
++
++ if (!memcmp(from, "noexec=", 7))
++ nonx_setup(from + 7);
++
++#ifdef CONFIG_KEXEC
++ /* crashkernel=size@addr specifies the location to reserve for
++ * a crash kernel. By reserving this memory we guarantee
++		 * that Linux never sets it up as a DMA target.
++ * Useful for holding code to do something appropriate
++ * after a kernel panic.
++ */
++ else if (!memcmp(from, "crashkernel=", 12)) {
++#ifndef CONFIG_XEN
++ unsigned long size, base;
++ size = memparse(from+12, &from);
++ if (*from == '@') {
++ base = memparse(from+1, &from);
++ /* FIXME: Do I want a sanity check
++ * to validate the memory range?
++ */
++ crashk_res.start = base;
++ crashk_res.end = base + size - 1;
++ }
++#else
++ printk("Ignoring crashkernel command line, "
++ "parameter will be supplied by xen\n");
++#endif
++ }
++#endif
++
++#ifdef CONFIG_PROC_VMCORE
++ /* elfcorehdr= specifies the location of elf core header
++ * stored by the crashed kernel. This option will be passed
++ * by kexec loader to the capture kernel.
++ */
++ else if(!memcmp(from, "elfcorehdr=", 11))
++ elfcorehdr_addr = memparse(from+11, &from);
++#endif
++
++#if defined(CONFIG_HOTPLUG_CPU) && !defined(CONFIG_XEN)
++ else if (!memcmp(from, "additional_cpus=", 16))
++ setup_additional_cpus(from+16);
++#endif
++
++ next_char:
++ c = *(from++);
++ if (!c)
++ break;
++ if (COMMAND_LINE_SIZE <= ++len)
++ break;
++ *(to++) = c;
++ }
++ if (userdef) {
++ printk(KERN_INFO "user-defined physical RAM map:\n");
++ e820_print_map("user");
++ }
++ *to = '\0';
++ *cmdline_p = command_line;
++}
++
++#ifndef CONFIG_NUMA
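++/*
++ * Flat (non-NUMA) bootmem setup: find room for the bootmem bitmap in the
++ * e820 map, register the usable RAM (only the pages actually populated by
++ * Xen when running paravirtualised), then reserve the bitmap itself.
++ */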
++static void __init
++contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
++{
++ unsigned long bootmap_size, bootmap;
++
++ bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
++ bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
++ if (bootmap == -1L)
++ panic("Cannot find bootmem map of size %ld\n",bootmap_size);
++ bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
++#ifdef CONFIG_XEN
++ e820_bootmem_free(NODE_DATA(0), 0, xen_start_info->nr_pages<<PAGE_SHIFT);
++#else
++ e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
++#endif
++ reserve_bootmem(bootmap, bootmap_size);
++}
++#endif
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++struct edd edd;
++#ifdef CONFIG_EDD_MODULE
++EXPORT_SYMBOL(edd);
++#endif
++/**
++ * copy_edd() - Copy the BIOS EDD information
++ * from boot_params into a safe place.
++ *
++ */
++static inline void copy_edd(void)
++{
++ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
++ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
++ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
++ edd.edd_info_nr = EDD_NR;
++}
++#else
++static inline void copy_edd(void)
++{
++}
++#endif
++
++#ifndef CONFIG_XEN
++#define EBDA_ADDR_POINTER 0x40E
++
++unsigned __initdata ebda_addr;
++unsigned __initdata ebda_size;
++
++static void discover_ebda(void)
++{
++ /*
++ * there is a real-mode segmented pointer pointing to the
++ * 4K EBDA area at 0x40E
++ */
++ ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
++ ebda_addr <<= 4;
++
++ ebda_size = *(unsigned short *)(unsigned long)ebda_addr;
++
++ /* Round EBDA up to pages */
++ if (ebda_size == 0)
++ ebda_size = 1;
++ ebda_size <<= 10;
++ ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
++ if (ebda_size > 64*1024)
++ ebda_size = 64*1024;
++}
++#else
++#define discover_ebda() ((void)0)
++#endif
++
++void __init setup_arch(char **cmdline_p)
++{
++#ifdef CONFIG_XEN
++ /* Register a call for panic conditions. */
++ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
++
++ ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
++ screen_info = SCREEN_INFO;
++
++ if (is_initial_xendomain()) {
++ /* This is drawn from a dump from vgacon:startup in
++ * standard Linux. */
++ screen_info.orig_video_mode = 3;
++ screen_info.orig_video_isVGA = 1;
++ screen_info.orig_video_lines = 25;
++ screen_info.orig_video_cols = 80;
++ screen_info.orig_video_ega_bx = 3;
++ screen_info.orig_video_points = 16;
++ screen_info.orig_y = screen_info.orig_video_lines - 1;
++ if (xen_start_info->console.dom0.info_size >=
++ sizeof(struct dom0_vga_console_info)) {
++ const struct dom0_vga_console_info *info =
++ (struct dom0_vga_console_info *)(
++ (char *)xen_start_info +
++ xen_start_info->console.dom0.info_off);
++ dom0_init_screen_info(info);
++ }
++ xen_start_info->console.domU.mfn = 0;
++ xen_start_info->console.domU.evtchn = 0;
++ } else
++ screen_info.orig_video_isVGA = 0;
++
++ edid_info = EDID_INFO;
++ saved_video_mode = SAVED_VIDEO_MODE;
++ bootloader_type = LOADER_TYPE;
++
++#ifdef CONFIG_BLK_DEV_RAM
++ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
++ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
++ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
++
++
++#endif
++
++ HYPERVISOR_vm_assist(VMASST_CMD_enable,
++ VMASST_TYPE_writable_pagetables);
++
++ ARCH_SETUP
++#else
++ ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
++ screen_info = SCREEN_INFO;
++ edid_info = EDID_INFO;
++ saved_video_mode = SAVED_VIDEO_MODE;
++ bootloader_type = LOADER_TYPE;
++
++#ifdef CONFIG_BLK_DEV_RAM
++ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
++ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
++ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
++#endif
++#endif /* !CONFIG_XEN */
++ setup_memory_region();
++ copy_edd();
++
++ if (!MOUNT_ROOT_RDONLY)
++ root_mountflags &= ~MS_RDONLY;
++ init_mm.start_code = (unsigned long) &_text;
++ init_mm.end_code = (unsigned long) &_etext;
++ init_mm.end_data = (unsigned long) &_edata;
++ init_mm.brk = (unsigned long) &_end;
++
++ code_resource.start = virt_to_phys(&_text);
++ code_resource.end = virt_to_phys(&_etext)-1;
++ data_resource.start = virt_to_phys(&_etext);
++ data_resource.end = virt_to_phys(&_edata)-1;
++
++ parse_cmdline_early(cmdline_p);
++
++ early_identify_cpu(&boot_cpu_data);
++
++ /*
++ * partially used pages are not usable - thus
++ * we are rounding upwards:
++ */
++ end_pfn = e820_end_of_ram();
++ num_physpages = end_pfn; /* for pfn_valid */
++
++ check_efer();
++
++ discover_ebda();
++
++ init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
++
++ if (is_initial_xendomain())
++ dmi_scan_machine();
++
++#ifdef CONFIG_ACPI_NUMA
++ /*
++ * Parse SRAT to discover nodes.
++ */
++ acpi_numa_init();
++#endif
++
++#ifdef CONFIG_NUMA
++ numa_initmem_init(0, end_pfn);
++#else
++ contig_initmem_init(0, end_pfn);
++#endif
++
++#ifdef CONFIG_XEN
++ /*
++ * Reserve kernel, physmap, start info, initial page tables, and
++ * direct mapping.
++ */
++ reserve_bootmem_generic(__pa_symbol(&_text),
++ (table_end << PAGE_SHIFT) - __pa_symbol(&_text));
++#else
++ /* Reserve direct mapping */
++ reserve_bootmem_generic(table_start << PAGE_SHIFT,
++ (table_end - table_start) << PAGE_SHIFT);
++
++ /* reserve kernel */
++ reserve_bootmem_generic(__pa_symbol(&_text),
++ __pa_symbol(&_end) - __pa_symbol(&_text));
++
++ /*
++ * reserve physical page 0 - it's a special BIOS page on many boxes,
++ * enabling clean reboots, SMP operation, laptop functions.
++ */
++ reserve_bootmem_generic(0, PAGE_SIZE);
++
++ /* reserve ebda region */
++ if (ebda_addr)
++ reserve_bootmem_generic(ebda_addr, ebda_size);
++
++#ifdef CONFIG_SMP
++ /*
++ * But first pinch a few for the stack/trampoline stuff
++ * FIXME: Don't need the extra page at 4K, but need to fix
++ * trampoline before removing it. (see the GDT stuff)
++ */
++ reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
++
++ /* Reserve SMP trampoline */
++ reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
++#endif
++#endif
++
++#ifdef CONFIG_ACPI_SLEEP
++ /*
++ * Reserve low memory region for sleep support.
++ */
++ acpi_reserve_bootmem();
++#endif
++#ifdef CONFIG_XEN
++#ifdef CONFIG_BLK_DEV_INITRD
++ if (xen_start_info->mod_start) {
++ if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
++ /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
++ initrd_start = INITRD_START + PAGE_OFFSET;
++ initrd_end = initrd_start+INITRD_SIZE;
++ initrd_below_start_ok = 1;
++ } else {
++ printk(KERN_ERR "initrd extends beyond end of memory "
++ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++ (unsigned long)(INITRD_START + INITRD_SIZE),
++ (unsigned long)(end_pfn << PAGE_SHIFT));
++ initrd_start = 0;
++ }
++ }
++#endif
++#else /* CONFIG_XEN */
++#ifdef CONFIG_BLK_DEV_INITRD
++ if (LOADER_TYPE && INITRD_START) {
++ if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
++ reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
++ initrd_start =
++ INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
++ initrd_end = initrd_start+INITRD_SIZE;
++ }
++ else {
++ printk(KERN_ERR "initrd extends beyond end of memory "
++ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++ (unsigned long)(INITRD_START + INITRD_SIZE),
++ (unsigned long)(end_pfn << PAGE_SHIFT));
++ initrd_start = 0;
++ }
++ }
++#endif
++#endif /* !CONFIG_XEN */
++#ifdef CONFIG_KEXEC
++#ifdef CONFIG_XEN
++ xen_machine_kexec_setup_resources();
++#else
++ if (crashk_res.start != crashk_res.end) {
++ reserve_bootmem_generic(crashk_res.start,
++ crashk_res.end - crashk_res.start + 1);
++ }
++#endif
++#endif
++
++ paging_init();
++#ifdef CONFIG_X86_LOCAL_APIC
++ /*
++ * Find and reserve possible boot-time SMP configuration:
++ */
++ find_smp_config();
++#endif
++#ifdef CONFIG_XEN
++ {
++ int i, j, k, fpp;
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Make sure we have a large enough P->M table. */
++ phys_to_machine_mapping = alloc_bootmem_pages(
++ end_pfn * sizeof(unsigned long));
++ memset(phys_to_machine_mapping, ~0,
++ end_pfn * sizeof(unsigned long));
++ memcpy(phys_to_machine_mapping,
++ (unsigned long *)xen_start_info->mfn_list,
++ xen_start_info->nr_pages * sizeof(unsigned long));
++ free_bootmem(
++ __pa(xen_start_info->mfn_list),
++ PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
++ sizeof(unsigned long))));
++
++ /*
++ * Initialise the list of the frames that specify the
++ * list of frames that make up the p2m table. Used by
++ * save/restore.
++ */
++ pfn_to_mfn_frame_list_list = alloc_bootmem_pages(PAGE_SIZE);
++
++ fpp = PAGE_SIZE/sizeof(unsigned long);
++ for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {
++ if ((j % fpp) == 0) {
++ k++;
++ BUG_ON(k>=fpp);
++ pfn_to_mfn_frame_list[k] =
++ alloc_bootmem_pages(PAGE_SIZE);
++ pfn_to_mfn_frame_list_list[k] =
++ virt_to_mfn(pfn_to_mfn_frame_list[k]);
++ j=0;
++ }
++ pfn_to_mfn_frame_list[k][j] =
++ virt_to_mfn(&phys_to_machine_mapping[i]);
++ }
++ HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
++ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++ virt_to_mfn(pfn_to_mfn_frame_list_list);
++ }
++
++ }
++
++ if (!is_initial_xendomain()) {
++ acpi_disabled = 1;
++#ifdef CONFIG_ACPI
++ acpi_ht = 0;
++#endif
++ }
++#endif
++
++#ifndef CONFIG_XEN
++ check_ioapic();
++#endif
++
++ zap_low_mappings(0);
++
++ /*
++	 * Set this early so we don't allocate cpu0
++	 * if the MADT list doesn't list the BSP first.
++	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
++ */
++ cpu_set(0, cpu_present_map);
++#ifdef CONFIG_ACPI
++ /*
++ * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
++ * Call this early for SRAT node setup.
++ */
++ acpi_boot_table_init();
++
++ /*
++ * Read APIC and some other early information from ACPI tables.
++ */
++ acpi_boot_init();
++#endif
++
++ init_cpu_to_node();
++
++#ifdef CONFIG_X86_LOCAL_APIC
++ /*
++ * get boot-time SMP configuration:
++ */
++ if (smp_found_config)
++ get_smp_config();
++#ifndef CONFIG_XEN
++ init_apic_mappings();
++#endif
++#endif
++#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
++ prefill_possible_map();
++#endif
++
++ /*
++ * Request address space for all standard RAM and ROM resources
++ * and also for regions reported as reserved by the e820.
++ */
++ probe_roms();
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain()) {
++ struct xen_memory_map memmap;
++
++ memmap.nr_entries = E820MAX;
++ set_xen_guest_handle(memmap.buffer, machine_e820.map);
++
++ if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
++ BUG();
++ machine_e820.nr_map = memmap.nr_entries;
++
++ e820_reserve_resources(machine_e820.map, machine_e820.nr_map);
++ }
++#else
++ e820_reserve_resources(e820.map, e820.nr_map);
++#endif
++
++ request_resource(&iomem_resource, &video_ram_resource);
++
++ {
++ unsigned i;
++ /* request I/O space for devices used on all i[345]86 PCs */
++ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
++ request_resource(&ioport_resource, &standard_io_resources[i]);
++ }
++
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain())
++ e820_setup_gap(machine_e820.map, machine_e820.nr_map);
++#else
++ e820_setup_gap(e820.map, e820.nr_map);
++#endif
++
++#ifdef CONFIG_XEN
++ {
++ struct physdev_set_iopl set_iopl;
++
++ set_iopl.iopl = 1;
++ HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
++
++ if (is_initial_xendomain()) {
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++ conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++#endif
++ } else {
++#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++ }
++ }
++ xencons_early_setup();
++#else /* CONFIG_XEN */
++
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++ conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++#endif
++
++#endif /* !CONFIG_XEN */
++}
++
++#ifdef CONFIG_XEN
++static int
++xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
++{
++ HYPERVISOR_shutdown(SHUTDOWN_crash);
++ /* we're never actually going to get here... */
++ return NOTIFY_DONE;
++}
++#endif /* CONFIG_XEN */
++
++
++static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
++{
++ unsigned int *v;
++
++ if (c->extended_cpuid_level < 0x80000004)
++ return 0;
++
++ v = (unsigned int *) c->x86_model_id;
++ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
++ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
++ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
++ c->x86_model_id[48] = 0;
++ return 1;
++}
++
++
++static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
++{
++ unsigned int n, dummy, eax, ebx, ecx, edx;
++
++ n = c->extended_cpuid_level;
++
++ if (n >= 0x80000005) {
++ cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
++ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
++ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
++ c->x86_cache_size=(ecx>>24)+(edx>>24);
++ /* On K8 L1 TLB is inclusive, so don't count it */
++ c->x86_tlbsize = 0;
++ }
++
++ if (n >= 0x80000006) {
++ cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
++ ecx = cpuid_ecx(0x80000006);
++ c->x86_cache_size = ecx >> 16;
++ c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
++
++ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
++ c->x86_cache_size, ecx & 0xFF);
++ }
++
++ if (n >= 0x80000007)
++ cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
++ if (n >= 0x80000008) {
++ cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
++ c->x86_virt_bits = (eax >> 8) & 0xff;
++ c->x86_phys_bits = eax & 0xff;
++ }
++}
++
++#ifdef CONFIG_NUMA
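++/* Find the closest online node to a given APIC id, scanning downwards
++   and then upwards through apicid_to_node[]. */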
++static int nearby_node(int apicid)
++{
++ int i;
++ for (i = apicid - 1; i >= 0; i--) {
++ int node = apicid_to_node[i];
++ if (node != NUMA_NO_NODE && node_online(node))
++ return node;
++ }
++ for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
++ int node = apicid_to_node[i];
++ if (node != NUMA_NO_NODE && node_online(node))
++ return node;
++ }
++ return first_node(node_online_map); /* Shouldn't happen */
++}
++#endif
++
++/*
++ * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
++ * Assumes number of cores is a power of two.
++ */
++static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++ unsigned bits;
++#ifdef CONFIG_NUMA
++ int cpu = smp_processor_id();
++ int node = 0;
++ unsigned apicid = hard_smp_processor_id();
++#endif
++ unsigned ecx = cpuid_ecx(0x80000008);
++
++ c->x86_max_cores = (ecx & 0xff) + 1;
++
++ /* CPU telling us the core id bits shift? */
++ bits = (ecx >> 12) & 0xF;
++
++ /* Otherwise recompute */
++ if (bits == 0) {
++ while ((1 << bits) < c->x86_max_cores)
++ bits++;
++ }
++
++ /* Low order bits define the core id (index of core in socket) */
++ c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
++ /* Convert the APIC ID into the socket ID */
++ c->phys_proc_id = phys_pkg_id(bits);
++
++#ifdef CONFIG_NUMA
++ node = c->phys_proc_id;
++ if (apicid_to_node[apicid] != NUMA_NO_NODE)
++ node = apicid_to_node[apicid];
++ if (!node_online(node)) {
++ /* Two possibilities here:
++ - The CPU is missing memory and no node was created.
++ In that case try picking one from a nearby CPU
++ - The APIC IDs differ from the HyperTransport node IDs
++ which the K8 northbridge parsing fills in.
++ Assume they are all increased by a constant offset,
++ but in the same order as the HT nodeids.
++ If that doesn't result in a usable node fall back to the
++ path for the previous case. */
++ int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
++ if (ht_nodeid >= 0 &&
++ apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
++ node = apicid_to_node[ht_nodeid];
++ /* Pick a nearby node */
++ if (!node_online(node))
++ node = nearby_node(apicid);
++ }
++ numa_set_node(cpu, node);
++
++ printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
++#endif
++#endif
++}
++
++static void __init init_amd(struct cpuinfo_x86 *c)
++{
++ unsigned level;
++
++#ifdef CONFIG_SMP
++ unsigned long value;
++
++ /*
++ * Disable TLB flush filter by setting HWCR.FFDIS on K8
++ * bit 6 of msr C001_0015
++ *
++ * Errata 63 for SH-B3 steppings
++ * Errata 122 for all steppings (F+ have it disabled by default)
++ */
++ if (c->x86 == 15) {
++ rdmsrl(MSR_K8_HWCR, value);
++ value |= 1 << 6;
++ wrmsrl(MSR_K8_HWCR, value);
++ }
++#endif
++
++ /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
++ 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
++ clear_bit(0*32+31, &c->x86_capability);
++
++ /* On C+ stepping K8 rep microcode works well for copy/memset */
++ level = cpuid_eax(1);
++ if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
++ set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
++
++ /* Enable workaround for FXSAVE leak */
++ if (c->x86 >= 6)
++ set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
++
++ level = get_model_name(c);
++ if (!level) {
++ switch (c->x86) {
++ case 15:
++			/* Should distinguish models here, but this is only
++			   a fallback anyway. */
++ strcpy(c->x86_model_id, "Hammer");
++ break;
++ }
++ }
++ display_cacheinfo(c);
++
++ /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
++ if (c->x86_power & (1<<8))
++ set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
++
++ /* Multi core CPU? */
++ if (c->extended_cpuid_level >= 0x80000008)
++ amd_detect_cmp(c);
++
++ /* Fix cpuid4 emulation for more */
++ num_cache_leaves = 3;
++}
++
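++/*
++ * Derive the physical package id and core id from the sibling count that
++ * CPUID leaf 1 reports when HyperThreading is present.
++ */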
++static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++ u32 eax, ebx, ecx, edx;
++ int index_msb, core_bits;
++
++ cpuid(1, &eax, &ebx, &ecx, &edx);
++
++
++ if (!cpu_has(c, X86_FEATURE_HT))
++ return;
++ if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
++ goto out;
++
++ smp_num_siblings = (ebx & 0xff0000) >> 16;
++
++ if (smp_num_siblings == 1) {
++ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
++ } else if (smp_num_siblings > 1 ) {
++
++ if (smp_num_siblings > NR_CPUS) {
++ printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
++ smp_num_siblings = 1;
++ return;
++ }
++
++ index_msb = get_count_order(smp_num_siblings);
++ c->phys_proc_id = phys_pkg_id(index_msb);
++
++ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
++
++ index_msb = get_count_order(smp_num_siblings) ;
++
++ core_bits = get_count_order(c->x86_max_cores);
++
++ c->cpu_core_id = phys_pkg_id(index_msb) &
++ ((1 << core_bits) - 1);
++ }
++out:
++ if ((c->x86_max_cores * smp_num_siblings) > 1) {
++ printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
++ printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
++ }
++
++#endif
++}
++
++/*
++ * find out the number of processor cores on the die
++ */
++static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
++{
++ unsigned int eax, t;
++
++ if (c->cpuid_level < 4)
++ return 1;
++
++ cpuid_count(4, 0, &eax, &t, &t, &t);
++
++ if (eax & 0x1f)
++ return ((eax >> 26) + 1);
++ else
++ return 1;
++}
++
++static void srat_detect_node(void)
++{
++#ifdef CONFIG_NUMA
++ unsigned node;
++ int cpu = smp_processor_id();
++ int apicid = hard_smp_processor_id();
++
++ /* Don't do the funky fallback heuristics the AMD version employs
++ for now. */
++ node = apicid_to_node[apicid];
++ if (node == NUMA_NO_NODE)
++ node = first_node(node_online_map);
++ numa_set_node(cpu, node);
++
++ if (acpi_numa > 0)
++ printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
++#endif
++}
++
++static void __cpuinit init_intel(struct cpuinfo_x86 *c)
++{
++ /* Cache sizes */
++ unsigned n;
++
++ init_intel_cacheinfo(c);
++ if (c->cpuid_level > 9 ) {
++ unsigned eax = cpuid_eax(10);
++ /* Check for version and the number of counters */
++ if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
++ set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
++ }
++
++ n = c->extended_cpuid_level;
++ if (n >= 0x80000008) {
++ unsigned eax = cpuid_eax(0x80000008);
++ c->x86_virt_bits = (eax >> 8) & 0xff;
++ c->x86_phys_bits = eax & 0xff;
++ /* CPUID workaround for Intel 0F34 CPU */
++ if (c->x86_vendor == X86_VENDOR_INTEL &&
++ c->x86 == 0xF && c->x86_model == 0x3 &&
++ c->x86_mask == 0x4)
++ c->x86_phys_bits = 36;
++ }
++
++ if (c->x86 == 15)
++ c->x86_cache_alignment = c->x86_clflush_size * 2;
++ if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
++ (c->x86 == 0x6 && c->x86_model >= 0x0e))
++ set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
++ set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
++ c->x86_max_cores = intel_num_cpu_cores(c);
++
++ srat_detect_node();
++}
++
++static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
++{
++ char *v = c->x86_vendor_id;
++
++ if (!strcmp(v, "AuthenticAMD"))
++ c->x86_vendor = X86_VENDOR_AMD;
++ else if (!strcmp(v, "GenuineIntel"))
++ c->x86_vendor = X86_VENDOR_INTEL;
++ else
++ c->x86_vendor = X86_VENDOR_UNKNOWN;
++}
++
++struct cpu_model_info {
++ int vendor;
++ int family;
++ char *model_names[16];
++};
++
++/* Do some early cpuid on the boot CPU to get some parameters that are
++ needed before check_bugs. Everything advanced is in identify_cpu
++ below. */
++void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
++{
++ u32 tfms;
++
++ c->loops_per_jiffy = loops_per_jiffy;
++ c->x86_cache_size = -1;
++ c->x86_vendor = X86_VENDOR_UNKNOWN;
++ c->x86_model = c->x86_mask = 0; /* So far unknown... */
++ c->x86_vendor_id[0] = '\0'; /* Unset */
++ c->x86_model_id[0] = '\0'; /* Unset */
++ c->x86_clflush_size = 64;
++ c->x86_cache_alignment = c->x86_clflush_size;
++ c->x86_max_cores = 1;
++ c->extended_cpuid_level = 0;
++ memset(&c->x86_capability, 0, sizeof c->x86_capability);
++
++ /* Get vendor name */
++ cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
++ (unsigned int *)&c->x86_vendor_id[0],
++ (unsigned int *)&c->x86_vendor_id[8],
++ (unsigned int *)&c->x86_vendor_id[4]);
++
++ get_cpu_vendor(c);
++
++ /* Initialize the standard set of capabilities */
++ /* Note that the vendor-specific code below might override */
++
++ /* Intel-defined flags: level 0x00000001 */
++ if (c->cpuid_level >= 0x00000001) {
++ __u32 misc;
++ cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
++ &c->x86_capability[0]);
++ c->x86 = (tfms >> 8) & 0xf;
++ c->x86_model = (tfms >> 4) & 0xf;
++ c->x86_mask = tfms & 0xf;
++ if (c->x86 == 0xf)
++ c->x86 += (tfms >> 20) & 0xff;
++ if (c->x86 >= 0x6)
++ c->x86_model += ((tfms >> 16) & 0xF) << 4;
++ if (c->x86_capability[0] & (1<<19))
++ c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
++ } else {
++ /* Have CPUID level 0 only - unheard of */
++ c->x86 = 4;
++ }
++
++#ifdef CONFIG_SMP
++ c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
++#endif
++}
++
++/*
++ * This does the hard work of actually picking apart the CPU stuff...
++ */
++void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
++{
++ int i;
++ u32 xlvl;
++
++ early_identify_cpu(c);
++
++ /* AMD-defined flags: level 0x80000001 */
++ xlvl = cpuid_eax(0x80000000);
++ c->extended_cpuid_level = xlvl;
++ if ((xlvl & 0xffff0000) == 0x80000000) {
++ if (xlvl >= 0x80000001) {
++ c->x86_capability[1] = cpuid_edx(0x80000001);
++ c->x86_capability[6] = cpuid_ecx(0x80000001);
++ }
++ if (xlvl >= 0x80000004)
++ get_model_name(c); /* Default name */
++ }
++
++ /* Transmeta-defined flags: level 0x80860001 */
++ xlvl = cpuid_eax(0x80860000);
++ if ((xlvl & 0xffff0000) == 0x80860000) {
++ /* Don't set x86_cpuid_level here for now to not confuse. */
++ if (xlvl >= 0x80860001)
++ c->x86_capability[2] = cpuid_edx(0x80860001);
++ }
++
++ c->apicid = phys_pkg_id(0);
++
++ /*
++ * Vendor-specific initialization. In this section we
++ * canonicalize the feature flags, meaning if there are
++ * features a certain CPU supports which CPUID doesn't
++ * tell us, CPUID claiming incorrect flags, or other bugs,
++ * we handle them here.
++ *
++ * At the end of this section, c->x86_capability better
++ * indicate the features this CPU genuinely supports!
++ */
++ switch (c->x86_vendor) {
++ case X86_VENDOR_AMD:
++ init_amd(c);
++ break;
++
++ case X86_VENDOR_INTEL:
++ init_intel(c);
++ break;
++
++ case X86_VENDOR_UNKNOWN:
++ default:
++ display_cacheinfo(c);
++ break;
++ }
++
++ select_idle_routine(c);
++ detect_ht(c);
++
++ /*
++ * On SMP, boot_cpu_data holds the common feature set between
++ * all CPUs; so make sure that we indicate which features are
++ * common between the CPUs. The first time this routine gets
++ * executed, c == &boot_cpu_data.
++ */
++ if (c != &boot_cpu_data) {
++ /* AND the already accumulated flags with these */
++ for (i = 0 ; i < NCAPINTS ; i++)
++ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
++ }
++
++#ifdef CONFIG_X86_MCE
++ mcheck_init(c);
++#endif
++ if (c == &boot_cpu_data)
++ mtrr_bp_init();
++ else
++ mtrr_ap_init();
++#ifdef CONFIG_NUMA
++ numa_add_cpu(smp_processor_id());
++#endif
++}
++
++
++void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
++{
++ if (c->x86_model_id[0])
++ printk("%s", c->x86_model_id);
++
++ if (c->x86_mask || c->cpuid_level >= 0)
++ printk(" stepping %02x\n", c->x86_mask);
++ else
++ printk("\n");
++}
++
++/*
++ * Get CPU information for use by the procfs.
++ */
++
++static int show_cpuinfo(struct seq_file *m, void *v)
++{
++ struct cpuinfo_x86 *c = v;
++
++ /*
++ * These flag bits must match the definitions in <asm/cpufeature.h>.
++ * NULL means this bit is undefined or reserved; either way it doesn't
++ * have meaning as far as Linux is concerned. Note that it's important
++ * to realize there is a difference between this table and CPUID -- if
++ * applications want to get the raw CPUID data, they should access
++ * /dev/cpu/<cpu_nr>/cpuid instead.
++ */
++ static char *x86_cap_flags[] = {
++ /* Intel-defined */
++ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
++ "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
++ "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
++ "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
++
++ /* AMD-defined */
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
++ NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",
++
++ /* Transmeta-defined */
++ "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++ /* Other (Linux-defined) */
++ "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
++ "constant_tsc", NULL, NULL,
++ "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++ /* Intel-defined (#2) */
++ "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
++ "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++ /* VIA/Cyrix/Centaur-defined */
++ NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++ /* AMD-defined (#2) */
++ "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ };
++ static char *x86_power_flags[] = {
++ "ts", /* temperature sensor */
++ "fid", /* frequency id control */
++ "vid", /* voltage id control */
++ "ttp", /* thermal trip */
++ "tm",
++ "stc",
++ NULL,
++ /* nothing */ /* constant_tsc - moved to flags */
++ };
++
++
++#ifdef CONFIG_SMP
++ if (!cpu_online(c-cpu_data))
++ return 0;
++#endif
++
++ seq_printf(m,"processor\t: %u\n"
++ "vendor_id\t: %s\n"
++ "cpu family\t: %d\n"
++ "model\t\t: %d\n"
++ "model name\t: %s\n",
++ (unsigned)(c-cpu_data),
++ c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
++ c->x86,
++ (int)c->x86_model,
++ c->x86_model_id[0] ? c->x86_model_id : "unknown");
++
++ if (c->x86_mask || c->cpuid_level >= 0)
++ seq_printf(m, "stepping\t: %d\n", c->x86_mask);
++ else
++ seq_printf(m, "stepping\t: unknown\n");
++
++ if (cpu_has(c,X86_FEATURE_TSC)) {
++ unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
++ if (!freq)
++ freq = cpu_khz;
++ seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
++ freq / 1000, (freq % 1000));
++ }
++
++ /* Cache size */
++ if (c->x86_cache_size >= 0)
++ seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
++
++#ifdef CONFIG_SMP
++ if (smp_num_siblings * c->x86_max_cores > 1) {
++ int cpu = c - cpu_data;
++ seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
++ seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
++ seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
++ seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
++ }
++#endif
++
++ seq_printf(m,
++ "fpu\t\t: yes\n"
++ "fpu_exception\t: yes\n"
++ "cpuid level\t: %d\n"
++ "wp\t\t: yes\n"
++ "flags\t\t:",
++ c->cpuid_level);
++
++ {
++ int i;
++ for ( i = 0 ; i < 32*NCAPINTS ; i++ )
++ if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
++ seq_printf(m, " %s", x86_cap_flags[i]);
++ }
++
++ seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
++ c->loops_per_jiffy/(500000/HZ),
++ (c->loops_per_jiffy/(5000/HZ)) % 100);
++
++ if (c->x86_tlbsize > 0)
++ seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
++ seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
++ seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
++
++ seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
++ c->x86_phys_bits, c->x86_virt_bits);
++
++ seq_printf(m, "power management:");
++ {
++ unsigned i;
++ for (i = 0; i < 32; i++)
++ if (c->x86_power & (1 << i)) {
++ if (i < ARRAY_SIZE(x86_power_flags) &&
++ x86_power_flags[i])
++ seq_printf(m, "%s%s",
++ x86_power_flags[i][0]?" ":"",
++ x86_power_flags[i]);
++ else
++ seq_printf(m, " [%d]", i);
++ }
++ }
++
++ seq_printf(m, "\n\n");
++
++ return 0;
++}
++
++static void *c_start(struct seq_file *m, loff_t *pos)
++{
++ return *pos < NR_CPUS ? cpu_data + *pos : NULL;
++}
++
++static void *c_next(struct seq_file *m, void *v, loff_t *pos)
++{
++ ++*pos;
++ return c_start(m, pos);
++}
++
++static void c_stop(struct seq_file *m, void *v)
++{
++}
++
++struct seq_operations cpuinfo_op = {
++ .start =c_start,
++ .next = c_next,
++ .stop = c_stop,
++ .show = show_cpuinfo,
++};
++
++#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
++#include <linux/platform_device.h>
++static __init int add_pcspkr(void)
++{
++ struct platform_device *pd;
++ int ret;
++
++ if (!is_initial_xendomain())
++ return 0;
++
++ pd = platform_device_alloc("pcspkr", -1);
++ if (!pd)
++ return -ENOMEM;
++
++ ret = platform_device_add(pd);
++ if (ret)
++ platform_device_put(pd);
++
++ return ret;
++}
++device_initcall(add_pcspkr);
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/setup64-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,361 @@
++/*
++ * X86-64 specific CPU setup.
++ * Copyright (C) 1995 Linus Torvalds
++ * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
++ * See setup.c for older changelog.
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Modified for Xen
++ *
++ */
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/bootmem.h>
++#include <linux/bitops.h>
++#include <linux/module.h>
++#include <asm/bootsetup.h>
++#include <asm/pda.h>
++#include <asm/pgtable.h>
++#include <asm/processor.h>
++#include <asm/desc.h>
++#include <asm/atomic.h>
++#include <asm/mmu_context.h>
++#include <asm/smp.h>
++#include <asm/i387.h>
++#include <asm/percpu.h>
++#include <asm/proto.h>
++#include <asm/sections.h>
++#ifdef CONFIG_XEN
++#include <asm/hypervisor.h>
++#endif
++
++char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
++
++cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
++
++struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
++EXPORT_SYMBOL(_cpu_pda);
++struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
++
++#ifndef CONFIG_X86_NO_IDT
++struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
++#endif
++
++char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
++
++unsigned long __supported_pte_mask __read_mostly = ~0UL;
++EXPORT_SYMBOL(__supported_pte_mask);
++static int do_not_nx __cpuinitdata = 0;
++
++/* noexec=on|off
++Control non executable mappings for 64bit processes.
++
++on Enable (default)
++off Disable
++*/
++int __init nonx_setup(char *str)
++{
++ if (!strncmp(str, "on", 2)) {
++ __supported_pte_mask |= _PAGE_NX;
++ do_not_nx = 0;
++ } else if (!strncmp(str, "off", 3)) {
++ do_not_nx = 1;
++ __supported_pte_mask &= ~_PAGE_NX;
++ }
++ return 1;
++}
++__setup("noexec=", nonx_setup); /* parsed early actually */
++
++int force_personality32 = 0;
++
++/* noexec32=on|off
++Control non executable heap for 32bit processes.
++To control the stack too use noexec=off
++
++on PROT_READ does not imply PROT_EXEC for 32bit processes
++off PROT_READ implies PROT_EXEC (default)
++*/
++static int __init nonx32_setup(char *str)
++{
++ if (!strcmp(str, "on"))
++ force_personality32 &= ~READ_IMPLIES_EXEC;
++ else if (!strcmp(str, "off"))
++ force_personality32 |= READ_IMPLIES_EXEC;
++ return 1;
++}
++__setup("noexec32=", nonx32_setup);
++
++/*
++ * Great future plan:
++ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
++ * Always point %gs to its beginning
++ */
++void __init setup_per_cpu_areas(void)
++{
++ int i;
++ unsigned long size;
++
++#ifdef CONFIG_HOTPLUG_CPU
++ prefill_possible_map();
++#endif
++
++ /* Copy section for each CPU (we discard the original) */
++ size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
++#ifdef CONFIG_MODULES
++ if (size < PERCPU_ENOUGH_ROOM)
++ size = PERCPU_ENOUGH_ROOM;
++#endif
++
++ for_each_cpu_mask (i, cpu_possible_map) {
++ char *ptr;
++
++ if (!NODE_DATA(cpu_to_node(i))) {
++ printk("cpu with no node %d, num_online_nodes %d\n",
++ i, num_online_nodes());
++ ptr = alloc_bootmem(size);
++ } else {
++ ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
++ }
++ if (!ptr)
++ panic("Cannot allocate cpu data for CPU %d\n", i);
++ cpu_pda(i)->data_offset = ptr - __per_cpu_start;
++ memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
++ }
++}
++
++#ifdef CONFIG_XEN
++static void switch_pt(void)
++{
++ xen_pt_switch(__pa(init_level4_pgt));
++ xen_new_user_pt(__pa(init_level4_user_pgt));
++}
++
++void __cpuinit cpu_gdt_init(struct desc_ptr *gdt_descr)
++{
++ unsigned long frames[16];
++ unsigned long va;
++ int f;
++
++ for (va = gdt_descr->address, f = 0;
++ va < gdt_descr->address + gdt_descr->size;
++ va += PAGE_SIZE, f++) {
++ frames[f] = virt_to_mfn(va);
++ make_page_readonly(
++ (void *)va, XENFEAT_writable_descriptor_tables);
++ }
++ if (HYPERVISOR_set_gdt(frames, gdt_descr->size /
++ sizeof (struct desc_struct)))
++ BUG();
++}
++#else
++static void switch_pt(void)
++{
++ asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
++}
++
++void __init cpu_gdt_init(struct desc_ptr *gdt_descr)
++{
++ asm volatile("lgdt %0" :: "m" (*gdt_descr));
++ asm volatile("lidt %0" :: "m" (idt_descr));
++}
++#endif
++
++void pda_init(int cpu)
++{
++ struct x8664_pda *pda = cpu_pda(cpu);
++
++ /* Set up data that may be needed in __get_free_pages early */
++ asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
++#ifndef CONFIG_XEN
++ wrmsrl(MSR_GS_BASE, pda);
++#else
++ HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL, (unsigned long)pda);
++#endif
++ pda->cpunumber = cpu;
++ pda->irqcount = -1;
++ pda->kernelstack =
++ (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
++ pda->active_mm = &init_mm;
++ pda->mmu_state = 0;
++
++ if (cpu == 0) {
++#ifdef CONFIG_XEN
++ xen_init_pt();
++#endif
++ /* others are initialized in smpboot.c */
++ pda->pcurrent = &init_task;
++ pda->irqstackptr = boot_cpu_stack;
++ } else {
++ pda->irqstackptr = (char *)
++ __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
++ if (!pda->irqstackptr)
++ panic("cannot allocate irqstack for cpu %d", cpu);
++ }
++
++ switch_pt();
++
++ pda->irqstackptr += IRQSTACKSIZE-64;
++}
++
++#ifndef CONFIG_X86_NO_TSS
++char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
++__attribute__((section(".bss.page_aligned")));
++#endif
++
++/* May not be marked __init: used by software suspend */
++void syscall_init(void)
++{
++#ifndef CONFIG_XEN
++ /*
++ * LSTAR and STAR live in a bit strange symbiosis.
++ * They both write to the same internal register. STAR allows setting CS/DS
++ * but only a 32bit target. LSTAR sets the 64bit rip.
++ */
++ wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
++ wrmsrl(MSR_LSTAR, system_call);
++
++ /* Flags to clear on syscall */
++ wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
++#endif
++#ifdef CONFIG_IA32_EMULATION
++ syscall32_cpu_init ();
++#endif
++}
++
++void __cpuinit check_efer(void)
++{
++ unsigned long efer;
++
++ rdmsrl(MSR_EFER, efer);
++ if (!(efer & EFER_NX) || do_not_nx) {
++ __supported_pte_mask &= ~_PAGE_NX;
++ }
++}
++
++/*
++ * cpu_init() initializes state that is per-CPU. Some data is already
++ * initialized (naturally) in the bootstrap process, such as the GDT
++ * and IDT. We reload them nevertheless, this function acts as a
++ * 'CPU state barrier', nothing should get across.
++ * A lot of state is already set up in PDA init.
++ */
++void __cpuinit cpu_init (void)
++{
++ int cpu = stack_smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
++ unsigned long v;
++ char *estacks = NULL;
++ unsigned i;
++#endif
++ struct task_struct *me;
++
++ /* CPU 0 is initialised in head64.c */
++ if (cpu != 0) {
++ pda_init(cpu);
++ zap_low_mappings(cpu);
++ }
++#ifndef CONFIG_X86_NO_TSS
++ else
++ estacks = boot_exception_stacks;
++#endif
++
++ me = current;
++
++ if (cpu_test_and_set(cpu, cpu_initialized))
++ panic("CPU#%d already initialized!\n", cpu);
++
++ printk("Initializing CPU#%d\n", cpu);
++
++ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
++
++ /*
++ * Initialize the per-CPU GDT with the boot GDT,
++ * and set up the GDT descriptor:
++ */
++#ifndef CONFIG_XEN
++ if (cpu)
++ memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
++#endif
++
++ cpu_gdt_descr[cpu].size = GDT_SIZE;
++ cpu_gdt_init(&cpu_gdt_descr[cpu]);
++
++ memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
++ syscall_init();
++
++ wrmsrl(MSR_FS_BASE, 0);
++ wrmsrl(MSR_KERNEL_GS_BASE, 0);
++ barrier();
++
++ check_efer();
++
++#ifndef CONFIG_X86_NO_TSS
++ /*
++ * set up and load the per-CPU TSS
++ */
++ for (v = 0; v < N_EXCEPTION_STACKS; v++) {
++ if (cpu) {
++ static const unsigned int order[N_EXCEPTION_STACKS] = {
++ [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
++ [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
++ };
++
++ estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
++ if (!estacks)
++ panic("Cannot allocate exception stack %ld %d\n",
++ v, cpu);
++ }
++ switch (v + 1) {
++#if DEBUG_STKSZ > EXCEPTION_STKSZ
++ case DEBUG_STACK:
++ cpu_pda(cpu)->debugstack = (unsigned long)estacks;
++ estacks += DEBUG_STKSZ;
++ break;
++#endif
++ default:
++ estacks += EXCEPTION_STKSZ;
++ break;
++ }
++ orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
++ }
++
++ t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
++ /*
++ * <= is required because the CPU will access up to
++ * 8 bits beyond the end of the IO permission bitmap.
++ */
++ for (i = 0; i <= IO_BITMAP_LONGS; i++)
++ t->io_bitmap[i] = ~0UL;
++#endif
++
++ atomic_inc(&init_mm.mm_count);
++ me->active_mm = &init_mm;
++ if (me->mm)
++ BUG();
++ enter_lazy_tlb(&init_mm, me);
++
++#ifndef CONFIG_X86_NO_TSS
++ set_tss_desc(cpu, t);
++#endif
++#ifndef CONFIG_XEN
++ load_TR_desc();
++#endif
++ load_LDT(&init_mm.context);
++
++ /*
++ * Clear all 6 debug registers:
++ */
++
++ set_debugreg(0UL, 0);
++ set_debugreg(0UL, 1);
++ set_debugreg(0UL, 2);
++ set_debugreg(0UL, 3);
++ set_debugreg(0UL, 6);
++ set_debugreg(0UL, 7);
++
++ fpu_init();
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/smp-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,600 @@
++/*
++ * Intel SMP support routines.
++ *
++ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
++ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
++ * (c) 2002,2003 Andi Kleen, SuSE Labs.
++ *
++ * This code is released under the GNU General Public License version 2 or
++ * later.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/smp_lock.h>
++#include <linux/smp.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/interrupt.h>
++
++#include <asm/mtrr.h>
++#include <asm/pgalloc.h>
++#include <asm/tlbflush.h>
++#include <asm/mach_apic.h>
++#include <asm/mmu_context.h>
++#include <asm/proto.h>
++#include <asm/apicdef.h>
++#include <asm/idle.h>
++#ifdef CONFIG_XEN
++#include <xen/evtchn.h>
++#endif
++
++#ifndef CONFIG_XEN
++/*
++ * Smarter SMP flushing macros.
++ * c/o Linus Torvalds.
++ *
++ * These mean you can really definitely utterly forget about
++ * writing to user space from interrupts. (It's not allowed anyway).
++ *
++ * Optimizations Manfred Spraul <manfred@colorfullife.com>
++ *
++ * More scalable flush, from Andi Kleen
++ *
++ * To avoid global state use 8 different call vectors.
++ * Each CPU uses a specific vector to trigger flushes on other
++ * CPUs. Depending on the received vector the target CPUs look into
++ * the right per cpu variable for the flush data.
++ *
++ * With more than 8 CPUs they are hashed to the 8 available
++ * vectors. The limited global vector space forces us to this right now.
++ * In future when interrupts are split into per CPU domains this could be
++ * fixed, at the cost of triggering multiple IPIs in some cases.
++ */
++
++union smp_flush_state {
++ struct {
++ cpumask_t flush_cpumask;
++ struct mm_struct *flush_mm;
++ unsigned long flush_va;
++#define FLUSH_ALL -1ULL
++ spinlock_t tlbstate_lock;
++ };
++ char pad[SMP_CACHE_BYTES];
++} ____cacheline_aligned;
++
++/* State is put into the per CPU data section, but padded
++ to a full cache line because other CPUs can access it and we don't
++ want false sharing in the per cpu data segment. */
++static DEFINE_PER_CPU(union smp_flush_state, flush_state);
++#endif
++
++/*
++ * We cannot call mmdrop() because we are in interrupt context,
++ * instead update mm->cpu_vm_mask.
++ */
++static inline void leave_mm(unsigned long cpu)
++{
++ if (read_pda(mmu_state) == TLBSTATE_OK)
++ BUG();
++ cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
++ load_cr3(swapper_pg_dir);
++}
++
++#ifndef CONFIG_XEN
++/*
++ *
++ * The flush IPI assumes that a thread switch happens in this order:
++ * [cpu0: the cpu that switches]
++ * 1) switch_mm() either 1a) or 1b)
++ * 1a) thread switch to a different mm
++ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
++ * Stop ipi delivery for the old mm. This is not synchronized with
++ * the other cpus, but smp_invalidate_interrupt ignores flush ipis
++ * for the wrong mm, and in the worst case we perform a superfluous
++ * tlb flush.
++ * 1a2) set cpu mmu_state to TLBSTATE_OK
++ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
++ * was in lazy tlb mode.
++ * 1a3) update cpu active_mm
++ * Now cpu0 accepts tlb flushes for the new mm.
++ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
++ * Now the other cpus will send tlb flush ipis.
++ * 1a4) change cr3.
++ * 1b) thread switch without mm change
++ * cpu active_mm is correct, cpu0 already handles
++ * flush ipis.
++ * 1b1) set cpu mmu_state to TLBSTATE_OK
++ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
++ * Atomically set the bit [other cpus will start sending flush ipis],
++ * and test the bit.
++ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
++ * 2) switch %%esp, ie current
++ *
++ * The interrupt must handle 2 special cases:
++ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
++ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
++ * runs in kernel space, the cpu could load tlb entries for user space
++ * pages.
++ *
++ * The good news is that cpu mmu_state is local to each cpu, no
++ * write/read ordering problems.
++ */
++
++/*
++ * TLB flush IPI:
++ *
++ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
++ * 2) Leave the mm if we are in the lazy tlb mode.
++ *
++ * Interrupts are disabled.
++ */
++
++asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
++{
++ int cpu;
++ int sender;
++ union smp_flush_state *f;
++
++ cpu = smp_processor_id();
++ /*
++ * orig_rax contains the negated interrupt vector.
++ * Use that to determine where the sender put the data.
++ */
++ sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
++ f = &per_cpu(flush_state, sender);
++
++ if (!cpu_isset(cpu, f->flush_cpumask))
++ goto out;
++ /*
++ * This was a BUG() but until someone can quote me the
++ * line from the intel manual that guarantees an IPI to
++ * multiple CPUs is retried _only_ on the erroring CPUs
++ * it's staying as a return
++ *
++ * BUG();
++ */
++
++ if (f->flush_mm == read_pda(active_mm)) {
++ if (read_pda(mmu_state) == TLBSTATE_OK) {
++ if (f->flush_va == FLUSH_ALL)
++ local_flush_tlb();
++ else
++ __flush_tlb_one(f->flush_va);
++ } else
++ leave_mm(cpu);
++ }
++out:
++ ack_APIC_irq();
++ cpu_clear(cpu, f->flush_cpumask);
++}
++
++static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
++ unsigned long va)
++{
++ int sender;
++ union smp_flush_state *f;
++
++ /* Caller has disabled preemption */
++ sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
++ f = &per_cpu(flush_state, sender);
++
++ /* Could avoid this lock when
++ num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
++ probably not worth checking this for a cache-hot lock. */
++ spin_lock(&f->tlbstate_lock);
++
++ f->flush_mm = mm;
++ f->flush_va = va;
++ cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
++
++ /*
++ * We have to send the IPI only to
++ * CPUs affected.
++ */
++ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
++
++ while (!cpus_empty(f->flush_cpumask))
++ cpu_relax();
++
++ f->flush_mm = NULL;
++ f->flush_va = 0;
++ spin_unlock(&f->tlbstate_lock);
++}
++
++int __cpuinit init_smp_flush(void)
++{
++ int i;
++ for_each_cpu_mask(i, cpu_possible_map) {
++ spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
++ }
++ return 0;
++}
++
++core_initcall(init_smp_flush);
++
++void flush_tlb_current_task(void)
++{
++ struct mm_struct *mm = current->mm;
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ local_flush_tlb();
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++ preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_current_task);
++
++void flush_tlb_mm (struct mm_struct * mm)
++{
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ if (current->active_mm == mm) {
++ if (current->mm)
++ local_flush_tlb();
++ else
++ leave_mm(smp_processor_id());
++ }
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++
++ preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_mm);
++
++void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ if (current->active_mm == mm) {
++ if(current->mm)
++ __flush_tlb_one(va);
++ else
++ leave_mm(smp_processor_id());
++ }
++
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, va);
++
++ preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_page);
++
++static void do_flush_tlb_all(void* info)
++{
++ unsigned long cpu = smp_processor_id();
++
++ __flush_tlb_all();
++ if (read_pda(mmu_state) == TLBSTATE_LAZY)
++ leave_mm(cpu);
++}
++
++void flush_tlb_all(void)
++{
++ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
++}
++#else
++asmlinkage void smp_invalidate_interrupt (void)
++{ return; }
++void flush_tlb_current_task(void)
++{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
++void flush_tlb_mm (struct mm_struct * mm)
++{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
++void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
++void flush_tlb_all(void)
++{ xen_tlb_flush_all(); }
++#endif /* Xen */
++
++/*
++ * this function sends a 'reschedule' IPI to another CPU.
++ * it goes straight through and wastes no time serializing
++ * anything. Worst case is that we lose a reschedule ...
++ */
++
++void smp_send_reschedule(int cpu)
++{
++ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
++}
++
++/*
++ * Structure and data for smp_call_function(). This is designed to minimise
++ * static memory requirements. It also looks cleaner.
++ */
++static DEFINE_SPINLOCK(call_lock);
++
++struct call_data_struct {
++ void (*func) (void *info);
++ void *info;
++ atomic_t started;
++ atomic_t finished;
++ int wait;
++};
++
++static struct call_data_struct * call_data;
++
++void lock_ipi_call_lock(void)
++{
++ spin_lock_irq(&call_lock);
++}
++
++void unlock_ipi_call_lock(void)
++{
++ spin_unlock_irq(&call_lock);
++}
++
++/*
++ * this function sends a 'generic call function' IPI to one other CPU
++ * in the system.
++ *
++ * cpu is a standard Linux logical CPU number.
++ */
++static void
++__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
++ int nonatomic, int wait)
++{
++ struct call_data_struct data;
++ int cpus = 1;
++
++ data.func = func;
++ data.info = info;
++ atomic_set(&data.started, 0);
++ data.wait = wait;
++ if (wait)
++ atomic_set(&data.finished, 0);
++
++ call_data = &data;
++ wmb();
++ /* Send a message to all other CPUs and wait for them to respond */
++ send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
++
++ /* Wait for response */
++ while (atomic_read(&data.started) != cpus)
++ cpu_relax();
++
++ if (!wait)
++ return;
++
++ while (atomic_read(&data.finished) != cpus)
++ cpu_relax();
++}
++
++/*
++ * smp_call_function_single - Run a function on another CPU
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: Currently unused.
++ * @wait: If true, wait until function has completed on other CPUs.
++ *
++ * Returns 0 on success, else a negative status code.
++ *
++ * Does not return until the remote CPU is nearly ready to execute <func>
++ * or is executing or has executed it.
++ */
++
++int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
++ int nonatomic, int wait)
++{
++ /* prevent preemption and reschedule on another processor */
++ int me = get_cpu();
++ if (cpu == me) {
++ WARN_ON(1);
++ put_cpu();
++ return -EBUSY;
++ }
++ spin_lock_bh(&call_lock);
++ __smp_call_function_single(cpu, func, info, nonatomic, wait);
++ spin_unlock_bh(&call_lock);
++ put_cpu();
++ return 0;
++}
++
++/*
++ * this function sends a 'generic call function' IPI to all other CPUs
++ * in the system.
++ */
++static void __smp_call_function (void (*func) (void *info), void *info,
++ int nonatomic, int wait)
++{
++ struct call_data_struct data;
++ int cpus = num_online_cpus()-1;
++
++ if (!cpus)
++ return;
++
++ data.func = func;
++ data.info = info;
++ atomic_set(&data.started, 0);
++ data.wait = wait;
++ if (wait)
++ atomic_set(&data.finished, 0);
++
++ call_data = &data;
++ wmb();
++ /* Send a message to all other CPUs and wait for them to respond */
++ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++
++ /* Wait for response */
++ while (atomic_read(&data.started) != cpus)
++#ifndef CONFIG_XEN
++ cpu_relax();
++#else
++ barrier();
++#endif
++
++ if (!wait)
++ return;
++
++ while (atomic_read(&data.finished) != cpus)
++#ifndef CONFIG_XEN
++ cpu_relax();
++#else
++ barrier();
++#endif
++}
++
++/*
++ * smp_call_function - run a function on all other CPUs.
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: currently unused.
++ * @wait: If true, wait (atomically) until function has completed on other
++ * CPUs.
++ *
++ * Returns 0 on success, else a negative status code. Does not return until
++ * remote CPUs are nearly ready to execute func or are or have executed.
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler or from a bottom half handler.
++ * Actually there are a few legal cases, like panic.
++ */
++int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
++ int wait)
++{
++ spin_lock(&call_lock);
++ __smp_call_function(func,info,nonatomic,wait);
++ spin_unlock(&call_lock);
++ return 0;
++}
++EXPORT_SYMBOL(smp_call_function);
++
++void smp_stop_cpu(void)
++{
++ unsigned long flags;
++ /*
++ * Remove this CPU:
++ */
++ cpu_clear(smp_processor_id(), cpu_online_map);
++ local_irq_save(flags);
++#ifndef CONFIG_XEN
++ disable_local_APIC();
++#endif
++ local_irq_restore(flags);
++}
++
++static void smp_really_stop_cpu(void *dummy)
++{
++ smp_stop_cpu();
++ for (;;)
++ halt();
++}
++
++void smp_send_stop(void)
++{
++ int nolock = 0;
++#ifndef CONFIG_XEN
++ if (reboot_force)
++ return;
++#endif
++ /* Don't deadlock on the call lock in panic */
++ if (!spin_trylock(&call_lock)) {
++ /* ignore locking because we have panicked anyways */
++ nolock = 1;
++ }
++ __smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
++ if (!nolock)
++ spin_unlock(&call_lock);
++
++ local_irq_disable();
++#ifndef CONFIG_XEN
++ disable_local_APIC();
++#endif
++ local_irq_enable();
++}
++
++/*
++ * Reschedule call back. Nothing to do,
++ * all the work is done automatically when
++ * we return from the interrupt.
++ */
++#ifndef CONFIG_XEN
++asmlinkage void smp_reschedule_interrupt(void)
++#else
++asmlinkage irqreturn_t smp_reschedule_interrupt(void)
++#endif
++{
++#ifndef CONFIG_XEN
++ ack_APIC_irq();
++#else
++ return IRQ_HANDLED;
++#endif
++}
++
++#ifndef CONFIG_XEN
++asmlinkage void smp_call_function_interrupt(void)
++#else
++asmlinkage irqreturn_t smp_call_function_interrupt(void)
++#endif
++{
++ void (*func) (void *info) = call_data->func;
++ void *info = call_data->info;
++ int wait = call_data->wait;
++
++#ifndef CONFIG_XEN
++ ack_APIC_irq();
++#endif
++ /*
++ * Notify initiating CPU that I've grabbed the data and am
++ * about to execute the function
++ */
++ mb();
++ atomic_inc(&call_data->started);
++ /*
++ * At this point the info structure may be out of scope unless wait==1
++ */
++ exit_idle();
++ irq_enter();
++ (*func)(info);
++ irq_exit();
++ if (wait) {
++ mb();
++ atomic_inc(&call_data->finished);
++ }
++#ifdef CONFIG_XEN
++ return IRQ_HANDLED;
++#endif
++}
++
++int safe_smp_processor_id(void)
++{
++#ifdef CONFIG_XEN
++ return smp_processor_id();
++#else
++ unsigned apicid, i;
++
++ if (disable_apic)
++ return 0;
++
++ apicid = hard_smp_processor_id();
++ if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
++ return apicid;
++
++ for (i = 0; i < NR_CPUS; ++i) {
++ if (x86_cpu_to_apicid[i] == apicid)
++ return i;
++ }
++
++ /* No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
++ * or called too early. Either way, we must be CPU 0. */
++ if (x86_cpu_to_apicid[0] == BAD_APICID)
++ return 0;
++
++ return 0; /* Should not happen */
++#endif
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/traps-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,1175 @@
++/*
++ * linux/arch/x86-64/traps.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
++ *
++ * Pentium III FXSR, SSE support
++ * Gareth Hughes <gareth@valinux.com>, May 2000
++ */
++
++/*
++ * 'Traps.c' handles hardware traps and faults after we have saved some
++ * state in 'entry.S'.
++ */
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/ptrace.h>
++#include <linux/timer.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/nmi.h>
++#include <linux/kprobes.h>
++#include <linux/kexec.h>
++#include <linux/unwind.h>
++
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/debugreg.h>
++#include <asm/desc.h>
++#include <asm/i387.h>
++#include <asm/kdebug.h>
++#include <asm/processor.h>
++#include <asm/unwind.h>
++#include <asm/smp.h>
++#include <asm/pgalloc.h>
++#include <asm/pda.h>
++#include <asm/proto.h>
++#include <asm/nmi.h>
++
++asmlinkage void divide_error(void);
++asmlinkage void debug(void);
++asmlinkage void nmi(void);
++asmlinkage void int3(void);
++asmlinkage void overflow(void);
++asmlinkage void bounds(void);
++asmlinkage void invalid_op(void);
++asmlinkage void device_not_available(void);
++asmlinkage void double_fault(void);
++asmlinkage void coprocessor_segment_overrun(void);
++asmlinkage void invalid_TSS(void);
++asmlinkage void segment_not_present(void);
++asmlinkage void stack_segment(void);
++asmlinkage void general_protection(void);
++asmlinkage void page_fault(void);
++asmlinkage void coprocessor_error(void);
++asmlinkage void simd_coprocessor_error(void);
++asmlinkage void reserved(void);
++asmlinkage void alignment_check(void);
++asmlinkage void machine_check(void);
++asmlinkage void spurious_interrupt_bug(void);
++
++ATOMIC_NOTIFIER_HEAD(die_chain);
++EXPORT_SYMBOL(die_chain);
++
++int register_die_notifier(struct notifier_block *nb)
++{
++ vmalloc_sync_all();
++ return atomic_notifier_chain_register(&die_chain, nb);
++}
++EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
++
++int unregister_die_notifier(struct notifier_block *nb)
++{
++ return atomic_notifier_chain_unregister(&die_chain, nb);
++}
++EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
++
++static inline void conditional_sti(struct pt_regs *regs)
++{
++ if (regs->eflags & X86_EFLAGS_IF)
++ local_irq_enable();
++}
++
++static inline void preempt_conditional_sti(struct pt_regs *regs)
++{
++ preempt_disable();
++ if (regs->eflags & X86_EFLAGS_IF)
++ local_irq_enable();
++}
++
++static inline void preempt_conditional_cli(struct pt_regs *regs)
++{
++ if (regs->eflags & X86_EFLAGS_IF)
++ local_irq_disable();
++ /* Make sure to not schedule here because we could be running
++ on an exception stack. */
++ preempt_enable_no_resched();
++}
++
++static int kstack_depth_to_print = 12;
++#ifdef CONFIG_STACK_UNWIND
++static int call_trace = 1;
++#else
++#define call_trace (-1)
++#endif
++
++#ifdef CONFIG_KALLSYMS
++# include <linux/kallsyms.h>
++void printk_address(unsigned long address)
++{
++ unsigned long offset = 0, symsize;
++ const char *symname;
++ char *modname;
++ char *delim = ":";
++ char namebuf[128];
++
++ symname = kallsyms_lookup(address, &symsize, &offset,
++ &modname, namebuf);
++ if (!symname) {
++ printk(" [<%016lx>]\n", address);
++ return;
++ }
++ if (!modname)
++ modname = delim = "";
++ printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
++ address, delim, modname, delim, symname, offset, symsize);
++}
++#else
++void printk_address(unsigned long address)
++{
++ printk(" [<%016lx>]\n", address);
++}
++#endif
++
++static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
++ unsigned *usedp, const char **idp)
++{
++#ifndef CONFIG_X86_NO_TSS
++ static char ids[][8] = {
++ [DEBUG_STACK - 1] = "#DB",
++ [NMI_STACK - 1] = "NMI",
++ [DOUBLEFAULT_STACK - 1] = "#DF",
++ [STACKFAULT_STACK - 1] = "#SS",
++ [MCE_STACK - 1] = "#MC",
++#if DEBUG_STKSZ > EXCEPTION_STKSZ
++ [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
++#endif
++ };
++ unsigned k;
++
++ /*
++ * Iterate over all exception stacks, and figure out whether
++ * 'stack' is in one of them:
++ */
++ for (k = 0; k < N_EXCEPTION_STACKS; k++) {
++ unsigned long end;
++
++ /*
++ * set 'end' to the end of the exception stack.
++ */
++ switch (k + 1) {
++ /*
++ * TODO: this block is not needed, I think, because
++ * setup64.c:cpu_init() sets up t->ist[DEBUG_STACK]
++ * properly too.
++ */
++#if DEBUG_STKSZ > EXCEPTION_STKSZ
++ case DEBUG_STACK:
++ end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
++ break;
++#endif
++ default:
++ end = per_cpu(orig_ist, cpu).ist[k];
++ break;
++ }
++ /*
++ * Is 'stack' above this exception frame's end?
++ * If yes then skip to the next frame.
++ */
++ if (stack >= end)
++ continue;
++ /*
++ * Is 'stack' above this exception frame's start address?
++ * If yes then we found the right frame.
++ */
++ if (stack >= end - EXCEPTION_STKSZ) {
++ /*
++ * Make sure we only iterate through an exception
++ * stack once. If it comes up for the second time
++ * then there's something wrong going on - just
++ * break out and return NULL:
++ */
++ if (*usedp & (1U << k))
++ break;
++ *usedp |= 1U << k;
++ *idp = ids[k];
++ return (unsigned long *)end;
++ }
++ /*
++ * If this is a debug stack, and if it has a larger size than
++ * the usual exception stacks, then 'stack' might still
++ * be within the lower portion of the debug stack:
++ */
++#if DEBUG_STKSZ > EXCEPTION_STKSZ
++ if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
++ unsigned j = N_EXCEPTION_STACKS - 1;
++
++ /*
++ * Black magic. A large debug stack is composed of
++ * multiple exception stack entries, which we
++ * iterate through now. Don't look:
++ */
++ do {
++ ++j;
++ end -= EXCEPTION_STKSZ;
++ ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
++ } while (stack < end - EXCEPTION_STKSZ);
++ if (*usedp & (1U << j))
++ break;
++ *usedp |= 1U << j;
++ *idp = ids[j];
++ return (unsigned long *)end;
++ }
++#endif
++ }
++#endif
++ return NULL;
++}
++
++static int show_trace_unwind(struct unwind_frame_info *info, void *context)
++{
++ int n = 0;
++
++ while (unwind(info) == 0 && UNW_PC(info)) {
++ n++;
++ printk_address(UNW_PC(info));
++ if (arch_unw_user_mode(info))
++ break;
++ }
++ return n;
++}
++
++/*
++ * x86-64 can have up to three kernel stacks:
++ * process stack
++ * interrupt stack
++ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
++ */
++
++void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack)
++{
++ const unsigned cpu = safe_smp_processor_id();
++ unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
++ unsigned used = 0;
++
++ printk("\nCall Trace:\n");
++
++ if (!tsk)
++ tsk = current;
++
++ if (call_trace >= 0) {
++ int unw_ret = 0;
++ struct unwind_frame_info info;
++
++ if (regs) {
++ if (unwind_init_frame_info(&info, tsk, regs) == 0)
++ unw_ret = show_trace_unwind(&info, NULL);
++ } else if (tsk == current)
++ unw_ret = unwind_init_running(&info, show_trace_unwind, NULL);
++ else {
++ if (unwind_init_blocked(&info, tsk) == 0)
++ unw_ret = show_trace_unwind(&info, NULL);
++ }
++ if (unw_ret > 0) {
++ if (call_trace == 1 && !arch_unw_user_mode(&info)) {
++ print_symbol("DWARF2 unwinder stuck at %s\n",
++ UNW_PC(&info));
++ if ((long)UNW_SP(&info) < 0) {
++ printk("Leftover inexact backtrace:\n");
++ stack = (unsigned long *)UNW_SP(&info);
++ } else
++ printk("Full inexact backtrace again:\n");
++ } else if (call_trace >= 1)
++ return;
++ else
++ printk("Full inexact backtrace again:\n");
++ } else
++ printk("Inexact backtrace:\n");
++ }
++
++ /*
++ * Print function call entries within a stack. 'cond' is the
++ * "end of stackframe" condition, that the 'stack++'
++ * iteration will eventually trigger.
++ */
++#define HANDLE_STACK(cond) \
++ do while (cond) { \
++ unsigned long addr = *stack++; \
++ if (kernel_text_address(addr)) { \
++ /* \
++ * If the address is either in the text segment of the \
++ * kernel, or in the region which contains vmalloc'ed \
++ * memory, it *may* be the address of a calling \
++ * routine; if so, print it so that someone tracing \
++ * down the cause of the crash will be able to figure \
++ * out the call path that was taken. \
++ */ \
++ printk_address(addr); \
++ } \
++ } while (0)
++
++ /*
++ * Print function call entries in all stacks, starting at the
++ * current stack address. If the stacks consist of nested
++ * exceptions
++ */
++ for ( ; ; ) {
++ const char *id;
++ unsigned long *estack_end;
++ estack_end = in_exception_stack(cpu, (unsigned long)stack,
++ &used, &id);
++
++ if (estack_end) {
++ printk(" <%s>", id);
++ HANDLE_STACK (stack < estack_end);
++ printk(" <EOE>");
++ /*
++ * We link to the next stack via the
++ * second-to-last pointer (index -2 to end) in the
++ * exception stack:
++ */
++ stack = (unsigned long *) estack_end[-2];
++ continue;
++ }
++ if (irqstack_end) {
++ unsigned long *irqstack;
++ irqstack = irqstack_end -
++ (IRQSTACKSIZE - 64) / sizeof(*irqstack);
++
++ if (stack >= irqstack && stack < irqstack_end) {
++ printk(" <IRQ>");
++ HANDLE_STACK (stack < irqstack_end);
++ /*
++ * We link to the next stack (which would be
++ * the process stack normally) the last
++ * pointer (index -1 to end) in the IRQ stack:
++ */
++ stack = (unsigned long *) (irqstack_end[-1]);
++ irqstack_end = NULL;
++ printk(" <EOI>");
++ continue;
++ }
++ }
++ break;
++ }
++
++ /*
++ * This prints the process stack:
++ */
++ HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
++#undef HANDLE_STACK
++
++ printk("\n");
++}
++
++static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long * rsp)
++{
++ unsigned long *stack;
++ int i;
++ const int cpu = safe_smp_processor_id();
++ unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
++ unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
++
++ // debugging aid: "show_stack(NULL, NULL);" prints the
++ // back trace for this cpu.
++
++ if (rsp == NULL) {
++ if (tsk)
++ rsp = (unsigned long *)tsk->thread.rsp;
++ else
++ rsp = (unsigned long *)&rsp;
++ }
++
++ stack = rsp;
++ for(i=0; i < kstack_depth_to_print; i++) {
++ if (stack >= irqstack && stack <= irqstack_end) {
++ if (stack == irqstack_end) {
++ stack = (unsigned long *) (irqstack_end[-1]);
++ printk(" <EOI> ");
++ }
++ } else {
++ if (((long) stack & (THREAD_SIZE-1)) == 0)
++ break;
++ }
++ if (i && ((i % 4) == 0))
++ printk("\n");
++ printk(" %016lx", *stack++);
++ touch_nmi_watchdog();
++ }
++ show_trace(tsk, regs, rsp);
++}
++
++void show_stack(struct task_struct *tsk, unsigned long * rsp)
++{
++ _show_stack(tsk, NULL, rsp);
++}
++
++/*
++ * The architecture-independent dump_stack generator
++ */
++void dump_stack(void)
++{
++ unsigned long dummy;
++ show_trace(NULL, NULL, &dummy);
++}
++
++EXPORT_SYMBOL(dump_stack);
++
++void show_registers(struct pt_regs *regs)
++{
++ int i;
++ int in_kernel = !user_mode(regs);
++ unsigned long rsp;
++ const int cpu = safe_smp_processor_id();
++ struct task_struct *cur = cpu_pda(cpu)->pcurrent;
++
++ rsp = regs->rsp;
++
++ printk("CPU %d ", cpu);
++ __show_regs(regs);
++ printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
++ cur->comm, cur->pid, task_thread_info(cur), cur);
++
++ /*
++ * When in-kernel, we also print out the stack and code at the
++ * time of the fault..
++ */
++ if (in_kernel) {
++
++ printk("Stack: ");
++ _show_stack(NULL, regs, (unsigned long*)rsp);
++
++ printk("\nCode: ");
++ if (regs->rip < PAGE_OFFSET)
++ goto bad;
++
++ for (i=0; i<20; i++) {
++ unsigned char c;
++ if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
++bad:
++ printk(" Bad RIP value.");
++ break;
++ }
++ printk("%02x ", c);
++ }
++ }
++ printk("\n");
++}
++
++void handle_BUG(struct pt_regs *regs)
++{
++ struct bug_frame f;
++ long len;
++ const char *prefix = "";
++
++ if (user_mode(regs))
++ return;
++ if (__copy_from_user(&f, (const void __user *) regs->rip,
++ sizeof(struct bug_frame)))
++ return;
++ if (f.filename >= 0 ||
++ f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
++ return;
++ len = __strnlen_user((char *)(long)f.filename, PATH_MAX) - 1;
++ if (len < 0 || len >= PATH_MAX)
++ f.filename = (int)(long)"unmapped filename";
++ else if (len > 50) {
++ f.filename += len - 50;
++ prefix = "...";
++ }
++ printk("----------- [cut here ] --------- [please bite here ] ---------\n");
++ printk(KERN_ALERT "Kernel BUG at %s%.50s:%d\n", prefix, (char *)(long)f.filename, f.line);
++}
++
++#ifdef CONFIG_BUG
++void out_of_line_bug(void)
++{
++ BUG();
++}
++EXPORT_SYMBOL(out_of_line_bug);
++#endif
++
++static DEFINE_SPINLOCK(die_lock);
++static int die_owner = -1;
++static unsigned int die_nest_count;
++
++unsigned __kprobes long oops_begin(void)
++{
++ int cpu = safe_smp_processor_id();
++ unsigned long flags;
++
++ /* racy, but better than risking deadlock. */
++ local_irq_save(flags);
++ if (!spin_trylock(&die_lock)) {
++ if (cpu == die_owner)
++ /* nested oops. should stop eventually */;
++ else
++ spin_lock(&die_lock);
++ }
++ die_nest_count++;
++ die_owner = cpu;
++ console_verbose();
++ bust_spinlocks(1);
++ return flags;
++}
++
++void __kprobes oops_end(unsigned long flags)
++{
++ die_owner = -1;
++ bust_spinlocks(0);
++ die_nest_count--;
++ if (die_nest_count)
++ /* We still own the lock */
++ local_irq_restore(flags);
++ else
++ /* Nest count reaches zero, release the lock. */
++ spin_unlock_irqrestore(&die_lock, flags);
++ if (panic_on_oops)
++ panic("Fatal exception");
++}
++
++void __kprobes __die(const char * str, struct pt_regs * regs, long err)
++{
++ static int die_counter;
++ printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
++#ifdef CONFIG_PREEMPT
++ printk("PREEMPT ");
++#endif
++#ifdef CONFIG_SMP
++ printk("SMP ");
++#endif
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ printk("DEBUG_PAGEALLOC");
++#endif
++ printk("\n");
++ notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
++ show_registers(regs);
++ /* Executive summary in case the oops scrolled away */
++ printk(KERN_ALERT "RIP ");
++ printk_address(regs->rip);
++ printk(" RSP <%016lx>\n", regs->rsp);
++ if (kexec_should_crash(current))
++ crash_kexec(regs);
++}
++
++void die(const char * str, struct pt_regs * regs, long err)
++{
++ unsigned long flags = oops_begin();
++
++ handle_BUG(regs);
++ __die(str, regs, err);
++ oops_end(flags);
++ do_exit(SIGSEGV);
++}
++
++#ifdef CONFIG_X86_LOCAL_APIC
++void __kprobes die_nmi(char *str, struct pt_regs *regs)
++{
++ unsigned long flags = oops_begin();
++
++ /*
++ * We are in trouble anyway, let's at least try
++ * to get a message out.
++ */
++ printk(str, safe_smp_processor_id());
++ show_registers(regs);
++ if (kexec_should_crash(current))
++ crash_kexec(regs);
++ if (panic_on_timeout || panic_on_oops)
++ panic("nmi watchdog");
++ printk("console shuts up ...\n");
++ oops_end(flags);
++ nmi_exit();
++ local_irq_enable();
++ do_exit(SIGSEGV);
++}
++#endif
++
++static void __kprobes do_trap(int trapnr, int signr, char *str,
++ struct pt_regs * regs, long error_code,
++ siginfo_t *info)
++{
++ struct task_struct *tsk = current;
++
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = trapnr;
++
++ if (user_mode(regs)) {
++ if (exception_trace && unhandled_signal(tsk, signr))
++ printk(KERN_INFO
++ "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
++ tsk->comm, tsk->pid, str,
++ regs->rip, regs->rsp, error_code);
++
++ if (info)
++ force_sig_info(signr, info, tsk);
++ else
++ force_sig(signr, tsk);
++ return;
++ }
++
++
++ /* kernel trap */
++ {
++ const struct exception_table_entry *fixup;
++ fixup = search_exception_tables(regs->rip);
++ if (fixup)
++ regs->rip = fixup->fixup;
++ else
++ die(str, regs, error_code);
++ return;
++ }
++}
++
++#define DO_ERROR(trapnr, signr, str, name) \
++asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ conditional_sti(regs); \
++ do_trap(trapnr, signr, str, regs, error_code, NULL); \
++}
++
++#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ siginfo_t info; \
++ info.si_signo = signr; \
++ info.si_errno = 0; \
++ info.si_code = sicode; \
++ info.si_addr = (void __user *)siaddr; \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ conditional_sti(regs); \
++ do_trap(trapnr, signr, str, regs, error_code, &info); \
++}
++
++DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->rip)
++DO_ERROR( 4, SIGSEGV, "overflow", overflow)
++DO_ERROR( 5, SIGSEGV, "bounds", bounds)
++DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
++DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
++DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
++DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
++DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
++DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
++DO_ERROR(18, SIGSEGV, "reserved", reserved)
++
++/* Runs on IST stack */
++asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
++{
++ if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
++ 12, SIGBUS) == NOTIFY_STOP)
++ return;
++ preempt_conditional_sti(regs);
++ do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
++ preempt_conditional_cli(regs);
++}
++
++asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
++{
++ static const char str[] = "double fault";
++ struct task_struct *tsk = current;
++
++ /* Return not checked because a double fault cannot be ignored */
++ notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
++
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 8;
++
++ /* This is always a kernel trap and never fixable (and thus must
++ never return). */
++ for (;;)
++ die(str, regs, error_code);
++}
++
++asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
++ long error_code)
++{
++ struct task_struct *tsk = current;
++
++ conditional_sti(regs);
++
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 13;
++
++ if (user_mode(regs)) {
++ if (exception_trace && unhandled_signal(tsk, SIGSEGV))
++ printk(KERN_INFO
++ "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
++ tsk->comm, tsk->pid,
++ regs->rip, regs->rsp, error_code);
++
++ force_sig(SIGSEGV, tsk);
++ return;
++ }
++
++ /* kernel gp */
++ {
++ const struct exception_table_entry *fixup;
++ fixup = search_exception_tables(regs->rip);
++ if (fixup) {
++ regs->rip = fixup->fixup;
++ return;
++ }
++ if (notify_die(DIE_GPF, "general protection fault", regs,
++ error_code, 13, SIGSEGV) == NOTIFY_STOP)
++ return;
++ die("general protection fault", regs, error_code);
++ }
++}
++
++static __kprobes void
++mem_parity_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
++ printk("You probably have a hardware problem with your RAM chips\n");
++
++#if 0 /* XEN */
++ /* Clear and disable the memory parity error line. */
++ reason = (reason & 0xf) | 4;
++ outb(reason, 0x61);
++#endif /* XEN */
++}
++
++static __kprobes void
++io_check_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk("NMI: IOCK error (debug interrupt?)\n");
++ show_registers(regs);
++
++#if 0 /* XEN */
++ /* Re-enable the IOCK line, wait for a few seconds */
++ reason = (reason & 0xf) | 8;
++ outb(reason, 0x61);
++ mdelay(2000);
++ reason &= ~8;
++ outb(reason, 0x61);
++#endif /* XEN */
++}
++
++static __kprobes void
++unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
++{ printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
++ printk("Dazed and confused, but trying to continue\n");
++ printk("Do you have a strange power saving mode enabled?\n");
++}
++
++/* Runs on IST stack. This code must keep interrupts off all the time.
++ Nested NMIs are prevented by the CPU. */
++asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
++{
++ unsigned char reason = 0;
++ int cpu;
++
++ cpu = smp_processor_id();
++
++ /* Only the BSP gets external NMIs from the system. */
++ if (!cpu)
++ reason = get_nmi_reason();
++
++ if (!(reason & 0xc0)) {
++ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
++ == NOTIFY_STOP)
++ return;
++#ifdef CONFIG_X86_LOCAL_APIC
++ /*
++ * Ok, so this is none of the documented NMI sources,
++ * so it must be the NMI watchdog.
++ */
++ if (nmi_watchdog > 0) {
++ nmi_watchdog_tick(regs,reason);
++ return;
++ }
++#endif
++ unknown_nmi_error(reason, regs);
++ return;
++ }
++ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
++ return;
++
++ /* AK: following checks seem to be broken on modern chipsets. FIXME */
++
++ if (reason & 0x80)
++ mem_parity_error(reason, regs);
++ if (reason & 0x40)
++ io_check_error(reason, regs);
++}
++
++/* runs on IST stack. */
++asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
++{
++ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
++ return;
++ }
++ preempt_conditional_sti(regs);
++ do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
++ preempt_conditional_cli(regs);
++}
++
++/* Help handler running on IST stack to switch back to user stack
++ for scheduling or signal handling. The actual stack switch is done in
++ entry.S */
++asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
++{
++ struct pt_regs *regs = eregs;
++ /* Did already sync */
++ if (eregs == (struct pt_regs *)eregs->rsp)
++ ;
++ /* Exception from user space */
++ else if (user_mode(eregs))
++ regs = task_pt_regs(current);
++ /* Exception from kernel and interrupts are enabled. Move to
++ kernel process stack. */
++ else if (eregs->eflags & X86_EFLAGS_IF)
++ regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
++ if (eregs != regs)
++ *regs = *eregs;
++ return regs;
++}
++
++/* runs on IST stack. */
++asmlinkage void __kprobes do_debug(struct pt_regs * regs,
++ unsigned long error_code)
++{
++ unsigned long condition;
++ struct task_struct *tsk = current;
++ siginfo_t info;
++
++ get_debugreg(condition, 6);
++
++ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
++ SIGTRAP) == NOTIFY_STOP)
++ return;
++
++ preempt_conditional_sti(regs);
++
++ /* Mask out spurious debug traps due to lazy DR7 setting */
++ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
++ if (!tsk->thread.debugreg7) {
++ goto clear_dr7;
++ }
++ }
++
++ tsk->thread.debugreg6 = condition;
++
++ /* Mask out spurious TF errors due to lazy TF clearing */
++ if (condition & DR_STEP) {
++ /*
++ * The TF error should be masked out only if the current
++ * process is not traced and if the TRAP flag has been set
++ * previously by a tracing process (condition detected by
++ * the PT_DTRACE flag); remember that the i386 TRAP flag
++ * can be modified by the process itself in user mode,
++ * allowing programs to debug themselves without the ptrace()
++ * interface.
++ */
++ if (!user_mode(regs))
++ goto clear_TF_reenable;
++ /*
++ * Was the TF flag set by a debugger? If so, clear it now,
++ * so that register information is correct.
++ */
++ if (tsk->ptrace & PT_DTRACE) {
++ regs->eflags &= ~TF_MASK;
++ tsk->ptrace &= ~PT_DTRACE;
++ }
++ }
++
++ /* Ok, finally something we can handle */
++ tsk->thread.trap_no = 1;
++ tsk->thread.error_code = error_code;
++ info.si_signo = SIGTRAP;
++ info.si_errno = 0;
++ info.si_code = TRAP_BRKPT;
++ info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
++ force_sig_info(SIGTRAP, &info, tsk);
++
++clear_dr7:
++ set_debugreg(0UL, 7);
++ preempt_conditional_cli(regs);
++ return;
++
++clear_TF_reenable:
++ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++ regs->eflags &= ~TF_MASK;
++ preempt_conditional_cli(regs);
++}
++
++static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
++{
++ const struct exception_table_entry *fixup;
++ fixup = search_exception_tables(regs->rip);
++ if (fixup) {
++ regs->rip = fixup->fixup;
++ return 1;
++ }
++ notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
++ /* Illegal floating point operation in the kernel */
++ current->thread.trap_no = trapnr;
++ die(str, regs, 0);
++ return 0;
++}
++
++/*
++ * Note that we play around with the 'TS' bit in an attempt to get
++ * the correct behaviour even in the presence of the asynchronous
++ * IRQ13 behaviour
++ */
++asmlinkage void do_coprocessor_error(struct pt_regs *regs)
++{
++ void __user *rip = (void __user *)(regs->rip);
++ struct task_struct * task;
++ siginfo_t info;
++ unsigned short cwd, swd;
++
++ conditional_sti(regs);
++ if (!user_mode(regs) &&
++ kernel_math_error(regs, "kernel x87 math error", 16))
++ return;
++
++ /*
++ * Save the info for the exception handler and clear the error.
++ */
++ task = current;
++ save_init_fpu(task);
++ task->thread.trap_no = 16;
++ task->thread.error_code = 0;
++ info.si_signo = SIGFPE;
++ info.si_errno = 0;
++ info.si_code = __SI_FAULT;
++ info.si_addr = rip;
++ /*
++ * (~cwd & swd) will mask out exceptions that are not set to unmasked
++ * status. 0x3f is the exception bits in these regs, 0x200 is the
++ * C1 reg you need in case of a stack fault, 0x040 is the stack
++ * fault bit. We should only be taking one exception at a time,
++ * so if this combination doesn't produce any single exception,
++ * then we have a bad program that isn't synchronizing its FPU usage
++ * and it will suffer the consequences since we won't be able to
++ * fully reproduce the context of the exception
++ */
++ cwd = get_fpu_cwd(task);
++ swd = get_fpu_swd(task);
++ switch (swd & ~cwd & 0x3f) {
++ case 0x000:
++ default:
++ break;
++ case 0x001: /* Invalid Op */
++ /*
++ * swd & 0x240 == 0x040: Stack Underflow
++ * swd & 0x240 == 0x240: Stack Overflow
++ * User must clear the SF bit (0x40) if set
++ */
++ info.si_code = FPE_FLTINV;
++ break;
++ case 0x002: /* Denormalize */
++ case 0x010: /* Underflow */
++ info.si_code = FPE_FLTUND;
++ break;
++ case 0x004: /* Zero Divide */
++ info.si_code = FPE_FLTDIV;
++ break;
++ case 0x008: /* Overflow */
++ info.si_code = FPE_FLTOVF;
++ break;
++ case 0x020: /* Precision */
++ info.si_code = FPE_FLTRES;
++ break;
++ }
++ force_sig_info(SIGFPE, &info, task);
++}
++
++asmlinkage void bad_intr(void)
++{
++ printk("bad interrupt");
++}
++
++asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
++{
++ void __user *rip = (void __user *)(regs->rip);
++ struct task_struct * task;
++ siginfo_t info;
++ unsigned short mxcsr;
++
++ conditional_sti(regs);
++ if (!user_mode(regs) &&
++ kernel_math_error(regs, "kernel simd math error", 19))
++ return;
++
++ /*
++ * Save the info for the exception handler and clear the error.
++ */
++ task = current;
++ save_init_fpu(task);
++ task->thread.trap_no = 19;
++ task->thread.error_code = 0;
++ info.si_signo = SIGFPE;
++ info.si_errno = 0;
++ info.si_code = __SI_FAULT;
++ info.si_addr = rip;
++ /*
++ * The SIMD FPU exceptions are handled a little differently, as there
++ * is only a single status/control register. Thus, to determine which
++ * unmasked exception was caught we must mask the exception mask bits
++ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
++ */
++ mxcsr = get_fpu_mxcsr(task);
++ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
++ case 0x000:
++ default:
++ break;
++ case 0x001: /* Invalid Op */
++ info.si_code = FPE_FLTINV;
++ break;
++ case 0x002: /* Denormalize */
++ case 0x010: /* Underflow */
++ info.si_code = FPE_FLTUND;
++ break;
++ case 0x004: /* Zero Divide */
++ info.si_code = FPE_FLTDIV;
++ break;
++ case 0x008: /* Overflow */
++ info.si_code = FPE_FLTOVF;
++ break;
++ case 0x020: /* Precision */
++ info.si_code = FPE_FLTRES;
++ break;
++ }
++ force_sig_info(SIGFPE, &info, task);
++}
++
++asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
++{
++}
++
++#if 0
++asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
++{
++}
++#endif
++
++asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
++{
++}
++
++/*
++ * 'math_state_restore()' saves the current math information in the
++ * old math state array, and gets the new ones from the current task
++ *
++ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
++ * Don't touch unless you *really* know how it works.
++ */
++asmlinkage void math_state_restore(void)
++{
++ struct task_struct *me = current;
++ /* clts(); */ /* 'clts' is done for us by Xen during virtual trap. */
++
++ if (!used_math())
++ init_fpu(me);
++ restore_fpu_checking(&me->thread.i387.fxsave);
++ task_thread_info(me)->status |= TS_USEDFPU;
++}
++
++
++/*
++ * NB. All these are "interrupt gates" (i.e. events_mask is set) because we
++ * specify <dpl>|4 in the second field.
++ */
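++/*
++ * Each entry is { vector, flags, code selector, handler address }, where the
++ * flags carry the DPL in the low bits plus 4 to mask event delivery. The
++ * all-zero entry terminates the table for HYPERVISOR_set_trap_table() and
++ * smp_trap_init() below.
++ */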
++static trap_info_t trap_table[] = {
++ { 0, 0|4, __KERNEL_CS, (unsigned long)divide_error },
++ { 1, 0|4, __KERNEL_CS, (unsigned long)debug },
++ { 3, 3|4, __KERNEL_CS, (unsigned long)int3 },
++ { 4, 3|4, __KERNEL_CS, (unsigned long)overflow },
++ { 5, 0|4, __KERNEL_CS, (unsigned long)bounds },
++ { 6, 0|4, __KERNEL_CS, (unsigned long)invalid_op },
++ { 7, 0|4, __KERNEL_CS, (unsigned long)device_not_available },
++ { 9, 0|4, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun},
++ { 10, 0|4, __KERNEL_CS, (unsigned long)invalid_TSS },
++ { 11, 0|4, __KERNEL_CS, (unsigned long)segment_not_present },
++ { 12, 0|4, __KERNEL_CS, (unsigned long)stack_segment },
++ { 13, 0|4, __KERNEL_CS, (unsigned long)general_protection },
++ { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault },
++ { 15, 0|4, __KERNEL_CS, (unsigned long)spurious_interrupt_bug },
++ { 16, 0|4, __KERNEL_CS, (unsigned long)coprocessor_error },
++ { 17, 0|4, __KERNEL_CS, (unsigned long)alignment_check },
++#ifdef CONFIG_X86_MCE
++ { 18, 0|4, __KERNEL_CS, (unsigned long)machine_check },
++#endif
++ { 19, 0|4, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
++#ifdef CONFIG_IA32_EMULATION
++ { IA32_SYSCALL_VECTOR, 3|4, __KERNEL_CS, (unsigned long)ia32_syscall},
++#endif
++ { 0, 0, 0, 0 }
++};
++
++void __init trap_init(void)
++{
++ int ret;
++
++ ret = HYPERVISOR_set_trap_table(trap_table);
++
++ if (ret)
++ printk("HYPERVISOR_set_trap_table faild: error %d\n",
++ ret);
++
++ /*
++ * Should be a barrier for any external CPU state.
++ */
++ cpu_init();
++}
++
++void smp_trap_init(trap_info_t *trap_ctxt)
++{
++ trap_info_t *t = trap_table;
++
++ for (t = trap_table; t->address; t++) {
++ trap_ctxt[t->vector].flags = t->flags;
++ trap_ctxt[t->vector].cs = t->cs;
++ trap_ctxt[t->vector].address = t->address;
++ }
++}
++
++
++/* Actual parsing is done early in setup.c. */
++static int __init oops_dummy(char *s)
++{
++ panic_on_oops = 1;
++ return 1;
++}
++__setup("oops=", oops_dummy);
++
++static int __init kstack_setup(char *s)
++{
++ kstack_depth_to_print = simple_strtoul(s,NULL,0);
++ return 1;
++}
++__setup("kstack=", kstack_setup);
++
++#ifdef CONFIG_STACK_UNWIND
++static int __init call_trace_setup(char *s)
++{
++ if (strcmp(s, "old") == 0)
++ call_trace = -1;
++ else if (strcmp(s, "both") == 0)
++ call_trace = 0;
++ else if (strcmp(s, "newfallback") == 0)
++ call_trace = 1;
++ else if (strcmp(s, "new") == 0)
++ call_trace = 2;
++ return 1;
++}
++__setup("call_trace=", call_trace_setup);
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/vsyscall-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,239 @@
++/*
++ * linux/arch/x86_64/kernel/vsyscall.c
++ *
++ * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
++ * Copyright 2003 Andi Kleen, SuSE Labs.
++ *
++ * Thanks to hpa@transmeta.com for some useful hint.
++ * Special thanks to Ingo Molnar for his early experience with
++ * a different vsyscall implementation for Linux/IA32 and for the name.
++ *
++ * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
++ * at virtual address -10Mbyte+1024bytes etc... There are at max 4
++ * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
++ * jumping out of line if necessary. We cannot add more with this
++ * mechanism because older kernels won't return -ENOSYS.
++ * If we want more than four we need a vDSO.
++ *
++ * Note: the concept clashes with user mode linux. If you use UML and
++ * want per guest time just set the kernel.vsyscall64 sysctl to 0.
++ */
++
++#include <linux/time.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/timer.h>
++#include <linux/seqlock.h>
++#include <linux/jiffies.h>
++#include <linux/sysctl.h>
++
++#include <asm/vsyscall.h>
++#include <asm/pgtable.h>
++#include <asm/page.h>
++#include <asm/fixmap.h>
++#include <asm/errno.h>
++#include <asm/io.h>
++
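++/* Place each vsyscall in its own .vsyscall_<nr> section; the kernel linker
++   script pins these sections at the fixed vsyscall addresses. */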
++#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
++
++int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
++seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
++
++#include <asm/unistd.h>
++
++static __always_inline void timeval_normalize(struct timeval * tv)
++{
++ time_t __sec;
++
++ __sec = tv->tv_usec / 1000000;
++ if (__sec) {
++ tv->tv_usec %= 1000000;
++ tv->tv_sec += __sec;
++ }
++}
++
++static __always_inline void do_vgettimeofday(struct timeval * tv)
++{
++ long sequence, t;
++ unsigned long sec, usec;
++
++ do {
++ sequence = read_seqbegin(&__xtime_lock);
++
++ sec = __xtime.tv_sec;
++ usec = (__xtime.tv_nsec / 1000) +
++ (__jiffies - __wall_jiffies) * (1000000 / HZ);
++
++ if (__vxtime.mode != VXTIME_HPET) {
++ t = get_cycles_sync();
++ if (t < __vxtime.last_tsc)
++ t = __vxtime.last_tsc;
++ usec += ((t - __vxtime.last_tsc) *
++ __vxtime.tsc_quot) >> 32;
++ /* See comment in x86_64 do_gettimeofday. */
++ } else {
++ usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
++ __vxtime.last) * __vxtime.quot) >> 32;
++ }
++ } while (read_seqretry(&__xtime_lock, sequence));
++
++ tv->tv_sec = sec + usec / 1000000;
++ tv->tv_usec = usec % 1000000;
++}
++
++/* RED-PEN may want to re-add seq locking, but then the variable should be write-once. */
++static __always_inline void do_get_tz(struct timezone * tz)
++{
++ *tz = __sys_tz;
++}
++
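++/*
++ * Syscall fallbacks. The vsysc1/vsysc2 labels mark the syscall instructions
++ * so that vsyscall_sysctl_change() below can patch them: they are NOPed out
++ * while the fast vsyscall path is enabled and restored when kernel.vsyscall64
++ * is set to 0.
++ */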
++static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
++{
++ int ret;
++ asm volatile("vsysc2: syscall"
++ : "=a" (ret)
++ : "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
++ return ret;
++}
++
++static __always_inline long time_syscall(long *t)
++{
++ long secs;
++ asm volatile("vsysc1: syscall"
++ : "=a" (secs)
++ : "0" (__NR_time),"D" (t) : __syscall_clobber);
++ return secs;
++}
++
++int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
++{
++ if (!__sysctl_vsyscall)
++ return gettimeofday(tv,tz);
++ if (tv)
++ do_vgettimeofday(tv);
++ if (tz)
++ do_get_tz(tz);
++ return 0;
++}
++
++/* This will break when the xtime seconds get inaccurate, but that is
++ * unlikely */
++time_t __vsyscall(1) vtime(time_t *t)
++{
++ if (!__sysctl_vsyscall)
++ return time_syscall(t);
++ else if (t)
++ *t = __xtime.tv_sec;
++ return __xtime.tv_sec;
++}
++
++long __vsyscall(2) venosys_0(void)
++{
++ return -ENOSYS;
++}
++
++long __vsyscall(3) venosys_1(void)
++{
++ return -ENOSYS;
++}
++
++#ifdef CONFIG_SYSCTL
++
++#define SYSCALL 0x050f
++#define NOP2 0x9090
++
++/*
++ * NOP out syscall in vsyscall page when not needed.
++ */
++static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ extern u16 vsysc1, vsysc2;
++ u16 *map1, *map2;
++ int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
++ if (!write)
++ return ret;
++ /* gcc has some trouble with __va(__pa()), so just do it this
++ way. */
++ map1 = ioremap(__pa_symbol(&vsysc1), 2);
++ if (!map1)
++ return -ENOMEM;
++ map2 = ioremap(__pa_symbol(&vsysc2), 2);
++ if (!map2) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ if (!sysctl_vsyscall) {
++ *map1 = SYSCALL;
++ *map2 = SYSCALL;
++ } else {
++ *map1 = NOP2;
++ *map2 = NOP2;
++ }
++ iounmap(map2);
++out:
++ iounmap(map1);
++ return ret;
++}
++
++static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
++ void __user *oldval, size_t __user *oldlenp,
++ void __user *newval, size_t newlen,
++ void **context)
++{
++ return -ENOSYS;
++}
++
++static ctl_table kernel_table2[] = {
++ { .ctl_name = 99, .procname = "vsyscall64",
++ .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
++ .strategy = vsyscall_sysctl_nostrat,
++ .proc_handler = vsyscall_sysctl_change },
++ { 0, }
++};
++
++static ctl_table kernel_root_table2[] = {
++ { .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
++ .child = kernel_table2 },
++ { 0 },
++};
++
++#endif
++
++static void __init map_vsyscall(void)
++{
++ extern char __vsyscall_0;
++ unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
++
++ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
++}
++
++#ifdef CONFIG_XEN
++static void __init map_vsyscall_user(void)
++{
++ extern void __set_fixmap_user(enum fixed_addresses, unsigned long, pgprot_t);
++ extern char __vsyscall_0;
++ unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
++
++ __set_fixmap_user(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
++}
++#endif
++
++static int __init vsyscall_init(void)
++{
++ BUG_ON(((unsigned long) &vgettimeofday !=
++ VSYSCALL_ADDR(__NR_vgettimeofday)));
++ BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
++ BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
++ map_vsyscall();
++#ifdef CONFIG_XEN
++ map_vsyscall_user();
++	sysctl_vsyscall = 0; /* disable vgettimeofday() */
++#endif
++#ifdef CONFIG_SYSCTL
++ register_sysctl_table(kernel_root_table2, 0);
++#endif
++ return 0;
++}
++
++__initcall(vsyscall_init);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/kernel/xen_entry.S 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,40 @@
++/*
++ * Copied from arch/xen/i386/kernel/entry.S
++ */
++/* Offsets into shared_info_t. */
++#define evtchn_upcall_pending /* 0 */
++#define evtchn_upcall_mask 1
++
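++/* log2(sizeof(vcpu_info_t)): XEN_GET_VCPU_INFO scales the CPU number by
++   1 << sizeof_vcpu_shift to index vcpu_info[] inside the shared info page. */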
++#define sizeof_vcpu_shift 6
++
++#ifdef CONFIG_SMP
++//#define preempt_disable(reg) incl threadinfo_preempt_count(reg)
++//#define preempt_enable(reg) decl threadinfo_preempt_count(reg)
++#define preempt_disable(reg)
++#define preempt_enable(reg)
++#define XEN_GET_VCPU_INFO(reg) preempt_disable(%rbp) ; \
++ movq %gs:pda_cpunumber,reg ; \
++ shl $32, reg ; \
++ shr $32-sizeof_vcpu_shift,reg ; \
++ addq HYPERVISOR_shared_info,reg
++#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%rbp) ; \
++#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
++#else
++#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
++#define XEN_PUT_VCPU_INFO(reg)
++#define XEN_PUT_VCPU_INFO_fixup
++#endif
++
++#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
++#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
++#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
++ XEN_LOCKED_BLOCK_EVENTS(reg) ; \
++ XEN_PUT_VCPU_INFO(reg)
++#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
++ XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
++ XEN_PUT_VCPU_INFO(reg)
++#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
++
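++/* Passed to HYPERVISOR_iret by entry.S to tell Xen that the guest is
++   returning from the SYSCALL entry path. */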
++VGCF_IN_SYSCALL = (1<<8)
++
++
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/mm/fault-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,724 @@
++/*
++ * linux/arch/x86-64/mm/fault.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/tty.h>
++#include <linux/vt_kern.h> /* For unblank_screen() */
++#include <linux/compiler.h>
++#include <linux/module.h>
++#include <linux/kprobes.h>
++
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgalloc.h>
++#include <asm/smp.h>
++#include <asm/tlbflush.h>
++#include <asm/proto.h>
++#include <asm/kdebug.h>
++#include <asm-generic/sections.h>
++
++/* Page fault error code bits */
++#define PF_PROT (1<<0) /* or no page found */
++#define PF_WRITE (1<<1)
++#define PF_USER (1<<2)
++#define PF_RSVD (1<<3)
++#define PF_INSTR (1<<4)
++
++#ifdef CONFIG_KPROBES
++ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
++
++/* Hook to register for page fault notifications */
++int register_page_fault_notifier(struct notifier_block *nb)
++{
++ vmalloc_sync_all();
++ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
++}
++
++int unregister_page_fault_notifier(struct notifier_block *nb)
++{
++ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
++}
++
++static inline int notify_page_fault(enum die_val val, const char *str,
++ struct pt_regs *regs, long err, int trap, int sig)
++{
++ struct die_args args = {
++ .regs = regs,
++ .str = str,
++ .err = err,
++ .trapnr = trap,
++ .signr = sig
++ };
++ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
++}
++#else
++static inline int notify_page_fault(enum die_val val, const char *str,
++ struct pt_regs *regs, long err, int trap, int sig)
++{
++ return NOTIFY_DONE;
++}
++#endif
++
++void bust_spinlocks(int yes)
++{
++ int loglevel_save = console_loglevel;
++ if (yes) {
++ oops_in_progress = 1;
++ } else {
++#ifdef CONFIG_VT
++ unblank_screen();
++#endif
++ oops_in_progress = 0;
++ /*
++ * OK, the message is on the console. Now we call printk()
++ * without oops_in_progress set so that printk will give klogd
++ * a poke. Hold onto your hats...
++ */
++ console_loglevel = 15; /* NMI oopser may have shut the console up */
++ printk(" ");
++ console_loglevel = loglevel_save;
++ }
++}
++
++/* Sometimes the CPU reports invalid exceptions on prefetch.
++ Check that here and ignore.
++ Opcode checker based on code by Richard Brunner */
++static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
++ unsigned long error_code)
++{
++ unsigned char *instr;
++ int scan_more = 1;
++ int prefetch = 0;
++ unsigned char *max_instr;
++
++	/* If it was an exec fault, ignore it */
++ if (error_code & PF_INSTR)
++ return 0;
++
++ instr = (unsigned char *)convert_rip_to_linear(current, regs);
++ max_instr = instr + 15;
++
++ if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
++ return 0;
++
++ while (scan_more && instr < max_instr) {
++ unsigned char opcode;
++ unsigned char instr_hi;
++ unsigned char instr_lo;
++
++ if (__get_user(opcode, instr))
++ break;
++
++ instr_hi = opcode & 0xf0;
++ instr_lo = opcode & 0x0f;
++ instr++;
++
++ switch (instr_hi) {
++ case 0x20:
++ case 0x30:
++ /* Values 0x26,0x2E,0x36,0x3E are valid x86
++ prefixes. In long mode, the CPU will signal
++ invalid opcode if some of these prefixes are
++ present so we will never get here anyway */
++ scan_more = ((instr_lo & 7) == 0x6);
++ break;
++
++ case 0x40:
++ /* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes
++ Need to figure out under what instruction mode the
++ instruction was issued ... */
++ /* Could check the LDT for lm, but for now it's good
++ enough to assume that long mode only uses well known
++ segments or kernel. */
++ scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
++ break;
++
++ case 0x60:
++ /* 0x64 thru 0x67 are valid prefixes in all modes. */
++ scan_more = (instr_lo & 0xC) == 0x4;
++ break;
++ case 0xF0:
++ /* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
++ scan_more = !instr_lo || (instr_lo>>1) == 1;
++ break;
++ case 0x00:
++ /* Prefetch instruction is 0x0F0D or 0x0F18 */
++ scan_more = 0;
++ if (__get_user(opcode, instr))
++ break;
++ prefetch = (instr_lo == 0xF) &&
++ (opcode == 0x0D || opcode == 0x18);
++ break;
++ default:
++ scan_more = 0;
++ break;
++ }
++ }
++ return prefetch;
++}
++
++static int bad_address(void *p)
++{
++ unsigned long dummy;
++ return __get_user(dummy, (unsigned long *)p);
++}
++
++void dump_pagetable(unsigned long address)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
++ pgd += pgd_index(address);
++ if (bad_address(pgd)) goto bad;
++ printk("PGD %lx ", pgd_val(*pgd));
++ if (!pgd_present(*pgd)) goto ret;
++
++ pud = pud_offset(pgd, address);
++ if (bad_address(pud)) goto bad;
++ printk("PUD %lx ", pud_val(*pud));
++ if (!pud_present(*pud)) goto ret;
++
++ pmd = pmd_offset(pud, address);
++ if (bad_address(pmd)) goto bad;
++ printk("PMD %lx ", pmd_val(*pmd));
++ if (!pmd_present(*pmd)) goto ret;
++
++ pte = pte_offset_kernel(pmd, address);
++ if (bad_address(pte)) goto bad;
++ printk("PTE %lx", pte_val(*pte));
++ret:
++ printk("\n");
++ return;
++bad:
++ printk("BAD\n");
++}
++
++static const char errata93_warning[] =
++KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
++KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
++KERN_ERR "******* Please consider a BIOS update.\n"
++KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
++
++/* Workaround for K8 erratum #93 & buggy BIOS.
++ BIOS SMM functions are required to use a specific workaround
++ to avoid corruption of the 64bit RIP register on C stepping K8.
++ A lot of BIOS that didn't get tested properly miss this.
++ The OS sees this as a page fault with the upper 32bits of RIP cleared.
++ Try to work around it here.
++ Note we only handle faults in kernel here. */
++
++static int is_errata93(struct pt_regs *regs, unsigned long address)
++{
++ static int warned;
++ if (address != regs->rip)
++ return 0;
++ if ((address >> 32) != 0)
++ return 0;
++ address |= 0xffffffffUL << 32;
++ if ((address >= (u64)_stext && address <= (u64)_etext) ||
++ (address >= MODULES_VADDR && address <= MODULES_END)) {
++ if (!warned) {
++ printk(errata93_warning);
++ warned = 1;
++ }
++ regs->rip = address;
++ return 1;
++ }
++ return 0;
++}
++
++int unhandled_signal(struct task_struct *tsk, int sig)
++{
++ if (tsk->pid == 1)
++ return 1;
++ if (tsk->ptrace & PT_PTRACED)
++ return 0;
++ return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
++ (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
++}
++
++static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
++ unsigned long error_code)
++{
++ unsigned long flags = oops_begin();
++ struct task_struct *tsk;
++
++ printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
++ current->comm, address);
++ dump_pagetable(address);
++ tsk = current;
++ tsk->thread.cr2 = address;
++ tsk->thread.trap_no = 14;
++ tsk->thread.error_code = error_code;
++ __die("Bad pagetable", regs, error_code);
++ oops_end(flags);
++ do_exit(SIGKILL);
++}
++
++/*
++ * Handle a fault on the vmalloc area
++ *
++ * This assumes no large pages in there.
++ */
++static int vmalloc_fault(unsigned long address)
++{
++ pgd_t *pgd, *pgd_ref;
++ pud_t *pud, *pud_ref;
++ pmd_t *pmd, *pmd_ref;
++ pte_t *pte, *pte_ref;
++
++ /* Copy kernel mappings over when needed. This can also
++	   happen within a race in page table update. In the latter
++ case just flush. */
++
++ /* On Xen the line below does not always work. Needs investigating! */
++ /*pgd = pgd_offset(current->mm ?: &init_mm, address);*/
++ pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
++ pgd += pgd_index(address);
++ pgd_ref = pgd_offset_k(address);
++ if (pgd_none(*pgd_ref))
++ return -1;
++ if (pgd_none(*pgd))
++ set_pgd(pgd, *pgd_ref);
++ else
++ BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
++
++ /* Below here mismatches are bugs because these lower tables
++ are shared */
++
++ pud = pud_offset(pgd, address);
++ pud_ref = pud_offset(pgd_ref, address);
++ if (pud_none(*pud_ref))
++ return -1;
++ if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
++ BUG();
++ pmd = pmd_offset(pud, address);
++ pmd_ref = pmd_offset(pud_ref, address);
++ if (pmd_none(*pmd_ref))
++ return -1;
++ if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
++ BUG();
++ pte_ref = pte_offset_kernel(pmd_ref, address);
++ if (!pte_present(*pte_ref))
++ return -1;
++ pte = pte_offset_kernel(pmd, address);
++ /* Don't use pte_page here, because the mappings can point
++ outside mem_map, and the NUMA hash lookup cannot handle
++ that. */
++ if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
++ BUG();
++ return 0;
++}
++
++int page_fault_trace = 0;
++int exception_trace = 1;
++
++
++#define MEM_VERBOSE 1
++
++#ifdef MEM_VERBOSE
++#define MEM_LOG(_f, _a...) \
++ printk("fault.c:[%d]-> " _f "\n", \
++ __LINE__ , ## _a )
++#else
++#define MEM_LOG(_f, _a...) ((void)0)
++#endif
++
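++/*
++ * Check whether a fault is spurious: the page tables already permit the
++ * access (for instance after a R/O -> R/W change that this CPU had not yet
++ * observed), in which case the faulting instruction can simply be retried.
++ */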
++static int spurious_fault(struct pt_regs *regs,
++ unsigned long address,
++ unsigned long error_code)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++#ifdef CONFIG_XEN
++ /* Faults in hypervisor area are never spurious. */
++ if ((address >= HYPERVISOR_VIRT_START) &&
++ (address < HYPERVISOR_VIRT_END))
++ return 0;
++#endif
++
++ /* Reserved-bit violation or user access to kernel space? */
++ if (error_code & (PF_RSVD|PF_USER))
++ return 0;
++
++ pgd = init_mm.pgd + pgd_index(address);
++ if (!pgd_present(*pgd))
++ return 0;
++
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return 0;
++
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return 0;
++
++ pte = pte_offset_kernel(pmd, address);
++ if (!pte_present(*pte))
++ return 0;
++ if ((error_code & PF_WRITE) && !pte_write(*pte))
++ return 0;
++ if ((error_code & PF_INSTR) && (pte_val(*pte) & _PAGE_NX))
++ return 0;
++
++ return 1;
++}
++
++/*
++ * This routine handles page faults. It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ */
++asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
++ unsigned long error_code)
++{
++ struct task_struct *tsk;
++ struct mm_struct *mm;
++ struct vm_area_struct * vma;
++ unsigned long address;
++ const struct exception_table_entry *fixup;
++ int write;
++ unsigned long flags;
++ siginfo_t info;
++
++ if (!user_mode(regs))
++ error_code &= ~PF_USER; /* means kernel */
++
++ tsk = current;
++ mm = tsk->mm;
++ prefetchw(&mm->mmap_sem);
++
++ /* get the address */
++ address = current_vcpu_info()->arch.cr2;
++
++ info.si_code = SEGV_MAPERR;
++
++
++ /*
++ * We fault-in kernel-space virtual memory on-demand. The
++ * 'reference' page table is init_mm.pgd.
++ *
++ * NOTE! We MUST NOT take any locks for this case. We may
++ * be in an interrupt or a critical region, and should
++ * only copy the information from the master page table,
++ * nothing more.
++ *
++ * This verifies that the fault happens in kernel space
++ * (error_code & 4) == 0, and that the fault was not a
++ * protection error (error_code & 9) == 0.
++ */
++ if (unlikely(address >= TASK_SIZE64)) {
++ /*
++ * Don't check for the module range here: its PML4
++ * is always initialized because it's shared with the main
++ * kernel text. Only vmalloc may need PML4 syncups.
++ */
++ if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
++ ((address >= VMALLOC_START && address < VMALLOC_END))) {
++ if (vmalloc_fault(address) >= 0)
++ return;
++ }
++ /* Can take a spurious fault if mapping changes R/O -> R/W. */
++ if (spurious_fault(regs, address, error_code))
++ return;
++ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++ SIGSEGV) == NOTIFY_STOP)
++ return;
++ /*
++ * Don't take the mm semaphore here. If we fixup a prefetch
++ * fault we could otherwise deadlock.
++ */
++ goto bad_area_nosemaphore;
++ }
++
++ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++ SIGSEGV) == NOTIFY_STOP)
++ return;
++
++ if (likely(regs->eflags & X86_EFLAGS_IF))
++ local_irq_enable();
++
++ if (unlikely(page_fault_trace))
++ printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
++ regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
++
++ if (unlikely(error_code & PF_RSVD))
++ pgtable_bad(address, regs, error_code);
++
++ /*
++ * If we're in an interrupt or have no user
++ * context, we must not take the fault..
++ */
++ if (unlikely(in_atomic() || !mm))
++ goto bad_area_nosemaphore;
++
++ again:
++ /* When running in the kernel we expect faults to occur only to
++ * addresses in user space. All other faults represent errors in the
++	 * kernel and should generate an OOPS. Unfortunately, in the case of an
++ * erroneous fault occurring in a code path which already holds mmap_sem
++ * we will deadlock attempting to validate the fault against the
++ * address space. Luckily the kernel only validly references user
++ * space from well defined areas of code, which are listed in the
++ * exceptions table.
++ *
++ * As the vast majority of faults will be valid we will only perform
++	 * the source reference check when there is a possibility of a deadlock.
++ * Attempt to lock the address space, if we cannot we then validate the
++ * source. If this is invalid we can skip the address space check,
++ * thus avoiding the deadlock.
++ */
++ if (!down_read_trylock(&mm->mmap_sem)) {
++ if ((error_code & PF_USER) == 0 &&
++ !search_exception_tables(regs->rip))
++ goto bad_area_nosemaphore;
++ down_read(&mm->mmap_sem);
++ }
++
++ vma = find_vma(mm, address);
++ if (!vma)
++ goto bad_area;
++ if (likely(vma->vm_start <= address))
++ goto good_area;
++ if (!(vma->vm_flags & VM_GROWSDOWN))
++ goto bad_area;
++ if (error_code & 4) {
++ /* Allow userspace just enough access below the stack pointer
++ * to let the 'enter' instruction work.
++ */
++ if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
++ goto bad_area;
++ }
++ if (expand_stack(vma, address))
++ goto bad_area;
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++ info.si_code = SEGV_ACCERR;
++ write = 0;
++ switch (error_code & (PF_PROT|PF_WRITE)) {
++ default: /* 3: write, present */
++ /* fall through */
++ case PF_WRITE: /* write, not present */
++ if (!(vma->vm_flags & VM_WRITE))
++ goto bad_area;
++ write++;
++ break;
++ case PF_PROT: /* read, present */
++ goto bad_area;
++ case 0: /* read, not present */
++ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
++ goto bad_area;
++ }
++
++ /*
++ * If for any reason at all we couldn't handle the fault,
++ * make sure we exit gracefully rather than endlessly redo
++ * the fault.
++ */
++ switch (handle_mm_fault(mm, vma, address, write)) {
++ case VM_FAULT_MINOR:
++ tsk->min_flt++;
++ break;
++ case VM_FAULT_MAJOR:
++ tsk->maj_flt++;
++ break;
++ case VM_FAULT_SIGBUS:
++ goto do_sigbus;
++ default:
++ goto out_of_memory;
++ }
++
++ up_read(&mm->mmap_sem);
++ return;
++
++/*
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
++ */
++bad_area:
++ up_read(&mm->mmap_sem);
++
++bad_area_nosemaphore:
++ /* User mode accesses just cause a SIGSEGV */
++ if (error_code & PF_USER) {
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++		/* Work around K8 erratum #100: K8 in compat mode
++ occasionally jumps to illegal addresses >4GB. We
++ catch this here in the page fault handler because
++ these addresses are not reachable. Just detect this
++ case and return. Any code segment in LDT is
++ compatibility mode. */
++ if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
++ (address >> 32))
++ return;
++
++ if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
++ printk(
++ "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
++ tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
++ tsk->comm, tsk->pid, address, regs->rip,
++ regs->rsp, error_code);
++ }
++
++ tsk->thread.cr2 = address;
++ /* Kernel addresses are always protection faults */
++ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++ tsk->thread.trap_no = 14;
++ info.si_signo = SIGSEGV;
++ info.si_errno = 0;
++ /* info.si_code has been set above */
++ info.si_addr = (void __user *)address;
++ force_sig_info(SIGSEGV, &info, tsk);
++ return;
++ }
++
++no_context:
++
++ /* Are we prepared to handle this kernel fault? */
++ fixup = search_exception_tables(regs->rip);
++ if (fixup) {
++ regs->rip = fixup->fixup;
++ return;
++ }
++
++ /*
++ * Hall of shame of CPU/BIOS bugs.
++ */
++
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++ if (is_errata93(regs, address))
++ return;
++
++/*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ */
++
++ flags = oops_begin();
++
++ if (address < PAGE_SIZE)
++ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
++ else
++ printk(KERN_ALERT "Unable to handle kernel paging request");
++ printk(" at %016lx RIP: \n" KERN_ALERT,address);
++ printk_address(regs->rip);
++ dump_pagetable(address);
++ tsk->thread.cr2 = address;
++ tsk->thread.trap_no = 14;
++ tsk->thread.error_code = error_code;
++ __die("Oops", regs, error_code);
++ /* Executive summary in case the body of the oops scrolled away */
++ printk(KERN_EMERG "CR2: %016lx\n", address);
++ oops_end(flags);
++ do_exit(SIGKILL);
++
++/*
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
++ */
++out_of_memory:
++ up_read(&mm->mmap_sem);
++ if (current->pid == 1) {
++ yield();
++ goto again;
++ }
++ printk("VM: killing process %s\n", tsk->comm);
++ if (error_code & 4)
++ do_exit(SIGKILL);
++ goto no_context;
++
++do_sigbus:
++ up_read(&mm->mmap_sem);
++
++ /* Kernel mode? Handle exceptions or die */
++ if (!(error_code & PF_USER))
++ goto no_context;
++
++ tsk->thread.cr2 = address;
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 14;
++ info.si_signo = SIGBUS;
++ info.si_errno = 0;
++ info.si_code = BUS_ADRERR;
++ info.si_addr = (void __user *)address;
++ force_sig_info(SIGBUS, &info, tsk);
++ return;
++}
++
++DEFINE_SPINLOCK(pgd_lock);
++struct page *pgd_list;
++
++void vmalloc_sync_all(void)
++{
++ /* Note that races in the updates of insync and start aren't
++ problematic:
++ insync can only get set bits added, and updates to start are only
++ improving performance (without affecting correctness if undone). */
++ static DECLARE_BITMAP(insync, PTRS_PER_PGD);
++ static unsigned long start = VMALLOC_START & PGDIR_MASK;
++ unsigned long address;
++
++ for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
++ if (!test_bit(pgd_index(address), insync)) {
++ const pgd_t *pgd_ref = pgd_offset_k(address);
++ struct page *page;
++
++ if (pgd_none(*pgd_ref))
++ continue;
++ spin_lock(&pgd_lock);
++ for (page = pgd_list; page;
++ page = (struct page *)page->index) {
++ pgd_t *pgd;
++ pgd = (pgd_t *)page_address(page) + pgd_index(address);
++ if (pgd_none(*pgd))
++ set_pgd(pgd, *pgd_ref);
++ else
++ BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
++ }
++ spin_unlock(&pgd_lock);
++ set_bit(pgd_index(address), insync);
++ }
++ if (address == start)
++ start = address + PGDIR_SIZE;
++ }
++ /* Check that there is no need to do the same for the modules area. */
++ BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
++ BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
++ (__START_KERNEL & PGDIR_MASK)));
++}
++
++static int __init enable_pagefaulttrace(char *str)
++{
++ page_fault_trace = 1;
++ return 1;
++}
++__setup("pagefaulttrace", enable_pagefaulttrace);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/mm/init-xen.c 2007-08-27 14:02:10.000000000 -0400
+@@ -0,0 +1,1241 @@
++/*
++ * linux/arch/x86_64/mm/init.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
++ * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Modified for Xen.
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/pagemap.h>
++#include <linux/bootmem.h>
++#include <linux/proc_fs.h>
++#include <linux/pci.h>
++#include <linux/poison.h>
++#include <linux/dma-mapping.h>
++#include <linux/module.h>
++#include <linux/memory_hotplug.h>
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/dma.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/apic.h>
++#include <asm/tlb.h>
++#include <asm/mmu_context.h>
++#include <asm/proto.h>
++#include <asm/smp.h>
++#include <asm/sections.h>
++
++#include <xen/features.h>
++
++#ifndef Dprintk
++#define Dprintk(x...)
++#endif
++
++struct dma_mapping_ops* dma_ops;
++EXPORT_SYMBOL(dma_ops);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++unsigned int __kernel_page_user;
++EXPORT_SYMBOL(__kernel_page_user);
++#endif
++
++extern unsigned long *contiguous_bitmap;
++
++static unsigned long dma_reserve __initdata;
++
++DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
++extern unsigned long start_pfn;
++
++/*
++ * Use this until direct mapping is established, i.e. before __va() is
++ * available in init_memory_mapping().
++ */
++
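++/* Convert the machine address stored in a pagetable entry into the kernel
++   virtual address of the pagetable page it points to, via mfn_to_pfn() and
++   the __START_KERNEL_map alias. */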
++#define addr_to_page(addr, page) \
++ (addr) &= PHYSICAL_PAGE_MASK; \
++ (page) = ((unsigned long *) ((unsigned long) \
++ (((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) + \
++ __START_KERNEL_map)))
++
++static void __meminit early_make_page_readonly(void *va, unsigned int feature)
++{
++ unsigned long addr, _va = (unsigned long)va;
++ pte_t pte, *ptep;
++ unsigned long *page = (unsigned long *) init_level4_pgt;
++
++ if (xen_feature(feature))
++ return;
++
++ addr = (unsigned long) page[pgd_index(_va)];
++ addr_to_page(addr, page);
++
++ addr = page[pud_index(_va)];
++ addr_to_page(addr, page);
++
++ addr = page[pmd_index(_va)];
++ addr_to_page(addr, page);
++
++ ptep = (pte_t *) &page[pte_index(_va)];
++
++ pte.pte = ptep->pte & ~_PAGE_RW;
++ if (HYPERVISOR_update_va_mapping(_va, pte, 0))
++ BUG();
++}
++
++static void __make_page_readonly(void *va)
++{
++ pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
++ unsigned long addr = (unsigned long) va;
++
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ ptep = pte_offset_kernel(pmd, addr);
++
++ pte.pte = ptep->pte & ~_PAGE_RW;
++ if (HYPERVISOR_update_va_mapping(addr, pte, 0))
++ xen_l1_entry_update(ptep, pte); /* fallback */
++
++ if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
++ __make_page_readonly(__va(pte_pfn(pte) << PAGE_SHIFT));
++}
++
++static void __make_page_writable(void *va)
++{
++ pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
++ unsigned long addr = (unsigned long) va;
++
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ ptep = pte_offset_kernel(pmd, addr);
++
++ pte.pte = ptep->pte | _PAGE_RW;
++ if (HYPERVISOR_update_va_mapping(addr, pte, 0))
++ xen_l1_entry_update(ptep, pte); /* fallback */
++
++ if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
++ __make_page_writable(__va(pte_pfn(pte) << PAGE_SHIFT));
++}
++
++void make_page_readonly(void *va, unsigned int feature)
++{
++ if (!xen_feature(feature))
++ __make_page_readonly(va);
++}
++
++void make_page_writable(void *va, unsigned int feature)
++{
++ if (!xen_feature(feature))
++ __make_page_writable(va);
++}
++
++void make_pages_readonly(void *va, unsigned nr, unsigned int feature)
++{
++ if (xen_feature(feature))
++ return;
++
++ while (nr-- != 0) {
++ __make_page_readonly(va);
++ va = (void*)((unsigned long)va + PAGE_SIZE);
++ }
++}
++
++void make_pages_writable(void *va, unsigned nr, unsigned int feature)
++{
++ if (xen_feature(feature))
++ return;
++
++ while (nr-- != 0) {
++ __make_page_writable(va);
++ va = (void*)((unsigned long)va + PAGE_SIZE);
++ }
++}
++
++/*
++ * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
++ * physical space so we can cache the location of the first one and move
++ * around without checking the pgd every time.
++ */
++
++void show_mem(void)
++{
++ long i, total = 0, reserved = 0;
++ long shared = 0, cached = 0;
++ pg_data_t *pgdat;
++ struct page *page;
++
++ printk(KERN_INFO "Mem-info:\n");
++ show_free_areas();
++ printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
++
++ for_each_online_pgdat(pgdat) {
++ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
++ page = pfn_to_page(pgdat->node_start_pfn + i);
++ total++;
++ if (PageReserved(page))
++ reserved++;
++ else if (PageSwapCache(page))
++ cached++;
++ else if (page_count(page))
++ shared += page_count(page) - 1;
++ }
++ }
++ printk(KERN_INFO "%lu pages of RAM\n", total);
++ printk(KERN_INFO "%lu reserved pages\n",reserved);
++ printk(KERN_INFO "%lu pages shared\n",shared);
++ printk(KERN_INFO "%lu pages swap cached\n",cached);
++}
++
++int after_bootmem;
++
++static __init void *spp_getpage(void)
++{
++ void *ptr;
++ if (after_bootmem)
++ ptr = (void *) get_zeroed_page(GFP_ATOMIC);
++ else if (start_pfn < table_end) {
++ ptr = __va(start_pfn << PAGE_SHIFT);
++ start_pfn++;
++ memset(ptr, 0, PAGE_SIZE);
++ } else
++ ptr = alloc_bootmem_pages(PAGE_SIZE);
++ if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
++ panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
++
++ Dprintk("spp_getpage %p\n", ptr);
++ return ptr;
++}
++
++#define pgd_offset_u(address) (pgd_t *)(init_level4_user_pgt + pgd_index(address))
++
++static inline pud_t *pud_offset_u(unsigned long address)
++{
++ pud_t *pud = level3_user_pgt;
++
++ return pud + pud_index(address);
++}
++
++static __init void set_pte_phys(unsigned long vaddr,
++ unsigned long phys, pgprot_t prot, int user_mode)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte, new_pte;
++
++ Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
++
++ pgd = (user_mode ? pgd_offset_u(vaddr) : pgd_offset_k(vaddr));
++ if (pgd_none(*pgd)) {
++ printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
++ return;
++ }
++ pud = (user_mode ? pud_offset_u(vaddr) : pud_offset(pgd, vaddr));
++ if (pud_none(*pud)) {
++ pmd = (pmd_t *) spp_getpage();
++ make_page_readonly(pmd, XENFEAT_writable_page_tables);
++ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
++ if (pmd != pmd_offset(pud, 0)) {
++ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
++ return;
++ }
++ }
++ pmd = pmd_offset(pud, vaddr);
++ if (pmd_none(*pmd)) {
++ pte = (pte_t *) spp_getpage();
++ make_page_readonly(pte, XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
++ if (pte != pte_offset_kernel(pmd, 0)) {
++ printk("PAGETABLE BUG #02!\n");
++ return;
++ }
++ }
++ if (pgprot_val(prot))
++ new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
++ else
++ new_pte = __pte(0);
++
++ pte = pte_offset_kernel(pmd, vaddr);
++ if (!pte_none(*pte) &&
++ pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
++ pte_ERROR(*pte);
++ set_pte(pte, new_pte);
++
++ /*
++ * It's enough to flush this one mapping.
++ * (PGE mappings get flushed as well)
++ */
++ __flush_tlb_one(vaddr);
++}
++
++static __init void set_pte_phys_ma(unsigned long vaddr,
++ unsigned long phys, pgprot_t prot)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte, new_pte;
++
++ Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
++
++ pgd = pgd_offset_k(vaddr);
++ if (pgd_none(*pgd)) {
++ printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
++ return;
++ }
++ pud = pud_offset(pgd, vaddr);
++ if (pud_none(*pud)) {
++
++ pmd = (pmd_t *) spp_getpage();
++ make_page_readonly(pmd, XENFEAT_writable_page_tables);
++ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
++ if (pmd != pmd_offset(pud, 0)) {
++ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
++ return;
++ }
++ }
++ pmd = pmd_offset(pud, vaddr);
++ if (pmd_none(*pmd)) {
++ pte = (pte_t *) spp_getpage();
++ make_page_readonly(pte, XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
++ if (pte != pte_offset_kernel(pmd, 0)) {
++ printk("PAGETABLE BUG #02!\n");
++ return;
++ }
++ }
++ new_pte = pfn_pte_ma(phys >> PAGE_SHIFT, prot);
++
++ pte = pte_offset_kernel(pmd, vaddr);
++ set_pte(pte, new_pte);
++
++ /*
++ * It's enough to flush this one mapping.
++ * (PGE mappings get flushed as well)
++ */
++ __flush_tlb_one(vaddr);
++}
++
++#define SET_FIXMAP_KERNEL 0
++#define SET_FIXMAP_USER 1
++
++/* NOTE: this is meant to be run only at boot */
++void __init
++__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
++{
++ unsigned long address = __fix_to_virt(idx);
++
++ if (idx >= __end_of_fixed_addresses) {
++ printk("Invalid __set_fixmap\n");
++ return;
++ }
++ switch (idx) {
++ case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
++ set_pte_phys(address, phys, prot, SET_FIXMAP_KERNEL);
++ break;
++ default:
++ set_pte_phys_ma(address, phys, prot);
++ break;
++ }
++}
++
++/*
++ * This only supports vsyscall area.
++ */
++void __init
++__set_fixmap_user (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
++{
++ unsigned long address = __fix_to_virt(idx);
++
++ if (idx >= __end_of_fixed_addresses) {
++ printk("Invalid __set_fixmap\n");
++ return;
++ }
++
++ set_pte_phys(address, phys, prot, SET_FIXMAP_USER);
++}
++
++unsigned long __initdata table_start, table_end;
++
++static __meminit void *alloc_static_page(unsigned long *phys)
++{
++ unsigned long va = (start_pfn << PAGE_SHIFT) + __START_KERNEL_map;
++
++ if (after_bootmem) {
++ void *adr = (void *)get_zeroed_page(GFP_ATOMIC);
++
++ *phys = __pa(adr);
++ return adr;
++ }
++
++ *phys = start_pfn << PAGE_SHIFT;
++ start_pfn++;
++ memset((void *)va, 0, PAGE_SIZE);
++ return (void *)va;
++}
++
++#define PTE_SIZE PAGE_SIZE
++
++static inline void __set_pte(pte_t *dst, pte_t val)
++{
++ *dst = val;
++}
++
++static inline int make_readonly(unsigned long paddr)
++{
++ extern char __vsyscall_0;
++ int readonly = 0;
++
++ /* Make new page tables read-only. */
++ if (!xen_feature(XENFEAT_writable_page_tables)
++ && (paddr >= (table_start << PAGE_SHIFT))
++ && (paddr < (table_end << PAGE_SHIFT)))
++ readonly = 1;
++ /* Make old page tables read-only. */
++ if (!xen_feature(XENFEAT_writable_page_tables)
++ && (paddr >= (xen_start_info->pt_base - __START_KERNEL_map))
++ && (paddr < (start_pfn << PAGE_SHIFT)))
++ readonly = 1;
++
++ /*
++ * No need for writable mapping of kernel image. This also ensures that
++ * page and descriptor tables embedded inside don't have writable
++ * mappings. Exclude the vsyscall area here, allowing alternative
++ * instruction patching to work.
++ */
++ if ((paddr >= __pa_symbol(&_text)) && (paddr < __pa_symbol(&_end))
++ && !(paddr >= __pa_symbol(&__vsyscall_0)
++ && paddr < __pa_symbol(&__vsyscall_0) + PAGE_SIZE))
++ readonly = 1;
++
++ return readonly;
++}
++
++#ifndef CONFIG_XEN
++/* Must run before zap_low_mappings */
++__init void *early_ioremap(unsigned long addr, unsigned long size)
++{
++ unsigned long map = round_down(addr, LARGE_PAGE_SIZE);
++
++ /* actually usually some more */
++ if (size >= LARGE_PAGE_SIZE) {
++ printk("SMBIOS area too long %lu\n", size);
++ return NULL;
++ }
++ set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
++ map += LARGE_PAGE_SIZE;
++ set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
++ __flush_tlb();
++ return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
++}
++
++/* To avoid virtual aliases later */
++__init void early_iounmap(void *addr, unsigned long size)
++{
++ if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
++ printk("early_iounmap: bad address %p\n", addr);
++ set_pmd(temp_mappings[0].pmd, __pmd(0));
++ set_pmd(temp_mappings[1].pmd, __pmd(0));
++ __flush_tlb();
++}
++#endif
++
++static void __meminit
++phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
++{
++ int i, k;
++
++ for (i = 0; i < PTRS_PER_PMD; pmd++, i++) {
++ unsigned long pte_phys;
++ pte_t *pte, *pte_save;
++
++ if (address >= end) {
++ if (!after_bootmem)
++ for (; i < PTRS_PER_PMD; i++, pmd++)
++ set_pmd(pmd, __pmd(0));
++ break;
++ }
++ pte = alloc_static_page(&pte_phys);
++ pte_save = pte;
++ for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
++ if ((address >= end) ||
++ ((address >> PAGE_SHIFT) >=
++ xen_start_info->nr_pages)) {
++ __set_pte(pte, __pte(0));
++ continue;
++ }
++ if (make_readonly(address)) {
++ __set_pte(pte,
++ __pte(address | (_KERNPG_TABLE & ~_PAGE_RW)));
++ continue;
++ }
++ __set_pte(pte, __pte(address | _KERNPG_TABLE));
++ }
++ pte = pte_save;
++ early_make_page_readonly(pte, XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(pte_phys | _KERNPG_TABLE));
++ }
++}
++
++static void __meminit
++phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
++{
++ pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
++
++ if (pmd_none(*pmd)) {
++ spin_lock(&init_mm.page_table_lock);
++ phys_pmd_init(pmd, address, end);
++ spin_unlock(&init_mm.page_table_lock);
++ __flush_tlb_all();
++ }
++}
++
++static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
++{
++ long i = pud_index(address);
++
++ pud = pud + i;
++
++ if (after_bootmem && pud_val(*pud)) {
++ phys_pmd_update(pud, address, end);
++ return;
++ }
++
++ for (; i < PTRS_PER_PUD; pud++, i++) {
++ unsigned long paddr, pmd_phys;
++ pmd_t *pmd;
++
++ paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
++ if (paddr >= end)
++ break;
++
++ pmd = alloc_static_page(&pmd_phys);
++ early_make_page_readonly(pmd, XENFEAT_writable_page_tables);
++ spin_lock(&init_mm.page_table_lock);
++ set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
++ phys_pmd_init(pmd, paddr, end);
++ spin_unlock(&init_mm.page_table_lock);
++ }
++ __flush_tlb();
++}
++
++void __init xen_init_pt(void)
++{
++ unsigned long addr, *page;
++
++ /* Find the initial pte page that was built for us. */
++ page = (unsigned long *)xen_start_info->pt_base;
++ addr = page[pgd_index(__START_KERNEL_map)];
++ addr_to_page(addr, page);
++ addr = page[pud_index(__START_KERNEL_map)];
++ addr_to_page(addr, page);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ /* On Xen 3.0.2 and older we may need to explicitly specify _PAGE_USER
++ in kernel PTEs. We check that here. */
++ if (HYPERVISOR_xen_version(XENVER_version, NULL) <= 0x30000) {
++ unsigned long *pg;
++ pte_t pte;
++
++ /* Mess with the initial mapping of page 0. It's not needed. */
++ BUILD_BUG_ON(__START_KERNEL <= __START_KERNEL_map);
++ addr = page[pmd_index(__START_KERNEL_map)];
++ addr_to_page(addr, pg);
++ pte.pte = pg[pte_index(__START_KERNEL_map)];
++ BUG_ON(!(pte.pte & _PAGE_PRESENT));
++
++ /* If _PAGE_USER isn't set, we obviously do not need it. */
++ if (pte.pte & _PAGE_USER) {
++ /* _PAGE_USER is needed, but is it set implicitly? */
++ pte.pte &= ~_PAGE_USER;
++ if ((HYPERVISOR_update_va_mapping(__START_KERNEL_map,
++ pte, 0) != 0) ||
++ !(pg[pte_index(__START_KERNEL_map)] & _PAGE_USER))
++ /* We need to explicitly specify _PAGE_USER. */
++ __kernel_page_user = _PAGE_USER;
++ }
++ }
++#endif
++
++ /* Construct mapping of initial pte page in our own directories. */
++ init_level4_pgt[pgd_index(__START_KERNEL_map)] =
++ mk_kernel_pgd(__pa_symbol(level3_kernel_pgt));
++ level3_kernel_pgt[pud_index(__START_KERNEL_map)] =
++ __pud(__pa_symbol(level2_kernel_pgt) |
++ _KERNPG_TABLE);
++ memcpy((void *)level2_kernel_pgt, page, PAGE_SIZE);
++
++ early_make_page_readonly(init_level4_pgt,
++ XENFEAT_writable_page_tables);
++ early_make_page_readonly(init_level4_user_pgt,
++ XENFEAT_writable_page_tables);
++ early_make_page_readonly(level3_kernel_pgt,
++ XENFEAT_writable_page_tables);
++ early_make_page_readonly(level3_user_pgt,
++ XENFEAT_writable_page_tables);
++ early_make_page_readonly(level2_kernel_pgt,
++ XENFEAT_writable_page_tables);
++
++ if (!xen_feature(XENFEAT_writable_page_tables)) {
++ xen_pgd_pin(__pa_symbol(init_level4_pgt));
++ xen_pgd_pin(__pa_symbol(init_level4_user_pgt));
++ }
++
++ set_pgd((pgd_t *)(init_level4_user_pgt + 511),
++ mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
++}
++
++static void __init extend_init_mapping(unsigned long tables_space)
++{
++ unsigned long va = __START_KERNEL_map;
++ unsigned long phys, addr, *pte_page;
++ pmd_t *pmd;
++ pte_t *pte, new_pte;
++ unsigned long *page = (unsigned long *)init_level4_pgt;
++
++ addr = page[pgd_index(va)];
++ addr_to_page(addr, page);
++ addr = page[pud_index(va)];
++ addr_to_page(addr, page);
++
++ /* Kill mapping of low 1MB. */
++ while (va < (unsigned long)&_text) {
++ HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
++ va += PAGE_SIZE;
++ }
++
++ /* Ensure init mappings cover kernel text/data and initial tables. */
++ while (va < (__START_KERNEL_map
++ + (start_pfn << PAGE_SHIFT)
++ + tables_space)) {
++ pmd = (pmd_t *)&page[pmd_index(va)];
++ if (pmd_none(*pmd)) {
++ pte_page = alloc_static_page(&phys);
++ early_make_page_readonly(
++ pte_page, XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(phys | _KERNPG_TABLE));
++ } else {
++ addr = page[pmd_index(va)];
++ addr_to_page(addr, pte_page);
++ }
++ pte = (pte_t *)&pte_page[pte_index(va)];
++ if (pte_none(*pte)) {
++ new_pte = pfn_pte(
++ (va - __START_KERNEL_map) >> PAGE_SHIFT,
++ __pgprot(_KERNPG_TABLE));
++ xen_l1_entry_update(pte, new_pte);
++ }
++ va += PAGE_SIZE;
++ }
++
++ /* Finally, blow away any spurious initial mappings. */
++ while (1) {
++ pmd = (pmd_t *)&page[pmd_index(va)];
++ if (pmd_none(*pmd))
++ break;
++ HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
++ va += PAGE_SIZE;
++ }
++}
++
++static void __init find_early_table_space(unsigned long end)
++{
++ unsigned long puds, pmds, ptes, tables;
++
++ puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
++ pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
++ ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
++
++ tables = round_up(puds * 8, PAGE_SIZE) +
++ round_up(pmds * 8, PAGE_SIZE) +
++ round_up(ptes * 8, PAGE_SIZE);
++
++ extend_init_mapping(tables);
++
++ table_start = start_pfn;
++ table_end = table_start + (tables>>PAGE_SHIFT);
++
++ early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
++ end, table_start << PAGE_SHIFT,
++ (table_start << PAGE_SHIFT) + tables);
++}
++
++static void xen_finish_init_mapping(void)
++{
++ unsigned long i, start, end;
++
++ /* Re-vector virtual addresses pointing into the initial
++ mapping to the just-established permanent ones. */
++ xen_start_info = __va(__pa(xen_start_info));
++ xen_start_info->pt_base = (unsigned long)
++ __va(__pa(xen_start_info->pt_base));
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ phys_to_machine_mapping =
++ __va(__pa(xen_start_info->mfn_list));
++ xen_start_info->mfn_list = (unsigned long)
++ phys_to_machine_mapping;
++ }
++ if (xen_start_info->mod_start)
++ xen_start_info->mod_start = (unsigned long)
++ __va(__pa(xen_start_info->mod_start));
++
++ /* Destroy the Xen-created mappings beyond the kernel image as
++ * well as the temporary mappings created above. Prevents
++ * overlap with modules area (if init mapping is very big).
++ */
++ start = PAGE_ALIGN((unsigned long)_end);
++ end = __START_KERNEL_map + (table_end << PAGE_SHIFT);
++ for (; start < end; start += PAGE_SIZE)
++ WARN_ON(HYPERVISOR_update_va_mapping(
++ start, __pte_ma(0), 0));
++
++ /* Allocate pte's for initial fixmaps from 'start_pfn' allocator. */
++ table_end = ~0UL;
++
++ /*
++ * Prefetch pte's for the bt_ioremap() area. It gets used before the
++ * boot-time allocator is online, so allocate-on-demand would fail.
++ */
++ for (i = FIX_BTMAP_END; i <= FIX_BTMAP_BEGIN; i++)
++ __set_fixmap(i, 0, __pgprot(0));
++
++ /* Switch to the real shared_info page, and clear the dummy page. */
++ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
++ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++ memset(empty_zero_page, 0, sizeof(empty_zero_page));
++
++ /* Set up mapping of lowest 1MB of physical memory. */
++ for (i = 0; i < NR_FIX_ISAMAPS; i++)
++ if (is_initial_xendomain())
++ set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
++ else
++ __set_fixmap(FIX_ISAMAP_BEGIN - i,
++ virt_to_mfn(empty_zero_page)
++ << PAGE_SHIFT,
++ PAGE_KERNEL_RO);
++
++ /* Disable the 'start_pfn' allocator. */
++ table_end = start_pfn;
++}
++
++/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
++ This runs before bootmem is initialized and gets pages directly from the
++ physical memory. To access them they are temporarily mapped. */
++void __meminit init_memory_mapping(unsigned long start, unsigned long end)
++{
++ unsigned long next;
++
++ Dprintk("init_memory_mapping\n");
++
++ /*
++ * Find space for the kernel direct mapping tables.
++	 * Later we should allocate these tables in the local node of the memory
++	 * being mapped. Unfortunately this is currently done before the nodes are
++ * discovered.
++ */
++ if (!after_bootmem)
++ find_early_table_space(end);
++
++ start = (unsigned long)__va(start);
++ end = (unsigned long)__va(end);
++
++ for (; start < end; start = next) {
++ unsigned long pud_phys;
++ pgd_t *pgd = pgd_offset_k(start);
++ pud_t *pud;
++
++ if (after_bootmem) {
++ pud = pud_offset(pgd, start & PGDIR_MASK);
++ make_page_readonly(pud, XENFEAT_writable_page_tables);
++ pud_phys = __pa(pud);
++ } else {
++ pud = alloc_static_page(&pud_phys);
++ early_make_page_readonly(pud, XENFEAT_writable_page_tables);
++ }
++ next = start + PGDIR_SIZE;
++ if (next > end)
++ next = end;
++ phys_pud_init(pud, __pa(start), __pa(next));
++ if (!after_bootmem)
++ set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
++ }
++
++ if (!after_bootmem) {
++ BUG_ON(start_pfn != table_end);
++ xen_finish_init_mapping();
++ }
++
++ __flush_tlb_all();
++}
++
++void __cpuinit zap_low_mappings(int cpu)
++{
++ /* this is not required for Xen */
++#if 0
++ swap_low_mappings();
++#endif
++}
++
++/* Compute zone sizes for the DMA and DMA32 zones in a node. */
++__init void
++size_zones(unsigned long *z, unsigned long *h,
++ unsigned long start_pfn, unsigned long end_pfn)
++{
++ int i;
++#ifndef CONFIG_XEN
++ unsigned long w;
++#endif
++
++ for (i = 0; i < MAX_NR_ZONES; i++)
++ z[i] = 0;
++
++#ifndef CONFIG_XEN
++ if (start_pfn < MAX_DMA_PFN)
++ z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
++ if (start_pfn < MAX_DMA32_PFN) {
++ unsigned long dma32_pfn = MAX_DMA32_PFN;
++ if (dma32_pfn > end_pfn)
++ dma32_pfn = end_pfn;
++ z[ZONE_DMA32] = dma32_pfn - start_pfn;
++ }
++ z[ZONE_NORMAL] = end_pfn - start_pfn;
++
++ /* Remove lower zones from higher ones. */
++ w = 0;
++ for (i = 0; i < MAX_NR_ZONES; i++) {
++ if (z[i])
++ z[i] -= w;
++ w += z[i];
++ }
++
++ /* Compute holes */
++ w = start_pfn;
++ for (i = 0; i < MAX_NR_ZONES; i++) {
++ unsigned long s = w;
++ w += z[i];
++ h[i] = e820_hole_size(s, w);
++ }
++
++	/* Add the space needed for mem_map to the holes too. */
++ for (i = 0; i < MAX_NR_ZONES; i++)
++ h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;
++
++ /* The 16MB DMA zone has the kernel and other misc mappings.
++ Account them too */
++ if (h[ZONE_DMA]) {
++ h[ZONE_DMA] += dma_reserve;
++ if (h[ZONE_DMA] >= z[ZONE_DMA]) {
++ printk(KERN_WARNING
++ "Kernel too large and filling up ZONE_DMA?\n");
++ h[ZONE_DMA] = z[ZONE_DMA];
++ }
++ }
++#else
++ z[ZONE_DMA] = end_pfn;
++ for (i = 0; i < MAX_NR_ZONES; i++)
++ h[i] = 0;
++#endif
++}
++
++#ifndef CONFIG_NUMA
++void __init paging_init(void)
++{
++ unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
++
++ memory_present(0, 0, end_pfn);
++ sparse_init();
++ size_zones(zones, holes, 0, end_pfn);
++ free_area_init_node(0, NODE_DATA(0), zones,
++ __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
++
++ init_mm.context.pinned = 1;
++}
++#endif
++
++/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
++ from the CPU leading to inconsistent cache lines. address and size
++ must be aligned to 2MB boundaries.
++ Does nothing when the mapping doesn't exist. */
++void __init clear_kernel_mapping(unsigned long address, unsigned long size)
++{
++ unsigned long end = address + size;
++
++ BUG_ON(address & ~LARGE_PAGE_MASK);
++ BUG_ON(size & ~LARGE_PAGE_MASK);
++
++ for (; address < end; address += LARGE_PAGE_SIZE) {
++ pgd_t *pgd = pgd_offset_k(address);
++ pud_t *pud;
++ pmd_t *pmd;
++ if (pgd_none(*pgd))
++ continue;
++ pud = pud_offset(pgd, address);
++ if (pud_none(*pud))
++ continue;
++ pmd = pmd_offset(pud, address);
++ if (!pmd || pmd_none(*pmd))
++ continue;
++ if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
++ /* Could handle this, but it should not happen currently. */
++ printk(KERN_ERR
++ "clear_kernel_mapping: mapping has been split. will leak memory\n");
++ pmd_ERROR(*pmd);
++ }
++ set_pmd(pmd, __pmd(0));
++ }
++ __flush_tlb_all();
++}
++
++/*
++ * Memory hotplug specific functions
++ */
++void online_page(struct page *page)
++{
++ ClearPageReserved(page);
++ init_page_count(page);
++ __free_page(page);
++ totalram_pages++;
++ num_physpages++;
++}
++
++#ifdef CONFIG_MEMORY_HOTPLUG
++/*
++ * XXX: memory_add_physaddr_to_nid() finds the node id for a physical address
++ * added via the sysfs probe interface. When ACPI notifies a hot-add event, the
++ * node id can be found by searching the DSDT, but the probe interface carries
++ * no node id, so return 0 for now.
++ */
++#ifdef CONFIG_NUMA
++int memory_add_physaddr_to_nid(u64 start)
++{
++ return 0;
++}
++#endif
++
++/*
++ * Memory is always added to the NORMAL zone. This means you will never get
++ * additional DMA/DMA32 memory.
++ */
++int arch_add_memory(int nid, u64 start, u64 size)
++{
++ struct pglist_data *pgdat = NODE_DATA(nid);
++ struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
++ unsigned long start_pfn = start >> PAGE_SHIFT;
++ unsigned long nr_pages = size >> PAGE_SHIFT;
++ int ret;
++
++ ret = __add_pages(zone, start_pfn, nr_pages);
++ if (ret)
++ goto error;
++
++ init_memory_mapping(start, (start + size -1));
++
++ return ret;
++error:
++ printk("%s: Problem encountered in __add_pages!\n", __func__);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(arch_add_memory);
++
++int remove_memory(u64 start, u64 size)
++{
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(remove_memory);
++
++#else /* CONFIG_MEMORY_HOTPLUG */
++/*
++ * Memory hot-add without sparsemem. The mem_maps have been allocated in advance;
++ * just online the pages.
++ */
++int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
++{
++ int err = -EIO;
++ unsigned long pfn;
++ unsigned long total = 0, mem = 0;
++ for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
++ if (pfn_valid(pfn)) {
++ online_page(pfn_to_page(pfn));
++ err = 0;
++ mem++;
++ }
++ total++;
++ }
++ if (!err) {
++ z->spanned_pages += total;
++ z->present_pages += mem;
++ z->zone_pgdat->node_spanned_pages += total;
++ z->zone_pgdat->node_present_pages += mem;
++ }
++ return err;
++}
++#endif /* CONFIG_MEMORY_HOTPLUG */
++
++static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
++ kcore_vsyscall;
++
++void __init mem_init(void)
++{
++ long codesize, reservedpages, datasize, initsize;
++ unsigned long pfn;
++
++ contiguous_bitmap = alloc_bootmem_low_pages(
++ (end_pfn + 2*BITS_PER_LONG) >> 3);
++ BUG_ON(!contiguous_bitmap);
++ memset(contiguous_bitmap, 0, (end_pfn + 2*BITS_PER_LONG) >> 3);
++
++ pci_iommu_alloc();
++
++ /* How many end-of-memory variables you have, grandma! */
++ max_low_pfn = end_pfn;
++ max_pfn = end_pfn;
++ num_physpages = end_pfn;
++ high_memory = (void *) __va(end_pfn * PAGE_SIZE);
++
++ /* clear the zero-page */
++ memset(empty_zero_page, 0, PAGE_SIZE);
++
++ reservedpages = 0;
++
++ /* this will put all low memory onto the freelists */
++#ifdef CONFIG_NUMA
++ totalram_pages = numa_free_all_bootmem();
++#else
++ totalram_pages = free_all_bootmem();
++#endif
++ /* XEN: init and count pages outside initial allocation. */
++ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
++ ClearPageReserved(pfn_to_page(pfn));
++ init_page_count(pfn_to_page(pfn));
++ totalram_pages++;
++ }
++ reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);
++
++ after_bootmem = 1;
++
++ codesize = (unsigned long) &_etext - (unsigned long) &_text;
++ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
++ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
++
++ /* Register memory areas for /proc/kcore */
++ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
++ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
++ VMALLOC_END-VMALLOC_START);
++ kclist_add(&kcore_kernel, &_stext, _end - _stext);
++ kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
++ kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
++ VSYSCALL_END - VSYSCALL_START);
++
++ printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
++ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
++ end_pfn << (PAGE_SHIFT-10),
++ codesize >> 10,
++ reservedpages << (PAGE_SHIFT-10),
++ datasize >> 10,
++ initsize >> 10);
++
++#ifndef CONFIG_XEN
++#ifdef CONFIG_SMP
++ /*
++ * Sync boot_level4_pgt mappings with the init_level4_pgt
++ * except for the low identity mappings which are already zapped
++ * in init_level4_pgt. This sync-up is essential for AP's bringup
++ */
++ memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
++#endif
++#endif
++}
++
++void free_init_pages(char *what, unsigned long begin, unsigned long end)
++{
++ unsigned long addr;
++
++ if (begin >= end)
++ return;
++
++ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
++ for (addr = begin; addr < end; addr += PAGE_SIZE) {
++ ClearPageReserved(virt_to_page(addr));
++ init_page_count(virt_to_page(addr));
++ memset((void *)(addr & ~(PAGE_SIZE-1)),
++ POISON_FREE_INITMEM, PAGE_SIZE);
++ if (addr >= __START_KERNEL_map) {
++ /* make_readonly() reports all kernel addresses. */
++ __make_page_writable(__va(__pa(addr)));
++ if (HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
++ pgd_t *pgd = pgd_offset_k(addr);
++ pud_t *pud = pud_offset(pgd, addr);
++ pmd_t *pmd = pmd_offset(pud, addr);
++ pte_t *pte = pte_offset_kernel(pmd, addr);
++
++ xen_l1_entry_update(pte, __pte(0)); /* fallback */
++ }
++ }
++ free_page(addr);
++ totalram_pages++;
++ }
++}
++
++void free_initmem(void)
++{
++ memset(__initdata_begin, POISON_FREE_INITDATA,
++ __initdata_end - __initdata_begin);
++ free_init_pages("unused kernel memory",
++ (unsigned long)(&__init_begin),
++ (unsigned long)(&__init_end));
++}
++
++#ifdef CONFIG_DEBUG_RODATA
++
++void mark_rodata_ro(void)
++{
++ unsigned long addr = (unsigned long)__start_rodata;
++
++ for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
++ change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
++
++ printk ("Write protecting the kernel read-only data: %luk\n",
++ (__end_rodata - __start_rodata) >> 10);
++
++ /*
++ * change_page_attr_addr() requires a global_flush_tlb() call after it.
++ * We do this after the printk so that if something went wrong in the
++ * change, the printk gets out at least to give a better debug hint
++ * of who is the culprit.
++ */
++ global_flush_tlb();
++}
++#endif
++
++#ifdef CONFIG_BLK_DEV_INITRD
++void free_initrd_mem(unsigned long start, unsigned long end)
++{
++ free_init_pages("initrd memory", start, end);
++}
++#endif
++
++void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
++{
++ /* Should check here against the e820 map to avoid double free */
++#ifdef CONFIG_NUMA
++ int nid = phys_to_nid(phys);
++ reserve_bootmem_node(NODE_DATA(nid), phys, len);
++#else
++ reserve_bootmem(phys, len);
++#endif
++ if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
++ dma_reserve += len / PAGE_SIZE;
++}
++
++int kern_addr_valid(unsigned long addr)
++{
++ unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ if (above != 0 && above != -1UL)
++ return 0;
++
++ pgd = pgd_offset_k(addr);
++ if (pgd_none(*pgd))
++ return 0;
++
++ pud = pud_offset(pgd, addr);
++ if (pud_none(*pud))
++ return 0;
++
++ pmd = pmd_offset(pud, addr);
++ if (pmd_none(*pmd))
++ return 0;
++ if (pmd_large(*pmd))
++ return pfn_valid(pmd_pfn(*pmd));
++
++ pte = pte_offset_kernel(pmd, addr);
++ if (pte_none(*pte))
++ return 0;
++ return pfn_valid(pte_pfn(*pte));
++}
++
++#ifdef CONFIG_SYSCTL
++#include <linux/sysctl.h>
++
++extern int exception_trace, page_fault_trace;
++
++static ctl_table debug_table2[] = {
++ { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
++ proc_dointvec },
++ { 0, }
++};
++
++static ctl_table debug_root_table2[] = {
++ { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
++ .child = debug_table2 },
++ { 0 },
++};
++
++static __init int x8664_sysctl_init(void)
++{
++ register_sysctl_table(debug_root_table2, 1);
++ return 0;
++}
++__initcall(x8664_sysctl_init);
++#endif
++
++/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
++ covers the 64bit vsyscall page now. 32bit has a real VMA now and does
++ not need special handling anymore. */
++
++static struct vm_area_struct gate_vma = {
++ .vm_start = VSYSCALL_START,
++ .vm_end = VSYSCALL_END,
++ .vm_page_prot = PAGE_READONLY
++};
++
++struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
++{
++#ifdef CONFIG_IA32_EMULATION
++ if (test_tsk_thread_flag(tsk, TIF_IA32))
++ return NULL;
++#endif
++ return &gate_vma;
++}
++
++int in_gate_area(struct task_struct *task, unsigned long addr)
++{
++ struct vm_area_struct *vma = get_gate_vma(task);
++ if (!vma)
++ return 0;
++ return (addr >= vma->vm_start) && (addr < vma->vm_end);
++}
++
++/* Use this when you have no reliable task/vma, typically from interrupt
++ * context. It is less reliable than using the task's vma and may give
++ * false positives.
++ */
++int in_gate_area_no_task(unsigned long addr)
++{
++ return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/arch/x86_64/mm/pageattr-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,433 @@
++/*
++ * Copyright 2002 Andi Kleen, SuSE Labs.
++ * Thanks to Ben LaHaise for precious feedback.
++ */
++
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++#include <asm/tlbflush.h>
++#include <asm/io.h>
++
++#ifdef CONFIG_XEN
++#include <asm/pgalloc.h>
++#include <asm/mmu_context.h>
++
++LIST_HEAD(mm_unpinned);
++DEFINE_SPINLOCK(mm_unpinned_lock);
++
++static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
++{
++ struct page *page = virt_to_page(pt);
++ unsigned long pfn = page_to_pfn(page);
++ int rc;
++
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)__va(pfn << PAGE_SHIFT),
++ pfn_pte(pfn, flags), 0);
++ if (rc)
++ BUG();
++}
++
++static void mm_walk(struct mm_struct *mm, pgprot_t flags)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++ int g,u,m;
++
++ pgd = mm->pgd;
++ /*
++ * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
++ * be the 'current' task's pagetables (e.g., current may be 32-bit,
++ * but the pagetables may be for a 64-bit task).
++ * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
++ * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
++ */
++ for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
++ if (pgd_none(*pgd))
++ continue;
++ pud = pud_offset(pgd, 0);
++ if (PTRS_PER_PUD > 1) /* not folded */
++ mm_walk_set_prot(pud,flags);
++ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++ if (pud_none(*pud))
++ continue;
++ pmd = pmd_offset(pud, 0);
++ if (PTRS_PER_PMD > 1) /* not folded */
++ mm_walk_set_prot(pmd,flags);
++ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++ if (pmd_none(*pmd))
++ continue;
++ pte = pte_offset_kernel(pmd,0);
++ mm_walk_set_prot(pte,flags);
++ }
++ }
++ }
++}
++
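++/*
++ * Pinning sequence: first walk the whole page-table tree and remap every
++ * page-table page read-only (Xen will not pin tables that are still
++ * writably mapped), then issue the pin hypercalls for the kernel and user
++ * pgds so the hypervisor validates the tree and tracks further updates.
++ */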
++void mm_pin(struct mm_struct *mm)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++
++ spin_lock(&mm->page_table_lock);
++
++ mm_walk(mm, PAGE_KERNEL_RO);
++ if (HYPERVISOR_update_va_mapping(
++ (unsigned long)mm->pgd,
++ pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO),
++ UVMF_TLB_FLUSH))
++ BUG();
++ if (HYPERVISOR_update_va_mapping(
++ (unsigned long)__user_pgd(mm->pgd),
++ pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT,
++ PAGE_KERNEL_RO),
++ UVMF_TLB_FLUSH))
++ BUG();
++ xen_pgd_pin(__pa(mm->pgd)); /* kernel */
++ xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
++ mm->context.pinned = 1;
++ spin_lock(&mm_unpinned_lock);
++ list_del(&mm->context.unpinned);
++ spin_unlock(&mm_unpinned_lock);
++
++ spin_unlock(&mm->page_table_lock);
++}
++
++void mm_unpin(struct mm_struct *mm)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++
++ spin_lock(&mm->page_table_lock);
++
++ xen_pgd_unpin(__pa(mm->pgd));
++ xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
++ if (HYPERVISOR_update_va_mapping(
++ (unsigned long)mm->pgd,
++ pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0))
++ BUG();
++ if (HYPERVISOR_update_va_mapping(
++ (unsigned long)__user_pgd(mm->pgd),
++ pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT,
++ PAGE_KERNEL), 0))
++ BUG();
++ mm_walk(mm, PAGE_KERNEL);
++ xen_tlb_flush();
++ mm->context.pinned = 0;
++ spin_lock(&mm_unpinned_lock);
++ list_add(&mm->context.unpinned, &mm_unpinned);
++ spin_unlock(&mm_unpinned_lock);
++
++ spin_unlock(&mm->page_table_lock);
++}
++
++void mm_pin_all(void)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++
++ /*
++ * Allow uninterrupted access to the mm_unpinned list. We don't
++ * actually take the mm_unpinned_lock as it is taken inside mm_pin().
++ * All other CPUs must be at a safe point (e.g., in stop_machine
++ * or offlined entirely).
++ */
++ preempt_disable();
++ while (!list_empty(&mm_unpinned))
++ mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
++ context.unpinned));
++ preempt_enable();
++}
++
++void _arch_dup_mmap(struct mm_struct *mm)
++{
++ if (!mm->context.pinned)
++ mm_pin(mm);
++}
++
++void _arch_exit_mmap(struct mm_struct *mm)
++{
++ struct task_struct *tsk = current;
++
++ task_lock(tsk);
++
++ /*
++ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
++ * *much* faster this way, as no tlb flushes means bigger wrpt batches.
++ */
++ if (tsk->active_mm == mm) {
++ tsk->active_mm = &init_mm;
++ atomic_inc(&init_mm.mm_count);
++
++ switch_mm(mm, &init_mm, tsk);
++
++ atomic_dec(&mm->mm_count);
++ BUG_ON(atomic_read(&mm->mm_count) == 0);
++ }
++
++ task_unlock(tsk);
++
++ if ( mm->context.pinned && (atomic_read(&mm->mm_count) == 1) &&
++ !mm->context.has_foreign_mappings )
++ mm_unpin(mm);
++}
++
++struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
++{
++ struct page *pte;
++
++ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
++ if (pte) {
++ SetPageForeign(pte, pte_free);
++ init_page_count(pte);
++ }
++ return pte;
++}
++
++void pte_free(struct page *pte)
++{
++ unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
++
++ if (!pte_write(*virt_to_ptep(va)))
++ if (HYPERVISOR_update_va_mapping(
++ va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0))
++ BUG();
++
++ ClearPageForeign(pte);
++ init_page_count(pte);
++
++ __free_page(pte);
++}
++#endif /* CONFIG_XEN */
++
++static inline pte_t *lookup_address(unsigned long address)
++{
++ pgd_t *pgd = pgd_offset_k(address);
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++ if (pgd_none(*pgd))
++ return NULL;
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return NULL;
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return NULL;
++ if (pmd_large(*pmd))
++ return (pte_t *)pmd;
++ pte = pte_offset_kernel(pmd, address);
++ if (pte && !pte_present(*pte))
++ pte = NULL;
++ return pte;
++}
++
++static struct page *split_large_page(unsigned long address, pgprot_t prot,
++ pgprot_t ref_prot)
++{
++ int i;
++ unsigned long addr;
++ struct page *base = alloc_pages(GFP_KERNEL, 0);
++ pte_t *pbase;
++ if (!base)
++ return NULL;
++ /*
++ * page_private is used to track the number of entries in
++	 * the page table page that have non-standard attributes.
++ */
++ SetPagePrivate(base);
++ page_private(base) = 0;
++
++ address = __pa(address);
++ addr = address & LARGE_PAGE_MASK;
++ pbase = (pte_t *)page_address(base);
++ for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
++ pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
++ addr == address ? prot : ref_prot);
++ }
++ return base;
++}
++
++
++static void flush_kernel_map(void *address)
++{
++ if (0 && address && cpu_has_clflush) {
++ /* is this worth it? */
++ int i;
++ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
++ asm volatile("clflush (%0)" :: "r" (address + i));
++ } else
++ asm volatile("wbinvd":::"memory");
++ if (address)
++ __flush_tlb_one(address);
++ else
++ __flush_tlb_all();
++}
++
++
++static inline void flush_map(unsigned long address)
++{
++ on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
++}
++
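++/*
++ * Pte pages created by split_large_page() are queued here (via save_page())
++ * once none of their entries carries a non-default protection any more;
++ * they are chained through page->lru.next and freed in global_flush_tlb()
++ * after the TLB/cache flush has run on all CPUs.
++ */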
++static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
++
++static inline void save_page(struct page *fpage)
++{
++ fpage->lru.next = (struct list_head *)deferred_pages;
++ deferred_pages = fpage;
++}
++
++/*
++ * No more special protections in this 2/4MB area - revert to a
++ * large page again.
++ */
++static void revert_page(unsigned long address, pgprot_t ref_prot)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t large_pte;
++
++ pgd = pgd_offset_k(address);
++ BUG_ON(pgd_none(*pgd));
++ pud = pud_offset(pgd,address);
++ BUG_ON(pud_none(*pud));
++ pmd = pmd_offset(pud, address);
++ BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
++ pgprot_val(ref_prot) |= _PAGE_PSE;
++ large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
++ set_pte((pte_t *)pmd, large_pte);
++}
++
++static int
++__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
++ pgprot_t ref_prot)
++{
++ pte_t *kpte;
++ struct page *kpte_page;
++ unsigned kpte_flags;
++ pgprot_t ref_prot2;
++ kpte = lookup_address(address);
++ if (!kpte) return 0;
++ kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
++ kpte_flags = pte_val(*kpte);
++ if (pgprot_val(prot) != pgprot_val(ref_prot)) {
++ if ((kpte_flags & _PAGE_PSE) == 0) {
++ set_pte(kpte, pfn_pte(pfn, prot));
++ } else {
++ /*
++ * split_large_page will take the reference for this
++ * change_page_attr on the split page.
++ */
++
++ struct page *split;
++ ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
++
++ split = split_large_page(address, prot, ref_prot2);
++ if (!split)
++ return -ENOMEM;
++ set_pte(kpte,mk_pte(split, ref_prot2));
++ kpte_page = split;
++ }
++ page_private(kpte_page)++;
++ } else if ((kpte_flags & _PAGE_PSE) == 0) {
++ set_pte(kpte, pfn_pte(pfn, ref_prot));
++ BUG_ON(page_private(kpte_page) == 0);
++ page_private(kpte_page)--;
++ } else
++ BUG();
++
++ /* on x86-64 the direct mapping set at boot is not using 4k pages */
++ /*
++ * ..., but the XEN guest kernels (currently) do:
++ * If the pte was reserved, it means it was created at boot
++ * time (not via split_large_page) and in turn we must not
++ * replace it with a large page.
++ */
++#ifndef CONFIG_XEN
++ BUG_ON(PageReserved(kpte_page));
++#else
++ if (PageReserved(kpte_page))
++ return 0;
++#endif
++
++ if (page_private(kpte_page) == 0) {
++ save_page(kpte_page);
++ revert_page(address, ref_prot);
++ }
++ return 0;
++}
++
++/*
++ * Change the page attributes of a page in the linear mapping.
++ *
++ * This should be used when a page is mapped with a different caching policy
++ * than write-back somewhere - some CPUs do not like it when mappings with
++ * different caching policies exist. This changes the page attributes of the
++ * in-kernel linear mapping too.
++ *
++ * The caller needs to ensure that there are no conflicting mappings elsewhere.
++ * This function only deals with the kernel linear map.
++ *
++ * Caller must call global_flush_tlb() after this.
++ */
++int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
++{
++ int err = 0;
++ int i;
++
++ down_write(&init_mm.mmap_sem);
++ for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
++ unsigned long pfn = __pa(address) >> PAGE_SHIFT;
++
++ err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
++ if (err)
++ break;
++		/* Handle the kernel mapping too, which aliases part of
++		 * lowmem. */
++ if (__pa(address) < KERNEL_TEXT_SIZE) {
++ unsigned long addr2;
++ pgprot_t prot2 = prot;
++ addr2 = __START_KERNEL_map + __pa(address);
++ pgprot_val(prot2) &= ~_PAGE_NX;
++ err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
++ }
++ }
++ up_write(&init_mm.mmap_sem);
++ return err;
++}
++
++/* Don't call this for MMIO areas that may not have a mem_map entry */
++int change_page_attr(struct page *page, int numpages, pgprot_t prot)
++{
++ unsigned long addr = (unsigned long)page_address(page);
++ return change_page_attr_addr(addr, numpages, prot);
++}
++
++void global_flush_tlb(void)
++{
++ struct page *dpage;
++
++ down_read(&init_mm.mmap_sem);
++ dpage = xchg(&deferred_pages, NULL);
++ up_read(&init_mm.mmap_sem);
++
++ flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
++ while (dpage) {
++ struct page *tmp = dpage;
++ dpage = (struct page *)dpage->lru.next;
++ ClearPagePrivate(tmp);
++ __free_page(tmp);
++ }
++}
++
++EXPORT_SYMBOL(change_page_attr);
++EXPORT_SYMBOL(global_flush_tlb);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/agp.h 2007-08-27 14:01:58.000000000 -0400
+@@ -0,0 +1,37 @@
++#ifndef AGP_H
++#define AGP_H 1
++
++#include <asm/pgtable.h>
++#include <asm/cacheflush.h>
++#include <asm/system.h>
++
++/*
++ * Functions to keep the agpgart mappings coherent with the MMU.
++ * The GART gives the CPU a physical alias of pages in memory. The alias region is
++ * mapped uncacheable. Make sure there are no conflicting mappings
++ * with different cacheability attributes for the same page. This avoids
++ * data corruption on some CPUs.
++ */
++
++int map_page_into_agp(struct page *page);
++int unmap_page_from_agp(struct page *page);
++#define flush_agp_mappings() global_flush_tlb()
++
++/* Could use CLFLUSH here if the cpu supports it. But then it would
++ need to be called for each cacheline of the whole page so it may not be
++ worth it. Would need a page for it. */
++#define flush_agp_cache() wbinvd()
++
++/* Convert a physical address to an address suitable for the GART. */
++#define phys_to_gart(x) phys_to_machine(x)
++#define gart_to_phys(x) machine_to_phys(x)
++
++/* GATT allocation. Returns/accepts GATT kernel virtual address. */
++#define alloc_gatt_pages(order) ({ \
++ char *_t; dma_addr_t _d; \
++ _t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL); \
++ _t; })
++#define free_gatt_pages(table, order) \
++ dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/desc.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,164 @@
++#ifndef __ARCH_DESC_H
++#define __ARCH_DESC_H
++
++#include <asm/ldt.h>
++#include <asm/segment.h>
++
++#define CPU_16BIT_STACK_SIZE 1024
++
++#ifndef __ASSEMBLY__
++
++#include <linux/preempt.h>
++#include <linux/smp.h>
++
++#include <asm/mmu.h>
++
++extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
++
++DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
++
++struct Xgt_desc_struct {
++ unsigned short size;
++ unsigned long address __attribute__((packed));
++ unsigned short pad;
++} __attribute__ ((packed));
++
++extern struct Xgt_desc_struct idt_descr;
++DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
++
++
++static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
++{
++ return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
++}
++
++#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
++#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
++
++#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
++#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
++#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
++#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
++
++#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
++#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
++#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
++#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
++
++/*
++ * This is the ldt that every process will get unless we need
++ * something other than this.
++ */
++extern struct desc_struct default_ldt[];
++extern void set_intr_gate(unsigned int irq, void * addr);
++
++#define _set_tssldt_desc(n,addr,limit,type) \
++__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
++ "movw %w1,2(%2)\n\t" \
++ "rorl $16,%1\n\t" \
++ "movb %b1,4(%2)\n\t" \
++ "movb %4,5(%2)\n\t" \
++ "movb $0,6(%2)\n\t" \
++ "movb %h1,7(%2)\n\t" \
++ "rorl $16,%1" \
++ : "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
++
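++/*
++ * _set_tssldt_desc() writes an 8-byte system descriptor one field at a
++ * time: the limit in bytes 0-1, the base address split across bytes 2-4
++ * and 7, and the type byte in byte 5.  __set_tss_desc() and set_ldt_desc()
++ * below use it to install TSS and LDT entries in a CPU's GDT.
++ */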
++#ifndef CONFIG_X86_NO_TSS
++static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
++{
++ _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
++ offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
++}
++
++#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
++#endif
++
++static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
++{
++ _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
++}
++
++#define LDT_entry_a(info) \
++ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
++
++#define LDT_entry_b(info) \
++ (((info)->base_addr & 0xff000000) | \
++ (((info)->base_addr & 0x00ff0000) >> 16) | \
++ ((info)->limit & 0xf0000) | \
++ (((info)->read_exec_only ^ 1) << 9) | \
++ ((info)->contents << 10) | \
++ (((info)->seg_not_present ^ 1) << 15) | \
++ ((info)->seg_32bit << 22) | \
++ ((info)->limit_in_pages << 23) | \
++ ((info)->useable << 20) | \
++ 0x7000)
++
++#define LDT_empty(info) (\
++ (info)->base_addr == 0 && \
++ (info)->limit == 0 && \
++ (info)->contents == 0 && \
++ (info)->read_exec_only == 1 && \
++ (info)->seg_32bit == 0 && \
++ (info)->limit_in_pages == 0 && \
++ (info)->seg_not_present == 1 && \
++ (info)->useable == 0 )
++
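++/*
++ * LDT_entry_a() and LDT_entry_b() pack a struct user_desc into the low and
++ * high 32-bit words of a hardware segment descriptor, while LDT_empty()
++ * recognises the canonical "empty" descriptor used to clear a slot;
++ * write_ldt_entry() installs the resulting pair into the given LDT slot.
++ */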
++extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
++
++#if TLS_SIZE != 24
++# error update this code.
++#endif
++
++static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
++{
++#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), *(u64 *)&t->tls_array[i])
++ C(0); C(1); C(2);
++#undef C
++}
++
++static inline void clear_LDT(void)
++{
++ int cpu = get_cpu();
++
++ /*
++ * NB. We load the default_ldt for lcall7/27 handling on demand, as
++	 * it slows down context switching. No one uses it anyway.
++ */
++ cpu = cpu; /* XXX avoid compiler warning */
++ xen_set_ldt(0UL, 0);
++ put_cpu();
++}
++
++/*
++ * load one particular LDT into the current CPU
++ */
++static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
++{
++ void *segments = pc->ldt;
++ int count = pc->size;
++
++ if (likely(!count))
++ segments = NULL;
++
++ xen_set_ldt((unsigned long)segments, count);
++}
++
++static inline void load_LDT(mm_context_t *pc)
++{
++ int cpu = get_cpu();
++ load_LDT_nolock(pc, cpu);
++ put_cpu();
++}
++
++static inline unsigned long get_desc_base(unsigned long *desc)
++{
++ unsigned long base;
++ base = ((desc[0] >> 16) & 0x0000ffff) |
++ ((desc[1] << 16) & 0x00ff0000) |
++ (desc[1] & 0xff000000);
++ return base;
++}
++
++#endif /* !__ASSEMBLY__ */
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/dma-mapping.h 2007-08-27 14:02:07.000000000 -0400
+@@ -0,0 +1,157 @@
++#ifndef _ASM_I386_DMA_MAPPING_H
++#define _ASM_I386_DMA_MAPPING_H
++
++/*
++ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
++ * documentation.
++ */
++
++#include <linux/mm.h>
++#include <asm/cache.h>
++#include <asm/io.h>
++#include <asm/scatterlist.h>
++#include <asm/swiotlb.h>
++
++static inline int
++address_needs_mapping(struct device *hwdev, dma_addr_t addr)
++{
++ dma_addr_t mask = 0xffffffff;
++ /* If the device has a mask, use it, otherwise default to 32 bits */
++ if (hwdev && hwdev->dma_mask)
++ mask = *hwdev->dma_mask;
++ return (addr & ~mask) != 0;
++}
++
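++/*
++ * Under Xen, consecutive pseudo-physical pages are not necessarily backed
++ * by consecutive machine frames.  This helper flags a buffer that spills
++ * over a page boundary unless the contiguous_bitmap records that the
++ * underlying frames are machine-contiguous, so callers can treat such
++ * buffers specially.
++ */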
++static inline int
++range_straddles_page_boundary(void *p, size_t size)
++{
++ extern unsigned long *contiguous_bitmap;
++ return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
++ !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
++}
++
++#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
++#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
++
++void *dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flag);
++
++void dma_free_coherent(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_handle);
++
++extern dma_addr_t
++dma_map_single(struct device *dev, void *ptr, size_t size,
++ enum dma_data_direction direction);
++
++extern void
++dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++ enum dma_data_direction direction);
++
++extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
++ int nents, enum dma_data_direction direction);
++extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
++ int nents, enum dma_data_direction direction);
++
++#ifdef CONFIG_HIGHMEM
++extern dma_addr_t
++dma_map_page(struct device *dev, struct page *page, unsigned long offset,
++ size_t size, enum dma_data_direction direction);
++
++extern void
++dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
++ enum dma_data_direction direction);
++#else
++#define dma_map_page(dev, page, offset, size, dir) \
++ dma_map_single(dev, page_address(page) + (offset), (size), (dir))
++#define dma_unmap_page dma_unmap_single
++#endif
++
++extern void
++dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction);
++
++extern void
++dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction);
++
++static inline void
++dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction)
++{
++ dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
++}
++
++static inline void
++dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction)
++{
++ dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
++}
++
++static inline void
++dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
++ enum dma_data_direction direction)
++{
++ if (swiotlb)
++ swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
++ enum dma_data_direction direction)
++{
++ if (swiotlb)
++ swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
++ flush_write_buffers();
++}
++
++extern int
++dma_mapping_error(dma_addr_t dma_addr);
++
++extern int
++dma_supported(struct device *dev, u64 mask);
++
++static inline int
++dma_set_mask(struct device *dev, u64 mask)
++{
++ if(!dev->dma_mask || !dma_supported(dev, mask))
++ return -EIO;
++
++ *dev->dma_mask = mask;
++
++ return 0;
++}
++
++static inline int
++dma_get_cache_alignment(void)
++{
++ /* no easy way to get cache size on all x86, so return the
++ * maximum possible, to be safe */
++ return (1 << INTERNODE_CACHE_SHIFT);
++}
++
++#define dma_is_consistent(d) (1)
++
++static inline void
++dma_cache_sync(void *vaddr, size_t size,
++ enum dma_data_direction direction)
++{
++ flush_write_buffers();
++}
++
++#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
++extern int
++dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
++ dma_addr_t device_addr, size_t size, int flags);
++
++extern void
++dma_release_declared_memory(struct device *dev);
++
++extern void *
++dma_mark_declared_memory_occupied(struct device *dev,
++ dma_addr_t device_addr, size_t size);
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/fixmap.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,155 @@
++/*
++ * fixmap.h: compile-time virtual memory allocation
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 1998 Ingo Molnar
++ *
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ */
++
++#ifndef _ASM_FIXMAP_H
++#define _ASM_FIXMAP_H
++
++
++/* used by vmalloc.c, vsyscall.lds.S.
++ *
++ * Leave one empty page between vmalloc'ed areas and
++ * the start of the fixmap.
++ */
++extern unsigned long __FIXADDR_TOP;
++
++#ifndef __ASSEMBLY__
++#include <linux/kernel.h>
++#include <asm/acpi.h>
++#include <asm/apicdef.h>
++#include <asm/page.h>
++#ifdef CONFIG_HIGHMEM
++#include <linux/threads.h>
++#include <asm/kmap_types.h>
++#endif
++
++/*
++ * Here we define all the compile-time 'special' virtual
++ * addresses. The point is to have a constant address at
++ * compile time, but to set the physical address only
++ * in the boot process. We allocate these special addresses
++ * from the end of virtual memory (0xfffff000) backwards.
++ * Also this lets us do fail-safe vmalloc(): we
++ * can guarantee that these special addresses and
++ * vmalloc()-ed addresses never overlap.
++ *
++ * These 'compile-time allocated' memory buffers are
++ * fixed-size 4k pages (or larger if used with an increment
++ * higher than 1); use fixmap_set(idx,phys) to associate
++ * physical memory with fixmap indices.
++ *
++ * TLB entries of such buffers will not be flushed across
++ * task switches.
++ */
++enum fixed_addresses {
++ FIX_HOLE,
++ FIX_VDSO,
++#ifdef CONFIG_X86_LOCAL_APIC
++	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
++#endif
++#ifdef CONFIG_X86_IO_APIC
++ FIX_IO_APIC_BASE_0,
++ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
++#endif
++#ifdef CONFIG_X86_VISWS_APIC
++ FIX_CO_CPU, /* Cobalt timer */
++ FIX_CO_APIC, /* Cobalt APIC Redirection Table */
++ FIX_LI_PCIA, /* Lithium PCI Bridge A */
++ FIX_LI_PCIB, /* Lithium PCI Bridge B */
++#endif
++#ifdef CONFIG_X86_F00F_BUG
++ FIX_F00F_IDT, /* Virtual mapping for IDT */
++#endif
++#ifdef CONFIG_X86_CYCLONE_TIMER
++ FIX_CYCLONE_TIMER, /*cyclone timer register*/
++#endif
++#ifdef CONFIG_HIGHMEM
++ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
++ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
++#endif
++#ifdef CONFIG_ACPI
++ FIX_ACPI_BEGIN,
++ FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
++#endif
++#ifdef CONFIG_PCI_MMCONFIG
++ FIX_PCIE_MCFG,
++#endif
++ FIX_SHARED_INFO,
++#define NR_FIX_ISAMAPS 256
++ FIX_ISAMAP_END,
++ FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
++ __end_of_permanent_fixed_addresses,
++ /* temporary boot-time mappings, used before ioremap() is functional */
++#define NR_FIX_BTMAPS 16
++ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
++ FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
++ FIX_WP_TEST,
++ __end_of_fixed_addresses
++};
++
++extern void set_fixaddr_top(unsigned long top);
++
++extern void __set_fixmap(enum fixed_addresses idx,
++ maddr_t phys, pgprot_t flags);
++
++#define set_fixmap(idx, phys) \
++ __set_fixmap(idx, phys, PAGE_KERNEL)
++/*
++ * Some hardware wants to get fixmapped without caching.
++ */
++#define set_fixmap_nocache(idx, phys) \
++ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
++
++#define clear_fixmap(idx) \
++ __set_fixmap(idx, 0, __pgprot(0))
++
++#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
++
++#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
++#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
++#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
++#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
++
++#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
++#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
++
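++/*
++ * Example: __fix_to_virt(FIX_HOLE) == FIXADDR_TOP and every further index
++ * maps one page lower, i.e. __fix_to_virt(n) == FIXADDR_TOP - n*PAGE_SIZE;
++ * __virt_to_fix() is the exact inverse for page-aligned addresses within
++ * the fixmap range.
++ */
++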
++extern void __this_fixmap_does_not_exist(void);
++
++/*
++ * 'index to address' translation. If anyone tries to use the idx
++ * directly without translation, we catch the bug with a NULL-dereference
++ * kernel oops. Illegal ranges of incoming indices are caught too.
++ */
++static __always_inline unsigned long fix_to_virt(const unsigned int idx)
++{
++ /*
++ * this branch gets completely eliminated after inlining,
++ * except when someone tries to use fixaddr indices in an
++	 * illegal way (such as mixing up address types or using
++	 * out-of-range indices).
++	 *
++	 * If it doesn't get removed, the linker will complain
++	 * loudly with a reasonably clear error message.
++ */
++ if (idx >= __end_of_fixed_addresses)
++ __this_fixmap_does_not_exist();
++
++ return __fix_to_virt(idx);
++}
++
++static inline unsigned long virt_to_fix(const unsigned long vaddr)
++{
++ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
++ return __virt_to_fix(vaddr);
++}
++
++#endif /* !__ASSEMBLY__ */
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/floppy.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,147 @@
++/*
++ * Architecture specific parts of the Floppy driver
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 1995
++ *
++ * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
++ */
++#ifndef __ASM_XEN_I386_FLOPPY_H
++#define __ASM_XEN_I386_FLOPPY_H
++
++#include <linux/vmalloc.h>
++
++/* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. */
++#include <asm/dma.h>
++#undef MAX_DMA_ADDRESS
++#define MAX_DMA_ADDRESS 0
++#define CROSS_64KB(a,s) (0)
++
++#define fd_inb(port) inb_p(port)
++#define fd_outb(value,port) outb_p(value,port)
++
++#define fd_request_dma() (0)
++#define fd_free_dma() ((void)0)
++#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
++#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
++#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
++#define fd_get_dma_residue() (virtual_dma_count + virtual_dma_residue)
++#define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io)
++/*
++ * Do not use vmalloc/vfree: floppy_release_irq_and_dma() gets called from
++ * softirq context via motor_off_callback. A generic bug we happen to trigger.
++ */
++#define fd_dma_mem_alloc(size) __get_free_pages(GFP_KERNEL, get_order(size))
++#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
++
++static int virtual_dma_count;
++static int virtual_dma_residue;
++static char *virtual_dma_addr;
++static int virtual_dma_mode;
++static int doing_pdma;
++
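++/*
++ * Under Xen the driver avoids the ISA DMA controller entirely and runs in
++ * "virtual DMA" mode (forced on in xen_floppy_init() below): floppy_hardint()
++ * moves each byte by hand with port I/O, and the virtual_dma_* variables
++ * above track the progress of the transfer.
++ */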
++static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
++{
++ register unsigned char st;
++ register int lcount;
++ register char *lptr;
++
++ if (!doing_pdma)
++ return floppy_interrupt(irq, dev_id, regs);
++
++ st = 1;
++ for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
++ lcount; lcount--, lptr++) {
++ st=inb(virtual_dma_port+4) & 0xa0 ;
++ if(st != 0xa0)
++ break;
++ if(virtual_dma_mode)
++ outb_p(*lptr, virtual_dma_port+5);
++ else
++ *lptr = inb_p(virtual_dma_port+5);
++ }
++ virtual_dma_count = lcount;
++ virtual_dma_addr = lptr;
++ st = inb(virtual_dma_port+4);
++
++ if(st == 0x20)
++ return IRQ_HANDLED;
++ if(!(st & 0x20)) {
++ virtual_dma_residue += virtual_dma_count;
++ virtual_dma_count=0;
++ doing_pdma = 0;
++ floppy_interrupt(irq, dev_id, regs);
++ return IRQ_HANDLED;
++ }
++ return IRQ_HANDLED;
++}
++
++static void fd_disable_dma(void)
++{
++ doing_pdma = 0;
++ virtual_dma_residue += virtual_dma_count;
++ virtual_dma_count=0;
++}
++
++static int fd_request_irq(void)
++{
++ return request_irq(FLOPPY_IRQ, floppy_hardint,
++ IRQF_DISABLED, "floppy", NULL);
++}
++
++static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
++{
++ doing_pdma = 1;
++ virtual_dma_port = io;
++ virtual_dma_mode = (mode == DMA_MODE_WRITE);
++ virtual_dma_addr = addr;
++ virtual_dma_count = size;
++ virtual_dma_residue = 0;
++ return 0;
++}
++
++/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
++#define FDC1 xen_floppy_init()
++static int FDC2 = -1;
++
++static int xen_floppy_init(void)
++{
++ use_virtual_dma = 1;
++ can_use_virtual_dma = 1;
++ return 0x3f0;
++}
++
++/*
++ * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
++ * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
++ * coincides with another rtc CMOS user. Paul G.
++ */
++#define FLOPPY0_TYPE ({ \
++ unsigned long flags; \
++ unsigned char val; \
++ spin_lock_irqsave(&rtc_lock, flags); \
++ val = (CMOS_READ(0x10) >> 4) & 15; \
++ spin_unlock_irqrestore(&rtc_lock, flags); \
++ val; \
++})
++
++#define FLOPPY1_TYPE ({ \
++ unsigned long flags; \
++ unsigned char val; \
++ spin_lock_irqsave(&rtc_lock, flags); \
++ val = CMOS_READ(0x10) & 15; \
++ spin_unlock_irqrestore(&rtc_lock, flags); \
++ val; \
++})
++
++#define N_FDC 2
++#define N_DRIVE 8
++
++#define FLOPPY_MOTOR_MASK 0xf0
++
++#define EXTRA_FLOPPY_PARAMS
++
++#endif /* __ASM_XEN_I386_FLOPPY_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/highmem.h 2007-08-27 14:01:58.000000000 -0400
+@@ -0,0 +1,80 @@
++/*
++ * highmem.h: virtual kernel memory mappings for high memory
++ *
++ * Used in CONFIG_HIGHMEM systems for memory pages which
++ * are not addressable by direct kernel virtual addresses.
++ *
++ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
++ * Gerhard.Wichert@pdb.siemens.de
++ *
++ *
++ * Redesigned the x86 32-bit VM architecture to deal with
++ * up to 16 Terabyte physical memory. With current x86 CPUs
++ * we now support up to 64 Gigabytes physical RAM.
++ *
++ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
++ */
++
++#ifndef _ASM_HIGHMEM_H
++#define _ASM_HIGHMEM_H
++
++#ifdef __KERNEL__
++
++#include <linux/interrupt.h>
++#include <linux/threads.h>
++#include <asm/kmap_types.h>
++#include <asm/tlbflush.h>
++
++/* declarations for highmem.c */
++extern unsigned long highstart_pfn, highend_pfn;
++
++extern pte_t *kmap_pte;
++extern pgprot_t kmap_prot;
++extern pte_t *pkmap_page_table;
++
++/*
++ * Right now we initialize only a single pte table. It can be extended
++ * easily; subsequent pte tables have to be allocated in one physical
++ * chunk of RAM.
++ */
++#ifdef CONFIG_X86_PAE
++#define LAST_PKMAP 512
++#else
++#define LAST_PKMAP 1024
++#endif
++/*
++ * Ordering is:
++ *
++ * FIXADDR_TOP
++ * fixed_addresses
++ * FIXADDR_START
++ * temp fixed addresses
++ * FIXADDR_BOOT_START
++ * Persistent kmap area
++ * PKMAP_BASE
++ * VMALLOC_END
++ * Vmalloc area
++ * VMALLOC_START
++ * high_memory
++ */
++#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK )
++#define LAST_PKMAP_MASK (LAST_PKMAP-1)
++#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
++#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
++
++extern void * FASTCALL(kmap_high(struct page *page));
++extern void FASTCALL(kunmap_high(struct page *page));
++
++void *kmap(struct page *page);
++void kunmap(struct page *page);
++void *kmap_atomic(struct page *page, enum km_type type);
++void *kmap_atomic_pte(struct page *page, enum km_type type);
++void kunmap_atomic(void *kvaddr, enum km_type type);
++void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
++struct page *kmap_atomic_to_page(void *ptr);
++
++#define flush_cache_kmaps() do { } while (0)
++
++#endif /* __KERNEL__ */
++
++#endif /* _ASM_HIGHMEM_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/hw_irq.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,72 @@
++#ifndef _ASM_HW_IRQ_H
++#define _ASM_HW_IRQ_H
++
++/*
++ * linux/include/asm/hw_irq.h
++ *
++ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
++ *
++ * moved some of the old arch/i386/kernel/irq.h to here. VY
++ *
++ * IRQ/IPI changes taken from work by Thomas Radke
++ * <tomsoft@informatik.tu-chemnitz.de>
++ */
++
++#include <linux/profile.h>
++#include <asm/atomic.h>
++#include <asm/irq.h>
++#include <asm/sections.h>
++
++struct hw_interrupt_type;
++
++#define NMI_VECTOR 0x02
++
++/*
++ * Various low-level irq details needed by irq.c, process.c,
++ * time.c, io_apic.c and smp.c
++ *
++ * Interrupt entry/exit code at both C and assembly level
++ */
++
++extern u8 irq_vector[NR_IRQ_VECTORS];
++#define IO_APIC_VECTOR(irq) (irq_vector[irq])
++#define AUTO_ASSIGN -1
++
++extern void (*interrupt[NR_IRQS])(void);
++
++#ifdef CONFIG_SMP
++fastcall void reschedule_interrupt(void);
++fastcall void invalidate_interrupt(void);
++fastcall void call_function_interrupt(void);
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++fastcall void apic_timer_interrupt(void);
++fastcall void error_interrupt(void);
++fastcall void spurious_interrupt(void);
++fastcall void thermal_interrupt(struct pt_regs *);
++#define platform_legacy_irq(irq) ((irq) < 16)
++#endif
++
++void disable_8259A_irq(unsigned int irq);
++void enable_8259A_irq(unsigned int irq);
++int i8259A_irq_pending(unsigned int irq);
++void make_8259A_irq(unsigned int irq);
++void init_8259A(int aeoi);
++void FASTCALL(send_IPI_self(int vector));
++void init_VISWS_APIC_irqs(void);
++void setup_IO_APIC(void);
++void disable_IO_APIC(void);
++void print_IO_APIC(void);
++int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
++void send_IPI(int dest, int vector);
++void setup_ioapic_dest(void);
++
++extern unsigned long io_apic_irqs;
++
++extern atomic_t irq_err_count;
++extern atomic_t irq_mis_count;
++
++#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
++
++#endif /* _ASM_HW_IRQ_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/hypercall.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,407 @@
++/******************************************************************************
++ * hypercall.h
++ *
++ * Linux-specific hypervisor handling.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERCALL_H__
++#define __HYPERCALL_H__
++
++#include <linux/string.h> /* memcpy() */
++
++#ifndef __HYPERVISOR_H__
++# error "please don't include this file directly"
++#endif
++
++#define __STR(x) #x
++#define STR(x) __STR(x)
++
++#ifdef CONFIG_XEN
++#define HYPERCALL_STR(name) \
++ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"
++#else
++#define HYPERCALL_STR(name) \
++ "mov hypercall_stubs,%%eax; " \
++ "add $("STR(__HYPERVISOR_##name)" * 32),%%eax; " \
++ "call *%%eax"
++#endif
++
++#define _hypercall0(type, name) \
++({ \
++ long __res; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res) \
++ : \
++ : "memory" ); \
++ (type)__res; \
++})
++
++#define _hypercall1(type, name, a1) \
++({ \
++ long __res, __ign1; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=b" (__ign1) \
++ : "1" ((long)(a1)) \
++ : "memory" ); \
++ (type)__res; \
++})
++
++#define _hypercall2(type, name, a1, a2) \
++({ \
++ long __res, __ign1, __ign2; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \
++ : "1" ((long)(a1)), "2" ((long)(a2)) \
++ : "memory" ); \
++ (type)__res; \
++})
++
++#define _hypercall3(type, name, a1, a2, a3) \
++({ \
++ long __res, __ign1, __ign2, __ign3; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
++ "=d" (__ign3) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)) \
++ : "memory" ); \
++ (type)__res; \
++})
++
++#define _hypercall4(type, name, a1, a2, a3, a4) \
++({ \
++ long __res, __ign1, __ign2, __ign3, __ign4; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
++ "=d" (__ign3), "=S" (__ign4) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)), "4" ((long)(a4)) \
++ : "memory" ); \
++ (type)__res; \
++})
++
++#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
++({ \
++ long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
++ "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)), "4" ((long)(a4)), \
++ "5" ((long)(a5)) \
++ : "memory" ); \
++ (type)__res; \
++})
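As an illustrative sketch only (mirroring the HYPERVISOR_sched_op() wrapper further down), a two-argument wrapper simply forwards to _hypercall2(); under CONFIG_XEN this becomes a call into hypercall_page + (__HYPERVISOR_sched_op * 32) with cmd in %ebx and arg in %ecx:

/* Sketch for illustration; not part of the patched header. */
static inline int example_hypercall2_user(int cmd, void *arg)
{
	return _hypercall2(int, sched_op, cmd, arg);
}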
++
++static inline int
++HYPERVISOR_set_trap_table(
++ trap_info_t *table)
++{
++ return _hypercall1(int, set_trap_table, table);
++}
++
++static inline int
++HYPERVISOR_mmu_update(
++ mmu_update_t *req, int count, int *success_count, domid_t domid)
++{
++ return _hypercall4(int, mmu_update, req, count, success_count, domid);
++}
++
++static inline int
++HYPERVISOR_mmuext_op(
++ struct mmuext_op *op, int count, int *success_count, domid_t domid)
++{
++ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
++}
++
++static inline int
++HYPERVISOR_set_gdt(
++ unsigned long *frame_list, int entries)
++{
++ return _hypercall2(int, set_gdt, frame_list, entries);
++}
++
++static inline int
++HYPERVISOR_stack_switch(
++ unsigned long ss, unsigned long esp)
++{
++ return _hypercall2(int, stack_switch, ss, esp);
++}
++
++static inline int
++HYPERVISOR_set_callbacks(
++ unsigned long event_selector, unsigned long event_address,
++ unsigned long failsafe_selector, unsigned long failsafe_address)
++{
++ return _hypercall4(int, set_callbacks,
++ event_selector, event_address,
++ failsafe_selector, failsafe_address);
++}
++
++static inline int
++HYPERVISOR_fpu_taskswitch(
++ int set)
++{
++ return _hypercall1(int, fpu_taskswitch, set);
++}
++
++static inline int
++HYPERVISOR_sched_op_compat(
++ int cmd, unsigned long arg)
++{
++ return _hypercall2(int, sched_op_compat, cmd, arg);
++}
++
++static inline int
++HYPERVISOR_sched_op(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, sched_op, cmd, arg);
++}
++
++static inline long
++HYPERVISOR_set_timer_op(
++ u64 timeout)
++{
++ unsigned long timeout_hi = (unsigned long)(timeout>>32);
++ unsigned long timeout_lo = (unsigned long)timeout;
++ return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
++}
++
++static inline int
++HYPERVISOR_platform_op(
++ struct xen_platform_op *platform_op)
++{
++ platform_op->interface_version = XENPF_INTERFACE_VERSION;
++ return _hypercall1(int, platform_op, platform_op);
++}
++
++static inline int
++HYPERVISOR_set_debugreg(
++ int reg, unsigned long value)
++{
++ return _hypercall2(int, set_debugreg, reg, value);
++}
++
++static inline unsigned long
++HYPERVISOR_get_debugreg(
++ int reg)
++{
++ return _hypercall1(unsigned long, get_debugreg, reg);
++}
++
++static inline int
++HYPERVISOR_update_descriptor(
++ u64 ma, u64 desc)
++{
++ return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
++}
++
++static inline int
++HYPERVISOR_memory_op(
++ unsigned int cmd, void *arg)
++{
++ return _hypercall2(int, memory_op, cmd, arg);
++}
++
++static inline int
++HYPERVISOR_multicall(
++ multicall_entry_t *call_list, int nr_calls)
++{
++ return _hypercall2(int, multicall, call_list, nr_calls);
++}
++
++static inline int
++HYPERVISOR_update_va_mapping(
++ unsigned long va, pte_t new_val, unsigned long flags)
++{
++ unsigned long pte_hi = 0;
++#ifdef CONFIG_X86_PAE
++ pte_hi = new_val.pte_high;
++#endif
++ return _hypercall4(int, update_va_mapping, va,
++ new_val.pte_low, pte_hi, flags);
++}
++
++static inline int
++HYPERVISOR_event_channel_op(
++ int cmd, void *arg)
++{
++ int rc = _hypercall2(int, event_channel_op, cmd, arg);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ struct evtchn_op op;
++ op.cmd = cmd;
++ memcpy(&op.u, arg, sizeof(op.u));
++ rc = _hypercall1(int, event_channel_op_compat, &op);
++ memcpy(arg, &op.u, sizeof(op.u));
++ }
++#endif
++
++ return rc;
++}
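A hedged usage sketch: callers never see the 3.0.002-compat retry above, it is handled inside the wrapper. A typical bind (types and constants from xen/interface/event_channel.h and xen.h) might look like:

/* Illustrative only: bind the timer VIRQ for VCPU 0 and return the port. */
static inline int example_bind_timer_virq(evtchn_port_t *port)
{
	struct evtchn_bind_virq bind = { .virq = VIRQ_TIMER, .vcpu = 0 };
	int rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind);

	if (rc == 0)
		*port = bind.port;	/* port allocated by Xen */
	return rc;
}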
++
++static inline int
++HYPERVISOR_acm_op(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, acm_op, cmd, arg);
++}
++
++static inline int
++HYPERVISOR_xen_version(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, xen_version, cmd, arg);
++}
++
++static inline int
++HYPERVISOR_console_io(
++ int cmd, int count, char *str)
++{
++ return _hypercall3(int, console_io, cmd, count, str);
++}
++
++static inline int
++HYPERVISOR_physdev_op(
++ int cmd, void *arg)
++{
++ int rc = _hypercall2(int, physdev_op, cmd, arg);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ struct physdev_op op;
++ op.cmd = cmd;
++ memcpy(&op.u, arg, sizeof(op.u));
++ rc = _hypercall1(int, physdev_op_compat, &op);
++ memcpy(arg, &op.u, sizeof(op.u));
++ }
++#endif
++
++ return rc;
++}
++
++static inline int
++HYPERVISOR_grant_table_op(
++ unsigned int cmd, void *uop, unsigned int count)
++{
++ return _hypercall3(int, grant_table_op, cmd, uop, count);
++}
++
++static inline int
++HYPERVISOR_update_va_mapping_otherdomain(
++ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
++{
++ unsigned long pte_hi = 0;
++#ifdef CONFIG_X86_PAE
++ pte_hi = new_val.pte_high;
++#endif
++ return _hypercall5(int, update_va_mapping_otherdomain, va,
++ new_val.pte_low, pte_hi, flags, domid);
++}
++
++static inline int
++HYPERVISOR_vm_assist(
++ unsigned int cmd, unsigned int type)
++{
++ return _hypercall2(int, vm_assist, cmd, type);
++}
++
++static inline int
++HYPERVISOR_vcpu_op(
++ int cmd, int vcpuid, void *extra_args)
++{
++ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
++}
++
++static inline int
++HYPERVISOR_suspend(
++ unsigned long srec)
++{
++ struct sched_shutdown sched_shutdown = {
++ .reason = SHUTDOWN_suspend
++ };
++
++ int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
++ &sched_shutdown, srec);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (rc == -ENOSYS)
++ rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
++ SHUTDOWN_suspend, srec);
++#endif
++
++ return rc;
++}
++
++static inline int
++HYPERVISOR_nmi_op(
++ unsigned long op, void *arg)
++{
++ return _hypercall2(int, nmi_op, op, arg);
++}
++
++static inline unsigned long
++HYPERVISOR_hvm_op(
++ int op, void *arg)
++{
++ return _hypercall2(unsigned long, hvm_op, op, arg);
++}
++
++static inline int
++HYPERVISOR_callback_op(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, callback_op, cmd, arg);
++}
++
++static inline int
++HYPERVISOR_xenoprof_op(
++ int op, void *arg)
++{
++ return _hypercall2(int, xenoprof_op, op, arg);
++}
++
++static inline int
++HYPERVISOR_kexec_op(
++ unsigned long op, void *args)
++{
++ return _hypercall2(int, kexec_op, op, args);
++}
++
++
++
++#endif /* __HYPERCALL_H__ */

+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/hypervisor.h 2007-08-27 14:02:01.000000000 -0400
+@@ -0,0 +1,258 @@
++/******************************************************************************
++ * hypervisor.h
++ *
++ * Linux-specific hypervisor handling.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERVISOR_H__
++#define __HYPERVISOR_H__
++
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/errno.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/platform.h>
++#include <xen/interface/event_channel.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/sched.h>
++#include <xen/interface/nmi.h>
++#include <asm/ptrace.h>
++#include <asm/page.h>
++#if defined(__i386__)
++# ifdef CONFIG_X86_PAE
++# include <asm-generic/pgtable-nopud.h>
++# else
++# include <asm-generic/pgtable-nopmd.h>
++# endif
++#endif
++
++extern shared_info_t *HYPERVISOR_shared_info;
++
++#define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu))
++#ifdef CONFIG_SMP
++#define current_vcpu_info() vcpu_info(smp_processor_id())
++#else
++#define current_vcpu_info() vcpu_info(0)
++#endif
++
++#ifdef CONFIG_X86_32
++extern unsigned long hypervisor_virt_start;
++#endif
++
++/* arch/xen/i386/kernel/setup.c */
++extern start_info_t *xen_start_info;
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
++#else
++#define is_initial_xendomain() 0
++#endif
++
++/* arch/xen/kernel/evtchn.c */
++/* Force a proper event-channel callback from Xen. */
++void force_evtchn_callback(void);
++
++/* arch/xen/kernel/process.c */
++void xen_cpu_idle (void);
++
++/* arch/xen/i386/kernel/hypervisor.c */
++void do_hypervisor_callback(struct pt_regs *regs);
++
++/* arch/xen/i386/mm/hypervisor.c */
++/*
++ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
++ * be MACHINE addresses.
++ */
++
++void xen_pt_switch(unsigned long ptr);
++void xen_new_user_pt(unsigned long ptr); /* x86_64 only */
++void xen_load_gs(unsigned int selector); /* x86_64 only */
++void xen_tlb_flush(void);
++void xen_invlpg(unsigned long ptr);
++
++void xen_l1_entry_update(pte_t *ptr, pte_t val);
++void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
++void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */
++void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */
++void xen_pgd_pin(unsigned long ptr);
++void xen_pgd_unpin(unsigned long ptr);
++
++void xen_set_ldt(unsigned long ptr, unsigned long bytes);
++
++#ifdef CONFIG_SMP
++#include <linux/cpumask.h>
++void xen_tlb_flush_all(void);
++void xen_invlpg_all(unsigned long ptr);
++void xen_tlb_flush_mask(cpumask_t *mask);
++void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr);
++#endif
++
++/* Returns zero on success else negative errno. */
++int xen_create_contiguous_region(
++ unsigned long vstart, unsigned int order, unsigned int address_bits);
++void xen_destroy_contiguous_region(
++ unsigned long vstart, unsigned int order);
++
++/* Turn jiffies into Xen system time. */
++u64 jiffies_to_st(unsigned long jiffies);
++
++#ifdef CONFIG_XEN_SCRUB_PAGES
++#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
++#else
++#define scrub_pages(_p,_n) ((void)0)
++#endif
++
++#include <xen/hypercall.h>
++
++#if defined(CONFIG_X86_64)
++#define MULTI_UVMFLAGS_INDEX 2
++#define MULTI_UVMDOMID_INDEX 3
++#else
++#define MULTI_UVMFLAGS_INDEX 3
++#define MULTI_UVMDOMID_INDEX 4
++#endif
++
++#define is_running_on_xen() 1
++
++static inline int
++HYPERVISOR_yield(
++ void)
++{
++ int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (rc == -ENOSYS)
++ rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
++#endif
++
++ return rc;
++}
++
++static inline int
++HYPERVISOR_block(
++ void)
++{
++ int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (rc == -ENOSYS)
++ rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0);
++#endif
++
++ return rc;
++}
++
++static inline int
++HYPERVISOR_shutdown(
++ unsigned int reason)
++{
++ struct sched_shutdown sched_shutdown = {
++ .reason = reason
++ };
++
++ int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (rc == -ENOSYS)
++ rc = HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason);
++#endif
++
++ return rc;
++}
++
++static inline int
++HYPERVISOR_poll(
++ evtchn_port_t *ports, unsigned int nr_ports, u64 timeout)
++{
++ int rc;
++ struct sched_poll sched_poll = {
++ .nr_ports = nr_ports,
++ .timeout = jiffies_to_st(timeout)
++ };
++ set_xen_guest_handle(sched_poll.ports, ports);
++
++ rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (rc == -ENOSYS)
++ rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
++#endif
++
++ return rc;
++}
++
++static inline void
++MULTI_update_va_mapping(
++ multicall_entry_t *mcl, unsigned long va,
++ pte_t new_val, unsigned long flags)
++{
++ mcl->op = __HYPERVISOR_update_va_mapping;
++ mcl->args[0] = va;
++#if defined(CONFIG_X86_64)
++ mcl->args[1] = new_val.pte;
++#elif defined(CONFIG_X86_PAE)
++ mcl->args[1] = new_val.pte_low;
++ mcl->args[2] = new_val.pte_high;
++#else
++ mcl->args[1] = new_val.pte_low;
++ mcl->args[2] = 0;
++#endif
++ mcl->args[MULTI_UVMFLAGS_INDEX] = flags;
++}
++
++static inline void
++MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd,
++ void *uop, unsigned int count)
++{
++ mcl->op = __HYPERVISOR_grant_table_op;
++ mcl->args[0] = cmd;
++ mcl->args[1] = (unsigned long)uop;
++ mcl->args[2] = count;
++}
++
++static inline void
++MULTI_update_va_mapping_otherdomain(
++ multicall_entry_t *mcl, unsigned long va,
++ pte_t new_val, unsigned long flags, domid_t domid)
++{
++ mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
++ mcl->args[0] = va;
++#if defined(CONFIG_X86_64)
++ mcl->args[1] = new_val.pte;
++#elif defined(CONFIG_X86_PAE)
++ mcl->args[1] = new_val.pte_low;
++ mcl->args[2] = new_val.pte_high;
++#else
++ mcl->args[1] = new_val.pte_low;
++ mcl->args[2] = 0;
++#endif
++ mcl->args[MULTI_UVMFLAGS_INDEX] = flags;
++ mcl->args[MULTI_UVMDOMID_INDEX] = domid;
++}
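A sketch of how these MULTI_* helpers are meant to be batched (values illustrative): several entries are filled in and then submitted with one HYPERVISOR_multicall() instead of one hypercall per update.

/* Illustrative only: update two PTE mappings with a single hypercall. */
static inline void example_batch_va_updates(unsigned long va0, pte_t pte0,
					    unsigned long va1, pte_t pte1)
{
	multicall_entry_t mcl[2];

	MULTI_update_va_mapping(&mcl[0], va0, pte0, 0);
	MULTI_update_va_mapping(&mcl[1], va1, pte1, UVMF_INVLPG);
	if (unlikely(HYPERVISOR_multicall(mcl, 2)))
		BUG();
}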
++
++#endif /* __HYPERVISOR_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/io.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,390 @@
++#ifndef _ASM_IO_H
++#define _ASM_IO_H
++
++#include <linux/string.h>
++#include <linux/compiler.h>
++
++/*
++ * This file contains the definitions for the x86 IO instructions
++ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
++ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
++ * versions of the single-IO instructions (inb_p/inw_p/..).
++ *
++ * This file is not meant to be obfuscating: it's just complicated
++ * to (a) handle it all in a way that makes gcc able to optimize it
++ * as well as possible and (b) trying to avoid writing the same thing
++ * over and over again with slight variations and possibly making a
++ * mistake somewhere.
++ */
++
++/*
++ * Thanks to James van Artsdalen for a better timing-fix than
++ * the two short jumps: using outb's to a nonexistent port seems
++ * to guarantee better timings even on fast machines.
++ *
++ * On the other hand, I'd like to be sure of a non-existent port:
++ * I feel a bit unsafe about using 0x80 (should be safe, though)
++ *
++ * Linus
++ */
++
++ /*
++ * Bit simplified and optimized by Jan Hubicka
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
++ *
++ * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
++ * isa_read[wl] and isa_write[wl] fixed
++ * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
++ */
++
++#define IO_SPACE_LIMIT 0xffff
++
++#define XQUAD_PORTIO_BASE 0xfe400000
++#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
++
++#ifdef __KERNEL__
++
++#include <asm-generic/iomap.h>
++
++#include <linux/vmalloc.h>
++#include <asm/fixmap.h>
++
++/*
++ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
++ * access
++ */
++#define xlate_dev_mem_ptr(p, sz) ioremap(p, sz)
++#define xlate_dev_mem_ptr_unmap(p) iounmap(p)
++
++/*
++ * Convert a virtual cached pointer to an uncached pointer
++ */
++#define xlate_dev_kmem_ptr(p) p
++
++/**
++ * virt_to_phys - map virtual addresses to physical
++ * @address: address to remap
++ *
++ * The returned physical address is the physical (CPU) mapping for
++ * the memory address given. It is only valid to use this function on
++ * addresses directly mapped or allocated via kmalloc.
++ *
++ * This function does not give bus mappings for DMA transfers. In
++ * almost all conceivable cases a device driver should not be using
++ * this function
++ */
++
++static inline unsigned long virt_to_phys(volatile void * address)
++{
++ return __pa(address);
++}
++
++/**
++ * phys_to_virt - map physical address to virtual
++ * @address: address to remap
++ *
++ * The returned virtual address is a current CPU mapping for
++ * the memory address given. It is only valid to use this function on
++ * addresses that have a kernel mapping
++ *
++ * This function does not handle bus mappings for DMA transfers. In
++ * almost all conceivable cases a device driver should not be using
++ * this function
++ */
++
++static inline void * phys_to_virt(unsigned long address)
++{
++ return __va(address);
++}
++
++/*
++ * Change "struct page" to physical address.
++ */
++#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
++#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
++#define page_to_bus(page) (phys_to_machine(page_to_pseudophys(page)))
++
++#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
++ (unsigned long) bio_offset((bio)))
++#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
++ (unsigned long) (bv)->bv_offset)
++
++#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
++ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
++ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
++ bvec_to_pseudophys((vec2))))
++
++extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
++
++/**
++ * ioremap - map bus memory into CPU space
++ * @offset: bus address of the memory
++ * @size: size of the resource to map
++ *
++ * ioremap performs a platform specific sequence of operations to
++ * make bus memory CPU accessible via the readb/readw/readl/writeb/
++ * writew/writel functions and the other mmio helpers. The returned
++ * address is not guaranteed to be usable directly as a virtual
++ * address.
++ */
++
++static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
++{
++ return __ioremap(offset, size, 0);
++}
++
++extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
++extern void iounmap(volatile void __iomem *addr);
++
++/*
++ * bt_ioremap() and bt_iounmap() are for temporary early boot-time
++ * mappings, before the real ioremap() is functional.
++ * A boot-time mapping is currently limited to at most 16 pages.
++ */
++extern void *bt_ioremap(unsigned long offset, unsigned long size);
++extern void bt_iounmap(void *addr, unsigned long size);
++
++/* Use early IO mappings for DMI because it's initialized early */
++#define dmi_ioremap bt_ioremap
++#define dmi_iounmap bt_iounmap
++#define dmi_alloc alloc_bootmem
++
++/*
++ * ISA I/O bus memory addresses are 1:1 with the physical address.
++ */
++#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
++#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
++#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
++
++/*
++ * However, PCI ones are not necessarily 1:1 and therefore these interfaces
++ * are forbidden in portable PCI drivers.
++ *
++ * Allow them on x86 for legacy drivers, though.
++ */
++#define virt_to_bus(_x) phys_to_machine(__pa(_x))
++#define bus_to_virt(_x) __va(machine_to_phys(_x))
++
++/*
++ * readX/writeX() are used to access memory mapped devices. On some
++ * architectures the memory mapped IO stuff needs to be accessed
++ * differently. On the x86 architecture, we just read/write the
++ * memory location directly.
++ */
++
++static inline unsigned char readb(const volatile void __iomem *addr)
++{
++ return *(volatile unsigned char __force *) addr;
++}
++static inline unsigned short readw(const volatile void __iomem *addr)
++{
++ return *(volatile unsigned short __force *) addr;
++}
++static inline unsigned int readl(const volatile void __iomem *addr)
++{
++ return *(volatile unsigned int __force *) addr;
++}
++#define readb_relaxed(addr) readb(addr)
++#define readw_relaxed(addr) readw(addr)
++#define readl_relaxed(addr) readl(addr)
++#define __raw_readb readb
++#define __raw_readw readw
++#define __raw_readl readl
++
++static inline void writeb(unsigned char b, volatile void __iomem *addr)
++{
++ *(volatile unsigned char __force *) addr = b;
++}
++static inline void writew(unsigned short b, volatile void __iomem *addr)
++{
++ *(volatile unsigned short __force *) addr = b;
++}
++static inline void writel(unsigned int b, volatile void __iomem *addr)
++{
++ *(volatile unsigned int __force *) addr = b;
++}
++#define __raw_writeb writeb
++#define __raw_writew writew
++#define __raw_writel writel
++
++#define mmiowb()
++
++static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
++{
++ memset((void __force *) addr, val, count);
++}
++static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
++{
++ __memcpy(dst, (void __force *) src, count);
++}
++static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
++{
++ __memcpy((void __force *) dst, src, count);
++}
++
++/*
++ * ISA space is 'always mapped' on a typical x86 system, no need to
++ * explicitly ioremap() it. The fact that the ISA IO space is mapped
++ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
++ * are physical addresses. The following constant pointer can be
++ * used as the IO-area pointer (it can be iounmapped as well, so the
++ * analogy with PCI is quite large):
++ */
++#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
++
++/*
++ * Again, i386 does not require memory-mapped IO specific functions.
++ */
++
++#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(b),(c),(d))
++
++/**
++ * check_signature - find BIOS signatures
++ * @io_addr: mmio address to check
++ * @signature: signature block
++ * @length: length of signature
++ *
++ * Perform a signature comparison with the mmio address io_addr. This
++ * address should have been obtained by ioremap.
++ * Returns 1 on a match.
++ */
++
++static inline int check_signature(volatile void __iomem * io_addr,
++ const unsigned char *signature, int length)
++{
++ int retval = 0;
++ do {
++ if (readb(io_addr) != *signature)
++ goto out;
++ io_addr++;
++ signature++;
++ length--;
++ } while (length);
++ retval = 1;
++out:
++ return retval;
++}
++
++/*
++ * Cache management
++ *
++ * This is needed for two cases
++ * 1. Out of order aware processors
++ * 2. Accidentally out of order processors (PPro errata #51)
++ */
++
++#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
++
++static inline void flush_write_buffers(void)
++{
++ __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
++}
++
++#define dma_cache_inv(_start,_size) flush_write_buffers()
++#define dma_cache_wback(_start,_size) flush_write_buffers()
++#define dma_cache_wback_inv(_start,_size) flush_write_buffers()
++
++#else
++
++/* Nothing to do */
++
++#define dma_cache_inv(_start,_size) do { } while (0)
++#define dma_cache_wback(_start,_size) do { } while (0)
++#define dma_cache_wback_inv(_start,_size) do { } while (0)
++#define flush_write_buffers()
++
++#endif
++
++#endif /* __KERNEL__ */
++
++#ifdef SLOW_IO_BY_JUMPING
++#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
++#else
++#define __SLOW_DOWN_IO "outb %%al,$0x80;"
++#endif
++
++static inline void slow_down_io(void) {
++ __asm__ __volatile__(
++ __SLOW_DOWN_IO
++#ifdef REALLY_SLOW_IO
++ __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
++#endif
++ : : );
++}
++
++#ifdef CONFIG_X86_NUMAQ
++extern void *xquad_portio; /* Where the IO area was mapped */
++#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
++#define __BUILDIO(bwl,bw,type) \
++static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
++ if (xquad_portio) \
++ write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
++ else \
++ out##bwl##_local(value, port); \
++} \
++static inline void out##bwl(unsigned type value, int port) { \
++ out##bwl##_quad(value, port, 0); \
++} \
++static inline unsigned type in##bwl##_quad(int port, int quad) { \
++ if (xquad_portio) \
++ return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
++ else \
++ return in##bwl##_local(port); \
++} \
++static inline unsigned type in##bwl(int port) { \
++ return in##bwl##_quad(port, 0); \
++}
++#else
++#define __BUILDIO(bwl,bw,type) \
++static inline void out##bwl(unsigned type value, int port) { \
++ out##bwl##_local(value, port); \
++} \
++static inline unsigned type in##bwl(int port) { \
++ return in##bwl##_local(port); \
++}
++#endif
++
++
++#define BUILDIO(bwl,bw,type) \
++static inline void out##bwl##_local(unsigned type value, int port) { \
++ __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
++} \
++static inline unsigned type in##bwl##_local(int port) { \
++ unsigned type value; \
++ __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
++ return value; \
++} \
++static inline void out##bwl##_local_p(unsigned type value, int port) { \
++ out##bwl##_local(value, port); \
++ slow_down_io(); \
++} \
++static inline unsigned type in##bwl##_local_p(int port) { \
++ unsigned type value = in##bwl##_local(port); \
++ slow_down_io(); \
++ return value; \
++} \
++__BUILDIO(bwl,bw,type) \
++static inline void out##bwl##_p(unsigned type value, int port) { \
++ out##bwl(value, port); \
++ slow_down_io(); \
++} \
++static inline unsigned type in##bwl##_p(int port) { \
++ unsigned type value = in##bwl(port); \
++ slow_down_io(); \
++ return value; \
++} \
++static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
++ __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
++} \
++static inline void ins##bwl(int port, void *addr, unsigned long count) { \
++ __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
++}
++
++BUILDIO(b,b,char)
++BUILDIO(w,w,short)
++BUILDIO(l,,int)
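A small usage sketch (port numbers illustrative): the BUILDIO(b,b,char) expansion above generates outb()/inb() and their _p/_local variants, so indexed register access reads like:

/* Illustrative only: read a VGA CRTC register via the generated helpers. */
static inline unsigned char example_read_crtc(unsigned char idx)
{
	outb(idx, 0x3d4);	/* index port */
	return inb(0x3d5);	/* data port */
}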
++
++/* We will be supplying our own /dev/mem implementation */
++#define ARCH_HAS_DEV_MEM
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/irqflags.h 2007-08-27 14:02:01.000000000 -0400
+@@ -0,0 +1,127 @@
++/*
++ * include/asm-i386/irqflags.h
++ *
++ * IRQ flags handling
++ *
++ * This file gets included from lowlevel asm headers too, to provide
++ * wrapped versions of the local_irq_*() APIs, based on the
++ * raw_local_irq_*() functions from the lowlevel headers.
++ */
++#ifndef _ASM_IRQFLAGS_H
++#define _ASM_IRQFLAGS_H
++
++#ifndef __ASSEMBLY__
++
++/*
++ * The use of 'barrier' in the following reflects their use as local-lock
++ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
++ * critical operations are executed. All critical operations must complete
++ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
++ * includes these barriers, for example.
++ */
++
++#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
++
++#define raw_local_save_flags(flags) \
++ do { (flags) = __raw_local_save_flags(); } while (0)
++
++#define raw_local_irq_restore(x) \
++do { \
++ vcpu_info_t *_vcpu; \
++ barrier(); \
++ _vcpu = current_vcpu_info(); \
++ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
++ barrier(); /* unmask then check (avoid races) */ \
++ if (unlikely(_vcpu->evtchn_upcall_pending)) \
++ force_evtchn_callback(); \
++ } \
++} while (0)
++
++#define raw_local_irq_disable() \
++do { \
++ current_vcpu_info()->evtchn_upcall_mask = 1; \
++ barrier(); \
++} while (0)
++
++#define raw_local_irq_enable() \
++do { \
++ vcpu_info_t *_vcpu; \
++ barrier(); \
++ _vcpu = current_vcpu_info(); \
++ _vcpu->evtchn_upcall_mask = 0; \
++ barrier(); /* unmask then check (avoid races) */ \
++ if (unlikely(_vcpu->evtchn_upcall_pending)) \
++ force_evtchn_callback(); \
++} while (0)
++
++/*
++ * Used in the idle loop; sti takes one instruction cycle
++ * to complete:
++ */
++void raw_safe_halt(void);
++
++/*
++ * Used when interrupts are already enabled or to
++ * shutdown the processor:
++ */
++void halt(void);
++
++static inline int raw_irqs_disabled_flags(unsigned long flags)
++{
++ return (flags != 0);
++}
++
++#define raw_irqs_disabled() \
++({ \
++ unsigned long flags = __raw_local_save_flags(); \
++ \
++ raw_irqs_disabled_flags(flags); \
++})
++
++/*
++ * For spinlocks, etc:
++ */
++#define __raw_local_irq_save() \
++({ \
++ unsigned long flags = __raw_local_save_flags(); \
++ \
++ raw_local_irq_disable(); \
++ \
++ flags; \
++})
++
++#define raw_local_irq_save(flags) \
++ do { (flags) = __raw_local_irq_save(); } while (0)
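Sketch of the intended usage, identical in shape to native flags handling but driven by the per-vcpu upcall mask rather than cli/sti:

/* Illustrative only: event delivery is masked across the critical section. */
static inline void example_critical_section(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* sets evtchn_upcall_mask */
	/* ... touch per-vcpu state here ... */
	raw_local_irq_restore(flags);	/* may force an upcall if events arrived */
}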
++
++#endif /* __ASSEMBLY__ */
++
++/*
++ * Do the CPU's IRQ-state tracing from assembly code. We call a
++ * C function, so save all the C-clobbered registers:
++ */
++#ifdef CONFIG_TRACE_IRQFLAGS
++
++# define TRACE_IRQS_ON \
++ pushl %eax; \
++ pushl %ecx; \
++ pushl %edx; \
++ call trace_hardirqs_on; \
++ popl %edx; \
++ popl %ecx; \
++ popl %eax;
++
++# define TRACE_IRQS_OFF \
++ pushl %eax; \
++ pushl %ecx; \
++ pushl %edx; \
++ call trace_hardirqs_off; \
++ popl %edx; \
++ popl %ecx; \
++ popl %eax;
++
++#else
++# define TRACE_IRQS_ON
++# define TRACE_IRQS_OFF
++#endif
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/maddr.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,193 @@
++#ifndef _I386_MADDR_H
++#define _I386_MADDR_H
++
++#include <xen/features.h>
++#include <xen/interface/xen.h>
++
++/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
++#define INVALID_P2M_ENTRY (~0UL)
++#define FOREIGN_FRAME_BIT (1UL<<31)
++#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
++
++/* Definitions for machine and pseudophysical addresses. */
++#ifdef CONFIG_X86_PAE
++typedef unsigned long long paddr_t;
++typedef unsigned long long maddr_t;
++#else
++typedef unsigned long paddr_t;
++typedef unsigned long maddr_t;
++#endif
++
++#ifdef CONFIG_XEN
++
++extern unsigned long *phys_to_machine_mapping;
++extern unsigned long max_mapnr;
++
++#undef machine_to_phys_mapping
++extern unsigned long *machine_to_phys_mapping;
++extern unsigned int machine_to_phys_order;
++
++static inline unsigned long pfn_to_mfn(unsigned long pfn)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return pfn;
++ BUG_ON(max_mapnr && pfn >= max_mapnr);
++ return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT;
++}
++
++static inline int phys_to_machine_mapping_valid(unsigned long pfn)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return 1;
++ BUG_ON(max_mapnr && pfn >= max_mapnr);
++ return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
++}
++
++static inline unsigned long mfn_to_pfn(unsigned long mfn)
++{
++ unsigned long pfn;
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return mfn;
++
++ if (unlikely((mfn >> machine_to_phys_order) != 0))
++ return max_mapnr;
++
++ /* The array access can fail (e.g., device space beyond end of RAM). */
++ asm (
++ "1: movl %1,%0\n"
++ "2:\n"
++ ".section .fixup,\"ax\"\n"
++ "3: movl %2,%0\n"
++ " jmp 2b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 1b,3b\n"
++ ".previous"
++ : "=r" (pfn)
++ : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) );
++
++ return pfn;
++}
++
++/*
++ * We detect special mappings in one of two ways:
++ * 1. If the MFN is an I/O page then Xen will set the m2p entry
++ * to be outside our maximum possible pseudophys range.
++ * 2. If the MFN belongs to a different domain then we will certainly
++ * not have MFN in our p2m table. Conversely, if the page is ours,
++ * then we'll have p2m(m2p(MFN))==MFN.
++ * If we detect a special mapping then it doesn't have a 'struct page'.
++ * We force !pfn_valid() by returning an out-of-range pointer.
++ *
++ * NB. These checks require that, for any MFN that is not in our reservation,
++ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
++ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
++ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
++ *
++ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
++ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
++ * require. In all the cases we care about, the FOREIGN_FRAME bit is
++ * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
++ */
++static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
++{
++ unsigned long pfn = mfn_to_pfn(mfn);
++ if ((pfn < max_mapnr)
++ && !xen_feature(XENFEAT_auto_translated_physmap)
++ && (phys_to_machine_mapping[pfn] != mfn))
++ return max_mapnr; /* force !pfn_valid() */
++ return pfn;
++}
++
++static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
++{
++ BUG_ON(max_mapnr && pfn >= max_mapnr);
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
++ return;
++ }
++ phys_to_machine_mapping[pfn] = mfn;
++}
++
++static inline maddr_t phys_to_machine(paddr_t phys)
++{
++ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
++ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
++ return machine;
++}
++
++static inline paddr_t machine_to_phys(maddr_t machine)
++{
++ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
++ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
++ return phys;
++}
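A hedged sketch of how the pseudo-physical/machine helpers compose (virt_to_machine() and friends at the end of this header are built the same way):

/* Illustrative only: round-trip a pseudo-physical address for a page we own. */
static inline int example_phys_roundtrip(paddr_t phys)
{
	maddr_t mach = phys_to_machine(phys);	/* pfn -> mfn, page offset preserved */

	return machine_to_phys(mach) == phys;	/* holds for our own, non-foreign pages */
}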
++
++#ifdef CONFIG_X86_PAE
++static inline paddr_t pte_phys_to_machine(paddr_t phys)
++{
++ /*
++ * In PAE mode, the NX bit needs to be dealt with in the value
++ * passed to pfn_to_mfn(). On x86_64, we need to mask it off,
++ * but for i386 the conversion to ulong for the argument will
++ * clip it off.
++ */
++ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
++ machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK);
++ return machine;
++}
++
++static inline paddr_t pte_machine_to_phys(maddr_t machine)
++{
++ /*
++ * In PAE mode, the NX bit needs to be dealt with in the value
++ * passed to mfn_to_pfn(). On x86_64, we need to mask it off,
++ * but for i386 the conversion to ulong for the argument will
++ * clip it off.
++ */
++ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
++ phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK);
++ return phys;
++}
++#endif
++
++#ifdef CONFIG_X86_PAE
++static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot)
++{
++ pte_t pte;
++
++ pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
++ (pgprot_val(pgprot) >> 32);
++ pte.pte_high &= (__supported_pte_mask >> 32);
++ pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
++ __supported_pte_mask;
++ return pte;
++}
++#else
++#define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
++#endif
++
++#define __pte_ma(x) ((pte_t) { (x) } )
++
++#else /* !CONFIG_XEN */
++
++#define pfn_to_mfn(pfn) (pfn)
++#define mfn_to_pfn(mfn) (mfn)
++#define mfn_to_local_pfn(mfn) (mfn)
++#define set_phys_to_machine(pfn, mfn) ((void)0)
++#define phys_to_machine_mapping_valid(pfn) (1)
++#define phys_to_machine(phys) ((maddr_t)(phys))
++#define machine_to_phys(mach) ((paddr_t)(mach))
++#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot)
++#define __pte_ma(x) __pte(x)
++
++#endif /* !CONFIG_XEN */
++
++/* VIRT <-> MACHINE conversion */
++#define virt_to_machine(v) (phys_to_machine(__pa(v)))
++#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
++#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
++
++#endif /* _I386_MADDR_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/mmu.h 2007-08-27 14:01:58.000000000 -0400
+@@ -0,0 +1,29 @@
++#ifndef __i386_MMU_H
++#define __i386_MMU_H
++
++#include <asm/semaphore.h>
++/*
++ * The i386 doesn't have a mmu context, but
++ * we put the segment information here.
++ *
++ * cpu_vm_mask is used to optimize ldt flushing.
++ */
++typedef struct {
++ int size;
++ struct semaphore sem;
++ void *ldt;
++ void *vdso;
++#ifdef CONFIG_XEN
++ int has_foreign_mappings;
++#endif
++} mm_context_t;
++
++/* mm/memory.c:exit_mmap hook */
++extern void _arch_exit_mmap(struct mm_struct *mm);
++#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
++
++/* kernel/fork.c:dup_mmap hook */
++extern void _arch_dup_mmap(struct mm_struct *mm);
++#define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm))
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/mmu_context.h 2007-08-27 14:02:01.000000000 -0400
+@@ -0,0 +1,108 @@
++#ifndef __I386_SCHED_H
++#define __I386_SCHED_H
++
++#include <asm/desc.h>
++#include <asm/atomic.h>
++#include <asm/pgalloc.h>
++#include <asm/tlbflush.h>
++
++/*
++ * Used for LDT copy/destruction.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
++void destroy_context(struct mm_struct *mm);
++
++
++static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
++{
++#if 0 /* XEN: no lazy tlb */
++ unsigned cpu = smp_processor_id();
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
++ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
++#endif
++}
++
++#define prepare_arch_switch(next) __prepare_arch_switch()
++
++static inline void __prepare_arch_switch(void)
++{
++ /*
++ * Save away %fs and %gs. No need to save %es and %ds, as those
++ * are always kernel segments while inside the kernel. Must
++ * happen before reload of cr3/ldt (i.e., not in __switch_to).
++ */
++ asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
++ : "=m" (current->thread.fs),
++ "=m" (current->thread.gs));
++ asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
++ : : "r" (0) );
++}
++
++extern void mm_pin(struct mm_struct *mm);
++extern void mm_unpin(struct mm_struct *mm);
++void mm_pin_all(void);
++
++static inline void switch_mm(struct mm_struct *prev,
++ struct mm_struct *next,
++ struct task_struct *tsk)
++{
++ int cpu = smp_processor_id();
++ struct mmuext_op _op[2], *op = _op;
++
++ if (likely(prev != next)) {
++ BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
++ !test_bit(PG_pinned, &virt_to_page(next->pgd)->flags));
++
++ /* stop flush ipis for the previous mm */
++ cpu_clear(cpu, prev->cpu_vm_mask);
++#if 0 /* XEN: no lazy tlb */
++ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
++ per_cpu(cpu_tlbstate, cpu).active_mm = next;
++#endif
++ cpu_set(cpu, next->cpu_vm_mask);
++
++ /* Re-load page tables: load_cr3(next->pgd) */
++ op->cmd = MMUEXT_NEW_BASEPTR;
++ op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
++ op++;
++
++ /*
++ * load the LDT, if the LDT is different:
++ */
++ if (unlikely(prev->context.ldt != next->context.ldt)) {
++ /* load_LDT_nolock(&next->context, cpu) */
++ op->cmd = MMUEXT_SET_LDT;
++ op->arg1.linear_addr = (unsigned long)next->context.ldt;
++ op->arg2.nr_ents = next->context.size;
++ op++;
++ }
++
++ BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
++ }
++#if 0 /* XEN: no lazy tlb */
++ else {
++ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
++ BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
++
++ if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
++ /* We were in lazy tlb mode and leave_mm disabled
++ * tlb flush IPI delivery. We must reload %cr3.
++ */
++ load_cr3(next->pgd);
++ load_LDT_nolock(&next->context, cpu);
++ }
++ }
++#endif
++}
++
++#define deactivate_mm(tsk, mm) \
++ asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
++
++static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
++{
++ if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
++ mm_pin(next);
++ switch_mm(prev, next, NULL);
++}
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/page.h 2007-08-27 14:02:05.000000000 -0400
+@@ -0,0 +1,229 @@
++#ifndef _I386_PAGE_H
++#define _I386_PAGE_H
++
++/* PAGE_SHIFT determines the page size */
++#define PAGE_SHIFT 12
++#define PAGE_SIZE (1UL << PAGE_SHIFT)
++#define PAGE_MASK (~(PAGE_SIZE-1))
++
++#ifdef CONFIG_X86_PAE
++#define __PHYSICAL_MASK_SHIFT 40
++#define __PHYSICAL_MASK ((1ULL << __PHYSICAL_MASK_SHIFT) - 1)
++#define PHYSICAL_PAGE_MASK (~((1ULL << PAGE_SHIFT) - 1) & __PHYSICAL_MASK)
++#else
++#define __PHYSICAL_MASK_SHIFT 32
++#define __PHYSICAL_MASK (~0UL)
++#define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)
++#endif
++
++#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
++#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
++
++#ifdef __KERNEL__
++
++/*
++ * Need to repeat this here in order to not include pgtable.h (which in turn
++ * depends on definitions made here), but to be able to use the symbolic
++ * value below. The preprocessor will warn if the two definitions aren't identical.
++ */
++#define _PAGE_PRESENT 0x001
++
++#ifndef __ASSEMBLY__
++
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <asm/bug.h>
++#include <xen/interface/xen.h>
++#include <xen/features.h>
++
++#define arch_free_page(_page,_order) \
++({ int foreign = PageForeign(_page); \
++ if (foreign) \
++ PageForeignDestructor(_page); \
++ foreign; \
++})
++#define HAVE_ARCH_FREE_PAGE
++
++#ifdef CONFIG_X86_USE_3DNOW
++
++#include <asm/mmx.h>
++
++#define clear_page(page) mmx_clear_page((void *)(page))
++#define copy_page(to,from) mmx_copy_page(to,from)
++
++#else
++
++#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
++#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
++
++/*
++ * On older X86 processors it's not a win to use MMX here it seems.
++ * Maybe the K6-III ?
++ */
++
++#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
++#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
++
++#endif
++
++#define clear_user_page(page, vaddr, pg) clear_page(page)
++#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
++
++/*
++ * These are used to make use of C type-checking..
++ */
++extern int nx_enabled;
++#ifdef CONFIG_X86_PAE
++extern unsigned long long __supported_pte_mask;
++typedef struct { unsigned long pte_low, pte_high; } pte_t;
++typedef struct { unsigned long long pmd; } pmd_t;
++typedef struct { unsigned long long pgd; } pgd_t;
++typedef struct { unsigned long long pgprot; } pgprot_t;
++#define pgprot_val(x) ((x).pgprot)
++#include <asm/maddr.h>
++#define __pte(x) ({ unsigned long long _x = (x); \
++ if (_x & _PAGE_PRESENT) _x = pte_phys_to_machine(_x); \
++ ((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); })
++#define __pgd(x) ({ unsigned long long _x = (x); \
++ (pgd_t) {((_x) & _PAGE_PRESENT) ? pte_phys_to_machine(_x) : (_x)}; })
++#define __pmd(x) ({ unsigned long long _x = (x); \
++ (pmd_t) {((_x) & _PAGE_PRESENT) ? pte_phys_to_machine(_x) : (_x)}; })
++static inline unsigned long long pte_val_ma(pte_t x)
++{
++ return ((unsigned long long)x.pte_high << 32) | x.pte_low;
++}
++static inline unsigned long long pte_val(pte_t x)
++{
++ unsigned long long ret = pte_val_ma(x);
++ if (x.pte_low & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++ return ret;
++}
++static inline unsigned long long pmd_val(pmd_t x)
++{
++ unsigned long long ret = x.pmd;
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
++#else
++ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++#endif
++ return ret;
++}
++static inline unsigned long long pgd_val(pgd_t x)
++{
++ unsigned long long ret = x.pgd;
++ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++ return ret;
++}
++#define HPAGE_SHIFT 21
++#else
++typedef struct { unsigned long pte_low; } pte_t;
++typedef struct { unsigned long pgd; } pgd_t;
++typedef struct { unsigned long pgprot; } pgprot_t;
++#define pgprot_val(x) ((x).pgprot)
++#include <asm/maddr.h>
++#define boot_pte_t pte_t /* or would you rather have a typedef */
++#define pte_val(x) (((x).pte_low & _PAGE_PRESENT) ? \
++ machine_to_phys((x).pte_low) : \
++ (x).pte_low)
++#define pte_val_ma(x) ((x).pte_low)
++#define __pte(x) ({ unsigned long _x = (x); \
++ (pte_t) {((_x) & _PAGE_PRESENT) ? phys_to_machine(_x) : (_x)}; })
++#define __pgd(x) ({ unsigned long _x = (x); \
++ (pgd_t) {((_x) & _PAGE_PRESENT) ? phys_to_machine(_x) : (_x)}; })
++static inline unsigned long pgd_val(pgd_t x)
++{
++ unsigned long ret = x.pgd;
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret) ret = machine_to_phys(ret) | _PAGE_PRESENT;
++#else
++ if (ret & _PAGE_PRESENT) ret = machine_to_phys(ret);
++#endif
++ return ret;
++}
++#define HPAGE_SHIFT 22
++#endif
++#define PTE_MASK PHYSICAL_PAGE_MASK
++
++#ifdef CONFIG_HUGETLB_PAGE
++#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
++#define HPAGE_MASK (~(HPAGE_SIZE - 1))
++#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
++#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
++#endif
++
++#define __pgprot(x) ((pgprot_t) { (x) } )
++
++#endif /* !__ASSEMBLY__ */
++
++/* to align the pointer to the (next) page boundary */
++#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
++
++/*
++ * This handles the memory map.. We could make this a config
++ * option, but too many people screw it up, and too few need
++ * it.
++ *
++ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
++ * a virtual address space of one gigabyte, which limits the
++ * amount of physical memory you can use to about 950MB.
++ *
++ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
++ * and CONFIG_HIGHMEM64G options in the kernel configuration.
++ */
++
++#ifndef __ASSEMBLY__
++
++struct vm_area_struct;
++
++/*
++ * This much address space is reserved for vmalloc() and iomap()
++ * as well as fixmap mappings.
++ */
++extern unsigned int __VMALLOC_RESERVE;
++
++extern int sysctl_legacy_va_layout;
++
++extern int page_is_ram(unsigned long pagenr);
++
++#endif /* __ASSEMBLY__ */
++
++#ifdef __ASSEMBLY__
++#define __PAGE_OFFSET CONFIG_PAGE_OFFSET
++#define __PHYSICAL_START CONFIG_PHYSICAL_START
++#else
++#define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET)
++#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
++#endif
++#define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START)
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++#undef LOAD_OFFSET
++#define LOAD_OFFSET 0
++#endif
++
++#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
++#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
++#define MAXMEM (__FIXADDR_TOP-__PAGE_OFFSET-__VMALLOC_RESERVE)
++#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
++#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
++#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
++#ifdef CONFIG_FLATMEM
++#define pfn_valid(pfn) ((pfn) < max_mapnr)
++#endif /* CONFIG_FLATMEM */
++#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
++
++#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
++
++#define VM_DATA_DEFAULT_FLAGS \
++ (VM_READ | VM_WRITE | \
++ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
++ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++
++#include <asm-generic/memory_model.h>
++#include <asm-generic/page.h>
++
++#define __HAVE_ARCH_GATE_AREA 1
++#endif /* __KERNEL__ */
++
++#endif /* _I386_PAGE_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/param.h 2007-08-27 14:02:01.000000000 -0400
+@@ -0,0 +1,23 @@
++#ifndef _ASMi386_PARAM_H
++#define _ASMi386_PARAM_H
++
++#ifdef __KERNEL__
++# define HZ CONFIG_HZ /* Internal kernel timer frequency */
++# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
++# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
++#endif
++
++#ifndef HZ
++#define HZ 100
++#endif
++
++#define EXEC_PAGESIZE 4096
++
++#ifndef NOGROUP
++#define NOGROUP (-1)
++#endif
++
++#define MAXHOSTNAMELEN 64 /* max length of hostname */
++#define COMMAND_LINE_SIZE 256
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/pci.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,146 @@
++#ifndef __i386_PCI_H
++#define __i386_PCI_H
++
++
++#ifdef __KERNEL__
++#include <linux/mm.h> /* for struct page */
++
++/* Can be used to override the logic in pci_scan_bus for skipping
++ already-configured bus numbers - to be used for buggy BIOSes
++ or architectures with incomplete PCI setup by the loader */
++
++#ifdef CONFIG_PCI
++extern unsigned int pcibios_assign_all_busses(void);
++#else
++#define pcibios_assign_all_busses() 0
++#endif
++#define pcibios_scan_all_fns(a, b) 0
++
++extern unsigned long pci_mem_start;
++#define PCIBIOS_MIN_IO 0x1000
++#define PCIBIOS_MIN_MEM (pci_mem_start)
++
++#define PCIBIOS_MIN_CARDBUS_IO 0x4000
++
++void pcibios_config_init(void);
++struct pci_bus * pcibios_scan_root(int bus);
++
++void pcibios_set_master(struct pci_dev *dev);
++void pcibios_penalize_isa_irq(int irq, int active);
++struct irq_routing_table *pcibios_get_irq_routing_table(void);
++int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
++
++/* Dynamic DMA mapping stuff.
++ * i386 has everything mapped statically.
++ */
++
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <asm/scatterlist.h>
++#include <linux/string.h>
++#include <asm/io.h>
++
++struct pci_dev;
++
++#ifdef CONFIG_SWIOTLB
++
++
++/* On Xen we use SWIOTLB instead of blk-specific bounce buffers. */
++#define PCI_DMA_BUS_IS_PHYS (0)
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
++ dma_addr_t ADDR_NAME;
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
++ __u32 LEN_NAME;
++#define pci_unmap_addr(PTR, ADDR_NAME) \
++ ((PTR)->ADDR_NAME)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
++ (((PTR)->ADDR_NAME) = (VAL))
++#define pci_unmap_len(PTR, LEN_NAME) \
++ ((PTR)->LEN_NAME)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
++ (((PTR)->LEN_NAME) = (VAL))
++
++#else
++
++/* The PCI address space does equal the physical memory
++ * address space. The networking and block device layers use
++ * this boolean for bounce buffer decisions.
++ */
++#define PCI_DMA_BUS_IS_PHYS (1)
++
++/* pci_unmap_{page,single} is a nop so... */
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
++#define pci_unmap_addr(PTR, ADDR_NAME) (0)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
++#define pci_unmap_len(PTR, LEN_NAME) (0)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
++
++#endif
++
++/* This is always fine. */
++#define pci_dac_dma_supported(pci_dev, mask) (1)
++
++static inline dma64_addr_t
++pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
++{
++ return ((dma64_addr_t) page_to_phys(page) +
++ (dma64_addr_t) offset);
++}
++
++static inline struct page *
++pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
++{
++ return pfn_to_page(dma_addr >> PAGE_SHIFT);
++}
++
++static inline unsigned long
++pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
++{
++ return (dma_addr & ~PAGE_MASK);
++}
++
++static inline void
++pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++{
++}
++
++static inline void
++pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++{
++ flush_write_buffers();
++}
++
++#define HAVE_PCI_MMAP
++extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
++ enum pci_mmap_state mmap_state, int write_combine);
++
++
++static inline void pcibios_add_platform_entries(struct pci_dev *dev)
++{
++}
++
++#ifdef CONFIG_PCI
++static inline void pci_dma_burst_advice(struct pci_dev *pdev,
++ enum pci_dma_burst_strategy *strat,
++ unsigned long *strategy_parameter)
++{
++ *strat = PCI_DMA_BURST_INFINITY;
++ *strategy_parameter = ~0UL;
++}
++#endif
++
++#endif /* __KERNEL__ */
++
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++#include <xen/pcifront.h>
++#endif /* CONFIG_XEN_PCIDEV_FRONTEND */
++
++/* implement the pci_ DMA API in terms of the generic device dma_ one */
++#include <asm-generic/pci-dma-compat.h>
++
++/* generic pci stuff */
++#include <asm-generic/pci.h>
++
++#endif /* __i386_PCI_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/pgalloc.h 2007-08-27 14:01:59.000000000 -0400
+@@ -0,0 +1,59 @@
++#ifndef _I386_PGALLOC_H
++#define _I386_PGALLOC_H
++
++#include <asm/fixmap.h>
++#include <linux/threads.h>
++#include <linux/mm.h> /* for struct page */
++#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
++
++#define pmd_populate_kernel(mm, pmd, pte) \
++ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
++
++#define pmd_populate(mm, pmd, pte) \
++do { \
++ unsigned long pfn = page_to_pfn(pte); \
++ if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) { \
++ if (!PageHighMem(pte)) \
++ BUG_ON(HYPERVISOR_update_va_mapping( \
++ (unsigned long)__va(pfn << PAGE_SHIFT), \
++ pfn_pte(pfn, PAGE_KERNEL_RO), 0)); \
++ else if (!test_and_set_bit(PG_pinned, &pte->flags)) \
++ kmap_flush_unused(); \
++ set_pmd(pmd, \
++ __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT))); \
++ } else \
++ *(pmd) = __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT)); \
++} while (0)
++
++/*
++ * Allocate and free page tables.
++ */
++extern pgd_t *pgd_alloc(struct mm_struct *);
++extern void pgd_free(pgd_t *pgd);
++
++extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
++extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
++
++static inline void pte_free_kernel(pte_t *pte)
++{
++ free_page((unsigned long)pte);
++ make_lowmem_page_writable(pte, XENFEAT_writable_page_tables);
++}
++
++extern void pte_free(struct page *pte);
++
++#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
++
++#ifdef CONFIG_X86_PAE
++/*
++ * In the PAE case we free the pmds as part of the pgd.
++ */
++#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
++#define pmd_free(x) do { } while (0)
++#define __pmd_free_tlb(tlb,x) do { } while (0)
++#define pud_populate(mm, pmd, pte) BUG()
++#endif
++
++#define check_pgt_cache() do { } while (0)
++
++#endif /* _I386_PGALLOC_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h 2007-08-27 14:01:58.000000000 -0400
+@@ -0,0 +1,20 @@
++#ifndef _I386_PGTABLE_2LEVEL_DEFS_H
++#define _I386_PGTABLE_2LEVEL_DEFS_H
++
++#define HAVE_SHARED_KERNEL_PMD 0
++
++/*
++ * traditional i386 two-level paging structure:
++ */
++
++#define PGDIR_SHIFT 22
++#define PTRS_PER_PGD 1024
++
++/*
++ * the i386 is two-level, so we don't really have any
++ * PMD directory physically.
++ */
++
++#define PTRS_PER_PTE 1024
++
++#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/pgtable-2level.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,118 @@
++#ifndef _I386_PGTABLE_2LEVEL_H
++#define _I386_PGTABLE_2LEVEL_H
++
++#include <asm-generic/pgtable-nopmd.h>
++
++#define pte_ERROR(e) \
++ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
++#define pgd_ERROR(e) \
++ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
++
++/*
++ * Certain architectures need to do special things when PTEs
++ * within a page table are directly modified. Thus, the following
++ * hook is made available.
++ */
++#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
++
++#define set_pte_at(_mm,addr,ptep,pteval) do { \
++ if (((_mm) != current->mm && (_mm) != &init_mm) || \
++ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \
++ set_pte((ptep), (pteval)); \
++} while (0)
++
++#define set_pte_at_sync(_mm,addr,ptep,pteval) do { \
++ if (((_mm) != current->mm && (_mm) != &init_mm) || \
++ HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
++ set_pte((ptep), (pteval)); \
++ xen_invlpg((addr)); \
++ } \
++} while (0)
++
++#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
++
++#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
++
++#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
++#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
++
++#define pte_none(x) (!(x).pte_low)
++
++static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ pte_t pte = *ptep;
++ if (!pte_none(pte)) {
++ if (mm != &init_mm)
++ pte = __pte_ma(xchg(&ptep->pte_low, 0));
++ else
++ HYPERVISOR_update_va_mapping(addr, __pte(0), 0);
++ }
++ return pte;
++}
++
++#define ptep_clear_flush(vma, addr, ptep) \
++({ \
++ pte_t *__ptep = (ptep); \
++ pte_t __res = *__ptep; \
++ if (!pte_none(__res) && \
++ ((vma)->vm_mm != current->mm || \
++ HYPERVISOR_update_va_mapping(addr, __pte(0), \
++ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++ UVMF_INVLPG|UVMF_MULTI))) { \
++ __ptep->pte_low = 0; \
++ flush_tlb_page(vma, addr); \
++ } \
++ __res; \
++})
++
++#define pte_same(a, b) ((a).pte_low == (b).pte_low)
++
++#define __pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
++#define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
++ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
++#define pte_pfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
++ mfn_to_local_pfn(__pte_mfn(_pte)) : __pte_mfn(_pte))
++
++#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
++
++#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
++#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
++
++/*
++ * All present user pages are user-executable:
++ */
++static inline int pte_exec(pte_t pte)
++{
++ return pte_user(pte);
++}
++
++/*
++ * All present pages are kernel-executable:
++ */
++static inline int pte_exec_kernel(pte_t pte)
++{
++ return 1;
++}
++
++/*
++ * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
++ * into this range:
++ */
++#define PTE_FILE_MAX_BITS 29
++
++#define pte_to_pgoff(pte) \
++ ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
++
++#define pgoff_to_pte(off) \
++ ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
++
++/* Encode and de-code a swap entry */
++#define __swp_type(x) (((x).val >> 1) & 0x1f)
++#define __swp_offset(x) ((x).val >> 8)
++#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
++#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
++#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
++
++void vmalloc_sync_all(void);
++
++#endif /* _I386_PGTABLE_2LEVEL_H */
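The 2-level header above keeps a 29-bit nonlinear-file offset inside the PTE itself, leaving bits 0, 6 and 7 free for _PAGE_PRESENT, _PAGE_FILE and _PAGE_PROTNONE. A quick user-space sketch of that packing (editorial illustration only, not part of the patch; pgoff_to_pte_low()/pte_low_to_pgoff() are hypothetical helpers that just re-create the pte_to_pgoff/pgoff_to_pte arithmetic) shows the round trip is lossless:

/* Stand-alone illustration of the non-PAE pte<->pgoff packing above. */
#include <assert.h>
#include <stdio.h>

#define _PAGE_FILE 0x040UL

static unsigned long pgoff_to_pte_low(unsigned long off)
{
	/* low 5 offset bits go to pte bits 1-5, the rest to bits 8-31 */
	return ((off & 0x1f) << 1) + ((off >> 5) << 8) + _PAGE_FILE;
}

static unsigned long pte_low_to_pgoff(unsigned long pte_low)
{
	return ((pte_low >> 1) & 0x1f) + ((pte_low >> 8) << 5);
}

int main(void)
{
	unsigned long off;

	for (off = 0; off < (1UL << 29); off += 12345) {
		unsigned long pte_low = pgoff_to_pte_low(off);

		assert(pte_low_to_pgoff(pte_low) == off);
		assert(pte_low & _PAGE_FILE);	/* bit 6 marks a file pte */
		assert(!(pte_low & 0x1));	/* present bit stays clear */
	}
	printf("29-bit pgoff round-trips through the pte encoding\n");
	return 0;
}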
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h 2007-08-27 14:01:58.000000000 -0400
+@@ -0,0 +1,24 @@
++#ifndef _I386_PGTABLE_3LEVEL_DEFS_H
++#define _I386_PGTABLE_3LEVEL_DEFS_H
++
++#define HAVE_SHARED_KERNEL_PMD 0
++
++/*
++ * PGDIR_SHIFT determines what a top-level page table entry can map
++ */
++#define PGDIR_SHIFT 30
++#define PTRS_PER_PGD 4
++
++/*
++ * PMD_SHIFT determines the size of the area a middle-level
++ * page table can map
++ */
++#define PMD_SHIFT 21
++#define PTRS_PER_PMD 512
++
++/*
++ * entries per page directory level
++ */
++#define PTRS_PER_PTE 512
++
++#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/pgtable-3level.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,206 @@
++#ifndef _I386_PGTABLE_3LEVEL_H
++#define _I386_PGTABLE_3LEVEL_H
++
++#include <asm-generic/pgtable-nopud.h>
++
++/*
++ * Intel Physical Address Extension (PAE) Mode - three-level page
++ * tables on PPro+ CPUs.
++ *
++ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
++ */
++
++#define pte_ERROR(e) \
++ printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
++#define pmd_ERROR(e) \
++ printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
++#define pgd_ERROR(e) \
++ printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
++
++#define pud_none(pud) 0
++#define pud_bad(pud) 0
++#define pud_present(pud) 1
++
++/*
++ * Is the pte executable?
++ */
++static inline int pte_x(pte_t pte)
++{
++ return !(pte_val(pte) & _PAGE_NX);
++}
++
++/*
++ * All present user-pages with !NX bit are user-executable:
++ */
++static inline int pte_exec(pte_t pte)
++{
++ return pte_user(pte) && pte_x(pte);
++}
++/*
++ * All present pages with !NX bit are kernel-executable:
++ */
++static inline int pte_exec_kernel(pte_t pte)
++{
++ return pte_x(pte);
++}
++
++/* Rules for using set_pte: the pte being assigned *must* be
++ * either not present or in a state where the hardware will
++ * not attempt to update the pte. In places where this is
++ * not possible, use pte_get_and_clear to obtain the old pte
++ * value and then use set_pte to update it. -ben
++ */
++#define __HAVE_ARCH_SET_PTE_ATOMIC
++
++static inline void set_pte(pte_t *ptep, pte_t pte)
++{
++ ptep->pte_high = pte.pte_high;
++ smp_wmb();
++ ptep->pte_low = pte.pte_low;
++}
++#define set_pte_atomic(pteptr,pteval) \
++ set_64bit((unsigned long long *)(pteptr),pte_val_ma(pteval))
++
++#define set_pte_at(_mm,addr,ptep,pteval) do { \
++ if (((_mm) != current->mm && (_mm) != &init_mm) || \
++ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \
++ set_pte((ptep), (pteval)); \
++} while (0)
++
++#define set_pte_at_sync(_mm,addr,ptep,pteval) do { \
++ if (((_mm) != current->mm && (_mm) != &init_mm) || \
++ HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
++ set_pte((ptep), (pteval)); \
++ xen_invlpg((addr)); \
++ } \
++} while (0)
++
++#define set_pmd(pmdptr,pmdval) \
++ xen_l2_entry_update((pmdptr), (pmdval))
++#define set_pud(pudptr,pudval) \
++ xen_l3_entry_update((pudptr), (pudval))
++
++/*
++ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
++ * the TLB via cr3 if the top-level pgd is changed...
++ * We do not let the generic code free and clear pgd entries due to
++ * this erratum.
++ */
++static inline void pud_clear (pud_t * pud) { }
++
++#define pud_page(pud) \
++((struct page *) __va(pud_val(pud) & PAGE_MASK))
++
++#define pud_page_kernel(pud) \
++((unsigned long) __va(pud_val(pud) & PAGE_MASK))
++
++
++/* Find an entry in the second-level page table.. */
++#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
++ pmd_index(address))
++
++static inline int pte_none(pte_t pte)
++{
++ return !(pte.pte_low | pte.pte_high);
++}
++
++/*
++ * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
++ * entry, so clear the bottom half first and enforce ordering with a compiler
++ * barrier.
++ */
++static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ if ((mm != current->mm && mm != &init_mm)
++ || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
++ ptep->pte_low = 0;
++ smp_wmb();
++ ptep->pte_high = 0;
++ }
++}
++
++#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
++
++static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ pte_t pte = *ptep;
++ if (!pte_none(pte)) {
++ if (mm != &init_mm) {
++ uint64_t val = pte_val_ma(pte);
++ if (__cmpxchg64(ptep, val, 0) != val) {
++ /* xchg acts as a barrier before the setting of the high bits */
++ pte.pte_low = xchg(&ptep->pte_low, 0);
++ pte.pte_high = ptep->pte_high;
++ ptep->pte_high = 0;
++ }
++ } else
++ HYPERVISOR_update_va_mapping(addr, __pte(0), 0);
++ }
++ return pte;
++}
++
++#define ptep_clear_flush(vma, addr, ptep) \
++({ \
++ pte_t *__ptep = (ptep); \
++ pte_t __res = *__ptep; \
++ if (!pte_none(__res) && \
++ ((vma)->vm_mm != current->mm || \
++ HYPERVISOR_update_va_mapping(addr, __pte(0), \
++ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++ UVMF_INVLPG|UVMF_MULTI))) { \
++ __ptep->pte_low = 0; \
++ smp_wmb(); \
++ __ptep->pte_high = 0; \
++ flush_tlb_page(vma, addr); \
++ } \
++ __res; \
++})
++
++static inline int pte_same(pte_t a, pte_t b)
++{
++ return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
++}
++
++#define pte_page(x) pfn_to_page(pte_pfn(x))
++
++#define __pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \
++ ((_pte).pte_high << (32-PAGE_SHIFT)))
++#define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
++ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
++#define pte_pfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
++ mfn_to_local_pfn(__pte_mfn(_pte)) : __pte_mfn(_pte))
++
++extern unsigned long long __supported_pte_mask;
++
++static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
++{
++ return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
++ pgprot_val(pgprot)) & __supported_pte_mask);
++}
++
++static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
++{
++ return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
++ pgprot_val(pgprot)) & __supported_pte_mask);
++}
++
++/*
++ * Bits 0, 6 and 7 are taken in the low part of the pte,
++ * put the 32 bits of offset into the high part.
++ */
++#define pte_to_pgoff(pte) ((pte).pte_high)
++#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
++#define PTE_FILE_MAX_BITS 32
++
++/* Encode and de-code a swap entry */
++#define __swp_type(x) (((x).val) & 0x1f)
++#define __swp_offset(x) ((x).val >> 5)
++#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
++#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
++#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val })
++
++#define __pmd_free_tlb(tlb, x) do { } while (0)
++
++void vmalloc_sync_all(void);
++
++#endif /* _I386_PGTABLE_3LEVEL_H */
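In the PAE header above, __pte_mfn() rebuilds a machine frame number from the two 32-bit halves of the entry, so frames above 4GB stay reachable. A minimal sketch of that arithmetic (editorial illustration only, not part of the patch; assumes PAGE_SHIFT == 12 as on i386 and ignores the NX bit in pte_high):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct pae_pte { uint32_t pte_low, pte_high; };

static uint64_t pte_mfn(struct pae_pte pte)
{
	/* same shift/merge as __pte_mfn() in the header above */
	return (pte.pte_low >> PAGE_SHIFT) |
	       ((uint64_t)pte.pte_high << (32 - PAGE_SHIFT));
}

int main(void)
{
	/* A machine address above 4GB, as PAE allows. */
	uint64_t maddr = 0x1234567000ULL;
	struct pae_pte pte = {
		.pte_low  = (uint32_t)maddr | 0x1,	/* _PAGE_PRESENT */
		.pte_high = (uint32_t)(maddr >> 32),
	};

	assert(pte_mfn(pte) == (maddr >> PAGE_SHIFT));
	printf("mfn 0x%llx\n", (unsigned long long)pte_mfn(pte));
	return 0;
}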
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/pgtable.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,530 @@
++#ifndef _I386_PGTABLE_H
++#define _I386_PGTABLE_H
++
++#include <asm/hypervisor.h>
++
++/*
++ * The Linux memory management assumes a three-level page table setup. On
++ * the i386, we use that, but "fold" the mid level into the top-level page
++ * table, so that we physically have the same two-level page table as the
++ * i386 mmu expects.
++ *
++ * This file contains the functions and defines necessary to modify and use
++ * the i386 page table tree.
++ */
++#ifndef __ASSEMBLY__
++#include <asm/processor.h>
++#include <asm/fixmap.h>
++#include <linux/threads.h>
++
++#ifndef _I386_BITOPS_H
++#include <asm/bitops.h>
++#endif
++
++#include <linux/slab.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++
++/* Is this pagetable pinned? */
++#define PG_pinned PG_arch_1
++
++struct mm_struct;
++struct vm_area_struct;
++
++/*
++ * ZERO_PAGE is a global shared page that is always zero: used
++ * for zero-mapped memory areas etc..
++ */
++#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
++extern unsigned long empty_zero_page[1024];
++extern pgd_t *swapper_pg_dir;
++extern kmem_cache_t *pgd_cache;
++extern kmem_cache_t *pmd_cache;
++extern spinlock_t pgd_lock;
++extern struct page *pgd_list;
++
++void pmd_ctor(void *, kmem_cache_t *, unsigned long);
++void pgd_ctor(void *, kmem_cache_t *, unsigned long);
++void pgd_dtor(void *, kmem_cache_t *, unsigned long);
++void pgtable_cache_init(void);
++void paging_init(void);
++
++/*
++ * The Linux x86 paging architecture is 'compile-time dual-mode', it
++ * implements both the traditional 2-level x86 page tables and the
++ * newer 3-level PAE-mode page tables.
++ */
++#ifdef CONFIG_X86_PAE
++# include <asm/pgtable-3level-defs.h>
++# define PMD_SIZE (1UL << PMD_SHIFT)
++# define PMD_MASK (~(PMD_SIZE-1))
++#else
++# include <asm/pgtable-2level-defs.h>
++#endif
++
++#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
++#define PGDIR_MASK (~(PGDIR_SIZE-1))
++
++#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
++#define FIRST_USER_ADDRESS 0
++
++#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
++#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
++
++#define TWOLEVEL_PGDIR_SHIFT 22
++#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
++#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
++
++/* Just any arbitrary offset to the start of the vmalloc VM area: the
++ * current 8MB value just means that there will be a 8MB "hole" after the
++ * physical memory until the kernel virtual memory starts. That means that
++ * any out-of-bounds memory accesses will hopefully be caught.
++ * The vmalloc() routines leaves a hole of 4kB between each vmalloced
++ * area for the same reason. ;)
++ */
++#define VMALLOC_OFFSET (8*1024*1024)
++#define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \
++ 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
++#ifdef CONFIG_HIGHMEM
++# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
++#else
++# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
++#endif
++
++/*
++ * _PAGE_PSE set in the page directory entry just means that
++ * the page directory entry points directly to a 4MB-aligned block of
++ * memory.
++ */
++#define _PAGE_BIT_PRESENT 0
++#define _PAGE_BIT_RW 1
++#define _PAGE_BIT_USER 2
++#define _PAGE_BIT_PWT 3
++#define _PAGE_BIT_PCD 4
++#define _PAGE_BIT_ACCESSED 5
++#define _PAGE_BIT_DIRTY 6
++#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */
++#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
++#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
++#define _PAGE_BIT_UNUSED2 10
++#define _PAGE_BIT_UNUSED3 11
++#define _PAGE_BIT_NX 63
++
++#define _PAGE_PRESENT 0x001
++#define _PAGE_RW 0x002
++#define _PAGE_USER 0x004
++#define _PAGE_PWT 0x008
++#define _PAGE_PCD 0x010
++#define _PAGE_ACCESSED 0x020
++#define _PAGE_DIRTY 0x040
++#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
++#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
++#define _PAGE_UNUSED1 0x200 /* available for programmer */
++#define _PAGE_UNUSED2 0x400
++#define _PAGE_UNUSED3 0x800
++
++/* If _PAGE_PRESENT is clear, we use these: */
++#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
++#define _PAGE_PROTNONE 0x080 /* if the user mapped it with PROT_NONE;
++ pte_present gives true */
++#ifdef CONFIG_X86_PAE
++#define _PAGE_NX (1ULL<<_PAGE_BIT_NX)
++#else
++#define _PAGE_NX 0
++#endif
++
++#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
++#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
++#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
++
++#define PAGE_NONE \
++ __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
++#define PAGE_SHARED \
++ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
++
++#define PAGE_SHARED_EXEC \
++ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_COPY_NOEXEC \
++ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_COPY_EXEC \
++ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_COPY \
++ PAGE_COPY_NOEXEC
++#define PAGE_READONLY \
++ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_READONLY_EXEC \
++ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++
++#define _PAGE_KERNEL \
++ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
++#define _PAGE_KERNEL_EXEC \
++ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
++
++extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
++#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
++#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
++#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
++#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
++
++#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
++#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
++#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
++#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
++#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
++#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
++
++/*
++ * The i386 can't do page protection for execute, and considers that
++ * the same are read. Also, write permissions imply read permissions.
++ * This is the closest we can get..
++ */
++#define __P000 PAGE_NONE
++#define __P001 PAGE_READONLY
++#define __P010 PAGE_COPY
++#define __P011 PAGE_COPY
++#define __P100 PAGE_READONLY_EXEC
++#define __P101 PAGE_READONLY_EXEC
++#define __P110 PAGE_COPY_EXEC
++#define __P111 PAGE_COPY_EXEC
++
++#define __S000 PAGE_NONE
++#define __S001 PAGE_READONLY
++#define __S010 PAGE_SHARED
++#define __S011 PAGE_SHARED
++#define __S100 PAGE_READONLY_EXEC
++#define __S101 PAGE_READONLY_EXEC
++#define __S110 PAGE_SHARED_EXEC
++#define __S111 PAGE_SHARED_EXEC
++
++/*
++ * Define this if things work differently on an i386 and an i486:
++ * it will (on an i486) warn about kernel memory accesses that are
++ * done without a 'access_ok(VERIFY_WRITE,..)'
++ */
++#undef TEST_ACCESS_OK
++
++/* The boot page tables (all created as a single array) */
++extern unsigned long pg0[];
++
++#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
++
++/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
++#define pmd_none(x) (!(unsigned long)pmd_val(x))
++#if CONFIG_XEN_COMPAT <= 0x030002
++/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
++ can temporarily clear it. */
++#define pmd_present(x) (pmd_val(x))
++#else
++#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
++#endif
++#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
++
++
++#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
++
++/*
++ * The following only work if pte_present() is true.
++ * Undefined behaviour if not..
++ */
++static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
++static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
++static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
++static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
++static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
++static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; }
++
++/*
++ * The following only works if pte_present() is not true.
++ */
++static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }
++
++static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
++static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
++static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
++static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
++static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; }
++
++#ifdef CONFIG_X86_PAE
++# include <asm/pgtable-3level.h>
++#else
++# include <asm/pgtable-2level.h>
++#endif
++
++#define ptep_test_and_clear_dirty(vma, addr, ptep) \
++({ \
++ pte_t __pte = *(ptep); \
++ int __ret = pte_dirty(__pte); \
++ if (__ret) { \
++ __pte = pte_mkclean(__pte); \
++ if ((vma)->vm_mm != current->mm || \
++ HYPERVISOR_update_va_mapping(addr, __pte, 0)) \
++ (ptep)->pte_low = __pte.pte_low; \
++ } \
++ __ret; \
++})
++
++#define ptep_test_and_clear_young(vma, addr, ptep) \
++({ \
++ pte_t __pte = *(ptep); \
++ int __ret = pte_young(__pte); \
++ if (__ret) \
++ __pte = pte_mkold(__pte); \
++ if ((vma)->vm_mm != current->mm || \
++ HYPERVISOR_update_va_mapping(addr, __pte, 0)) \
++ (ptep)->pte_low = __pte.pte_low; \
++ __ret; \
++})
++
++#define ptep_get_and_clear_full(mm, addr, ptep, full) \
++ ((full) ? ({ \
++ pte_t __res = *(ptep); \
++ if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) \
++ xen_l1_entry_update(ptep, __pte(0)); \
++ else \
++ *(ptep) = __pte(0); \
++ __res; \
++ }) : \
++ ptep_get_and_clear(mm, addr, ptep))
++
++static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ pte_t pte = *ptep;
++ if (pte_write(pte))
++ set_pte_at(mm, addr, ptep, pte_wrprotect(pte));
++}
++
++/*
++ * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
++ *
++ * dst - pointer to pgd range anywhere on a pgd page
++ * src - ""
++ * count - the number of pgds to copy.
++ *
++ * dst and src can be on the same page, but the range must not overlap,
++ * and must not cross a page boundary.
++ */
++static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
++{
++ memcpy(dst, src, count * sizeof(pgd_t));
++}
++
++/*
++ * Macro to mark a page protection value as "uncacheable". On processors which do not support
++ * it, this is a no-op.
++ */
++#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \
++ ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
++
++/*
++ * Conversion functions: convert a page and protection to a page entry,
++ * and a page entry and page directory to the page they refer to.
++ */
++
++#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
++
++static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
++{
++ /*
++ * Since this might change the present bit (which controls whether
++ * a pte_t object has undergone p2m translation), we must use
++ * pte_val() on the input pte and __pte() for the return value.
++ */
++ paddr_t pteval = pte_val(pte);
++
++ pteval &= _PAGE_CHG_MASK;
++ pteval |= pgprot_val(newprot);
++#ifdef CONFIG_X86_PAE
++ pteval &= __supported_pte_mask;
++#endif
++ return __pte(pteval);
++}
++
++#define pmd_large(pmd) \
++((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
++
++/*
++ * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
++ *
++ * this macro returns the index of the entry in the pgd page which would
++ * control the given virtual address
++ */
++#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
++#define pgd_index_k(addr) pgd_index(addr)
++
++/*
++ * pgd_offset() returns a (pgd_t *)
++ * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
++ */
++#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
++
++/*
++ * a shortcut which implies the use of the kernel's pgd, instead
++ * of a process's
++ */
++#define pgd_offset_k(address) pgd_offset(&init_mm, address)
++
++/*
++ * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
++ *
++ * this macro returns the index of the entry in the pmd page which would
++ * control the given virtual address
++ */
++#define pmd_index(address) \
++ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
++
++/*
++ * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
++ *
++ * this macro returns the index of the entry in the pte page which would
++ * control the given virtual address
++ */
++#define pte_index(address) \
++ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
++#define pte_offset_kernel(dir, address) \
++ ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
++
++#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
++
++#define pmd_page_kernel(pmd) \
++ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
++
++/*
++ * Helper function that returns the kernel pagetable entry controlling
++ * the virtual address 'address'. NULL means no pagetable entry present.
++ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
++ * as a pte too.
++ */
++extern pte_t *lookup_address(unsigned long address);
++
++/*
++ * Make a given kernel text page executable/non-executable.
++ * Returns the previous executability setting of that page (which
++ * is used to restore the previous state). Used by the SMP bootup code.
++ * NOTE: this is an __init function for security reasons.
++ */
++#ifdef CONFIG_X86_PAE
++ extern int set_kernel_exec(unsigned long vaddr, int enable);
++#else
++ static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
++#endif
++
++extern void noexec_setup(const char *str);
++
++#if defined(CONFIG_HIGHPTE)
++#define pte_offset_map(dir, address) \
++ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
++ pte_index(address))
++#define pte_offset_map_nested(dir, address) \
++ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
++ pte_index(address))
++#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
++#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
++#else
++#define pte_offset_map(dir, address) \
++ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
++#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
++#define pte_unmap(pte) do { } while (0)
++#define pte_unmap_nested(pte) do { } while (0)
++#endif
++
++#define __HAVE_ARCH_PTEP_ESTABLISH
++#define ptep_establish(vma, address, ptep, pteval) \
++ do { \
++ if ( likely((vma)->vm_mm == current->mm) ) { \
++ BUG_ON(HYPERVISOR_update_va_mapping(address, \
++ pteval, \
++ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++ UVMF_INVLPG|UVMF_MULTI)); \
++ } else { \
++ xen_l1_entry_update(ptep, pteval); \
++ flush_tlb_page(vma, address); \
++ } \
++ } while (0)
++
++/*
++ * The i386 doesn't have any external MMU info: the kernel page
++ * tables contain all the necessary information.
++ *
++ * Also, we only update the dirty/accessed state if we set
++ * the dirty bit by hand in the kernel, since the hardware
++ * will do the accessed bit for us, and we don't want to
++ * race with other CPU's that might be updating the dirty
++ * bit at the same time.
++ */
++#define update_mmu_cache(vma,address,pte) do { } while (0)
++#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
++#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
++ do { \
++ if (dirty) \
++ ptep_establish(vma, address, ptep, entry); \
++ } while (0)
++
++#include <xen/features.h>
++void make_lowmem_page_readonly(void *va, unsigned int feature);
++void make_lowmem_page_writable(void *va, unsigned int feature);
++void make_page_readonly(void *va, unsigned int feature);
++void make_page_writable(void *va, unsigned int feature);
++void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
++void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
++
++#define virt_to_ptep(__va) \
++({ \
++ pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
++ pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va)); \
++ pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va)); \
++ pte_offset_kernel(__pmd, (unsigned long)(__va)); \
++})
++
++#define arbitrary_virt_to_machine(__va) \
++({ \
++ maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
++ m | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
++})
++
++#endif /* !__ASSEMBLY__ */
++
++#ifdef CONFIG_FLATMEM
++#define kern_addr_valid(addr) (1)
++#endif /* CONFIG_FLATMEM */
++
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++ unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid);
++int direct_kernel_remap_pfn_range(unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid);
++int create_lookup_pte_addr(struct mm_struct *mm,
++ unsigned long address,
++ uint64_t *ptep);
++int touch_pte_range(struct mm_struct *mm,
++ unsigned long address,
++ unsigned long size);
++
++#define io_remap_pfn_range(vma,from,pfn,size,prot) \
++direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO)
++
++#define MK_IOSPACE_PFN(space, pfn) (pfn)
++#define GET_IOSPACE(pfn) 0
++#define GET_PFN(pfn) (pfn)
++
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
++#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
++#define __HAVE_ARCH_PTEP_SET_WRPROTECT
++#define __HAVE_ARCH_PTE_SAME
++#include <asm-generic/pgtable.h>
++
++#endif /* _I386_PGTABLE_H */
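pgd_index(), pmd_index() and pte_index() above are plain shift-and-mask operations driven by the constants in the *-defs.h headers: 10+10+12 address bits without PAE, 2+9+9+12 with PAE. A worked example splitting one kernel address both ways (editorial illustration only, not part of the patch; split() is just a hypothetical helper):

#include <stdio.h>

#define PAGE_SHIFT 12

static void split(unsigned long addr, int pgdir_shift, int ptrs_per_pgd,
		  int pmd_shift, int ptrs_per_pmd, int ptrs_per_pte)
{
	unsigned long pgd = (addr >> pgdir_shift) & (ptrs_per_pgd - 1);
	unsigned long pmd = pmd_shift ?
		(addr >> pmd_shift) & (ptrs_per_pmd - 1) : 0;
	unsigned long pte = (addr >> PAGE_SHIFT) & (ptrs_per_pte - 1);

	printf("0x%08lx -> pgd %lu, pmd %lu, pte %lu\n", addr, pgd, pmd, pte);
}

int main(void)
{
	unsigned long addr = 0xc0123456;	/* a kernel-space address */

	/* 2-level: PGDIR_SHIFT 22, 1024 PGD and PTE slots, no real PMD. */
	split(addr, 22, 1024, 0, 1, 1024);
	/* PAE: PGDIR_SHIFT 30, 4 PGD slots, PMD_SHIFT 21, 512-entry tables. */
	split(addr, 30, 4, 21, 512, 512);
	return 0;
}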
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/processor.h 2007-08-27 14:02:04.000000000 -0400
+@@ -0,0 +1,741 @@
++/*
++ * include/asm-i386/processor.h
++ *
++ * Copyright (C) 1994 Linus Torvalds
++ */
++
++#ifndef __ASM_I386_PROCESSOR_H
++#define __ASM_I386_PROCESSOR_H
++
++#include <asm/vm86.h>
++#include <asm/math_emu.h>
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/types.h>
++#include <asm/sigcontext.h>
++#include <asm/cpufeature.h>
++#include <asm/msr.h>
++#include <asm/system.h>
++#include <linux/cache.h>
++#include <linux/threads.h>
++#include <asm/percpu.h>
++#include <linux/cpumask.h>
++#include <xen/interface/physdev.h>
++
++/* flag for disabling the tsc */
++extern int tsc_disable;
++
++struct desc_struct {
++ unsigned long a,b;
++};
++
++#define desc_empty(desc) \
++ (!((desc)->a | (desc)->b))
++
++#define desc_equal(desc1, desc2) \
++ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
++/*
++ * Default implementation of macro that returns current
++ * instruction pointer ("program counter").
++ */
++#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
++
++/*
++ * CPU type and hardware bug flags. Kept separately for each CPU.
++ * Members of this structure are referenced in head.S, so think twice
++ * before touching them. [mj]
++ */
++
++struct cpuinfo_x86 {
++ __u8 x86; /* CPU family */
++ __u8 x86_vendor; /* CPU vendor */
++ __u8 x86_model;
++ __u8 x86_mask;
++ char wp_works_ok; /* It doesn't on 386's */
++ char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
++ char hard_math;
++ char rfu;
++ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
++ unsigned long x86_capability[NCAPINTS];
++ char x86_vendor_id[16];
++ char x86_model_id[64];
++ int x86_cache_size; /* in KB - valid for CPUS which support this
++ call */
++ int x86_cache_alignment; /* In bytes */
++ char fdiv_bug;
++ char f00f_bug;
++ char coma_bug;
++ char pad0;
++ int x86_power;
++ unsigned long loops_per_jiffy;
++#ifdef CONFIG_SMP
++ cpumask_t llc_shared_map; /* cpus sharing the last level cache */
++#endif
++ unsigned char x86_max_cores; /* cpuid returned max cores value */
++ unsigned char apicid;
++#ifdef CONFIG_SMP
++ unsigned char booted_cores; /* number of cores as seen by OS */
++ __u8 phys_proc_id; /* Physical processor id. */
++ __u8 cpu_core_id; /* Core id */
++#endif
++} __attribute__((__aligned__(SMP_CACHE_BYTES)));
++
++#define X86_VENDOR_INTEL 0
++#define X86_VENDOR_CYRIX 1
++#define X86_VENDOR_AMD 2
++#define X86_VENDOR_UMC 3
++#define X86_VENDOR_NEXGEN 4
++#define X86_VENDOR_CENTAUR 5
++#define X86_VENDOR_RISE 6
++#define X86_VENDOR_TRANSMETA 7
++#define X86_VENDOR_NSC 8
++#define X86_VENDOR_NUM 9
++#define X86_VENDOR_UNKNOWN 0xff
++
++/*
++ * capabilities of CPUs
++ */
++
++extern struct cpuinfo_x86 boot_cpu_data;
++extern struct cpuinfo_x86 new_cpu_data;
++#ifndef CONFIG_X86_NO_TSS
++extern struct tss_struct doublefault_tss;
++DECLARE_PER_CPU(struct tss_struct, init_tss);
++#endif
++
++#ifdef CONFIG_SMP
++extern struct cpuinfo_x86 cpu_data[];
++#define current_cpu_data cpu_data[smp_processor_id()]
++#else
++#define cpu_data (&boot_cpu_data)
++#define current_cpu_data boot_cpu_data
++#endif
++
++extern int cpu_llc_id[NR_CPUS];
++extern char ignore_fpu_irq;
++
++extern void identify_cpu(struct cpuinfo_x86 *);
++extern void print_cpu_info(struct cpuinfo_x86 *);
++extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
++extern unsigned short num_cache_leaves;
++
++#ifdef CONFIG_X86_HT
++extern void detect_ht(struct cpuinfo_x86 *c);
++#else
++static inline void detect_ht(struct cpuinfo_x86 *c) {}
++#endif
++
++/*
++ * EFLAGS bits
++ */
++#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
++#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
++#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
++#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
++#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
++#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
++#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
++#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
++#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
++#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
++#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
++#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
++#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
++#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
++#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
++#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
++#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
++
++/*
++ * Generic CPUID function
++ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
++ * resulting in stale register contents being returned.
++ */
++static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
++{
++ __asm__(XEN_CPUID
++ : "=a" (*eax),
++ "=b" (*ebx),
++ "=c" (*ecx),
++ "=d" (*edx)
++ : "0" (op), "c"(0));
++}
++
++/* Some CPUID calls want 'count' to be placed in ecx */
++static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
++ int *edx)
++{
++ __asm__(XEN_CPUID
++ : "=a" (*eax),
++ "=b" (*ebx),
++ "=c" (*ecx),
++ "=d" (*edx)
++ : "0" (op), "c" (count));
++}
++
++/*
++ * CPUID functions returning a single datum
++ */
++static inline unsigned int cpuid_eax(unsigned int op)
++{
++ unsigned int eax;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax)
++ : "0" (op)
++ : "bx", "cx", "dx");
++ return eax;
++}
++static inline unsigned int cpuid_ebx(unsigned int op)
++{
++ unsigned int eax, ebx;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax), "=b" (ebx)
++ : "0" (op)
++ : "cx", "dx" );
++ return ebx;
++}
++static inline unsigned int cpuid_ecx(unsigned int op)
++{
++ unsigned int eax, ecx;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax), "=c" (ecx)
++ : "0" (op)
++ : "bx", "dx" );
++ return ecx;
++}
++static inline unsigned int cpuid_edx(unsigned int op)
++{
++ unsigned int eax, edx;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax), "=d" (edx)
++ : "0" (op)
++ : "bx", "cx");
++ return edx;
++}
++
++#define load_cr3(pgdir) write_cr3(__pa(pgdir))
++
++/*
++ * Intel CPU features in CR4
++ */
++#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
++#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
++#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
++#define X86_CR4_DE 0x0008 /* enable debugging extensions */
++#define X86_CR4_PSE 0x0010 /* enable page size extensions */
++#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
++#define X86_CR4_MCE 0x0040 /* Machine check enable */
++#define X86_CR4_PGE 0x0080 /* enable global pages */
++#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
++#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
++#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
++
++/*
++ * Save the cr4 feature set we're using (ie
++ * Pentium 4MB enable and PPro Global page
++ * enable), so that any CPU's that boot up
++ * after us can get the correct flags.
++ */
++extern unsigned long mmu_cr4_features;
++
++static inline void set_in_cr4 (unsigned long mask)
++{
++ unsigned cr4;
++ mmu_cr4_features |= mask;
++ cr4 = read_cr4();
++ cr4 |= mask;
++ write_cr4(cr4);
++}
++
++static inline void clear_in_cr4 (unsigned long mask)
++{
++ unsigned cr4;
++ mmu_cr4_features &= ~mask;
++ cr4 = read_cr4();
++ cr4 &= ~mask;
++ write_cr4(cr4);
++}
++
++/*
++ * NSC/Cyrix CPU configuration register indexes
++ */
++
++#define CX86_PCR0 0x20
++#define CX86_GCR 0xb8
++#define CX86_CCR0 0xc0
++#define CX86_CCR1 0xc1
++#define CX86_CCR2 0xc2
++#define CX86_CCR3 0xc3
++#define CX86_CCR4 0xe8
++#define CX86_CCR5 0xe9
++#define CX86_CCR6 0xea
++#define CX86_CCR7 0xeb
++#define CX86_PCR1 0xf0
++#define CX86_DIR0 0xfe
++#define CX86_DIR1 0xff
++#define CX86_ARR_BASE 0xc4
++#define CX86_RCR_BASE 0xdc
++
++/*
++ * NSC/Cyrix CPU indexed register access macros
++ */
++
++#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
++
++#define setCx86(reg, data) do { \
++ outb((reg), 0x22); \
++ outb((data), 0x23); \
++} while (0)
++
++/* Stop speculative execution */
++static inline void sync_core(void)
++{
++ int tmp;
++ asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
++}
++
++static inline void __monitor(const void *eax, unsigned long ecx,
++ unsigned long edx)
++{
++ /* "monitor %eax,%ecx,%edx;" */
++ asm volatile(
++ ".byte 0x0f,0x01,0xc8;"
++ : :"a" (eax), "c" (ecx), "d"(edx));
++}
++
++static inline void __mwait(unsigned long eax, unsigned long ecx)
++{
++ /* "mwait %eax,%ecx;" */
++ asm volatile(
++ ".byte 0x0f,0x01,0xc9;"
++ : :"a" (eax), "c" (ecx));
++}
++
++/* from system description table in BIOS. Mostly for MCA use, but
++others may find it useful. */
++extern unsigned int machine_id;
++extern unsigned int machine_submodel_id;
++extern unsigned int BIOS_revision;
++extern unsigned int mca_pentium_flag;
++
++/* Boot loader type from the setup header */
++extern int bootloader_type;
++
++/*
++ * User space process size: 3GB (default).
++ */
++#define TASK_SIZE (PAGE_OFFSET)
++
++/* This decides where the kernel will search for a free chunk of vm
++ * space during mmap's.
++ */
++#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
++
++#define HAVE_ARCH_PICK_MMAP_LAYOUT
++
++/*
++ * Size of io_bitmap.
++ */
++#define IO_BITMAP_BITS 65536
++#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
++#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
++#ifndef CONFIG_X86_NO_TSS
++#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
++#endif
++#define INVALID_IO_BITMAP_OFFSET 0x8000
++#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
++
++struct i387_fsave_struct {
++ long cwd;
++ long swd;
++ long twd;
++ long fip;
++ long fcs;
++ long foo;
++ long fos;
++ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
++ long status; /* software status information */
++};
++
++struct i387_fxsave_struct {
++ unsigned short cwd;
++ unsigned short swd;
++ unsigned short twd;
++ unsigned short fop;
++ long fip;
++ long fcs;
++ long foo;
++ long fos;
++ long mxcsr;
++ long mxcsr_mask;
++ long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
++ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
++ long padding[56];
++} __attribute__ ((aligned (16)));
++
++struct i387_soft_struct {
++ long cwd;
++ long swd;
++ long twd;
++ long fip;
++ long fcs;
++ long foo;
++ long fos;
++ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
++ unsigned char ftop, changed, lookahead, no_update, rm, alimit;
++ struct info *info;
++ unsigned long entry_eip;
++};
++
++union i387_union {
++ struct i387_fsave_struct fsave;
++ struct i387_fxsave_struct fxsave;
++ struct i387_soft_struct soft;
++};
++
++typedef struct {
++ unsigned long seg;
++} mm_segment_t;
++
++struct thread_struct;
++
++#ifndef CONFIG_X86_NO_TSS
++struct tss_struct {
++ unsigned short back_link,__blh;
++ unsigned long esp0;
++ unsigned short ss0,__ss0h;
++ unsigned long esp1;
++ unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */
++ unsigned long esp2;
++ unsigned short ss2,__ss2h;
++ unsigned long __cr3;
++ unsigned long eip;
++ unsigned long eflags;
++ unsigned long eax,ecx,edx,ebx;
++ unsigned long esp;
++ unsigned long ebp;
++ unsigned long esi;
++ unsigned long edi;
++ unsigned short es, __esh;
++ unsigned short cs, __csh;
++ unsigned short ss, __ssh;
++ unsigned short ds, __dsh;
++ unsigned short fs, __fsh;
++ unsigned short gs, __gsh;
++ unsigned short ldt, __ldth;
++ unsigned short trace, io_bitmap_base;
++ /*
++ * The extra 1 is there because the CPU will access an
++ * additional byte beyond the end of the IO permission
++ * bitmap. The extra byte must be all 1 bits, and must
++ * be within the limit.
++ */
++ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
++ /*
++ * Cache the current maximum and the last task that used the bitmap:
++ */
++ unsigned long io_bitmap_max;
++ struct thread_struct *io_bitmap_owner;
++ /*
++ * pads the TSS to be cacheline-aligned (size is 0x100)
++ */
++ unsigned long __cacheline_filler[35];
++ /*
++ * .. and then another 0x100 bytes for emergency kernel stack
++ */
++ unsigned long stack[64];
++} __attribute__((packed));
++#endif
++
++#define ARCH_MIN_TASKALIGN 16
++
++struct thread_struct {
++/* cached TLS descriptors. */
++ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
++ unsigned long esp0;
++ unsigned long sysenter_cs;
++ unsigned long eip;
++ unsigned long esp;
++ unsigned long fs;
++ unsigned long gs;
++/* Hardware debugging registers */
++ unsigned long debugreg[8]; /* %%db0-7 debug registers */
++/* fault info */
++ unsigned long cr2, trap_no, error_code;
++/* floating point info */
++ union i387_union i387;
++/* virtual 86 mode info */
++ struct vm86_struct __user * vm86_info;
++ unsigned long screen_bitmap;
++ unsigned long v86flags, v86mask, saved_esp0;
++ unsigned int saved_fs, saved_gs;
++/* IO permissions */
++ unsigned long *io_bitmap_ptr;
++ unsigned long iopl;
++/* max allowed port in the bitmap, in bytes: */
++ unsigned long io_bitmap_max;
++};
++
++#define INIT_THREAD { \
++ .vm86_info = NULL, \
++ .sysenter_cs = __KERNEL_CS, \
++ .io_bitmap_ptr = NULL, \
++}
++
++#ifndef CONFIG_X86_NO_TSS
++/*
++ * Note that the .io_bitmap member must be extra-big. This is because
++ * the CPU will access an additional byte beyond the end of the IO
++ * permission bitmap. The extra byte must be all 1 bits, and must
++ * be within the limit.
++ */
++#define INIT_TSS { \
++ .esp0 = sizeof(init_stack) + (long)&init_stack, \
++ .ss0 = __KERNEL_DS, \
++ .ss1 = __KERNEL_CS, \
++ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
++ .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
++}
++
++static inline void __load_esp0(struct tss_struct *tss, struct thread_struct *thread)
++{
++ tss->esp0 = thread->esp0;
++ /* This can only happen when SEP is enabled, no need to test "SEP"arately */
++ if (unlikely(tss->ss1 != thread->sysenter_cs)) {
++ tss->ss1 = thread->sysenter_cs;
++ wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
++ }
++}
++#define load_esp0(tss, thread) \
++ __load_esp0(tss, thread)
++#else
++#define load_esp0(tss, thread) \
++ HYPERVISOR_stack_switch(__KERNEL_DS, (thread)->esp0)
++#endif
++
++#define start_thread(regs, new_eip, new_esp) do { \
++ __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
++ set_fs(USER_DS); \
++ regs->xds = __USER_DS; \
++ regs->xes = __USER_DS; \
++ regs->xss = __USER_DS; \
++ regs->xcs = __USER_CS; \
++ regs->eip = new_eip; \
++ regs->esp = new_esp; \
++} while (0)
++
++/*
++ * These special macros can be used to get or set a debugging register
++ */
++#define get_debugreg(var, register) \
++ (var) = HYPERVISOR_get_debugreg((register))
++#define set_debugreg(value, register) \
++ HYPERVISOR_set_debugreg((register), (value))
++
++/*
++ * Set IOPL bits in EFLAGS from given mask
++ */
++static inline void set_iopl_mask(unsigned mask)
++{
++ struct physdev_set_iopl set_iopl;
++
++ /* Force the change at ring 0. */
++ set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
++ HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
++}
++
++/* Forward declaration, a strange C thing */
++struct task_struct;
++struct mm_struct;
++
++/* Free all resources held by a thread. */
++extern void release_thread(struct task_struct *);
++
++/* Prepare to copy thread state - unlazy all lazy status */
++extern void prepare_to_copy(struct task_struct *tsk);
++
++/*
++ * create a kernel thread without removing it from tasklists
++ */
++extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
++
++extern unsigned long thread_saved_pc(struct task_struct *tsk);
++void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);
++
++unsigned long get_wchan(struct task_struct *p);
++
++#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
++#define KSTK_TOP(info) \
++({ \
++ unsigned long *__ptr = (unsigned long *)(info); \
++ (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
++})
++
++/*
++ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
++ * This is necessary to guarantee that the entire "struct pt_regs"
++ * is accessible even if the CPU hasn't stored the SS/ESP registers
++ * on the stack (interrupt gate does not save these registers
++ * when switching to the same priv ring).
++ * Therefore beware: accessing the xss/esp fields of the
++ * "struct pt_regs" is possible, but they may contain the
++ * completely wrong values.
++ */
++#define task_pt_regs(task) \
++({ \
++ struct pt_regs *__regs__; \
++ __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
++ __regs__ - 1; \
++})
++
++#define KSTK_EIP(task) (task_pt_regs(task)->eip)
++#define KSTK_ESP(task) (task_pt_regs(task)->esp)
++
++
++struct microcode_header {
++ unsigned int hdrver;
++ unsigned int rev;
++ unsigned int date;
++ unsigned int sig;
++ unsigned int cksum;
++ unsigned int ldrver;
++ unsigned int pf;
++ unsigned int datasize;
++ unsigned int totalsize;
++ unsigned int reserved[3];
++};
++
++struct microcode {
++ struct microcode_header hdr;
++ unsigned int bits[0];
++};
++
++typedef struct microcode microcode_t;
++typedef struct microcode_header microcode_header_t;
++
++/* microcode format is extended from prescott processors */
++struct extended_signature {
++ unsigned int sig;
++ unsigned int pf;
++ unsigned int cksum;
++};
++
++struct extended_sigtable {
++ unsigned int count;
++ unsigned int cksum;
++ unsigned int reserved[3];
++ struct extended_signature sigs[0];
++};
++
++/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
++static inline void rep_nop(void)
++{
++ __asm__ __volatile__("rep;nop": : :"memory");
++}
++
++#define cpu_relax() rep_nop()
++
++/* generic versions from gas */
++#define GENERIC_NOP1 ".byte 0x90\n"
++#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
++#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
++#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
++#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
++#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
++#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
++#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
++
++/* Opteron nops */
++#define K8_NOP1 GENERIC_NOP1
++#define K8_NOP2 ".byte 0x66,0x90\n"
++#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
++#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
++#define K8_NOP5 K8_NOP3 K8_NOP2
++#define K8_NOP6 K8_NOP3 K8_NOP3
++#define K8_NOP7 K8_NOP4 K8_NOP3
++#define K8_NOP8 K8_NOP4 K8_NOP4
++
++/* K7 nops */
++/* uses eax dependencies (arbitrary choice) */
++#define K7_NOP1 GENERIC_NOP1
++#define K7_NOP2 ".byte 0x8b,0xc0\n"
++#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
++#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
++#define K7_NOP5 K7_NOP4 ASM_NOP1
++#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
++#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
++#define K7_NOP8 K7_NOP7 ASM_NOP1
++
++#ifdef CONFIG_MK8
++#define ASM_NOP1 K8_NOP1
++#define ASM_NOP2 K8_NOP2
++#define ASM_NOP3 K8_NOP3
++#define ASM_NOP4 K8_NOP4
++#define ASM_NOP5 K8_NOP5
++#define ASM_NOP6 K8_NOP6
++#define ASM_NOP7 K8_NOP7
++#define ASM_NOP8 K8_NOP8
++#elif defined(CONFIG_MK7)
++#define ASM_NOP1 K7_NOP1
++#define ASM_NOP2 K7_NOP2
++#define ASM_NOP3 K7_NOP3
++#define ASM_NOP4 K7_NOP4
++#define ASM_NOP5 K7_NOP5
++#define ASM_NOP6 K7_NOP6
++#define ASM_NOP7 K7_NOP7
++#define ASM_NOP8 K7_NOP8
++#else
++#define ASM_NOP1 GENERIC_NOP1
++#define ASM_NOP2 GENERIC_NOP2
++#define ASM_NOP3 GENERIC_NOP3
++#define ASM_NOP4 GENERIC_NOP4
++#define ASM_NOP5 GENERIC_NOP5
++#define ASM_NOP6 GENERIC_NOP6
++#define ASM_NOP7 GENERIC_NOP7
++#define ASM_NOP8 GENERIC_NOP8
++#endif
++
++#define ASM_NOP_MAX 8
++
++/* Prefetch instructions for Pentium III and AMD Athlon */
++/* It's not worth to care about 3dnow! prefetches for the K6
++ because they are microcoded there and very slow.
++ However we don't do prefetches for pre XP Athlons currently
++ That should be fixed. */
++#define ARCH_HAS_PREFETCH
++static inline void prefetch(const void *x)
++{
++ alternative_input(ASM_NOP4,
++ "prefetchnta (%1)",
++ X86_FEATURE_XMM,
++ "r" (x));
++}
++
++#define ARCH_HAS_PREFETCH
++#define ARCH_HAS_PREFETCHW
++#define ARCH_HAS_SPINLOCK_PREFETCH
++
++/* 3dnow! prefetch to get an exclusive cache line. Useful for
++ spinlocks to avoid one state transition in the cache coherency protocol. */
++static inline void prefetchw(const void *x)
++{
++ alternative_input(ASM_NOP4,
++ "prefetchw (%1)",
++ X86_FEATURE_3DNOW,
++ "r" (x));
++}
++#define spin_lock_prefetch(x) prefetchw(x)
++
++extern void select_idle_routine(const struct cpuinfo_x86 *c);
++
++#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
++
++extern unsigned long boot_option_idle_override;
++extern void enable_sep_cpu(void);
++extern int sysenter_setup(void);
++
++#endif /* __ASM_I386_PROCESSOR_H */
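The cpuid()/cpuid_*() helpers above match the native i386 ones except that they emit XEN_CPUID, a hypervisor-aware macro, instead of the bare cpuid instruction, so Xen can intercept and filter the feature bits a guest sees. For comparison, the same leaf-0 query on bare metal can be written with GCC's <cpuid.h> (editorial illustration only; __get_cpuid is a GCC helper, not part of this patch):

#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;
	/* The vendor string is returned in ebx, edx, ecx, in that order. */
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';
	printf("max basic leaf %u, vendor \"%s\"\n", eax, vendor);
	return 0;
}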
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/ptrace.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,90 @@
++#ifndef _I386_PTRACE_H
++#define _I386_PTRACE_H
++
++#define EBX 0
++#define ECX 1
++#define EDX 2
++#define ESI 3
++#define EDI 4
++#define EBP 5
++#define EAX 6
++#define DS 7
++#define ES 8
++#define FS 9
++#define GS 10
++#define ORIG_EAX 11
++#define EIP 12
++#define CS 13
++#define EFL 14
++#define UESP 15
++#define SS 16
++#define FRAME_SIZE 17
++
++/* this struct defines the way the registers are stored on the
++ stack during a system call. */
++
++struct pt_regs {
++ long ebx;
++ long ecx;
++ long edx;
++ long esi;
++ long edi;
++ long ebp;
++ long eax;
++ int xds;
++ int xes;
++ long orig_eax;
++ long eip;
++ int xcs;
++ long eflags;
++ long esp;
++ int xss;
++};
++
++/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
++#define PTRACE_GETREGS 12
++#define PTRACE_SETREGS 13
++#define PTRACE_GETFPREGS 14
++#define PTRACE_SETFPREGS 15
++#define PTRACE_GETFPXREGS 18
++#define PTRACE_SETFPXREGS 19
++
++#define PTRACE_OLDSETOPTIONS 21
++
++#define PTRACE_GET_THREAD_AREA 25
++#define PTRACE_SET_THREAD_AREA 26
++
++#define PTRACE_SYSEMU 31
++#define PTRACE_SYSEMU_SINGLESTEP 32
++
++#ifdef __KERNEL__
++
++#include <asm/vm86.h>
++
++struct task_struct;
++extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
++
++/*
++ * user_mode_vm(regs) determines whether a register set came from user mode.
++ * This is true if V8086 mode was enabled OR if the register set was from
++ * protected mode with RPL-3 CS value. This tricky test checks that with
++ * one comparison. Many places in the kernel can bypass this full check
++ * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
++ */
++static inline int user_mode(struct pt_regs *regs)
++{
++ return (regs->xcs & 2) != 0;
++}
++static inline int user_mode_vm(struct pt_regs *regs)
++{
++ return ((regs->xcs & 2) | (regs->eflags & VM_MASK)) != 0;
++}
++#define instruction_pointer(regs) ((regs)->eip)
++#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
++extern unsigned long profile_pc(struct pt_regs *regs);
++#else
++#define profile_pc(regs) instruction_pointer(regs)
++#endif
++#endif /* __KERNEL__ */
++
++#endif
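user_mode() above masks %cs with 2 rather than the native 3 because a paravirtualized guest kernel does not run in ring 0; only RPL 2 and 3, i.e. bit 1 of the selector, indicate user space. A tiny sketch of that test (editorial illustration only, using the selector values defined in segment.h below):

#include <assert.h>

static int user_mode(int xcs)
{
	return (xcs & 2) != 0;
}

int main(void)
{
	assert(!user_mode(0x61));	/* ring-1 kernel CS, GET_KERNEL_CS() */
	assert(user_mode(0x73));	/* __USER_CS, RPL 3 */
	return 0;
}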
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/scatterlist.h 2007-08-27 14:01:58.000000000 -0400
+@@ -0,0 +1,22 @@
++#ifndef _I386_SCATTERLIST_H
++#define _I386_SCATTERLIST_H
++
++struct scatterlist {
++ struct page *page;
++ unsigned int offset;
++ unsigned int length;
++ dma_addr_t dma_address;
++ unsigned int dma_length;
++};
++
++/* These macros should be used after a pci_map_sg call has been done
++ * to get bus addresses of each of the SG entries and their lengths.
++ * You should only work with the number of sg entries pci_map_sg
++ * returns.
++ */
++#define sg_dma_address(sg) ((sg)->dma_address)
++#define sg_dma_len(sg) ((sg)->dma_length)
++
++#define ISA_DMA_THRESHOLD (0x00ffffff)
++
++#endif /* !(_I386_SCATTERLIST_H) */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/segment.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,117 @@
++#ifndef _ASM_SEGMENT_H
++#define _ASM_SEGMENT_H
++
++/*
++ * The layout of the per-CPU GDT under Linux:
++ *
++ * 0 - null
++ * 1 - reserved
++ * 2 - reserved
++ * 3 - reserved
++ *
++ * 4 - unused <==== new cacheline
++ * 5 - unused
++ *
++ * ------- start of TLS (Thread-Local Storage) segments:
++ *
++ * 6 - TLS segment #1 [ glibc's TLS segment ]
++ * 7 - TLS segment #2 [ Wine's %fs Win32 segment ]
++ * 8 - TLS segment #3
++ * 9 - reserved
++ * 10 - reserved
++ * 11 - reserved
++ *
++ * ------- start of kernel segments:
++ *
++ * 12 - kernel code segment <==== new cacheline
++ * 13 - kernel data segment
++ * 14 - default user CS
++ * 15 - default user DS
++ * 16 - TSS
++ * 17 - LDT
++ * 18 - PNPBIOS support (16->32 gate)
++ * 19 - PNPBIOS support
++ * 20 - PNPBIOS support
++ * 21 - PNPBIOS support
++ * 22 - PNPBIOS support
++ * 23 - APM BIOS support
++ * 24 - APM BIOS support
++ * 25 - APM BIOS support
++ *
++ * 26 - ESPFIX small SS
++ * 27 - unused
++ * 28 - unused
++ * 29 - unused
++ * 30 - unused
++ * 31 - TSS for double fault handler
++ */
++#define GDT_ENTRY_TLS_ENTRIES 3
++#define GDT_ENTRY_TLS_MIN 6
++#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
++
++#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
++
++#define GDT_ENTRY_DEFAULT_USER_CS 14
++#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
++
++#define GDT_ENTRY_DEFAULT_USER_DS 15
++#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
++
++#define GDT_ENTRY_KERNEL_BASE 12
++
++#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
++#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
++#define GET_KERNEL_CS() (__KERNEL_CS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
++
++#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
++#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
++#define GET_KERNEL_DS() (__KERNEL_DS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
++
++#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
++#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5)
++
++#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6)
++#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11)
++
++#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
++#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
++
++#define GDT_ENTRY_DOUBLEFAULT_TSS 31
++
++/*
++ * The GDT has 32 entries
++ */
++#define GDT_ENTRIES 32
++
++#define GDT_SIZE (GDT_ENTRIES * 8)
++
++/* Simple and small GDT entries for booting only */
++
++#define GDT_ENTRY_BOOT_CS 2
++#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8)
++
++#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1)
++#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8)
++
++/* The PnP BIOS entries in the GDT */
++#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0)
++#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1)
++#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2)
++#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3)
++#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4)
++
++/* The PnP BIOS selectors */
++#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */
++#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */
++#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */
++#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */
++#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */
++
++/*
++ * The interrupt descriptor table has room for 256 idt's,
++ * the global descriptor table is dependent on the number
++ * of tasks we can have..
++ */
++#define IDT_ENTRIES 256
++
++#endif
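The selectors in the header above follow the usual encoding, descriptor index * 8 with the RPL in the low two bits, and GET_KERNEL_CS()/GET_KERNEL_DS() OR in RPL 1 because the kernel runs in ring 1 unless Xen offers XENFEAT_supervisor_mode_kernel. The arithmetic, as an editorial illustration only (not part of the patch):

#include <stdio.h>

#define SEL(index, rpl) (((index) * 8) | (rpl))

int main(void)
{
	printf("__KERNEL_CS = 0x%02x\n", SEL(12, 0));	/* 0x60 */
	printf("__KERNEL_DS = 0x%02x\n", SEL(13, 0));	/* 0x68 */
	printf("__USER_CS   = 0x%02x\n", SEL(14, 3));	/* 0x73 */
	printf("__USER_DS   = 0x%02x\n", SEL(15, 3));	/* 0x7b */
	/* ring-1 kernel code selector as produced by GET_KERNEL_CS() */
	printf("ring-1 CS   = 0x%02x\n", SEL(12, 1));	/* 0x61 */
	return 0;
}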
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/setup.h 2007-08-27 14:02:01.000000000 -0400
+@@ -0,0 +1,81 @@
++/*
++ * Just a place holder. We don't want to have to test x86 before
++ * we include stuff
++ */
++
++#ifndef _i386_SETUP_H
++#define _i386_SETUP_H
++
++#ifdef __KERNEL__
++#include <linux/pfn.h>
++
++/*
++ * Reserved space for vmalloc and iomap - defined in asm/page.h
++ */
++#define MAXMEM_PFN PFN_DOWN(MAXMEM)
++#define MAX_NONPAE_PFN (1 << 20)
++#endif
++
++#define PARAM_SIZE 4096
++#define COMMAND_LINE_SIZE 256
++
++#define OLD_CL_MAGIC_ADDR 0x90020
++#define OLD_CL_MAGIC 0xA33F
++#define OLD_CL_BASE_ADDR 0x90000
++#define OLD_CL_OFFSET 0x90022
++#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
++
++#ifndef __ASSEMBLY__
++/*
++ * This is set up by the setup-routine at boot-time
++ */
++extern unsigned char boot_params[PARAM_SIZE];
++
++#define PARAM (boot_params)
++#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
++#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
++#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
++#define E820_MAP_NR (*(char*) (PARAM+E820NR))
++#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
++#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
++#define IST_INFO (*(struct ist_info *) (PARAM+0x60))
++#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
++#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
++#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
++#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
++#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
++#define EFI_MEMMAP ((void *) *((unsigned long *)(PARAM+0x1d0)))
++#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
++#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
++#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
++#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
++#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
++#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
++#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
++#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
++#define INITRD_START (__pa(xen_start_info->mod_start))
++#define INITRD_SIZE (xen_start_info->mod_len)
++#define EDID_INFO (*(struct edid_info *) (PARAM+0x440))
++#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
++#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
++#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
++#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
++
++/*
++ * Do NOT EVER look at the BIOS memory size location.
++ * It does not work on many machines.
++ */
++#define LOWMEMSIZE() (0x9f000)
++
++struct e820entry;
++
++char * __init machine_specific_memory_setup(void);
++
++int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
++int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
++void __init add_memory_region(unsigned long long start,
++ unsigned long long size, int type);
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _i386_SETUP_H */
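
The PARAM-style accessors above read fixed offsets straight out of the raw boot_params buffer (with INITRD_START/INITRD_SIZE redirected to xen_start_info in this tree). As a rough standalone illustration of that pattern only, here is a userspace sketch; the demo_ buffer, offsets and values are invented, not the real zero-page layout:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* stand-in for the 4 KiB boot_params page */
    static unsigned char demo_params[4096];

    /* fixed-offset accessors in the same style as the PARAM macros
     * (raw pointer casts, exactly as the kernel header does on x86) */
    #define DEMO_EXT_MEM_K   (*(uint16_t *)(demo_params + 2))
    #define DEMO_LOADER_TYPE (*(uint8_t  *)(demo_params + 0x210))

    int main(void)
    {
        uint16_t ext = 63488;            /* pretend: 62 MiB above 1 MiB */
        memcpy(demo_params + 2, &ext, sizeof(ext));
        demo_params[0x210] = 0x71;       /* pretend loader id */

        printf("EXT_MEM_K   = %u KiB\n", DEMO_EXT_MEM_K);
        printf("LOADER_TYPE = %#x\n", DEMO_LOADER_TYPE);
        return 0;
    }
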
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/smp.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,103 @@
++#ifndef __ASM_SMP_H
++#define __ASM_SMP_H
++
++/*
++ * We need the APIC definitions automatically as part of 'smp.h'
++ */
++#ifndef __ASSEMBLY__
++#include <linux/kernel.h>
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++#ifndef __ASSEMBLY__
++#include <asm/fixmap.h>
++#include <asm/bitops.h>
++#include <asm/mpspec.h>
++#ifdef CONFIG_X86_IO_APIC
++#include <asm/io_apic.h>
++#endif
++#include <asm/apic.h>
++#endif
++#endif
++
++#define BAD_APICID 0xFFu
++#ifdef CONFIG_SMP
++#ifndef __ASSEMBLY__
++
++/*
++ * Private routines/data
++ */
++
++extern void smp_alloc_memory(void);
++extern int pic_mode;
++extern int smp_num_siblings;
++extern cpumask_t cpu_sibling_map[];
++extern cpumask_t cpu_core_map[];
++
++extern void (*mtrr_hook) (void);
++extern void zap_low_mappings (void);
++extern void lock_ipi_call_lock(void);
++extern void unlock_ipi_call_lock(void);
++
++#define MAX_APICID 256
++extern u8 x86_cpu_to_apicid[];
++
++#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
++
++#ifdef CONFIG_HOTPLUG_CPU
++extern void cpu_exit_clear(void);
++extern void cpu_uninit(void);
++#endif
++
++/*
++ * This function is needed by all SMP systems. It must _always_ be valid
++ * from the initial startup. We map APIC_BASE very early in page_setup(),
++ * so this is correct in the x86 case.
++ */
++#define raw_smp_processor_id() (current_thread_info()->cpu)
++
++extern cpumask_t cpu_possible_map;
++#define cpu_callin_map cpu_possible_map
++
++/* We don't mark CPUs online until __cpu_up(), so we need another measure */
++static inline int num_booting_cpus(void)
++{
++ return cpus_weight(cpu_possible_map);
++}
++
++#ifdef CONFIG_X86_LOCAL_APIC
++
++#ifdef APIC_DEFINITION
++extern int hard_smp_processor_id(void);
++#else
++#include <mach_apicdef.h>
++static inline int hard_smp_processor_id(void)
++{
++ /* we don't want to mark this access volatile - bad code generation */
++ return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
++}
++#endif
++
++static __inline int logical_smp_processor_id(void)
++{
++ /* we don't want to mark this access volatile - bad code generation */
++ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
++}
++
++#endif
++
++extern int __cpu_disable(void);
++extern void __cpu_die(unsigned int cpu);
++extern void prefill_possible_map(void);
++#endif /* !__ASSEMBLY__ */
++
++#else /* CONFIG_SMP */
++
++#define cpu_physical_id(cpu) boot_cpu_physical_apicid
++
++#define NO_PROC_ID 0xFF /* No processor magic marker */
++
++#endif
++#endif
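
Most of the SMP helpers above reduce to table lookups and bitmap weights: cpu_physical_id() indexes x86_cpu_to_apicid[], and num_booting_cpus() is cpus_weight() over cpu_possible_map. A minimal userspace model of that logic with made-up table contents:

    #include <stdio.h>

    /* invented cpu -> APIC id table, in the spirit of x86_cpu_to_apicid[] */
    static unsigned char demo_cpu_to_apicid[] = { 0, 1, 4, 5 };

    /* cpu_possible_map modelled as a plain bitmask: cpus 0, 1 and 3 possible */
    static unsigned long demo_cpu_possible_map = 0xbUL;

    static int demo_num_booting_cpus(void)
    {
        /* cpus_weight() is a population count over the bitmap */
        return __builtin_popcountl(demo_cpu_possible_map);
    }

    int main(void)
    {
        for (int cpu = 0; cpu < 4; cpu++)
            printf("cpu %d -> apicid %d\n", cpu, demo_cpu_to_apicid[cpu]);
        printf("booting cpus: %d\n", demo_num_booting_cpus());
        return 0;
    }
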
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/spinlock.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,202 @@
++#ifndef __ASM_SPINLOCK_H
++#define __ASM_SPINLOCK_H
++
++#include <asm/atomic.h>
++#include <asm/rwlock.h>
++#include <asm/page.h>
++#include <linux/compiler.h>
++
++/*
++ * Your basic SMP spinlocks, allowing only a single CPU anywhere
++ *
++ * Simple spin lock operations. There are two variants, one clears IRQ's
++ * on the local processor, one does not.
++ *
++ * We make no fairness assumptions. They have a cost.
++ *
++ * (the type definitions are in asm/spinlock_types.h)
++ */
++
++#define __raw_spin_is_locked(x) \
++ (*(volatile signed char *)(&(x)->slock) <= 0)
++
++#define __raw_spin_lock_string \
++ "\n1:\t" \
++ LOCK_PREFIX " ; decb %0\n\t" \
++ "jns 3f\n" \
++ "2:\t" \
++ "rep;nop\n\t" \
++ "cmpb $0,%0\n\t" \
++ "jle 2b\n\t" \
++ "jmp 1b\n" \
++ "3:\n\t"
++
++/*
++ * NOTE: there's an irqs-on section here, which normally would have to be
++ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
++ * __raw_spin_lock_string_flags().
++ */
++#define __raw_spin_lock_string_flags \
++ "\n1:\t" \
++ LOCK_PREFIX " ; decb %0\n\t" \
++ "jns 5f\n" \
++ "2:\t" \
++ "testl $0x200, %1\n\t" \
++ "jz 4f\n\t" \
++ "#sti\n" \
++ "3:\t" \
++ "rep;nop\n\t" \
++ "cmpb $0, %0\n\t" \
++ "jle 3b\n\t" \
++ "#cli\n\t" \
++ "jmp 1b\n" \
++ "4:\t" \
++ "rep;nop\n\t" \
++ "cmpb $0, %0\n\t" \
++ "jg 1b\n\t" \
++ "jmp 4b\n" \
++ "5:\n\t"
++
++static inline void __raw_spin_lock(raw_spinlock_t *lock)
++{
++ asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory");
++}
++
++/*
++ * It is easier for the lock validator if interrupts are not re-enabled
++ * in the middle of a lock-acquire. This is a performance feature anyway
++ * so we turn it off:
++ */
++#ifndef CONFIG_PROVE_LOCKING
++static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
++{
++ asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory");
++}
++#endif
++
++static inline int __raw_spin_trylock(raw_spinlock_t *lock)
++{
++ char oldval;
++ __asm__ __volatile__(
++ "xchgb %b0,%1"
++ :"=q" (oldval), "+m" (lock->slock)
++ :"0" (0) : "memory");
++ return oldval > 0;
++}
++
++/*
++ * __raw_spin_unlock based on writing $1 to the low byte.
++ * This method works despite all the confusion, except on PPro SMP
++ * or when using OOSTORE, where we use xchgb instead (PPro errata 66, 92).
++ */
++
++#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
++
++#define __raw_spin_unlock_string \
++ "movb $1,%0" \
++ :"+m" (lock->slock) : : "memory"
++
++
++static inline void __raw_spin_unlock(raw_spinlock_t *lock)
++{
++ __asm__ __volatile__(
++ __raw_spin_unlock_string
++ );
++}
++
++#else
++
++#define __raw_spin_unlock_string \
++ "xchgb %b0, %1" \
++ :"=q" (oldval), "+m" (lock->slock) \
++ :"0" (oldval) : "memory"
++
++static inline void __raw_spin_unlock(raw_spinlock_t *lock)
++{
++ char oldval = 1;
++
++ __asm__ __volatile__(
++ __raw_spin_unlock_string
++ );
++}
++
++#endif
++
++#define __raw_spin_unlock_wait(lock) \
++ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
++
++/*
++ * Read-write spinlocks, allowing multiple readers
++ * but only one writer.
++ *
++ * NOTE! it is quite common to have readers in interrupts
++ * but no interrupt writers. For those circumstances we
++ * can "mix" irq-safe locks - any writer needs to get a
++ * irq-safe write-lock, but readers can get non-irqsafe
++ * read-locks.
++ *
++ * On x86, we implement read-write locks as a 32-bit counter
++ * with the high bit (sign) being the "contended" bit.
++ *
++ * The inline assembly is non-obvious. Think about it.
++ *
++ * Changed to use the same technique as rw semaphores. See
++ * semaphore.h for details. -ben
++ *
++ * the helpers are in arch/i386/kernel/semaphore.c
++ */
++
++/**
++ * read_can_lock - would read_trylock() succeed?
++ * @lock: the rwlock in question.
++ */
++#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
++
++/**
++ * write_can_lock - would write_trylock() succeed?
++ * @lock: the rwlock in question.
++ */
++#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
++
++static inline void __raw_read_lock(raw_rwlock_t *rw)
++{
++ __build_read_lock(rw, "__read_lock_failed");
++}
++
++static inline void __raw_write_lock(raw_rwlock_t *rw)
++{
++ __build_write_lock(rw, "__write_lock_failed");
++}
++
++static inline int __raw_read_trylock(raw_rwlock_t *lock)
++{
++ atomic_t *count = (atomic_t *)lock;
++ atomic_dec(count);
++ if (atomic_read(count) >= 0)
++ return 1;
++ atomic_inc(count);
++ return 0;
++}
++
++static inline int __raw_write_trylock(raw_rwlock_t *lock)
++{
++ atomic_t *count = (atomic_t *)lock;
++ if (atomic_sub_and_test(RW_LOCK_BIAS, count))
++ return 1;
++ atomic_add(RW_LOCK_BIAS, count);
++ return 0;
++}
++
++static inline void __raw_read_unlock(raw_rwlock_t *rw)
++{
++ asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
++}
++
++static inline void __raw_write_unlock(raw_rwlock_t *rw)
++{
++ asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
++ : "+m" (rw->lock) : : "memory");
++}
++
++#endif /* __ASM_SPINLOCK_H */
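
For readers unfamiliar with the byte-valued lock protocol used above (slock == 1 means free, <= 0 means held; lock is a locked decb, trylock exchanges 0 into the byte, unlock stores 1), here is a rough userspace model using GCC atomic builtins rather than the inline assembly; all demo_* names are invented:

    #include <stdio.h>

    typedef struct { signed char slock; } demo_spinlock_t;
    #define DEMO_SPIN_UNLOCKED { 1 }

    static int demo_trylock(demo_spinlock_t *l)
    {
        /* exchange 0 into slock: success iff the old value was
         * positive, i.e. the lock was free */
        signed char old = __atomic_exchange_n(&l->slock, 0, __ATOMIC_ACQUIRE);
        return old > 0;
    }

    static void demo_unlock(demo_spinlock_t *l)
    {
        /* like "movb $1, slock" */
        __atomic_store_n(&l->slock, 1, __ATOMIC_RELEASE);
    }

    int main(void)
    {
        demo_spinlock_t l = DEMO_SPIN_UNLOCKED;
        printf("first trylock:  %d\n", demo_trylock(&l)); /* 1: acquired */
        printf("second trylock: %d\n", demo_trylock(&l)); /* 0: already held */
        demo_unlock(&l);
        printf("after unlock:   %d\n", demo_trylock(&l)); /* 1 again */
        return 0;
    }
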
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/swiotlb.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,43 @@
++#ifndef _ASM_SWIOTLB_H
++#define _ASM_SWIOTLB_H 1
++
++/* SWIOTLB interface */
++
++extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
++ int dir);
++extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir);
++extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
++ dma_addr_t dev_addr,
++ size_t size, int dir);
++extern void swiotlb_sync_single_for_device(struct device *hwdev,
++ dma_addr_t dev_addr,
++ size_t size, int dir);
++extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
++ struct scatterlist *sg, int nelems,
++ int dir);
++extern void swiotlb_sync_sg_for_device(struct device *hwdev,
++ struct scatterlist *sg, int nelems,
++ int dir);
++extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
++ int nents, int direction);
++extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
++ int nents, int direction);
++extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
++#ifdef CONFIG_HIGHMEM
++extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction);
++extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
++ size_t size, enum dma_data_direction direction);
++#endif
++extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
++extern void swiotlb_init(void);
++
++#ifdef CONFIG_SWIOTLB
++extern int swiotlb;
++#else
++#define swiotlb 0
++#endif
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/synch_bitops.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,145 @@
++#ifndef __XEN_SYNCH_BITOPS_H__
++#define __XEN_SYNCH_BITOPS_H__
++
++/*
++ * Copyright 1992, Linus Torvalds.
++ * Heavily modified to provide guaranteed strong synchronisation
++ * when communicating with Xen or other guest OSes running on other CPUs.
++ */
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#define ADDR (*(volatile long *) addr)
++
++static __inline__ void synch_set_bit(int nr, volatile void * addr)
++{
++ __asm__ __volatile__ (
++ "lock btsl %1,%0"
++ : "+m" (ADDR) : "Ir" (nr) : "memory" );
++}
++
++static __inline__ void synch_clear_bit(int nr, volatile void * addr)
++{
++ __asm__ __volatile__ (
++ "lock btrl %1,%0"
++ : "+m" (ADDR) : "Ir" (nr) : "memory" );
++}
++
++static __inline__ void synch_change_bit(int nr, volatile void * addr)
++{
++ __asm__ __volatile__ (
++ "lock btcl %1,%0"
++ : "+m" (ADDR) : "Ir" (nr) : "memory" );
++}
++
++static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
++{
++ int oldbit;
++ __asm__ __volatile__ (
++ "lock btsl %2,%1\n\tsbbl %0,%0"
++ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
++ return oldbit;
++}
++
++static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
++{
++ int oldbit;
++ __asm__ __volatile__ (
++ "lock btrl %2,%1\n\tsbbl %0,%0"
++ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
++ return oldbit;
++}
++
++static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
++{
++ int oldbit;
++
++ __asm__ __volatile__ (
++ "lock btcl %2,%1\n\tsbbl %0,%0"
++ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
++ return oldbit;
++}
++
++struct __synch_xchg_dummy { unsigned long a[100]; };
++#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
++
++#define synch_cmpxchg(ptr, old, new) \
++((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
++ (unsigned long)(old), \
++ (unsigned long)(new), \
++ sizeof(*(ptr))))
++
++static inline unsigned long __synch_cmpxchg(volatile void *ptr,
++ unsigned long old,
++ unsigned long new, int size)
++{
++ unsigned long prev;
++ switch (size) {
++ case 1:
++ __asm__ __volatile__("lock; cmpxchgb %b1,%2"
++ : "=a"(prev)
++ : "q"(new), "m"(*__synch_xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++ case 2:
++ __asm__ __volatile__("lock; cmpxchgw %w1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__synch_xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++#ifdef CONFIG_X86_64
++ case 4:
++ __asm__ __volatile__("lock; cmpxchgl %k1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__synch_xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++ case 8:
++ __asm__ __volatile__("lock; cmpxchgq %1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__synch_xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++#else
++ case 4:
++ __asm__ __volatile__("lock; cmpxchgl %1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__synch_xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++#endif
++ }
++ return old;
++}
++
++static __always_inline int synch_const_test_bit(int nr,
++ const volatile void * addr)
++{
++ return ((1UL << (nr & 31)) &
++ (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
++}
++
++static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
++{
++ int oldbit;
++ __asm__ __volatile__ (
++ "btl %2,%1\n\tsbbl %0,%0"
++ : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
++ return oldbit;
++}
++
++#define synch_test_bit(nr,addr) \
++(__builtin_constant_p(nr) ? \
++ synch_const_test_bit((nr),(addr)) : \
++ synch_var_test_bit((nr),(addr)))
++
++#define synch_cmpxchg_subword synch_cmpxchg
++
++#endif /* __XEN_SYNCH_BITOPS_H__ */
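
The locked bit operations and synch_cmpxchg() above exist so that updates to memory shared with Xen (or other guests) are full atomic read-modify-write operations. A hedged userspace sketch of the same semantics, using compiler builtins instead of the inline assembly (all demo_* names are invented):

    #include <stdio.h>

    static unsigned long demo_flags;   /* stands in for a shared-info field */

    static int demo_test_and_set_bit(int nr, unsigned long *addr)
    {
        unsigned long mask = 1UL << nr;
        unsigned long old = __atomic_fetch_or(addr, mask, __ATOMIC_SEQ_CST);
        return (old & mask) != 0;
    }

    static unsigned long demo_cmpxchg(unsigned long *ptr,
                                      unsigned long old, unsigned long new)
    {
        /* on failure 'old' is updated to the current value, so either
         * way we return the previous contents, like cmpxchg does */
        __atomic_compare_exchange_n(ptr, &old, new, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return old;
    }

    int main(void)
    {
        printf("first set of bit 3: was %d\n", demo_test_and_set_bit(3, &demo_flags));
        printf("second set of bit 3: was %d\n", demo_test_and_set_bit(3, &demo_flags));
        unsigned long prev = demo_cmpxchg(&demo_flags, 8, 7);
        printf("cmpxchg 8->7: prev %lu, now %lu\n", prev, demo_flags);
        return 0;
    }
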
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/system.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,488 @@
++#ifndef __ASM_SYSTEM_H
++#define __ASM_SYSTEM_H
++
++#include <linux/kernel.h>
++#include <asm/segment.h>
++#include <asm/cpufeature.h>
++#include <linux/bitops.h> /* for LOCK_PREFIX */
++#include <asm/synch_bitops.h>
++#include <asm/hypervisor.h>
++
++#ifdef __KERNEL__
++
++struct task_struct; /* one of the stranger aspects of C forward declarations.. */
++extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
++
++/*
++ * Saving eflags is important. It switches not only IOPL between tasks,
++ * it also protects other tasks from NT leaking through sysenter etc.
++ */
++#define switch_to(prev,next,last) do { \
++ unsigned long esi,edi; \
++ asm volatile("pushfl\n\t" /* Save flags */ \
++ "pushl %%ebp\n\t" \
++ "movl %%esp,%0\n\t" /* save ESP */ \
++ "movl %5,%%esp\n\t" /* restore ESP */ \
++ "movl $1f,%1\n\t" /* save EIP */ \
++ "pushl %6\n\t" /* restore EIP */ \
++ "jmp __switch_to\n" \
++ "1:\t" \
++ "popl %%ebp\n\t" \
++ "popfl" \
++ :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
++ "=a" (last),"=S" (esi),"=D" (edi) \
++ :"m" (next->thread.esp),"m" (next->thread.eip), \
++ "2" (prev), "d" (next)); \
++} while (0)
++
++#define _set_base(addr,base) do { unsigned long __pr; \
++__asm__ __volatile__ ("movw %%dx,%1\n\t" \
++ "rorl $16,%%edx\n\t" \
++ "movb %%dl,%2\n\t" \
++ "movb %%dh,%3" \
++ :"=&d" (__pr) \
++ :"m" (*((addr)+2)), \
++ "m" (*((addr)+4)), \
++ "m" (*((addr)+7)), \
++ "0" (base) \
++ ); } while(0)
++
++#define _set_limit(addr,limit) do { unsigned long __lr; \
++__asm__ __volatile__ ("movw %%dx,%1\n\t" \
++ "rorl $16,%%edx\n\t" \
++ "movb %2,%%dh\n\t" \
++ "andb $0xf0,%%dh\n\t" \
++ "orb %%dh,%%dl\n\t" \
++ "movb %%dl,%2" \
++ :"=&d" (__lr) \
++ :"m" (*(addr)), \
++ "m" (*((addr)+6)), \
++ "0" (limit) \
++ ); } while(0)
++
++#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
++#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
++
++/*
++ * Load a segment. Fall back on loading the zero
++ * segment if something goes wrong..
++ */
++#define loadsegment(seg,value) \
++ asm volatile("\n" \
++ "1:\t" \
++ "mov %0,%%" #seg "\n" \
++ "2:\n" \
++ ".section .fixup,\"ax\"\n" \
++ "3:\t" \
++ "pushl $0\n\t" \
++ "popl %%" #seg "\n\t" \
++ "jmp 2b\n" \
++ ".previous\n" \
++ ".section __ex_table,\"a\"\n\t" \
++ ".align 4\n\t" \
++ ".long 1b,3b\n" \
++ ".previous" \
++ : :"rm" (value))
++
++/*
++ * Save a segment register away
++ */
++#define savesegment(seg, value) \
++ asm volatile("mov %%" #seg ",%0":"=rm" (value))
++
++#define read_cr0() ({ \
++ unsigned int __dummy; \
++ __asm__ __volatile__( \
++ "movl %%cr0,%0\n\t" \
++ :"=r" (__dummy)); \
++ __dummy; \
++})
++#define write_cr0(x) \
++ __asm__ __volatile__("movl %0,%%cr0": :"r" (x))
++
++#define read_cr2() (current_vcpu_info()->arch.cr2)
++#define write_cr2(x) \
++ __asm__ __volatile__("movl %0,%%cr2": :"r" (x))
++
++#define read_cr3() ({ \
++ unsigned int __dummy; \
++ __asm__ ( \
++ "movl %%cr3,%0\n\t" \
++ :"=r" (__dummy)); \
++ __dummy = xen_cr3_to_pfn(__dummy); \
++ mfn_to_pfn(__dummy) << PAGE_SHIFT; \
++})
++#define write_cr3(x) ({ \
++ unsigned int __dummy = pfn_to_mfn((x) >> PAGE_SHIFT); \
++ __dummy = xen_pfn_to_cr3(__dummy); \
++ __asm__ __volatile__("movl %0,%%cr3": :"r" (__dummy)); \
++})
++#define read_cr4() ({ \
++ unsigned int __dummy; \
++ __asm__( \
++ "movl %%cr4,%0\n\t" \
++ :"=r" (__dummy)); \
++ __dummy; \
++})
++#define read_cr4_safe() ({ \
++ unsigned int __dummy; \
++ /* This could fault if %cr4 does not exist */ \
++ __asm__("1: movl %%cr4, %0 \n" \
++ "2: \n" \
++ ".section __ex_table,\"a\" \n" \
++ ".long 1b,2b \n" \
++ ".previous \n" \
++ : "=r" (__dummy): "0" (0)); \
++ __dummy; \
++})
++
++#define write_cr4(x) \
++ __asm__ __volatile__("movl %0,%%cr4": :"r" (x))
++
++/*
++ * Clear and set 'TS' bit respectively
++ */
++#define clts() (HYPERVISOR_fpu_taskswitch(0))
++#define stts() (HYPERVISOR_fpu_taskswitch(1))
++
++#endif /* __KERNEL__ */
++
++#define wbinvd() \
++ __asm__ __volatile__ ("wbinvd": : :"memory")
++
++static inline unsigned long get_limit(unsigned long segment)
++{
++ unsigned long __limit;
++ __asm__("lsll %1,%0"
++ :"=r" (__limit):"r" (segment));
++ return __limit+1;
++}
++
++#define nop() __asm__ __volatile__ ("nop")
++
++#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
++
++#define tas(ptr) (xchg((ptr),1))
++
++struct __xchg_dummy { unsigned long a[100]; };
++#define __xg(x) ((struct __xchg_dummy *)(x))
++
++
++#ifdef CONFIG_X86_CMPXCHG64
++
++/*
++ * The semantics of CMPXCHG8B are a bit strange, which is why
++ * there is a loop and the loading of %%eax and %%edx has to
++ * be inside. This inlines well in most cases, the cached
++ * cost is around ~38 cycles. (in the future we might want
++ * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
++ * might have an implicit FPU-save as a cost, so it's not
++ * clear which path to go.)
++ *
++ * cmpxchg8b must be used with the lock prefix here to allow
++ * the instruction to be executed atomically, see page 3-102
++ * of the instruction set reference 24319102.pdf. We need
++ * the reader side to see the coherent 64bit value.
++ */
++static inline void __set_64bit (unsigned long long * ptr,
++ unsigned int low, unsigned int high)
++{
++ __asm__ __volatile__ (
++ "\n1:\t"
++ "movl (%0), %%eax\n\t"
++ "movl 4(%0), %%edx\n\t"
++ "lock cmpxchg8b (%0)\n\t"
++ "jnz 1b"
++ : /* no outputs */
++ : "D"(ptr),
++ "b"(low),
++ "c"(high)
++ : "ax","dx","memory");
++}
++
++static inline void __set_64bit_constant (unsigned long long *ptr,
++ unsigned long long value)
++{
++ __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
++}
++#define ll_low(x) *(((unsigned int*)&(x))+0)
++#define ll_high(x) *(((unsigned int*)&(x))+1)
++
++static inline void __set_64bit_var (unsigned long long *ptr,
++ unsigned long long value)
++{
++ __set_64bit(ptr,ll_low(value), ll_high(value));
++}
++
++#define set_64bit(ptr,value) \
++(__builtin_constant_p(value) ? \
++ __set_64bit_constant(ptr, value) : \
++ __set_64bit_var(ptr, value) )
++
++#define _set_64bit(ptr,value) \
++(__builtin_constant_p(value) ? \
++ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
++ __set_64bit(ptr, ll_low(value), ll_high(value)) )
++
++#endif
++
++/*
++ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
++ * Note 2: xchg has a side effect, so the volatile attribute is necessary,
++ * but generally the primitive is invalid, *ptr is output argument. --ANK
++ */
++static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
++{
++ switch (size) {
++ case 1:
++ __asm__ __volatile__("xchgb %b0,%1"
++ :"=q" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ case 2:
++ __asm__ __volatile__("xchgw %w0,%1"
++ :"=r" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ case 4:
++ __asm__ __volatile__("xchgl %0,%1"
++ :"=r" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ }
++ return x;
++}
++
++/*
++ * Atomic compare and exchange. Compare OLD with MEM, if identical,
++ * store NEW in MEM. Return the initial value in MEM. Success is
++ * indicated by comparing RETURN with OLD.
++ */
++
++#ifdef CONFIG_X86_CMPXCHG
++#define __HAVE_ARCH_CMPXCHG 1
++#define cmpxchg(ptr,o,n)\
++ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
++ (unsigned long)(n),sizeof(*(ptr))))
++#endif
++
++static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
++ unsigned long new, int size)
++{
++ unsigned long prev;
++ switch (size) {
++ case 1:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
++ : "=a"(prev)
++ : "q"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 2:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 4:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ }
++ return old;
++}
++
++#ifndef CONFIG_X86_CMPXCHG
++/*
++ * Building a kernel capable of running on an 80386. It may be necessary to
++ * simulate the cmpxchg on the 80386 CPU. For that purpose we define
++ * a function for each of the sizes we support.
++ */
++
++extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
++extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
++extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
++
++static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
++ unsigned long new, int size)
++{
++ switch (size) {
++ case 1:
++ return cmpxchg_386_u8(ptr, old, new);
++ case 2:
++ return cmpxchg_386_u16(ptr, old, new);
++ case 4:
++ return cmpxchg_386_u32(ptr, old, new);
++ }
++ return old;
++}
++
++#define cmpxchg(ptr,o,n) \
++({ \
++ __typeof__(*(ptr)) __ret; \
++ if (likely(boot_cpu_data.x86 > 3)) \
++ __ret = __cmpxchg((ptr), (unsigned long)(o), \
++ (unsigned long)(n), sizeof(*(ptr))); \
++ else \
++ __ret = cmpxchg_386((ptr), (unsigned long)(o), \
++ (unsigned long)(n), sizeof(*(ptr))); \
++ __ret; \
++})
++#endif
++
++#ifdef CONFIG_X86_CMPXCHG64
++
++static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
++ unsigned long long new)
++{
++ unsigned long long prev;
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
++ : "=A"(prev)
++ : "b"((unsigned long)new),
++ "c"((unsigned long)(new >> 32)),
++ "m"(*__xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++}
++
++#define cmpxchg64(ptr,o,n)\
++ ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
++ (unsigned long long)(n)))
++
++#endif
++
++/*
++ * Force strict CPU ordering.
++ * And yes, this is required on UP too when we're talking
++ * to devices.
++ *
++ * For now, "wmb()" doesn't actually do anything, as all
++ * Intel CPU's follow what Intel calls a *Processor Order*,
++ * in which all writes are seen in the program order even
++ * outside the CPU.
++ *
++ * I expect future Intel CPU's to have a weaker ordering,
++ * but I'd also expect them to finally get their act together
++ * and add some real memory barriers if so.
++ *
++ * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
++ * nop for these.
++ */
++
++
++/*
++ * Actually only lfence would be needed for mb() because all stores done
++ * by the kernel should be already ordered. But keep a full barrier for now.
++ */
++
++#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
++#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
++
++/**
++ * read_barrier_depends - Flush all pending reads that subsequent reads
++ * depend on.
++ *
++ * No data-dependent reads from memory-like regions are ever reordered
++ * over this barrier. All reads preceding this primitive are guaranteed
++ * to access memory (but not necessarily other CPUs' caches) before any
++ * reads following this primitive that depend on the data returned by
++ * any of the preceding reads. This primitive is much lighter weight than
++ * rmb() on most CPUs, and is never heavier weight than is
++ * rmb().
++ *
++ * These ordering constraints are respected by both the local CPU
++ * and the compiler.
++ *
++ * Ordering is not guaranteed by anything other than these primitives,
++ * not even by data dependencies. See the documentation for
++ * memory_barrier() for examples and URLs to more information.
++ *
++ * For example, the following code would force ordering (the initial
++ * value of "a" is zero, "b" is one, and "p" is "&a"):
++ *
++ * <programlisting>
++ * CPU 0 CPU 1
++ *
++ * b = 2;
++ * memory_barrier();
++ * p = &b; q = p;
++ * read_barrier_depends();
++ * d = *q;
++ * </programlisting>
++ *
++ * because the read of "*q" depends on the read of "p" and these
++ * two reads are separated by a read_barrier_depends(). However,
++ * the following code, with the same initial values for "a" and "b":
++ *
++ * <programlisting>
++ * CPU 0 CPU 1
++ *
++ * a = 2;
++ * memory_barrier();
++ * b = 3; y = b;
++ * read_barrier_depends();
++ * x = a;
++ * </programlisting>
++ *
++ * does not enforce ordering, since there is no data dependency between
++ * the read of "a" and the read of "b". Therefore, on some CPUs, such
++ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
++ * in cases like this where there are no data dependencies.
++ **/
++
++#define read_barrier_depends() do { } while(0)
++
++#ifdef CONFIG_X86_OOSTORE
++/* Actually there are no OOO store capable CPUs for now that do SSE,
++ but make it already a possibility. */
++#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
++#else
++#define wmb() __asm__ __volatile__ ("": : :"memory")
++#endif
++
++#ifdef CONFIG_SMP
++#define smp_mb() mb()
++#define smp_rmb() rmb()
++#define smp_wmb() wmb()
++#define smp_read_barrier_depends() read_barrier_depends()
++#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
++#else
++#define smp_mb() barrier()
++#define smp_rmb() barrier()
++#define smp_wmb() barrier()
++#define smp_read_barrier_depends() do { } while(0)
++#define set_mb(var, value) do { var = value; barrier(); } while (0)
++#endif
++
++#include <linux/irqflags.h>
++
++/*
++ * disable hlt during certain critical i/o operations
++ */
++#define HAVE_DISABLE_HLT
++void disable_hlt(void);
++void enable_hlt(void);
++
++extern int es7000_plat;
++void cpu_idle_wait(void);
++
++/*
++ * On SMP systems, when the scheduler does migration-cost autodetection,
++ * it needs a way to flush as much of the CPU's caches as possible:
++ */
++static inline void sched_cacheflush(void)
++{
++ wbinvd();
++}
++
++extern unsigned long arch_align_stack(unsigned long sp);
++extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
++
++void default_idle(void);
++
++#endif
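
The comment above describes the classic cmpxchg() contract: return the old value, and let the caller detect success by comparing it against what it expected. A typical retry-loop usage looks roughly like the standalone sketch below (a userspace model with a GCC builtin, not the kernel macro itself):

    #include <stdio.h>

    static unsigned long demo_counter;

    static void demo_add(unsigned long *ptr, unsigned long delta)
    {
        unsigned long old, new;
        do {
            old = *ptr;
            new = old + delta;
            /* succeeds only if *ptr is still 'old'; otherwise retry */
        } while (!__atomic_compare_exchange_n(ptr, &old, new, 0,
                                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
    }

    int main(void)
    {
        demo_add(&demo_counter, 5);
        demo_add(&demo_counter, 7);
        printf("counter = %lu\n", demo_counter);   /* 12 */
        return 0;
    }
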
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/tlbflush.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,101 @@
++#ifndef _I386_TLBFLUSH_H
++#define _I386_TLBFLUSH_H
++
++#include <linux/mm.h>
++#include <asm/processor.h>
++
++#define __flush_tlb() xen_tlb_flush()
++#define __flush_tlb_global() xen_tlb_flush()
++#define __flush_tlb_all() xen_tlb_flush()
++
++extern unsigned long pgkern_mask;
++
++#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
++
++#define __flush_tlb_single(addr) xen_invlpg(addr)
++
++#define __flush_tlb_one(addr) __flush_tlb_single(addr)
++
++/*
++ * TLB flushing:
++ *
++ * - flush_tlb() flushes the current mm struct TLBs
++ * - flush_tlb_all() flushes all processes TLBs
++ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
++ * - flush_tlb_page(vma, vmaddr) flushes one page
++ * - flush_tlb_range(vma, start, end) flushes a range of pages
++ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
++ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
++ *
++ * ..but the i386 has somewhat limited tlb flushing capabilities,
++ * and page-granular flushes are available only on i486 and up.
++ */
++
++#ifndef CONFIG_SMP
++
++#define flush_tlb() __flush_tlb()
++#define flush_tlb_all() __flush_tlb_all()
++#define local_flush_tlb() __flush_tlb()
++
++static inline void flush_tlb_mm(struct mm_struct *mm)
++{
++ if (mm == current->active_mm)
++ __flush_tlb();
++}
++
++static inline void flush_tlb_page(struct vm_area_struct *vma,
++ unsigned long addr)
++{
++ if (vma->vm_mm == current->active_mm)
++ __flush_tlb_one(addr);
++}
++
++static inline void flush_tlb_range(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end)
++{
++ if (vma->vm_mm == current->active_mm)
++ __flush_tlb();
++}
++
++#else
++
++#include <asm/smp.h>
++
++#define local_flush_tlb() \
++ __flush_tlb()
++
++extern void flush_tlb_all(void);
++extern void flush_tlb_current_task(void);
++extern void flush_tlb_mm(struct mm_struct *);
++extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
++
++#define flush_tlb() flush_tlb_current_task()
++
++static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
++{
++ flush_tlb_mm(vma->vm_mm);
++}
++
++#define TLBSTATE_OK 1
++#define TLBSTATE_LAZY 2
++
++struct tlb_state
++{
++ struct mm_struct *active_mm;
++ int state;
++ char __cacheline_padding[L1_CACHE_BYTES-8];
++};
++DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
++
++
++#endif
++
++#define flush_tlb_kernel_range(start, end) flush_tlb_all()
++
++static inline void flush_tlb_pgtables(struct mm_struct *mm,
++ unsigned long start, unsigned long end)
++{
++ /* i386 does not keep any page table caches in TLB */
++}
++
++#endif /* _I386_TLBFLUSH_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/vga.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,20 @@
++/*
++ * Access to VGA videoram
++ *
++ * (c) 1998 Martin Mares <mj@ucw.cz>
++ */
++
++#ifndef _LINUX_ASM_VGA_H_
++#define _LINUX_ASM_VGA_H_
++
++/*
++ * On the PC, we can just recalculate addresses and then
++ * access the videoram directly without any black magic.
++ */
++
++#define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x)
++
++#define vga_readb(x) (*(x))
++#define vga_writeb(x,y) (*(y) = (x))
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/xenoprof.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,48 @@
++/******************************************************************************
++ * asm-i386/mach-xen/asm/xenoprof.h
++ *
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef __ASM_XENOPROF_H__
++#define __ASM_XENOPROF_H__
++#ifdef CONFIG_XEN
++
++struct super_block;
++struct dentry;
++int xenoprof_create_files(struct super_block * sb, struct dentry * root);
++#define HAVE_XENOPROF_CREATE_FILES
++
++struct xenoprof_init;
++void xenoprof_arch_init_counter(struct xenoprof_init *init);
++void xenoprof_arch_counter(void);
++void xenoprof_arch_start(void);
++void xenoprof_arch_stop(void);
++
++struct xenoprof_arch_shared_buffer {
++ /* nothing */
++};
++struct xenoprof_shared_buffer;
++void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf);
++struct xenoprof_get_buffer;
++int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer, struct xenoprof_shared_buffer* sbuf);
++struct xenoprof_passive;
++int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain, struct xenoprof_shared_buffer* sbuf);
++
++#endif /* CONFIG_XEN */
++#endif /* __ASM_XENOPROF_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/irq_vectors.h 2007-08-27 14:01:51.000000000 -0400
+@@ -0,0 +1,125 @@
++/*
++ * This file should contain #defines for all of the interrupt vector
++ * numbers used by this architecture.
++ *
++ * In addition, there are some standard defines:
++ *
++ * FIRST_EXTERNAL_VECTOR:
++ * The first free place for external interrupts
++ *
++ * SYSCALL_VECTOR:
++ * The vector used for the user-to-kernel transition made
++ * by a syscall.
++ *
++ * TIMER_IRQ:
++ * The IRQ number the timer interrupt comes in at.
++ *
++ * NR_IRQS:
++ * The total number of interrupt vectors (including all the
++ * architecture specific interrupts) needed.
++ *
++ */
++#ifndef _ASM_IRQ_VECTORS_H
++#define _ASM_IRQ_VECTORS_H
++
++/*
++ * IDT vectors usable for external interrupt sources start
++ * at 0x20:
++ */
++#define FIRST_EXTERNAL_VECTOR 0x20
++
++#define SYSCALL_VECTOR 0x80
++
++/*
++ * Vectors 0x20-0x2f are used for ISA interrupts.
++ */
++
++#if 0
++/*
++ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
++ *
++ * some of the following vectors are 'rare', they are merged
++ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
++ * TLB, reschedule and local APIC vectors are performance-critical.
++ *
++ * Vectors 0xf0-0xfa are free (reserved for future Linux use).
++ */
++#define SPURIOUS_APIC_VECTOR 0xff
++#define ERROR_APIC_VECTOR 0xfe
++#define INVALIDATE_TLB_VECTOR 0xfd
++#define RESCHEDULE_VECTOR 0xfc
++#define CALL_FUNCTION_VECTOR 0xfb
++
++#define THERMAL_APIC_VECTOR 0xf0
++/*
++ * Local APIC timer IRQ vector is on a different priority level,
++ * to work around the 'lost local interrupt if more than 2 IRQ
++ * sources per level' errata.
++ */
++#define LOCAL_TIMER_VECTOR 0xef
++#endif
++
++#define SPURIOUS_APIC_VECTOR 0xff
++#define ERROR_APIC_VECTOR 0xfe
++
++/*
++ * First APIC vector available to drivers: (vectors 0x30-0xee)
++ * we start at 0x31 to spread out vectors evenly between priority
++ * levels. (0x80 is the syscall vector)
++ */
++#define FIRST_DEVICE_VECTOR 0x31
++#define FIRST_SYSTEM_VECTOR 0xef
++
++/*
++ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
++ * Right now the APIC is mostly only used for SMP.
++ * 256 vectors is an architectural limit. (we can have
++ * more than 256 devices theoretically, but they will
++ * have to use shared interrupts)
++ * Since vectors 0x00-0x1f are used/reserved for the CPU,
++ * the usable vector space is 0x20-0xff (224 vectors)
++ */
++
++#define RESCHEDULE_VECTOR 0
++#define CALL_FUNCTION_VECTOR 1
++#define NR_IPIS 2
++
++/*
++ * The maximum number of vectors supported by i386 processors
++ * is limited to 256. For processors other than i386, NR_VECTORS
++ * should be changed accordingly.
++ */
++#define NR_VECTORS 256
++
++#define FPU_IRQ 13
++
++#define FIRST_VM86_IRQ 3
++#define LAST_VM86_IRQ 15
++#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
++
++/*
++ * The flat IRQ space is divided into two regions:
++ * 1. A one-to-one mapping of real physical IRQs. This space is only used
++ * if we have physical device-access privilege. This region is at the
++ * start of the IRQ space so that existing device drivers do not need
++ * to be modified to translate physical IRQ numbers into our IRQ space.
++ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
++ * are bound using the provided bind/unbind functions.
++ */
++
++#define PIRQ_BASE 0
++#define NR_PIRQS 256
++
++#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
++#define NR_DYNIRQS 256
++
++#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
++#define NR_IRQ_VECTORS NR_IRQS
++
++#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
++#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
++
++#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
++#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
++
++#endif /* _ASM_IRQ_VECTORS_H */
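
To make the layout concrete, the constants above give a flat space of 512 IRQs: physical IRQs 0-255 first, then dynamically bound (event-channel) IRQs 256-511. A tiny self-contained check of that arithmetic, with the constants copied from this header:

    #include <stdio.h>

    #define PIRQ_BASE    0
    #define NR_PIRQS     256
    #define DYNIRQ_BASE  (PIRQ_BASE + NR_PIRQS)
    #define NR_DYNIRQS   256
    #define NR_IRQS      (NR_PIRQS + NR_DYNIRQS)

    #define pirq_to_irq(x)   ((x) + PIRQ_BASE)
    #define dynirq_to_irq(x) ((x) + DYNIRQ_BASE)

    int main(void)
    {
        printf("NR_IRQS        = %d\n", NR_IRQS);           /* 512 */
        printf("pirq 14  -> irq %d\n", pirq_to_irq(14));    /* 14  */
        printf("dynirq 3 -> irq %d\n", dynirq_to_irq(3));   /* 259 */
        return 0;
    }
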
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/mach_traps.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,33 @@
++/*
++ * include/asm-xen/asm-i386/mach-xen/mach_traps.h
++ *
++ * Machine specific NMI handling for Xen
++ */
++#ifndef _MACH_TRAPS_H
++#define _MACH_TRAPS_H
++
++#include <linux/bitops.h>
++#include <xen/interface/nmi.h>
++
++static inline void clear_mem_error(unsigned char reason) {}
++static inline void clear_io_check_error(unsigned char reason) {}
++
++static inline unsigned char get_nmi_reason(void)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ unsigned char reason = 0;
++
++ /* construct a value which looks like it came from
++ * port 0x61.
++ */
++ if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
++ reason |= 0x40;
++ if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
++ reason |= 0x80;
++
++ return reason;
++}
++
++static inline void reassert_nmi(void) {}
++
++#endif /* !_MACH_TRAPS_H */
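
Callers of get_nmi_reason() then test the synthetic bits exactly as they would the real port-0x61 value (0x80 for a memory parity error, 0x40 for an I/O check error). A minimal illustration of that decoding, with an invented demo_ helper name:

    #include <stdio.h>

    static void demo_decode_nmi_reason(unsigned char reason)
    {
        if (reason & 0x80)
            printf("NMI: memory parity error reported\n");
        if (reason & 0x40)
            printf("NMI: I/O check error reported\n");
        if (!(reason & 0xc0))
            printf("NMI: unknown reason %#x\n", reason);
    }

    int main(void)
    {
        demo_decode_nmi_reason(0x80);
        demo_decode_nmi_reason(0x40);
        demo_decode_nmi_reason(0x00);
        return 0;
    }
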
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/setup_arch.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,5 @@
++/* Hook to call BIOS initialisation function */
++
++#define ARCH_SETUP machine_specific_arch_setup();
++
++void __init machine_specific_arch_setup(void);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/agp.h 2007-08-27 14:01:58.000000000 -0400
+@@ -0,0 +1,35 @@
++#ifndef AGP_H
++#define AGP_H 1
++
++#include <asm/cacheflush.h>
++#include <asm/system.h>
++
++/*
++ * Functions to keep the agpgart mappings coherent.
++ * The GART gives the CPU a physical alias of memory. The alias is
++ * mapped uncacheable. Make sure there are no conflicting mappings
++ * with different cachability attributes for the same page.
++ */
++
++int map_page_into_agp(struct page *page);
++int unmap_page_from_agp(struct page *page);
++#define flush_agp_mappings() global_flush_tlb()
++
++/* Could use CLFLUSH here if the cpu supports it. But then it would
++ need to be called for each cacheline of the whole page so it may not be
++ worth it. Would need a page for it. */
++#define flush_agp_cache() wbinvd()
++
++/* Convert a physical address to an address suitable for the GART. */
++#define phys_to_gart(x) phys_to_machine(x)
++#define gart_to_phys(x) machine_to_phys(x)
++
++/* GATT allocation. Returns/accepts GATT kernel virtual address. */
++#define alloc_gatt_pages(order) ({ \
++ char *_t; dma_addr_t _d; \
++ _t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL); \
++ _t; })
++#define free_gatt_pages(table, order) \
++ dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/arch_hooks.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,27 @@
++#ifndef _ASM_ARCH_HOOKS_H
++#define _ASM_ARCH_HOOKS_H
++
++#include <linux/interrupt.h>
++
++/*
++ * linux/include/asm/arch_hooks.h
++ *
++ * define the architecture specific hooks
++ */
++
++/* these aren't arch hooks, they are generic routines
++ * that can be used by the hooks */
++extern void init_ISA_irqs(void);
++extern void apic_intr_init(void);
++extern void smp_intr_init(void);
++extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
++
++/* these are the defined hooks */
++extern void intr_init_hook(void);
++extern void pre_intr_init_hook(void);
++extern void pre_setup_arch_hook(void);
++extern void trap_init_hook(void);
++extern void time_init_hook(void);
++extern void mca_nmi_hook(void);
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/bootsetup.h 2007-08-27 14:01:59.000000000 -0400
+@@ -0,0 +1,42 @@
++
++#ifndef _X86_64_BOOTSETUP_H
++#define _X86_64_BOOTSETUP_H 1
++
++#define BOOT_PARAM_SIZE 4096
++extern char x86_boot_params[BOOT_PARAM_SIZE];
++
++/*
++ * This is set up by the setup-routine at boot-time
++ */
++#define PARAM ((unsigned char *)x86_boot_params)
++#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
++#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
++#define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0))
++#define E820_MAP_NR (*(char*) (PARAM+E820NR))
++#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
++#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
++#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
++#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
++#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
++#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
++#define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
++#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
++#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
++#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
++#define KERNEL_START (*(unsigned int *) (PARAM+0x214))
++
++#define INITRD_START (__pa(xen_start_info->mod_start))
++#define INITRD_SIZE (xen_start_info->mod_len)
++#define EDID_INFO (*(struct edid_info *) (PARAM+0x440))
++
++#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
++#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
++#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
++#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
++#define COMMAND_LINE saved_command_line
++
++#define RAMDISK_IMAGE_START_MASK 0x07FF
++#define RAMDISK_PROMPT_FLAG 0x8000
++#define RAMDISK_LOAD_FLAG 0x4000
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/desc.h 2007-08-27 14:02:01.000000000 -0400
+@@ -0,0 +1,263 @@
++/* Written 2000 by Andi Kleen */
++#ifndef __ARCH_DESC_H
++#define __ARCH_DESC_H
++
++#include <linux/threads.h>
++#include <asm/ldt.h>
++
++#ifndef __ASSEMBLY__
++
++#include <linux/string.h>
++#include <linux/smp.h>
++
++#include <asm/segment.h>
++#include <asm/mmu.h>
++
++// 8 byte segment descriptor
++struct desc_struct {
++ u16 limit0;
++ u16 base0;
++ unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
++ unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
++} __attribute__((packed));
++
++struct n_desc_struct {
++ unsigned int a,b;
++};
++
++enum {
++ GATE_INTERRUPT = 0xE,
++ GATE_TRAP = 0xF,
++ GATE_CALL = 0xC,
++};
++
++// 16byte gate
++struct gate_struct {
++ u16 offset_low;
++ u16 segment;
++ unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
++ u16 offset_middle;
++ u32 offset_high;
++ u32 zero1;
++} __attribute__((packed));
++
++#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
++#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
++#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
++
++enum {
++ DESC_TSS = 0x9,
++ DESC_LDT = 0x2,
++};
++
++// LDT or TSS descriptor in the GDT. 16 bytes.
++struct ldttss_desc {
++ u16 limit0;
++ u16 base0;
++ unsigned base1 : 8, type : 5, dpl : 2, p : 1;
++ unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
++ u32 base3;
++ u32 zero1;
++} __attribute__((packed));
++
++struct desc_ptr {
++ unsigned short size;
++ unsigned long address;
++} __attribute__((packed)) ;
++
++extern struct desc_ptr idt_descr, cpu_gdt_descr[NR_CPUS];
++
++extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
++
++#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
++#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
++
++static inline void clear_LDT(void)
++{
++ int cpu = get_cpu();
++
++ /*
++ * NB. We load the default_ldt for lcall7/27 handling on demand, as
++ * it slows down context switching. No one uses it anyway.
++ */
++ cpu = cpu; /* XXX avoid compiler warning */
++ xen_set_ldt(0UL, 0);
++ put_cpu();
++}
++
++/*
++ * This is the ldt that every process will get unless we need
++ * something other than this.
++ */
++extern struct desc_struct default_ldt[];
++#ifndef CONFIG_X86_NO_IDT
++extern struct gate_struct idt_table[];
++#endif
++extern struct desc_ptr cpu_gdt_descr[];
++
++/* the cpu gdt accessor */
++#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
++
++static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
++{
++ struct gate_struct s;
++ s.offset_low = PTR_LOW(func);
++ s.segment = __KERNEL_CS;
++ s.ist = ist;
++ s.p = 1;
++ s.dpl = dpl;
++ s.zero0 = 0;
++ s.zero1 = 0;
++ s.type = type;
++ s.offset_middle = PTR_MIDDLE(func);
++ s.offset_high = PTR_HIGH(func);
++ /* does not need to be atomic because it is only done once at setup time */
++ memcpy(adr, &s, 16);
++}
++
++#ifndef CONFIG_X86_NO_IDT
++static inline void set_intr_gate(int nr, void *func)
++{
++ BUG_ON((unsigned)nr > 0xFF);
++ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
++}
++
++static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
++{
++ BUG_ON((unsigned)nr > 0xFF);
++ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
++}
++
++static inline void set_system_gate(int nr, void *func)
++{
++ BUG_ON((unsigned)nr > 0xFF);
++ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
++}
++
++static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
++{
++ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
++}
++#endif
++
++static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
++ unsigned size)
++{
++ struct ldttss_desc d;
++ memset(&d,0,sizeof(d));
++ d.limit0 = size & 0xFFFF;
++ d.base0 = PTR_LOW(tss);
++ d.base1 = PTR_MIDDLE(tss) & 0xFF;
++ d.type = type;
++ d.p = 1;
++ d.limit1 = (size >> 16) & 0xF;
++ d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
++ d.base3 = PTR_HIGH(tss);
++ memcpy(ptr, &d, 16);
++}
++
++#ifndef CONFIG_X86_NO_TSS
++static inline void set_tss_desc(unsigned cpu, void *addr)
++{
++ /*
++ * sizeof(unsigned long) coming from an extra "long" at the end
++ * of the iobitmap. See tss_struct definition in processor.h
++ *
++ * -1? seg base+limit should be pointing to the address of the
++ * last valid byte
++ */
++ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS],
++ (unsigned long)addr, DESC_TSS,
++ IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
++}
++#endif
++
++static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
++{
++ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
++ DESC_LDT, size * 8 - 1);
++}
++
++static inline void set_seg_base(unsigned cpu, int entry, void *base)
++{
++ struct desc_struct *d = &cpu_gdt(cpu)[entry];
++ u32 addr = (u32)(u64)base;
++ BUG_ON((u64)base >> 32);
++ d->base0 = addr & 0xffff;
++ d->base1 = (addr >> 16) & 0xff;
++ d->base2 = (addr >> 24) & 0xff;
++}
++
++#define LDT_entry_a(info) \
++ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
++/* Don't allow setting of the lm bit. It is useless anyway because
++ 64bit system calls require __USER_CS. */
++#define LDT_entry_b(info) \
++ (((info)->base_addr & 0xff000000) | \
++ (((info)->base_addr & 0x00ff0000) >> 16) | \
++ ((info)->limit & 0xf0000) | \
++ (((info)->read_exec_only ^ 1) << 9) | \
++ ((info)->contents << 10) | \
++ (((info)->seg_not_present ^ 1) << 15) | \
++ ((info)->seg_32bit << 22) | \
++ ((info)->limit_in_pages << 23) | \
++ ((info)->useable << 20) | \
++ /* ((info)->lm << 21) | */ \
++ 0x7000)
++
++#define LDT_empty(info) (\
++ (info)->base_addr == 0 && \
++ (info)->limit == 0 && \
++ (info)->contents == 0 && \
++ (info)->read_exec_only == 1 && \
++ (info)->seg_32bit == 0 && \
++ (info)->limit_in_pages == 0 && \
++ (info)->seg_not_present == 1 && \
++ (info)->useable == 0 && \
++ (info)->lm == 0)
++
++#if TLS_SIZE != 24
++# error update this code.
++#endif
++
++static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
++{
++#if 0
++ u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
++ gdt[0] = t->tls_array[0];
++ gdt[1] = t->tls_array[1];
++ gdt[2] = t->tls_array[2];
++#endif
++#define C(i) \
++ HYPERVISOR_update_descriptor(virt_to_machine(&cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]), t->tls_array[i])
++
++ C(0); C(1); C(2);
++#undef C
++}
++
++/*
++ * load one particular LDT into the current CPU
++ */
++static inline void load_LDT_nolock (mm_context_t *pc, int cpu)
++{
++ void *segments = pc->ldt;
++ int count = pc->size;
++
++ if (likely(!count))
++ segments = NULL;
++
++ xen_set_ldt((unsigned long)segments, count);
++}
++
++static inline void load_LDT(mm_context_t *pc)
++{
++ int cpu = get_cpu();
++ load_LDT_nolock(pc, cpu);
++ put_cpu();
++}
++
++extern struct desc_ptr idt_descr;
++
++#endif /* !__ASSEMBLY__ */
++
++#endif
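
The gate and descriptor setters above mostly shuffle an address into the split low/middle/high fields of the 16-byte structures. A small standalone check of the PTR_LOW/PTR_MIDDLE/PTR_HIGH split and its reassembly, assuming an LP64 build (the example address is made up):

    #include <stdio.h>
    #include <stdint.h>

    #define PTR_LOW(x)    ((unsigned long)(x) & 0xFFFF)
    #define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
    #define PTR_HIGH(x)   ((unsigned long)(x) >> 32)

    int main(void)
    {
        unsigned long handler = 0xffffffff81234567UL;  /* example address */
        uint64_t rebuilt = PTR_LOW(handler)
                         | (uint64_t)PTR_MIDDLE(handler) << 16
                         | (uint64_t)PTR_HIGH(handler)   << 32;

        printf("low=%#lx middle=%#lx high=%#lx\n",
               PTR_LOW(handler), PTR_MIDDLE(handler), PTR_HIGH(handler));
        printf("rebuilt matches: %s\n", rebuilt == handler ? "yes" : "no");
        return 0;
    }
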
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/dma-mapping.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,207 @@
++#ifndef _X8664_DMA_MAPPING_H
++#define _X8664_DMA_MAPPING_H 1
++
++/*
++ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
++ * documentation.
++ */
++
++
++#include <asm/scatterlist.h>
++#include <asm/io.h>
++#include <asm/swiotlb.h>
++
++struct dma_mapping_ops {
++ int (*mapping_error)(dma_addr_t dma_addr);
++ void* (*alloc_coherent)(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp);
++ void (*free_coherent)(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_handle);
++ dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
++ size_t size, int direction);
++ /* like map_single, but doesn't check the device mask */
++ dma_addr_t (*map_simple)(struct device *hwdev, char *ptr,
++ size_t size, int direction);
++ void (*unmap_single)(struct device *dev, dma_addr_t addr,
++ size_t size, int direction);
++ void (*sync_single_for_cpu)(struct device *hwdev,
++ dma_addr_t dma_handle, size_t size,
++ int direction);
++ void (*sync_single_for_device)(struct device *hwdev,
++ dma_addr_t dma_handle, size_t size,
++ int direction);
++ void (*sync_single_range_for_cpu)(struct device *hwdev,
++ dma_addr_t dma_handle, unsigned long offset,
++ size_t size, int direction);
++ void (*sync_single_range_for_device)(struct device *hwdev,
++ dma_addr_t dma_handle, unsigned long offset,
++ size_t size, int direction);
++ void (*sync_sg_for_cpu)(struct device *hwdev,
++ struct scatterlist *sg, int nelems,
++ int direction);
++ void (*sync_sg_for_device)(struct device *hwdev,
++ struct scatterlist *sg, int nelems,
++ int direction);
++ int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
++ int nents, int direction);
++ void (*unmap_sg)(struct device *hwdev,
++ struct scatterlist *sg, int nents,
++ int direction);
++ int (*dma_supported)(struct device *hwdev, u64 mask);
++ int is_phys;
++};
++
++extern dma_addr_t bad_dma_address;
++extern struct dma_mapping_ops* dma_ops;
++extern int iommu_merge;
++
++static inline int valid_dma_direction(int dma_direction)
++{
++ return ((dma_direction == DMA_BIDIRECTIONAL) ||
++ (dma_direction == DMA_TO_DEVICE) ||
++ (dma_direction == DMA_FROM_DEVICE));
++}
++
++#if 0
++static inline int dma_mapping_error(dma_addr_t dma_addr)
++{
++ if (dma_ops->mapping_error)
++ return dma_ops->mapping_error(dma_addr);
++
++ return (dma_addr == bad_dma_address);
++}
++
++extern void *dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp);
++extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_handle);
++
++static inline dma_addr_t
++dma_map_single(struct device *hwdev, void *ptr, size_t size,
++ int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ return dma_ops->map_single(hwdev, ptr, size, direction);
++}
++
++static inline void
++dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
++ int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ dma_ops->unmap_single(dev, addr, size, direction);
++}
++
++#define dma_map_page(dev,page,offset,size,dir) \
++ dma_map_single((dev), page_address(page)+(offset), (size), (dir))
++
++#define dma_unmap_page dma_unmap_single
++
++static inline void
++dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
++ size_t size, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_single_for_cpu)
++ dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
++ direction);
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
++ size_t size, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_single_for_device)
++ dma_ops->sync_single_for_device(hwdev, dma_handle, size,
++ direction);
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
++ unsigned long offset, size_t size, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_single_range_for_cpu) {
++ dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
++ }
++
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
++ unsigned long offset, size_t size, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_single_range_for_device)
++ dma_ops->sync_single_range_for_device(hwdev, dma_handle,
++ offset, size, direction);
++
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_sg_for_cpu)
++ dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_sg_for_device) {
++ dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
++ }
++
++ flush_write_buffers();
++}
++
++static inline int
++dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ return dma_ops->map_sg(hwdev, sg, nents, direction);
++}
++
++static inline void
++dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++ int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ dma_ops->unmap_sg(hwdev, sg, nents, direction);
++}
++
++extern int dma_supported(struct device *hwdev, u64 mask);
++
++/* same for gart, swiotlb, and nommu */
++static inline int dma_get_cache_alignment(void)
++{
++ return boot_cpu_data.x86_clflush_size;
++}
++
++#define dma_is_consistent(h) 1
++
++extern int dma_set_mask(struct device *dev, u64 mask);
++
++static inline void
++dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
++{
++ flush_write_buffers();
++}
++
++extern struct device fallback_dev;
++extern int panic_on_overflow;
++#endif
++
++#endif /* _X8664_DMA_MAPPING_H */
++
++#include <asm-i386/mach-xen/asm/dma-mapping.h>
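The dma-mapping.h added above routes every streaming-DMA call through the platform dma_ops table (swiotlb under Xen), so drivers keep using the generic wrappers unchanged. A minimal caller sketch, assuming 'dev', 'buf' and 'len' come from the driver; example_map_for_device is an illustrative name, not part of the patch:

/* Sketch only: map one kernel buffer for device DMA via the wrappers above. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_map_for_device(struct device *dev, void *buf, size_t len,
                                  dma_addr_t *bus_out)
{
        dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(bus))     /* one-argument form, as defined above */
                return -ENOMEM;

        *bus_out = bus;                 /* program this bus address into the device;
                                         * pair with dma_unmap_single(dev, bus, len,
                                         * DMA_TO_DEVICE) when the transfer is done */
        return 0;
}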
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/e820.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,66 @@
++/*
++ * structures and definitions for the int 15, ax=e820 memory map
++ * scheme.
++ *
++ * In a nutshell, setup.S populates a scratch table in the
++ * empty_zero_block that contains a list of usable address/size
++ * duples. In setup.c, this information is transferred into the e820map,
++ * and in init.c/numa.c, that new information is used to mark pages
++ * reserved or not.
++ */
++#ifndef __E820_HEADER
++#define __E820_HEADER
++
++#include <linux/mmzone.h>
++
++#define E820MAP 0x2d0 /* our map */
++#define E820MAX 128 /* number of entries in E820MAP */
++#define E820NR 0x1e8 /* # entries in E820MAP */
++
++#define E820_RAM 1
++#define E820_RESERVED 2
++#define E820_ACPI 3 /* usable as RAM once ACPI tables have been read */
++#define E820_NVS 4
++
++#define HIGH_MEMORY (1024*1024)
++
++#define LOWMEMSIZE() (0x9f000)
++
++#ifndef __ASSEMBLY__
++struct e820entry {
++ u64 addr; /* start of memory segment */
++ u64 size; /* size of memory segment */
++ u32 type; /* type of memory segment */
++} __attribute__((packed));
++
++struct e820map {
++ int nr_map;
++ struct e820entry map[E820MAX];
++};
++
++extern unsigned long find_e820_area(unsigned long start, unsigned long end,
++ unsigned size);
++extern void add_memory_region(unsigned long start, unsigned long size,
++ int type);
++extern void setup_memory_region(void);
++extern void contig_e820_setup(void);
++extern unsigned long e820_end_of_ram(void);
++extern void e820_reserve_resources(struct e820entry *e820, int nr_map);
++extern void e820_print_map(char *who);
++extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
++extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
++
++extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end);
++extern void e820_setup_gap(struct e820entry *e820, int nr_map);
++extern unsigned long e820_hole_size(unsigned long start_pfn,
++ unsigned long end_pfn);
++
++extern void __init parse_memopt(char *p, char **end);
++extern void __init parse_memmapopt(char *p, char **end);
++
++extern struct e820map e820;
++
++extern unsigned ebda_addr, ebda_size;
++#endif/*!__ASSEMBLY__*/
++
++#endif/*__E820_HEADER*/
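The e820.h above exposes the firmware memory map as a fixed-size array. A short consumer sketch using only the symbols declared in this header; example_usable_ram_bytes is an illustrative name:

/* Sketch only: total up the E820_RAM bytes from the exported map. */
static unsigned long long example_usable_ram_bytes(void)
{
        unsigned long long total = 0;
        int i;

        for (i = 0; i < e820.nr_map; i++)
                if (e820.map[i].type == E820_RAM)
                        total += e820.map[i].size;
        return total;
}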
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/fixmap.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,112 @@
++/*
++ * fixmap.h: compile-time virtual memory allocation
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 1998 Ingo Molnar
++ */
++
++#ifndef _ASM_FIXMAP_H
++#define _ASM_FIXMAP_H
++
++#include <linux/kernel.h>
++#include <asm/apicdef.h>
++#include <asm/page.h>
++#include <asm/vsyscall.h>
++#include <asm/vsyscall32.h>
++#include <asm/acpi.h>
++
++/*
++ * Here we define all the compile-time 'special' virtual
++ * addresses. The point is to have a constant address at
++ * compile time, but to set the physical address only
++ * in the boot process.
++ *
++ * these 'compile-time allocated' memory buffers are
++ * fixed-size 4k pages. (or larger if used with an increment
++ * higher than 1) use fixmap_set(idx,phys) to associate
++ * physical memory with fixmap indices.
++ *
++ * TLB entries of such buffers will not be flushed across
++ * task switches.
++ */
++
++enum fixed_addresses {
++ VSYSCALL_LAST_PAGE,
++ VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
++ VSYSCALL_HPET,
++ FIX_HPET_BASE,
++#ifdef CONFIG_X86_LOCAL_APIC
++	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
++#endif
++#ifdef CONFIG_X86_IO_APIC
++ FIX_IO_APIC_BASE_0,
++ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
++#endif
++#ifdef CONFIG_ACPI
++ FIX_ACPI_BEGIN,
++ FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
++#endif
++ FIX_SHARED_INFO,
++#define NR_FIX_ISAMAPS 256
++ FIX_ISAMAP_END,
++ FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
++ __end_of_permanent_fixed_addresses,
++ /* temporary boot-time mappings, used before ioremap() is functional */
++#define NR_FIX_BTMAPS 16
++ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
++ FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
++ __end_of_fixed_addresses
++};
++
++extern void __set_fixmap (enum fixed_addresses idx,
++ unsigned long phys, pgprot_t flags);
++
++#define set_fixmap(idx, phys) \
++ __set_fixmap(idx, phys, PAGE_KERNEL)
++/*
++ * Some hardware wants to get fixmapped without caching.
++ */
++#define set_fixmap_nocache(idx, phys) \
++ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
++
++#define clear_fixmap(idx) \
++ __set_fixmap(idx, 0, __pgprot(0))
++
++#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
++#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
++#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
++
++/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
++#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
++#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
++
++#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
++
++extern void __this_fixmap_does_not_exist(void);
++
++/*
++ * 'index to address' translation. If anyone tries to use the idx
++ * directly without translation, we catch the bug with a NULL-dereference
++ * kernel oops. Illegal ranges of incoming indices are caught too.
++ */
++static __always_inline unsigned long fix_to_virt(const unsigned int idx)
++{
++ /*
++ * this branch gets completely eliminated after inlining,
++ * except when someone tries to use fixaddr indices in an
++ * illegal way. (such as mixing up address types or using
++ * out-of-range indices).
++ *
++ * If it doesn't get removed, the linker will complain
++ * loudly with a reasonably clear error message..
++ */
++ if (idx >= __end_of_fixed_addresses)
++ __this_fixmap_does_not_exist();
++
++ return __fix_to_virt(idx);
++}
++
++#endif
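fix_to_virt() in the fixmap.h above is pure compile-time arithmetic: index idx maps to FIXADDR_TOP - (idx << PAGE_SHIFT). A hedged sketch of binding a physical page to a fixmap slot; shared_info_phys and example_map_shared_info are illustrative names, not part of the patch:

/* Sketch only: bind a page to a fixmap slot and get its constant virtual address. */
static void *example_map_shared_info(unsigned long shared_info_phys)
{
        set_fixmap(FIX_SHARED_INFO, shared_info_phys);
        /* fix_to_virt() computes FIXADDR_TOP - (FIX_SHARED_INFO << PAGE_SHIFT) */
        return (void *)fix_to_virt(FIX_SHARED_INFO);
}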
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/floppy.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,206 @@
++/*
++ * Architecture specific parts of the Floppy driver
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 1995
++ *
++ * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
++ */
++#ifndef __ASM_XEN_X86_64_FLOPPY_H
++#define __ASM_XEN_X86_64_FLOPPY_H
++
++#include <linux/vmalloc.h>
++
++/*
++ * The DMA channel used by the floppy controller cannot access data at
++ * addresses >= 16MB
++ *
++ * Went back to the 1MB limit, as some people had problems with the floppy
++ * driver otherwise. It doesn't matter much for performance anyway, as most
++ * floppy accesses go through the track buffer.
++ */
++#define _CROSS_64KB(a,s,vdma) \
++(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
++
++/* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. */
++#include <asm/dma.h>
++#undef MAX_DMA_ADDRESS
++#define MAX_DMA_ADDRESS 0
++#define CROSS_64KB(a,s) (0)
++
++#define fd_inb(port) inb_p(port)
++#define fd_outb(value,port) outb_p(value,port)
++
++#define fd_request_dma() (0)
++#define fd_free_dma() ((void)0)
++#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
++#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
++#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
++#define fd_get_dma_residue() vdma_get_dma_residue(FLOPPY_DMA)
++/*
++ * Do not use vmalloc/vfree: floppy_release_irq_and_dma() gets called from
++ * softirq context via motor_off_callback. A generic bug we happen to trigger.
++ */
++#define fd_dma_mem_alloc(size) __get_free_pages(GFP_KERNEL|__GFP_NORETRY, get_order(size))
++#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
++#define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io)
++
++static int virtual_dma_count;
++static int virtual_dma_residue;
++static char *virtual_dma_addr;
++static int virtual_dma_mode;
++static int doing_pdma;
++
++static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
++{
++ register unsigned char st;
++
++#undef TRACE_FLPY_INT
++
++#ifdef TRACE_FLPY_INT
++ static int calls=0;
++ static int bytes=0;
++ static int dma_wait=0;
++#endif
++ if (!doing_pdma)
++ return floppy_interrupt(irq, dev_id, regs);
++
++#ifdef TRACE_FLPY_INT
++ if(!calls)
++ bytes = virtual_dma_count;
++#endif
++
++ {
++ register int lcount;
++ register char *lptr;
++
++ st = 1;
++ for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
++ lcount; lcount--, lptr++) {
++ st=inb(virtual_dma_port+4) & 0xa0 ;
++ if(st != 0xa0)
++ break;
++ if(virtual_dma_mode)
++ outb_p(*lptr, virtual_dma_port+5);
++ else
++ *lptr = inb_p(virtual_dma_port+5);
++ }
++ virtual_dma_count = lcount;
++ virtual_dma_addr = lptr;
++ st = inb(virtual_dma_port+4);
++ }
++
++#ifdef TRACE_FLPY_INT
++ calls++;
++#endif
++ if(st == 0x20)
++ return IRQ_HANDLED;
++ if(!(st & 0x20)) {
++ virtual_dma_residue += virtual_dma_count;
++ virtual_dma_count=0;
++#ifdef TRACE_FLPY_INT
++ printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
++ virtual_dma_count, virtual_dma_residue, calls, bytes,
++ dma_wait);
++ calls = 0;
++ dma_wait=0;
++#endif
++ doing_pdma = 0;
++ floppy_interrupt(irq, dev_id, regs);
++ return IRQ_HANDLED;
++ }
++#ifdef TRACE_FLPY_INT
++ if(!virtual_dma_count)
++ dma_wait++;
++#endif
++ return IRQ_HANDLED;
++}
++
++static void fd_disable_dma(void)
++{
++ doing_pdma = 0;
++ virtual_dma_residue += virtual_dma_count;
++ virtual_dma_count=0;
++}
++
++static int vdma_get_dma_residue(unsigned int dummy)
++{
++ return virtual_dma_count + virtual_dma_residue;
++}
++
++
++static int fd_request_irq(void)
++{
++ return request_irq(FLOPPY_IRQ, floppy_hardint,
++ IRQF_DISABLED, "floppy", NULL);
++}
++
++#if 0
++static unsigned long vdma_mem_alloc(unsigned long size)
++{
++ return (unsigned long) vmalloc(size);
++
++}
++
++static void vdma_mem_free(unsigned long addr, unsigned long size)
++{
++ vfree((void *)addr);
++}
++#endif
++
++static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
++{
++ doing_pdma = 1;
++ virtual_dma_port = io;
++ virtual_dma_mode = (mode == DMA_MODE_WRITE);
++ virtual_dma_addr = addr;
++ virtual_dma_count = size;
++ virtual_dma_residue = 0;
++ return 0;
++}
++
++/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
++#define FDC1 xen_floppy_init()
++static int FDC2 = -1;
++
++static int xen_floppy_init(void)
++{
++ use_virtual_dma = 1;
++ can_use_virtual_dma = 1;
++ return 0x3f0;
++}
++
++/*
++ * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
++ * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
++ * coincides with another rtc CMOS user. Paul G.
++ */
++#define FLOPPY0_TYPE ({ \
++ unsigned long flags; \
++ unsigned char val; \
++ spin_lock_irqsave(&rtc_lock, flags); \
++ val = (CMOS_READ(0x10) >> 4) & 15; \
++ spin_unlock_irqrestore(&rtc_lock, flags); \
++ val; \
++})
++
++#define FLOPPY1_TYPE ({ \
++ unsigned long flags; \
++ unsigned char val; \
++ spin_lock_irqsave(&rtc_lock, flags); \
++ val = CMOS_READ(0x10) & 15; \
++ spin_unlock_irqrestore(&rtc_lock, flags); \
++ val; \
++})
++
++#define N_FDC 2
++#define N_DRIVE 8
++
++#define FLOPPY_MOTOR_MASK 0xf0
++
++#define EXTRA_FLOPPY_PARAMS
++
++#endif /* __ASM_XEN_X86_64_FLOPPY_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/hw_irq.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,136 @@
++#ifndef _ASM_HW_IRQ_H
++#define _ASM_HW_IRQ_H
++
++/*
++ * linux/include/asm/hw_irq.h
++ *
++ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
++ *
++ * moved some of the old arch/i386/kernel/irq.h to here. VY
++ *
++ * IRQ/IPI changes taken from work by Thomas Radke
++ * <tomsoft@informatik.tu-chemnitz.de>
++ *
++ * hacked by Andi Kleen for x86-64.
++ */
++
++#ifndef __ASSEMBLY__
++#include <asm/atomic.h>
++#include <asm/irq.h>
++#include <linux/profile.h>
++#include <linux/smp.h>
++
++struct hw_interrupt_type;
++#endif
++
++#define NMI_VECTOR 0x02
++/*
++ * IDT vectors usable for external interrupt sources start
++ * at 0x20:
++ */
++#define FIRST_EXTERNAL_VECTOR 0x20
++
++#define IA32_SYSCALL_VECTOR 0x80
++
++
++/*
++ * Vectors 0x20-0x2f are used for ISA interrupts.
++ */
++
++/*
++ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
++ *
++ * some of the following vectors are 'rare', they are merged
++ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
++ * TLB, reschedule and local APIC vectors are performance-critical.
++ */
++#ifndef CONFIG_XEN
++#define SPURIOUS_APIC_VECTOR 0xff
++#define ERROR_APIC_VECTOR 0xfe
++#define RESCHEDULE_VECTOR 0xfd
++#define CALL_FUNCTION_VECTOR 0xfc
++/* fb free - please don't readd KDB here because it's useless
++   (hint - think what an NMI bit does to a vector) */
++#define THERMAL_APIC_VECTOR 0xfa
++#define THRESHOLD_APIC_VECTOR 0xf9
++/* f8 free */
++#define INVALIDATE_TLB_VECTOR_END 0xf7
++#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
++
++#define NUM_INVALIDATE_TLB_VECTORS 8
++#endif
++
++/*
++ * Local APIC timer IRQ vector is on a different priority level,
++ * to work around the 'lost local interrupt if more than 2 IRQ
++ * sources per level' errata.
++ */
++#define LOCAL_TIMER_VECTOR 0xef
++
++/*
++ * First APIC vector available to drivers: (vectors 0x30-0xee)
++ * we start at 0x31 to spread out vectors evenly between priority
++ * levels. (0x80 is the syscall vector)
++ */
++#define FIRST_DEVICE_VECTOR 0x31
++#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in irq.h */
++
++
++#ifndef __ASSEMBLY__
++extern u8 irq_vector[NR_IRQ_VECTORS];
++#define IO_APIC_VECTOR(irq) (irq_vector[irq])
++#define AUTO_ASSIGN -1
++
++/*
++ * Various low-level irq details needed by irq.c, process.c,
++ * time.c, io_apic.c and smp.c
++ *
++ * Interrupt entry/exit code at both C and assembly level
++ */
++
++extern void disable_8259A_irq(unsigned int irq);
++extern void enable_8259A_irq(unsigned int irq);
++extern int i8259A_irq_pending(unsigned int irq);
++extern void make_8259A_irq(unsigned int irq);
++extern void init_8259A(int aeoi);
++extern void FASTCALL(send_IPI_self(int vector));
++extern void init_VISWS_APIC_irqs(void);
++extern void setup_IO_APIC(void);
++extern void disable_IO_APIC(void);
++extern void print_IO_APIC(void);
++extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
++extern void send_IPI(int dest, int vector);
++extern void setup_ioapic_dest(void);
++
++extern unsigned long io_apic_irqs;
++
++extern atomic_t irq_err_count;
++extern atomic_t irq_mis_count;
++
++#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
++
++#define __STR(x) #x
++#define STR(x) __STR(x)
++
++#include <asm/ptrace.h>
++
++#define IRQ_NAME2(nr) nr##_interrupt(void)
++#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
++
++/*
++ * SMP has a few special interrupts for IPI messages
++ */
++
++#define BUILD_IRQ(nr) \
++asmlinkage void IRQ_NAME(nr); \
++__asm__( \
++"\n.p2align\n" \
++"IRQ" #nr "_interrupt:\n\t" \
++ "push $~(" #nr ") ; " \
++ "jmp common_interrupt");
++
++#define platform_legacy_irq(irq) ((irq) < 16)
++
++#endif
++
++#endif /* _ASM_HW_IRQ_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/hypercall.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,406 @@
++/******************************************************************************
++ * hypercall.h
++ *
++ * Linux-specific hypervisor handling.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * 64-bit updates:
++ * Benjamin Liu <benjamin.liu@intel.com>
++ * Jun Nakajima <jun.nakajima@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERCALL_H__
++#define __HYPERCALL_H__
++
++#include <linux/string.h> /* memcpy() */
++
++#ifndef __HYPERVISOR_H__
++# error "please don't include this file directly"
++#endif
++
++#define __STR(x) #x
++#define STR(x) __STR(x)
++
++#ifdef CONFIG_XEN
++#define HYPERCALL_STR(name) \
++ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"
++#else
++#define HYPERCALL_STR(name) \
++ "mov hypercall_stubs,%%rax; " \
++ "add $("STR(__HYPERVISOR_##name)" * 32),%%rax; " \
++ "call *%%rax"
++#endif
++
++#define _hypercall0(type, name) \
++({ \
++ long __res; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res) \
++ : \
++ : "memory" ); \
++ (type)__res; \
++})
++
++#define _hypercall1(type, name, a1) \
++({ \
++ long __res, __ign1; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=D" (__ign1) \
++ : "1" ((long)(a1)) \
++ : "memory" ); \
++ (type)__res; \
++})
++
++#define _hypercall2(type, name, a1, a2) \
++({ \
++ long __res, __ign1, __ign2; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \
++ : "1" ((long)(a1)), "2" ((long)(a2)) \
++ : "memory" ); \
++ (type)__res; \
++})
++
++#define _hypercall3(type, name, a1, a2, a3) \
++({ \
++ long __res, __ign1, __ign2, __ign3; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
++ "=d" (__ign3) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)) \
++ : "memory" ); \
++ (type)__res; \
++})
++
++#define _hypercall4(type, name, a1, a2, a3, a4) \
++({ \
++ long __res, __ign1, __ign2, __ign3; \
++ asm volatile ( \
++ "movq %7,%%r10; " \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
++ "=d" (__ign3) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)), "g" ((long)(a4)) \
++ : "memory", "r10" ); \
++ (type)__res; \
++})
++
++#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
++({ \
++ long __res, __ign1, __ign2, __ign3; \
++ asm volatile ( \
++ "movq %7,%%r10; movq %8,%%r8; " \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
++ "=d" (__ign3) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)), "g" ((long)(a4)), \
++ "g" ((long)(a5)) \
++ : "memory", "r10", "r8" ); \
++ (type)__res; \
++})
++
++static inline int
++HYPERVISOR_set_trap_table(
++ trap_info_t *table)
++{
++ return _hypercall1(int, set_trap_table, table);
++}
++
++static inline int
++HYPERVISOR_mmu_update(
++ mmu_update_t *req, int count, int *success_count, domid_t domid)
++{
++ return _hypercall4(int, mmu_update, req, count, success_count, domid);
++}
++
++static inline int
++HYPERVISOR_mmuext_op(
++ struct mmuext_op *op, int count, int *success_count, domid_t domid)
++{
++ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
++}
++
++static inline int
++HYPERVISOR_set_gdt(
++ unsigned long *frame_list, int entries)
++{
++ return _hypercall2(int, set_gdt, frame_list, entries);
++}
++
++static inline int
++HYPERVISOR_stack_switch(
++ unsigned long ss, unsigned long esp)
++{
++ return _hypercall2(int, stack_switch, ss, esp);
++}
++
++static inline int
++HYPERVISOR_set_callbacks(
++ unsigned long event_address, unsigned long failsafe_address,
++ unsigned long syscall_address)
++{
++ return _hypercall3(int, set_callbacks,
++ event_address, failsafe_address, syscall_address);
++}
++
++static inline int
++HYPERVISOR_fpu_taskswitch(
++ int set)
++{
++ return _hypercall1(int, fpu_taskswitch, set);
++}
++
++static inline int
++HYPERVISOR_sched_op_compat(
++ int cmd, unsigned long arg)
++{
++ return _hypercall2(int, sched_op_compat, cmd, arg);
++}
++
++static inline int
++HYPERVISOR_sched_op(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, sched_op, cmd, arg);
++}
++
++static inline long
++HYPERVISOR_set_timer_op(
++ u64 timeout)
++{
++ return _hypercall1(long, set_timer_op, timeout);
++}
++
++static inline int
++HYPERVISOR_platform_op(
++ struct xen_platform_op *platform_op)
++{
++ platform_op->interface_version = XENPF_INTERFACE_VERSION;
++ return _hypercall1(int, platform_op, platform_op);
++}
++
++static inline int
++HYPERVISOR_set_debugreg(
++ int reg, unsigned long value)
++{
++ return _hypercall2(int, set_debugreg, reg, value);
++}
++
++static inline unsigned long
++HYPERVISOR_get_debugreg(
++ int reg)
++{
++ return _hypercall1(unsigned long, get_debugreg, reg);
++}
++
++static inline int
++HYPERVISOR_update_descriptor(
++ unsigned long ma, unsigned long word)
++{
++ return _hypercall2(int, update_descriptor, ma, word);
++}
++
++static inline int
++HYPERVISOR_memory_op(
++ unsigned int cmd, void *arg)
++{
++ return _hypercall2(int, memory_op, cmd, arg);
++}
++
++static inline int
++HYPERVISOR_multicall(
++ multicall_entry_t *call_list, int nr_calls)
++{
++ return _hypercall2(int, multicall, call_list, nr_calls);
++}
++
++static inline int
++HYPERVISOR_update_va_mapping(
++ unsigned long va, pte_t new_val, unsigned long flags)
++{
++ return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
++}
++
++static inline int
++HYPERVISOR_event_channel_op(
++ int cmd, void *arg)
++{
++ int rc = _hypercall2(int, event_channel_op, cmd, arg);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ struct evtchn_op op;
++ op.cmd = cmd;
++ memcpy(&op.u, arg, sizeof(op.u));
++ rc = _hypercall1(int, event_channel_op_compat, &op);
++ memcpy(arg, &op.u, sizeof(op.u));
++ }
++#endif
++
++ return rc;
++}
++
++static inline int
++HYPERVISOR_acm_op(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, acm_op, cmd, arg);
++}
++
++static inline int
++HYPERVISOR_xen_version(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, xen_version, cmd, arg);
++}
++
++static inline int
++HYPERVISOR_console_io(
++ int cmd, int count, char *str)
++{
++ return _hypercall3(int, console_io, cmd, count, str);
++}
++
++static inline int
++HYPERVISOR_physdev_op(
++ int cmd, void *arg)
++{
++ int rc = _hypercall2(int, physdev_op, cmd, arg);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ struct physdev_op op;
++ op.cmd = cmd;
++ memcpy(&op.u, arg, sizeof(op.u));
++ rc = _hypercall1(int, physdev_op_compat, &op);
++ memcpy(arg, &op.u, sizeof(op.u));
++ }
++#endif
++
++ return rc;
++}
++
++static inline int
++HYPERVISOR_grant_table_op(
++ unsigned int cmd, void *uop, unsigned int count)
++{
++ return _hypercall3(int, grant_table_op, cmd, uop, count);
++}
++
++static inline int
++HYPERVISOR_update_va_mapping_otherdomain(
++ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
++{
++ return _hypercall4(int, update_va_mapping_otherdomain, va,
++ new_val.pte, flags, domid);
++}
++
++static inline int
++HYPERVISOR_vm_assist(
++ unsigned int cmd, unsigned int type)
++{
++ return _hypercall2(int, vm_assist, cmd, type);
++}
++
++static inline int
++HYPERVISOR_vcpu_op(
++ int cmd, int vcpuid, void *extra_args)
++{
++ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
++}
++
++static inline int
++HYPERVISOR_set_segment_base(
++ int reg, unsigned long value)
++{
++ return _hypercall2(int, set_segment_base, reg, value);
++}
++
++static inline int
++HYPERVISOR_suspend(
++ unsigned long srec)
++{
++ struct sched_shutdown sched_shutdown = {
++ .reason = SHUTDOWN_suspend
++ };
++
++ int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
++ &sched_shutdown, srec);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (rc == -ENOSYS)
++ rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
++ SHUTDOWN_suspend, srec);
++#endif
++
++ return rc;
++}
++
++static inline int
++HYPERVISOR_nmi_op(
++ unsigned long op, void *arg)
++{
++ return _hypercall2(int, nmi_op, op, arg);
++}
++
++static inline unsigned long
++HYPERVISOR_hvm_op(
++ int op, void *arg)
++{
++ return _hypercall2(unsigned long, hvm_op, op, arg);
++}
++
++static inline int
++HYPERVISOR_callback_op(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, callback_op, cmd, arg);
++}
++
++static inline int
++HYPERVISOR_xenoprof_op(
++ int op, void *arg)
++{
++ return _hypercall2(int, xenoprof_op, op, arg);
++}
++
++static inline int
++HYPERVISOR_kexec_op(
++ unsigned long op, void *args)
++{
++ return _hypercall2(int, kexec_op, op, args);
++}
++
++#endif /* __HYPERCALL_H__ */
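Each HYPERVISOR_* wrapper above is a thin inline shim over the _hypercallN macros, which place arguments in rdi, rsi, rdx, r10 and r8 per the 64-bit Xen calling convention. A minimal usage sketch, assuming XENVER_version from xen/interface/version.h (with a NULL argument it returns the hypervisor version packed as (major << 16) | minor); example_print_xen_version is an illustrative name:

/* Sketch only: a typical caller of the hypercall wrappers defined above. */
#include <linux/kernel.h>
#include <xen/interface/version.h>

static void example_print_xen_version(void)
{
        int ver = HYPERVISOR_xen_version(XENVER_version, NULL);

        printk(KERN_INFO "Running on Xen %d.%d\n", ver >> 16, ver & 0xffff);
}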
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/hypervisor.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,2 @@
++
++#include <asm-i386/mach-xen/asm/hypervisor.h>
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/io.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,330 @@
++#ifndef _ASM_IO_H
++#define _ASM_IO_H
++
++#include <asm/fixmap.h>
++
++/*
++ * This file contains the definitions for the x86 IO instructions
++ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
++ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
++ * versions of the single-IO instructions (inb_p/inw_p/..).
++ *
++ * This file is not meant to be obfuscating: it's just complicated
++ * to (a) handle it all in a way that makes gcc able to optimize it
++ * as well as possible and (b) trying to avoid writing the same thing
++ * over and over again with slight variations and possibly making a
++ * mistake somewhere.
++ */
++
++/*
++ * Thanks to James van Artsdalen for a better timing-fix than
++ * the two short jumps: using outb's to a nonexistent port seems
++ * to guarantee better timings even on fast machines.
++ *
++ * On the other hand, I'd like to be sure of a non-existent port:
++ * I feel a bit unsafe about using 0x80 (should be safe, though)
++ *
++ * Linus
++ */
++
++ /*
++ * Bit simplified and optimized by Jan Hubicka
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
++ *
++ * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
++ * isa_read[wl] and isa_write[wl] fixed
++ * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
++ */
++
++#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
++
++#ifdef REALLY_SLOW_IO
++#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
++#else
++#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
++#endif
++
++/*
++ * Talk about misusing macros..
++ */
++#define __OUT1(s,x) \
++static inline void out##s(unsigned x value, unsigned short port) {
++
++#define __OUT2(s,s1,s2) \
++__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
++
++#define __OUT(s,s1,x) \
++__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
++__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
++
++#define __IN1(s) \
++static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
++
++#define __IN2(s,s1,s2) \
++__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
++
++#define __IN(s,s1,i...) \
++__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
++__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
++
++#define __INS(s) \
++static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
++{ __asm__ __volatile__ ("rep ; ins" #s \
++: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
++
++#define __OUTS(s) \
++static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
++{ __asm__ __volatile__ ("rep ; outs" #s \
++: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
++
++#define RETURN_TYPE unsigned char
++__IN(b,"")
++#undef RETURN_TYPE
++#define RETURN_TYPE unsigned short
++__IN(w,"")
++#undef RETURN_TYPE
++#define RETURN_TYPE unsigned int
++__IN(l,"")
++#undef RETURN_TYPE
++
++__OUT(b,"b",char)
++__OUT(w,"w",short)
++__OUT(l,,int)
++
++__INS(b)
++__INS(w)
++__INS(l)
++
++__OUTS(b)
++__OUTS(w)
++__OUTS(l)
++
++#define IO_SPACE_LIMIT 0xffff
++
++#if defined(__KERNEL__) && __x86_64__
++
++#include <linux/vmalloc.h>
++
++#ifndef __i386__
++/*
++ * Change virtual addresses to physical addresses and vv.
++ * These are pretty trivial
++ */
++static inline unsigned long virt_to_phys(volatile void * address)
++{
++ return __pa(address);
++}
++
++static inline void * phys_to_virt(unsigned long address)
++{
++ return __va(address);
++}
++
++#define virt_to_bus(_x) phys_to_machine(__pa(_x))
++#define bus_to_virt(_x) __va(machine_to_phys(_x))
++#endif
++
++/*
++ * Change "struct page" to physical address.
++ */
++#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
++#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
++#define page_to_bus(page) (phys_to_machine(page_to_pseudophys(page)))
++
++#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
++ (unsigned long) bio_offset((bio)))
++#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
++ (unsigned long) (bv)->bv_offset)
++
++#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
++ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
++ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
++ bvec_to_pseudophys((vec2))))
++
++#include <asm-generic/iomap.h>
++
++extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
++
++static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
++{
++ return __ioremap(offset, size, 0);
++}
++
++extern void *bt_ioremap(unsigned long addr, unsigned long size);
++extern void bt_iounmap(void *addr, unsigned long size);
++#define early_ioremap bt_ioremap
++#define early_iounmap bt_iounmap
++
++/*
++ * This one maps high address device memory and turns off caching for that area.
++ * it's useful if some control registers are in such an area and write combining
++ * or read caching is not desirable:
++ */
++extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
++extern void iounmap(volatile void __iomem *addr);
++
++/*
++ * ISA I/O bus memory addresses are 1:1 with the physical address.
++ */
++
++#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
++#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
++#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
++
++/*
++ * However PCI ones are not necessarily 1:1 and therefore these interfaces
++ * are forbidden in portable PCI drivers.
++ *
++ * Allow them on x86 for legacy drivers, though.
++ */
++#define virt_to_bus(_x) phys_to_machine(__pa(_x))
++#define bus_to_virt(_x) __va(machine_to_phys(_x))
++
++/*
++ * readX/writeX() are used to access memory mapped devices. On some
++ * architectures the memory mapped IO stuff needs to be accessed
++ * differently. On the x86 architecture, we just read/write the
++ * memory location directly.
++ */
++
++static inline __u8 __readb(const volatile void __iomem *addr)
++{
++ return *(__force volatile __u8 *)addr;
++}
++static inline __u16 __readw(const volatile void __iomem *addr)
++{
++ return *(__force volatile __u16 *)addr;
++}
++static __always_inline __u32 __readl(const volatile void __iomem *addr)
++{
++ return *(__force volatile __u32 *)addr;
++}
++static inline __u64 __readq(const volatile void __iomem *addr)
++{
++ return *(__force volatile __u64 *)addr;
++}
++#define readb(x) __readb(x)
++#define readw(x) __readw(x)
++#define readl(x) __readl(x)
++#define readq(x) __readq(x)
++#define readb_relaxed(a) readb(a)
++#define readw_relaxed(a) readw(a)
++#define readl_relaxed(a) readl(a)
++#define readq_relaxed(a) readq(a)
++#define __raw_readb readb
++#define __raw_readw readw
++#define __raw_readl readl
++#define __raw_readq readq
++
++#define mmiowb()
++
++static inline void __writel(__u32 b, volatile void __iomem *addr)
++{
++ *(__force volatile __u32 *)addr = b;
++}
++static inline void __writeq(__u64 b, volatile void __iomem *addr)
++{
++ *(__force volatile __u64 *)addr = b;
++}
++static inline void __writeb(__u8 b, volatile void __iomem *addr)
++{
++ *(__force volatile __u8 *)addr = b;
++}
++static inline void __writew(__u16 b, volatile void __iomem *addr)
++{
++ *(__force volatile __u16 *)addr = b;
++}
++#define writeq(val,addr) __writeq((val),(addr))
++#define writel(val,addr) __writel((val),(addr))
++#define writew(val,addr) __writew((val),(addr))
++#define writeb(val,addr) __writeb((val),(addr))
++#define __raw_writeb writeb
++#define __raw_writew writew
++#define __raw_writel writel
++#define __raw_writeq writeq
++
++void __memcpy_fromio(void*,unsigned long,unsigned);
++void __memcpy_toio(unsigned long,const void*,unsigned);
++
++static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
++{
++ __memcpy_fromio(to,(unsigned long)from,len);
++}
++static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
++{
++ __memcpy_toio((unsigned long)to,from,len);
++}
++
++void memset_io(volatile void __iomem *a, int b, size_t c);
++
++/*
++ * ISA space is 'always mapped' on a typical x86 system, no need to
++ * explicitly ioremap() it. The fact that the ISA IO space is mapped
++ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
++ * are physical addresses. The following constant pointer can be
++ * used as the IO-area pointer (it can be iounmapped as well, so the
++ * analogy with PCI is quite large):
++ */
++#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
++
++/*
++ * Again, x86-64 does not require mem IO specific function.
++ */
++
++#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d))
++
++/**
++ * check_signature - find BIOS signatures
++ * @io_addr: mmio address to check
++ * @signature: signature block
++ * @length: length of signature
++ *
++ * Perform a signature comparison with the mmio address io_addr. This
++ * address should have been obtained by ioremap.
++ * Returns 1 on a match.
++ */
++
++static inline int check_signature(void __iomem *io_addr,
++ const unsigned char *signature, int length)
++{
++ int retval = 0;
++ do {
++ if (readb(io_addr) != *signature)
++ goto out;
++ io_addr++;
++ signature++;
++ length--;
++ } while (length);
++ retval = 1;
++out:
++ return retval;
++}
++
++/* Nothing to do */
++
++#define dma_cache_inv(_start,_size) do { } while (0)
++#define dma_cache_wback(_start,_size) do { } while (0)
++#define dma_cache_wback_inv(_start,_size) do { } while (0)
++
++#define flush_write_buffers()
++
++extern int iommu_bio_merge;
++#define BIO_VMERGE_BOUNDARY iommu_bio_merge
++
++/*
++ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
++ * access
++ */
++#define xlate_dev_mem_ptr(p, sz) ioremap(p, sz)
++#define xlate_dev_mem_ptr_unmap(p) iounmap(p)
++
++/*
++ * Convert a virtual cached pointer to an uncached pointer
++ */
++#define xlate_dev_kmem_ptr(p) p
++
++#endif /* __KERNEL__ */
++
++#define ARCH_HAS_DEV_MEM
++
++#endif
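The io.h above keeps the usual readX/writeX accessors while redirecting virt_to_bus/page_to_bus through phys_to_machine(). A short sketch of ordinary MMIO use on top of it; phys_base and the 0x10 register offset are made-up values for illustration:

/* Sketch only: map a hypothetical register window and set bit 0 of a control reg. */
#include <linux/errno.h>

static int example_toggle_ctrl(unsigned long phys_base)
{
        void __iomem *regs = ioremap_nocache(phys_base, PAGE_SIZE);

        if (!regs)
                return -ENOMEM;

        writel(readl(regs + 0x10) | 1, regs + 0x10);
        iounmap(regs);
        return 0;
}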
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/irq.h 2007-08-27 14:01:51.000000000 -0400
+@@ -0,0 +1,38 @@
++#ifndef _ASM_IRQ_H
++#define _ASM_IRQ_H
++
++/*
++ * linux/include/asm/irq.h
++ *
++ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
++ *
++ * IRQ/IPI changes taken from work by Thomas Radke
++ * <tomsoft@informatik.tu-chemnitz.de>
++ */
++
++#include <linux/sched.h>
++/* include comes from machine specific directory */
++#include "irq_vectors.h"
++#include <asm/thread_info.h>
++
++static __inline__ int irq_canonicalize(int irq)
++{
++ return ((irq == 2) ? 9 : irq);
++}
++
++#ifdef CONFIG_X86_LOCAL_APIC
++#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
++#endif
++
++#define KDB_VECTOR 0xf9
++
++# define irq_ctx_init(cpu) do { } while (0)
++
++#ifdef CONFIG_HOTPLUG_CPU
++#include <linux/cpumask.h>
++extern void fixup_irqs(cpumask_t map);
++#endif
++
++#define __ARCH_HAS_DO_SOFTIRQ 1
++
++#endif /* _ASM_IRQ_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/irqflags.h 2007-08-27 14:01:58.000000000 -0400
+@@ -0,0 +1,139 @@
++/*
++ * include/asm-x86_64/irqflags.h
++ *
++ * IRQ flags handling
++ *
++ * This file gets included from lowlevel asm headers too, to provide
++ * wrapped versions of the local_irq_*() APIs, based on the
++ * raw_local_irq_*() functions from the lowlevel headers.
++ */
++#ifndef _ASM_IRQFLAGS_H
++#define _ASM_IRQFLAGS_H
++
++#ifndef __ASSEMBLY__
++/*
++ * Interrupt control:
++ */
++
++/*
++ * The use of 'barrier' in the following reflects their use as local-lock
++ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
++ * critical operations are executed. All critical operations must complete
++ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
++ * includes these barriers, for example.
++ */
++
++#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
++
++#define raw_local_save_flags(flags) \
++ do { (flags) = __raw_local_save_flags(); } while (0)
++
++#define raw_local_irq_restore(x) \
++do { \
++ vcpu_info_t *_vcpu; \
++ barrier(); \
++ _vcpu = current_vcpu_info(); \
++ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
++ barrier(); /* unmask then check (avoid races) */ \
++ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
++ force_evtchn_callback(); \
++ } \
++} while (0)
++
++#ifdef CONFIG_X86_VSMP
++
++/*
++ * Interrupt control for the VSMP architecture:
++ */
++
++static inline void raw_local_irq_disable(void)
++{
++ unsigned long flags = __raw_local_save_flags();
++
++ raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18));
++}
++
++static inline void raw_local_irq_enable(void)
++{
++ unsigned long flags = __raw_local_save_flags();
++
++ raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
++}
++
++static inline int raw_irqs_disabled_flags(unsigned long flags)
++{
++ return !(flags & (1<<9)) || (flags & (1 << 18));
++}
++
++#else /* CONFIG_X86_VSMP */
++
++#define raw_local_irq_disable() \
++do { \
++ current_vcpu_info()->evtchn_upcall_mask = 1; \
++ barrier(); \
++} while (0)
++
++#define raw_local_irq_enable() \
++do { \
++ vcpu_info_t *_vcpu; \
++ barrier(); \
++ _vcpu = current_vcpu_info(); \
++ _vcpu->evtchn_upcall_mask = 0; \
++ barrier(); /* unmask then check (avoid races) */ \
++ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
++ force_evtchn_callback(); \
++} while (0)
++
++static inline int raw_irqs_disabled_flags(unsigned long flags)
++{
++ return (flags != 0);
++}
++
++#endif
++
++/*
++ * For spinlocks, etc.:
++ */
++
++#define __raw_local_irq_save() \
++({ \
++ unsigned long flags = __raw_local_save_flags(); \
++ \
++ raw_local_irq_disable(); \
++ \
++ flags; \
++})
++
++#define raw_local_irq_save(flags) \
++ do { (flags) = __raw_local_irq_save(); } while (0)
++
++#define raw_irqs_disabled() \
++({ \
++ unsigned long flags = __raw_local_save_flags(); \
++ \
++ raw_irqs_disabled_flags(flags); \
++})
++
++/*
++ * Used in the idle loop; sti takes one instruction cycle
++ * to complete:
++ */
++void raw_safe_halt(void);
++
++/*
++ * Used when interrupts are already enabled or to
++ * shutdown the processor:
++ */
++void halt(void);
++
++#else /* __ASSEMBLY__: */
++# ifdef CONFIG_TRACE_IRQFLAGS
++# define TRACE_IRQS_ON call trace_hardirqs_on_thunk
++# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk
++# else
++# define TRACE_IRQS_ON
++# define TRACE_IRQS_OFF
++# endif
++#endif
++
++#endif
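Under Xen the irqflags.h above implements "interrupts off" by setting the vcpu's evtchn_upcall_mask rather than executing cli/sti, but callers keep the bare-metal idiom. A sketch, with example_critical_section as an illustrative name:

/* Sketch only: the standard save/disable/restore idiom built on the macros above. */
static void example_critical_section(void)
{
        unsigned long flags;

        raw_local_irq_save(flags);      /* mask event-channel upcalls */
        /* ... touch per-cpu state that must not be interrupted ... */
        raw_local_irq_restore(flags);   /* unmask and replay any pending events */
}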
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/maddr.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,161 @@
++#ifndef _X86_64_MADDR_H
++#define _X86_64_MADDR_H
++
++#include <xen/features.h>
++#include <xen/interface/xen.h>
++
++/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
++#define INVALID_P2M_ENTRY (~0UL)
++#define FOREIGN_FRAME_BIT (1UL<<63)
++#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
++
++/* Definitions for machine and pseudophysical addresses. */
++typedef unsigned long paddr_t;
++typedef unsigned long maddr_t;
++
++#ifdef CONFIG_XEN
++
++extern unsigned long *phys_to_machine_mapping;
++
++#undef machine_to_phys_mapping
++extern unsigned long *machine_to_phys_mapping;
++extern unsigned int machine_to_phys_order;
++
++static inline unsigned long pfn_to_mfn(unsigned long pfn)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return pfn;
++ BUG_ON(end_pfn && pfn >= end_pfn);
++ return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT;
++}
++
++static inline int phys_to_machine_mapping_valid(unsigned long pfn)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return 1;
++ BUG_ON(end_pfn && pfn >= end_pfn);
++ return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
++}
++
++static inline unsigned long mfn_to_pfn(unsigned long mfn)
++{
++ unsigned long pfn;
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return mfn;
++
++ if (unlikely((mfn >> machine_to_phys_order) != 0))
++ return end_pfn;
++
++ /* The array access can fail (e.g., device space beyond end of RAM). */
++ asm (
++ "1: movq %1,%0\n"
++ "2:\n"
++ ".section .fixup,\"ax\"\n"
++ "3: movq %2,%0\n"
++ " jmp 2b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 8\n"
++ " .quad 1b,3b\n"
++ ".previous"
++ : "=r" (pfn)
++ : "m" (machine_to_phys_mapping[mfn]), "m" (end_pfn) );
++
++ return pfn;
++}
++
++/*
++ * We detect special mappings in one of two ways:
++ * 1. If the MFN is an I/O page then Xen will set the m2p entry
++ * to be outside our maximum possible pseudophys range.
++ * 2. If the MFN belongs to a different domain then we will certainly
++ * not have MFN in our p2m table. Conversely, if the page is ours,
++ * then we'll have p2m(m2p(MFN))==MFN.
++ * If we detect a special mapping then it doesn't have a 'struct page'.
++ * We force !pfn_valid() by returning an out-of-range pointer.
++ *
++ * NB. These checks require that, for any MFN that is not in our reservation,
++ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
++ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
++ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
++ *
++ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
++ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
++ * require. In all the cases we care about, the FOREIGN_FRAME bit is
++ * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
++ */
++static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
++{
++ unsigned long pfn = mfn_to_pfn(mfn);
++ if ((pfn < end_pfn)
++ && !xen_feature(XENFEAT_auto_translated_physmap)
++ && (phys_to_machine_mapping[pfn] != mfn))
++ return end_pfn; /* force !pfn_valid() */
++ return pfn;
++}
++
++static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
++{
++ BUG_ON(end_pfn && pfn >= end_pfn);
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
++ return;
++ }
++ phys_to_machine_mapping[pfn] = mfn;
++}
++
++static inline maddr_t phys_to_machine(paddr_t phys)
++{
++ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
++ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
++ return machine;
++}
++
++static inline paddr_t machine_to_phys(maddr_t machine)
++{
++ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
++ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
++ return phys;
++}
++
++static inline paddr_t pte_phys_to_machine(paddr_t phys)
++{
++ maddr_t machine;
++ machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT);
++ machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK);
++ return machine;
++}
++
++static inline paddr_t pte_machine_to_phys(maddr_t machine)
++{
++ paddr_t phys;
++ phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT);
++ phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK);
++ return phys;
++}
++
++#define __pte_ma(x) ((pte_t) { (x) } )
++#define pfn_pte_ma(pfn, prot) __pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask)
++
++#else /* !CONFIG_XEN */
++
++#define pfn_to_mfn(pfn) (pfn)
++#define mfn_to_pfn(mfn) (mfn)
++#define mfn_to_local_pfn(mfn) (mfn)
++#define set_phys_to_machine(pfn, mfn) ((void)0)
++#define phys_to_machine_mapping_valid(pfn) (1)
++#define phys_to_machine(phys) ((maddr_t)(phys))
++#define machine_to_phys(mach) ((paddr_t)(mach))
++#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot)
++#define __pte_ma(x) __pte(x)
++
++#endif /* !CONFIG_XEN */
++
++/* VIRT <-> MACHINE conversion */
++#define virt_to_machine(v) (phys_to_machine(__pa(v)))
++#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
++#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
++
++#endif /* _X86_64_MADDR_H */
++
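The maddr.h above maintains the pseudophysical-to-machine (p2m) and machine-to-physical (m2p) views of memory. A small sketch of the round trip for a page in the domain's own reservation; example_translate is an illustrative name, not part of the patch:

/* Sketch only: p2m followed by m2p is the identity for our own pages. */
static void example_translate(void *vaddr)
{
        unsigned long pfn = __pa(vaddr) >> PAGE_SHIFT;  /* pseudophysical frame */
        unsigned long mfn = pfn_to_mfn(pfn);            /* machine frame via the p2m table */

        WARN_ON(mfn_to_pfn(mfn) != pfn);                /* m2p(p2m(pfn)) == pfn */
}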
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/mmu.h 2007-08-27 14:01:58.000000000 -0400
+@@ -0,0 +1,38 @@
++#ifndef __x86_64_MMU_H
++#define __x86_64_MMU_H
++
++#include <linux/spinlock.h>
++#include <asm/semaphore.h>
++
++/*
++ * The x86_64 doesn't have an mmu context, but
++ * we put the segment information here.
++ *
++ * cpu_vm_mask is used to optimize ldt flushing.
++ */
++typedef struct {
++ void *ldt;
++ rwlock_t ldtlock;
++ int size;
++ struct semaphore sem;
++#ifdef CONFIG_XEN
++ unsigned pinned:1;
++ unsigned has_foreign_mappings:1;
++ struct list_head unpinned;
++#endif
++} mm_context_t;
++
++#ifdef CONFIG_XEN
++extern struct list_head mm_unpinned;
++extern spinlock_t mm_unpinned_lock;
++
++/* mm/memory.c:exit_mmap hook */
++extern void _arch_exit_mmap(struct mm_struct *mm);
++#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
++
++/* kernel/fork.c:dup_mmap hook */
++extern void _arch_dup_mmap(struct mm_struct *mm);
++#define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm))
++#endif
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/mmu_context.h 2007-08-27 14:01:58.000000000 -0400
+@@ -0,0 +1,136 @@
++#ifndef __X86_64_MMU_CONTEXT_H
++#define __X86_64_MMU_CONTEXT_H
++
++#include <asm/desc.h>
++#include <asm/atomic.h>
++#include <asm/pgalloc.h>
++#include <asm/page.h>
++#include <asm/pda.h>
++#include <asm/pgtable.h>
++#include <asm/tlbflush.h>
++
++/*
++ * possibly do the LDT unload here?
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
++void destroy_context(struct mm_struct *mm);
++
++static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
++{
++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
++ if (read_pda(mmu_state) == TLBSTATE_OK)
++ write_pda(mmu_state, TLBSTATE_LAZY);
++#endif
++}
++
++#define prepare_arch_switch(next) __prepare_arch_switch()
++
++static inline void __prepare_arch_switch(void)
++{
++ /*
++ * Save away %es, %ds, %fs and %gs. Must happen before reload
++ * of cr3/ldt (i.e., not in __switch_to).
++ */
++ __asm__ __volatile__ (
++ "mov %%es,%0 ; mov %%ds,%1 ; mov %%fs,%2 ; mov %%gs,%3"
++ : "=m" (current->thread.es),
++ "=m" (current->thread.ds),
++ "=m" (current->thread.fsindex),
++ "=m" (current->thread.gsindex) );
++
++ if (current->thread.ds)
++ __asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );
++
++ if (current->thread.es)
++ __asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );
++
++ if (current->thread.fsindex) {
++ __asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
++ current->thread.fs = 0;
++ }
++
++ if (current->thread.gsindex) {
++ load_gs_index(0);
++ current->thread.gs = 0;
++ }
++}
++
++extern void mm_pin(struct mm_struct *mm);
++extern void mm_unpin(struct mm_struct *mm);
++void mm_pin_all(void);
++
++static inline void load_cr3(pgd_t *pgd)
++{
++ asm volatile("movq %0,%%cr3" :: "r" (phys_to_machine(__pa(pgd))) :
++ "memory");
++}
++
++static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
++ struct task_struct *tsk)
++{
++ unsigned cpu = smp_processor_id();
++ struct mmuext_op _op[3], *op = _op;
++
++ if (likely(prev != next)) {
++ BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
++ !next->context.pinned);
++
++ /* stop flush ipis for the previous mm */
++ cpu_clear(cpu, prev->cpu_vm_mask);
++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
++ write_pda(mmu_state, TLBSTATE_OK);
++ write_pda(active_mm, next);
++#endif
++ cpu_set(cpu, next->cpu_vm_mask);
++
++ /* load_cr3(next->pgd) */
++ op->cmd = MMUEXT_NEW_BASEPTR;
++ op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
++ op++;
++
++ /* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */
++ op->cmd = MMUEXT_NEW_USER_BASEPTR;
++ op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
++ op++;
++
++ if (unlikely(next->context.ldt != prev->context.ldt)) {
++ /* load_LDT_nolock(&next->context, cpu) */
++ op->cmd = MMUEXT_SET_LDT;
++ op->arg1.linear_addr = (unsigned long)next->context.ldt;
++ op->arg2.nr_ents = next->context.size;
++ op++;
++ }
++
++ BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
++ }
++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
++ else {
++ write_pda(mmu_state, TLBSTATE_OK);
++ if (read_pda(active_mm) != next)
++ out_of_line_bug();
++ if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
++ /* We were in lazy tlb mode and leave_mm disabled
++ * tlb flush IPI delivery. We must reload CR3
++ * to make sure to use no freed page tables.
++ */
++ load_cr3(next->pgd);
++ xen_new_user_pt(__pa(__user_pgd(next->pgd)));
++ load_LDT_nolock(&next->context, cpu);
++ }
++ }
++#endif
++}
++
++#define deactivate_mm(tsk,mm) do { \
++ load_gs_index(0); \
++ asm volatile("movl %0,%%fs"::"r"(0)); \
++} while(0)
++
++static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
++{
++ if (!next->context.pinned)
++ mm_pin(next);
++ switch_mm(prev, next, NULL);
++}
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/msr.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,399 @@
++#ifndef X86_64_MSR_H
++#define X86_64_MSR_H 1
++
++#ifndef __ASSEMBLY__
++/*
++ * Access to machine-specific registers (available on 586 and better only)
++ * Note: the rd* operations modify the parameters directly (without using
++ * pointer indirection), this allows gcc to optimize better
++ */
++
++#define rdmsr(msr,val1,val2) \
++ __asm__ __volatile__("rdmsr" \
++ : "=a" (val1), "=d" (val2) \
++ : "c" (msr))
++
++
++#define rdmsrl(msr,val) do { unsigned long a__,b__; \
++ __asm__ __volatile__("rdmsr" \
++ : "=a" (a__), "=d" (b__) \
++ : "c" (msr)); \
++ val = a__ | (b__<<32); \
++} while(0)
++
++#define wrmsr(msr,val1,val2) \
++ __asm__ __volatile__("wrmsr" \
++ : /* no outputs */ \
++ : "c" (msr), "a" (val1), "d" (val2))
++
++#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
++
++/* wrmsr with exception handling */
++#define wrmsr_safe(msr,a,b) ({ int ret__; \
++ asm volatile("2: wrmsr ; xorl %0,%0\n" \
++ "1:\n\t" \
++ ".section .fixup,\"ax\"\n\t" \
++ "3: movl %4,%0 ; jmp 1b\n\t" \
++ ".previous\n\t" \
++ ".section __ex_table,\"a\"\n" \
++ " .align 8\n\t" \
++ " .quad 2b,3b\n\t" \
++ ".previous" \
++ : "=a" (ret__) \
++ : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
++ ret__; })
++
++#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
++
++#define rdmsr_safe(msr,a,b) \
++ ({ int ret__; \
++ asm volatile ("1: rdmsr\n" \
++ "2:\n" \
++ ".section .fixup,\"ax\"\n" \
++ "3: movl %4,%0\n" \
++ " jmp 2b\n" \
++ ".previous\n" \
++ ".section __ex_table,\"a\"\n" \
++ " .align 8\n" \
++ " .quad 1b,3b\n" \
++ ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
++ :"c"(msr), "i"(-EIO), "0"(0)); \
++ ret__; })
++
++#define rdtsc(low,high) \
++ __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
++
++#define rdtscl(low) \
++ __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
++
++#define rdtscll(val) do { \
++ unsigned int __a,__d; \
++ asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
++ (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
++} while(0)
++
++#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
++
++#define rdpmc(counter,low,high) \
++ __asm__ __volatile__("rdpmc" \
++ : "=a" (low), "=d" (high) \
++ : "c" (counter))
++
++static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
++ unsigned int *ecx, unsigned int *edx)
++{
++ __asm__(XEN_CPUID
++ : "=a" (*eax),
++ "=b" (*ebx),
++ "=c" (*ecx),
++ "=d" (*edx)
++ : "0" (op));
++}
++
++/* Some CPUID calls want 'count' to be placed in ecx */
++static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
++ int *edx)
++{
++ __asm__(XEN_CPUID
++ : "=a" (*eax),
++ "=b" (*ebx),
++ "=c" (*ecx),
++ "=d" (*edx)
++ : "0" (op), "c" (count));
++}
++
++/*
++ * CPUID functions returning a single datum
++ */
++static inline unsigned int cpuid_eax(unsigned int op)
++{
++ unsigned int eax;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax)
++ : "0" (op)
++ : "bx", "cx", "dx");
++ return eax;
++}
++static inline unsigned int cpuid_ebx(unsigned int op)
++{
++ unsigned int eax, ebx;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax), "=b" (ebx)
++ : "0" (op)
++ : "cx", "dx" );
++ return ebx;
++}
++static inline unsigned int cpuid_ecx(unsigned int op)
++{
++ unsigned int eax, ecx;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax), "=c" (ecx)
++ : "0" (op)
++ : "bx", "dx" );
++ return ecx;
++}
++static inline unsigned int cpuid_edx(unsigned int op)
++{
++ unsigned int eax, edx;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax), "=d" (edx)
++ : "0" (op)
++ : "bx", "cx");
++ return edx;
++}
++
++#define MSR_IA32_UCODE_WRITE 0x79
++#define MSR_IA32_UCODE_REV 0x8b
++
++
++#endif
++
++/* AMD/K8 specific MSRs */
++#define MSR_EFER 0xc0000080 /* extended feature register */
++#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
++#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
++#define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */
++#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
++#define MSR_FS_BASE 0xc0000100		/* 64bit FS base */
++#define MSR_GS_BASE 0xc0000101		/* 64bit GS base */
++#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */
++/* EFER bits: */
++#define _EFER_SCE 0 /* SYSCALL/SYSRET */
++#define _EFER_LME 8 /* Long mode enable */
++#define _EFER_LMA 10 /* Long mode active (read-only) */
++#define _EFER_NX 11 /* No execute enable */
++
++#define EFER_SCE (1<<_EFER_SCE)
++#define EFER_LME (1<<_EFER_LME)
++#define EFER_LMA (1<<_EFER_LMA)
++#define EFER_NX (1<<_EFER_NX)
++
++/* Intel MSRs. Some also available on other CPUs */
++#define MSR_IA32_TSC 0x10
++#define MSR_IA32_PLATFORM_ID 0x17
++
++#define MSR_IA32_PERFCTR0 0xc1
++#define MSR_IA32_PERFCTR1 0xc2
++
++#define MSR_MTRRcap 0x0fe
++#define MSR_IA32_BBL_CR_CTL 0x119
++
++#define MSR_IA32_SYSENTER_CS 0x174
++#define MSR_IA32_SYSENTER_ESP 0x175
++#define MSR_IA32_SYSENTER_EIP 0x176
++
++#define MSR_IA32_MCG_CAP 0x179
++#define MSR_IA32_MCG_STATUS 0x17a
++#define MSR_IA32_MCG_CTL 0x17b
++
++#define MSR_IA32_EVNTSEL0 0x186
++#define MSR_IA32_EVNTSEL1 0x187
++
++#define MSR_IA32_DEBUGCTLMSR 0x1d9
++#define MSR_IA32_LASTBRANCHFROMIP 0x1db
++#define MSR_IA32_LASTBRANCHTOIP 0x1dc
++#define MSR_IA32_LASTINTFROMIP 0x1dd
++#define MSR_IA32_LASTINTTOIP 0x1de
++
++#define MSR_MTRRfix64K_00000 0x250
++#define MSR_MTRRfix16K_80000 0x258
++#define MSR_MTRRfix16K_A0000 0x259
++#define MSR_MTRRfix4K_C0000 0x268
++#define MSR_MTRRfix4K_C8000 0x269
++#define MSR_MTRRfix4K_D0000 0x26a
++#define MSR_MTRRfix4K_D8000 0x26b
++#define MSR_MTRRfix4K_E0000 0x26c
++#define MSR_MTRRfix4K_E8000 0x26d
++#define MSR_MTRRfix4K_F0000 0x26e
++#define MSR_MTRRfix4K_F8000 0x26f
++#define MSR_MTRRdefType 0x2ff
++
++#define MSR_IA32_MC0_CTL 0x400
++#define MSR_IA32_MC0_STATUS 0x401
++#define MSR_IA32_MC0_ADDR 0x402
++#define MSR_IA32_MC0_MISC 0x403
++
++#define MSR_P6_PERFCTR0 0xc1
++#define MSR_P6_PERFCTR1 0xc2
++#define MSR_P6_EVNTSEL0 0x186
++#define MSR_P6_EVNTSEL1 0x187
++
++/* K7/K8 MSRs. Not complete. See the architecture manual for a more complete list. */
++#define MSR_K7_EVNTSEL0 0xC0010000
++#define MSR_K7_PERFCTR0 0xC0010004
++#define MSR_K7_EVNTSEL1 0xC0010001
++#define MSR_K7_PERFCTR1 0xC0010005
++#define MSR_K7_EVNTSEL2 0xC0010002
++#define MSR_K7_PERFCTR2 0xC0010006
++#define MSR_K7_EVNTSEL3 0xC0010003
++#define MSR_K7_PERFCTR3 0xC0010007
++#define MSR_K8_TOP_MEM1 0xC001001A
++#define MSR_K8_TOP_MEM2 0xC001001D
++#define MSR_K8_SYSCFG 0xC0010010
++#define MSR_K8_HWCR 0xC0010015
++
++/* K6 MSRs */
++#define MSR_K6_EFER 0xC0000080
++#define MSR_K6_STAR 0xC0000081
++#define MSR_K6_WHCR 0xC0000082
++#define MSR_K6_UWCCR 0xC0000085
++#define MSR_K6_PSOR 0xC0000087
++#define MSR_K6_PFIR 0xC0000088
++
++/* Centaur-Hauls/IDT defined MSRs. */
++#define MSR_IDT_FCR1 0x107
++#define MSR_IDT_FCR2 0x108
++#define MSR_IDT_FCR3 0x109
++#define MSR_IDT_FCR4 0x10a
++
++#define MSR_IDT_MCR0 0x110
++#define MSR_IDT_MCR1 0x111
++#define MSR_IDT_MCR2 0x112
++#define MSR_IDT_MCR3 0x113
++#define MSR_IDT_MCR4 0x114
++#define MSR_IDT_MCR5 0x115
++#define MSR_IDT_MCR6 0x116
++#define MSR_IDT_MCR7 0x117
++#define MSR_IDT_MCR_CTRL 0x120
++
++/* VIA Cyrix defined MSRs */
++#define MSR_VIA_FCR 0x1107
++#define MSR_VIA_LONGHAUL 0x110a
++#define MSR_VIA_RNG 0x110b
++#define MSR_VIA_BCR2 0x1147
++
++/* Intel defined MSRs. */
++#define MSR_IA32_P5_MC_ADDR 0
++#define MSR_IA32_P5_MC_TYPE 1
++#define MSR_IA32_PLATFORM_ID 0x17
++#define MSR_IA32_EBL_CR_POWERON 0x2a
++
++#define MSR_IA32_APICBASE 0x1b
++#define MSR_IA32_APICBASE_BSP (1<<8)
++#define MSR_IA32_APICBASE_ENABLE (1<<11)
++#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
++
++/* P4/Xeon+ specific */
++#define MSR_IA32_MCG_EAX 0x180
++#define MSR_IA32_MCG_EBX 0x181
++#define MSR_IA32_MCG_ECX 0x182
++#define MSR_IA32_MCG_EDX 0x183
++#define MSR_IA32_MCG_ESI 0x184
++#define MSR_IA32_MCG_EDI 0x185
++#define MSR_IA32_MCG_EBP 0x186
++#define MSR_IA32_MCG_ESP 0x187
++#define MSR_IA32_MCG_EFLAGS 0x188
++#define MSR_IA32_MCG_EIP 0x189
++#define MSR_IA32_MCG_RESERVED 0x18A
++
++#define MSR_P6_EVNTSEL0 0x186
++#define MSR_P6_EVNTSEL1 0x187
++
++#define MSR_IA32_PERF_STATUS 0x198
++#define MSR_IA32_PERF_CTL 0x199
++
++#define MSR_IA32_THERM_CONTROL 0x19a
++#define MSR_IA32_THERM_INTERRUPT 0x19b
++#define MSR_IA32_THERM_STATUS 0x19c
++#define MSR_IA32_MISC_ENABLE 0x1a0
++
++#define MSR_IA32_DEBUGCTLMSR 0x1d9
++#define MSR_IA32_LASTBRANCHFROMIP 0x1db
++#define MSR_IA32_LASTBRANCHTOIP 0x1dc
++#define MSR_IA32_LASTINTFROMIP 0x1dd
++#define MSR_IA32_LASTINTTOIP 0x1de
++
++#define MSR_IA32_MC0_CTL 0x400
++#define MSR_IA32_MC0_STATUS 0x401
++#define MSR_IA32_MC0_ADDR 0x402
++#define MSR_IA32_MC0_MISC 0x403
++
++/* Pentium IV performance counter MSRs */
++#define MSR_P4_BPU_PERFCTR0 0x300
++#define MSR_P4_BPU_PERFCTR1 0x301
++#define MSR_P4_BPU_PERFCTR2 0x302
++#define MSR_P4_BPU_PERFCTR3 0x303
++#define MSR_P4_MS_PERFCTR0 0x304
++#define MSR_P4_MS_PERFCTR1 0x305
++#define MSR_P4_MS_PERFCTR2 0x306
++#define MSR_P4_MS_PERFCTR3 0x307
++#define MSR_P4_FLAME_PERFCTR0 0x308
++#define MSR_P4_FLAME_PERFCTR1 0x309
++#define MSR_P4_FLAME_PERFCTR2 0x30a
++#define MSR_P4_FLAME_PERFCTR3 0x30b
++#define MSR_P4_IQ_PERFCTR0 0x30c
++#define MSR_P4_IQ_PERFCTR1 0x30d
++#define MSR_P4_IQ_PERFCTR2 0x30e
++#define MSR_P4_IQ_PERFCTR3 0x30f
++#define MSR_P4_IQ_PERFCTR4 0x310
++#define MSR_P4_IQ_PERFCTR5 0x311
++#define MSR_P4_BPU_CCCR0 0x360
++#define MSR_P4_BPU_CCCR1 0x361
++#define MSR_P4_BPU_CCCR2 0x362
++#define MSR_P4_BPU_CCCR3 0x363
++#define MSR_P4_MS_CCCR0 0x364
++#define MSR_P4_MS_CCCR1 0x365
++#define MSR_P4_MS_CCCR2 0x366
++#define MSR_P4_MS_CCCR3 0x367
++#define MSR_P4_FLAME_CCCR0 0x368
++#define MSR_P4_FLAME_CCCR1 0x369
++#define MSR_P4_FLAME_CCCR2 0x36a
++#define MSR_P4_FLAME_CCCR3 0x36b
++#define MSR_P4_IQ_CCCR0 0x36c
++#define MSR_P4_IQ_CCCR1 0x36d
++#define MSR_P4_IQ_CCCR2 0x36e
++#define MSR_P4_IQ_CCCR3 0x36f
++#define MSR_P4_IQ_CCCR4 0x370
++#define MSR_P4_IQ_CCCR5 0x371
++#define MSR_P4_ALF_ESCR0 0x3ca
++#define MSR_P4_ALF_ESCR1 0x3cb
++#define MSR_P4_BPU_ESCR0 0x3b2
++#define MSR_P4_BPU_ESCR1 0x3b3
++#define MSR_P4_BSU_ESCR0 0x3a0
++#define MSR_P4_BSU_ESCR1 0x3a1
++#define MSR_P4_CRU_ESCR0 0x3b8
++#define MSR_P4_CRU_ESCR1 0x3b9
++#define MSR_P4_CRU_ESCR2 0x3cc
++#define MSR_P4_CRU_ESCR3 0x3cd
++#define MSR_P4_CRU_ESCR4 0x3e0
++#define MSR_P4_CRU_ESCR5 0x3e1
++#define MSR_P4_DAC_ESCR0 0x3a8
++#define MSR_P4_DAC_ESCR1 0x3a9
++#define MSR_P4_FIRM_ESCR0 0x3a4
++#define MSR_P4_FIRM_ESCR1 0x3a5
++#define MSR_P4_FLAME_ESCR0 0x3a6
++#define MSR_P4_FLAME_ESCR1 0x3a7
++#define MSR_P4_FSB_ESCR0 0x3a2
++#define MSR_P4_FSB_ESCR1 0x3a3
++#define MSR_P4_IQ_ESCR0 0x3ba
++#define MSR_P4_IQ_ESCR1 0x3bb
++#define MSR_P4_IS_ESCR0 0x3b4
++#define MSR_P4_IS_ESCR1 0x3b5
++#define MSR_P4_ITLB_ESCR0 0x3b6
++#define MSR_P4_ITLB_ESCR1 0x3b7
++#define MSR_P4_IX_ESCR0 0x3c8
++#define MSR_P4_IX_ESCR1 0x3c9
++#define MSR_P4_MOB_ESCR0 0x3aa
++#define MSR_P4_MOB_ESCR1 0x3ab
++#define MSR_P4_MS_ESCR0 0x3c0
++#define MSR_P4_MS_ESCR1 0x3c1
++#define MSR_P4_PMH_ESCR0 0x3ac
++#define MSR_P4_PMH_ESCR1 0x3ad
++#define MSR_P4_RAT_ESCR0 0x3bc
++#define MSR_P4_RAT_ESCR1 0x3bd
++#define MSR_P4_SAAT_ESCR0 0x3ae
++#define MSR_P4_SAAT_ESCR1 0x3af
++#define MSR_P4_SSU_ESCR0 0x3be
++#define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */
++#define MSR_P4_TBPU_ESCR0 0x3c2
++#define MSR_P4_TBPU_ESCR1 0x3c3
++#define MSR_P4_TC_ESCR0 0x3c4
++#define MSR_P4_TC_ESCR1 0x3c5
++#define MSR_P4_U2L_ESCR0 0x3b0
++#define MSR_P4_U2L_ESCR1 0x3b1
++
++#endif
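The rdtscll() macro above reassembles the 64-bit time-stamp counter from the EDX:EAX halves that RDTSC returns. A minimal user-space sketch of the same idiom, assuming GCC on x86-64 (illustrative only, not part of this patch):

#include <stdio.h>

/* Read the TSC the same way rdtscll() does: RDTSC splits the counter
 * across EDX:EAX, so merge the two halves into one 64-bit value. */
static unsigned long read_tsc(void)
{
	unsigned int lo, hi;

	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((unsigned long)hi << 32) | lo;
}

int main(void)
{
	unsigned long a = read_tsc();
	unsigned long b = read_tsc();

	printf("tsc delta: %lu cycles\n", b - a);
	return 0;
}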
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/nmi.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,93 @@
++/*
++ * linux/include/asm-x86_64/nmi.h
++ */
++#ifndef ASM_NMI_H
++#define ASM_NMI_H
++
++#include <linux/pm.h>
++#include <asm/io.h>
++
++#include <xen/interface/nmi.h>
++
++struct pt_regs;
++
++typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
++
++/**
++ * set_nmi_callback
++ *
++ * Set a handler for an NMI. Only one handler may be
++ * set. The handler should return 1 if the NMI was handled.
++ */
++void set_nmi_callback(nmi_callback_t callback);
++
++/**
++ * unset_nmi_callback
++ *
++ * Remove the handler previously set.
++ */
++void unset_nmi_callback(void);
++
++#ifdef CONFIG_PM
++
++/** Replace the PM callback routine for NMI. */
++struct pm_dev * set_nmi_pm_callback(pm_callback callback);
++
++/** Unset the PM callback routine back to the default. */
++void unset_nmi_pm_callback(struct pm_dev * dev);
++
++#else
++
++static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback)
++{
++ return 0;
++}
++
++static inline void unset_nmi_pm_callback(struct pm_dev * dev)
++{
++}
++
++#endif /* CONFIG_PM */
++
++extern void default_do_nmi(struct pt_regs *);
++extern void die_nmi(char *str, struct pt_regs *regs);
++
++static inline unsigned char get_nmi_reason(void)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ unsigned char reason = 0;
++
++ /* construct a value which looks like it came from
++ * port 0x61.
++ */
++ if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
++ reason |= 0x40;
++ if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
++ reason |= 0x80;
++
++ return reason;
++}
++
++extern int panic_on_timeout;
++extern int unknown_nmi_panic;
++
++extern int check_nmi_watchdog(void);
++
++extern void setup_apic_nmi_watchdog (void);
++extern int reserve_lapic_nmi(void);
++extern void release_lapic_nmi(void);
++extern void disable_timer_nmi_watchdog(void);
++extern void enable_timer_nmi_watchdog(void);
++extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
++
++extern void nmi_watchdog_default(void);
++extern int setup_nmi_watchdog(char *);
++
++extern unsigned int nmi_watchdog;
++#define NMI_DEFAULT -1
++#define NMI_NONE 0
++#define NMI_IO_APIC 1
++#define NMI_LOCAL_APIC 2
++#define NMI_INVALID 3
++
++#endif /* ASM_NMI_H */
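get_nmi_reason() above synthesizes a byte shaped like a read of I/O port 0x61 (bit 6 = I/O channel check, bit 7 = memory parity/SERR) from the NMI reason bits that Xen publishes in the shared info page. A rough user-space sketch of that folding, with made-up stand-in flags (not part of this patch):

#include <stdio.h>

#define NMIREASON_IO_ERROR     (1u << 0)  /* stand-in for _XEN_NMIREASON_io_error */
#define NMIREASON_PARITY_ERROR (1u << 1)  /* stand-in for _XEN_NMIREASON_parity_error */

/* Fold hypervisor-reported reason bits into a port-0x61-style byte. */
static unsigned char fake_nmi_reason(unsigned long xen_reason)
{
	unsigned char reason = 0;

	if (xen_reason & NMIREASON_IO_ERROR)
		reason |= 0x40;		/* I/O channel check */
	if (xen_reason & NMIREASON_PARITY_ERROR)
		reason |= 0x80;		/* memory parity error */
	return reason;
}

int main(void)
{
	printf("reason = %#x\n", fake_nmi_reason(NMIREASON_PARITY_ERROR));
	return 0;
}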
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/page.h 2007-08-27 14:02:04.000000000 -0400
+@@ -0,0 +1,214 @@
++#ifndef _X86_64_PAGE_H
++#define _X86_64_PAGE_H
++
++/* #include <linux/string.h> */
++#ifndef __ASSEMBLY__
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <asm/bug.h>
++#endif
++#include <xen/interface/xen.h>
++
++/*
++ * Need to repeat this here in order to not include pgtable.h (which in turn
++ * depends on definitions made here), but to be able to use the symbolic
++ * below. The preprocessor will warn if the two definitions aren't identical.
++ */
++#define _PAGE_PRESENT 0x001
++
++#define arch_free_page(_page,_order) \
++({ int foreign = PageForeign(_page); \
++ if (foreign) \
++ PageForeignDestructor(_page); \
++ foreign; \
++})
++#define HAVE_ARCH_FREE_PAGE
++
++/* PAGE_SHIFT determines the page size */
++#define PAGE_SHIFT 12
++#ifdef __ASSEMBLY__
++#define PAGE_SIZE (0x1 << PAGE_SHIFT)
++#else
++#define PAGE_SIZE (1UL << PAGE_SHIFT)
++#endif
++#define PAGE_MASK (~(PAGE_SIZE-1))
++
++/* See Documentation/x86_64/mm.txt for a description of the memory map. */
++#define __PHYSICAL_MASK_SHIFT 46
++#define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1)
++#define __VIRTUAL_MASK_SHIFT 48
++#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
++
++#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
++
++#define THREAD_ORDER 1
++#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
++#define CURRENT_MASK (~(THREAD_SIZE-1))
++
++#define EXCEPTION_STACK_ORDER 0
++#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
++
++#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
++#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
++
++#define IRQSTACK_ORDER 2
++#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
++
++#define STACKFAULT_STACK 1
++#define DOUBLEFAULT_STACK 2
++#define NMI_STACK 3
++#define DEBUG_STACK 4
++#define MCE_STACK 5
++#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
++
++#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
++#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
++
++#define HPAGE_SHIFT PMD_SHIFT
++#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
++#define HPAGE_MASK (~(HPAGE_SIZE - 1))
++#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
++
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++
++extern unsigned long end_pfn;
++
++#include <asm/maddr.h>
++
++void clear_page(void *);
++void copy_page(void *, void *);
++
++#define clear_user_page(page, vaddr, pg) clear_page(page)
++#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
++
++#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
++#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
++
++/*
++ * These are used to make use of C type-checking..
++ */
++typedef struct { unsigned long pte; } pte_t;
++typedef struct { unsigned long pmd; } pmd_t;
++typedef struct { unsigned long pud; } pud_t;
++typedef struct { unsigned long pgd; } pgd_t;
++#define PTE_MASK PHYSICAL_PAGE_MASK
++
++typedef struct { unsigned long pgprot; } pgprot_t;
++
++#define pte_val(x) (((x).pte & _PAGE_PRESENT) ? \
++ pte_machine_to_phys((x).pte) : \
++ (x).pte)
++#define pte_val_ma(x) ((x).pte)
++
++static inline unsigned long pmd_val(pmd_t x)
++{
++ unsigned long ret = x.pmd;
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
++#else
++ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++#endif
++ return ret;
++}
++
++static inline unsigned long pud_val(pud_t x)
++{
++ unsigned long ret = x.pud;
++ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++ return ret;
++}
++
++static inline unsigned long pgd_val(pgd_t x)
++{
++ unsigned long ret = x.pgd;
++ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++ return ret;
++}
++
++#define pgprot_val(x) ((x).pgprot)
++
++static inline pte_t __pte(unsigned long x)
++{
++ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
++ return ((pte_t) { (x) });
++}
++
++static inline pmd_t __pmd(unsigned long x)
++{
++ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
++ return ((pmd_t) { (x) });
++}
++
++static inline pud_t __pud(unsigned long x)
++{
++ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
++ return ((pud_t) { (x) });
++}
++
++static inline pgd_t __pgd(unsigned long x)
++{
++ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
++ return ((pgd_t) { (x) });
++}
++
++#define __pgprot(x) ((pgprot_t) { (x) } )
++
++#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
++#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
++#define __START_KERNEL_map 0xffffffff80000000UL
++#define __PAGE_OFFSET 0xffff880000000000UL
++
++#else
++#define __PHYSICAL_START CONFIG_PHYSICAL_START
++#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
++#define __START_KERNEL_map 0xffffffff80000000
++#define __PAGE_OFFSET 0xffff880000000000
++#endif /* !__ASSEMBLY__ */
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++#undef LOAD_OFFSET
++#define LOAD_OFFSET 0
++#endif
++
++/* to align the pointer to the (next) page boundary */
++#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
++
++#define KERNEL_TEXT_SIZE (40UL*1024*1024)
++#define KERNEL_TEXT_START 0xffffffff80000000UL
++
++#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
++
++/* Note: __pa(&symbol_visible_to_c) should always be replaced with __pa_symbol.
++ Otherwise you risk miscompilation. */
++#define __pa(x) (((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
++/* __pa_symbol should be used for C visible symbols.
++ This seems to be the official gcc blessed way to do such arithmetic. */
++#define __pa_symbol(x) \
++ ({unsigned long v; \
++ asm("" : "=r" (v) : "0" (x)); \
++ __pa(v); })
++
++#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
++#define __boot_va(x) __va(x)
++#define __boot_pa(x) __pa(x)
++#ifdef CONFIG_FLATMEM
++#define pfn_valid(pfn) ((pfn) < end_pfn)
++#endif
++
++#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
++#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
++#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
++
++#define VM_DATA_DEFAULT_FLAGS \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++
++#define __HAVE_ARCH_GATE_AREA 1
++
++#include <asm-generic/memory_model.h>
++#include <asm-generic/page.h>
++
++#endif /* __KERNEL__ */
++
++#endif /* _X86_64_PAGE_H */
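__pa() above distinguishes two virtual ranges: kernel-text addresses above __START_KERNEL_map and direct-mapped addresses at __PAGE_OFFSET, converting each by subtracting its base. A stand-alone sketch of that arithmetic using the same constants (illustrative only, not part of this patch):

#include <stdio.h>

#define KERNEL_MAP_BASE 0xffffffff80000000UL	/* __START_KERNEL_map */
#define DIRECT_MAP_BASE 0xffff880000000000UL	/* __PAGE_OFFSET */

/* Same two-range conversion as the __pa() macro above. */
static unsigned long my_pa(unsigned long va)
{
	return va >= KERNEL_MAP_BASE ? va - KERNEL_MAP_BASE
				     : va - DIRECT_MAP_BASE;
}

int main(void)
{
	printf("%#lx\n", my_pa(0xffffffff80201000UL));	/* kernel text -> 0x201000 */
	printf("%#lx\n", my_pa(0xffff880001234000UL));	/* direct map  -> 0x1234000 */
	return 0;
}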
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/pci.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,166 @@
++#ifndef __x8664_PCI_H
++#define __x8664_PCI_H
++
++#include <asm/io.h>
++
++#ifdef __KERNEL__
++
++#include <linux/mm.h> /* for struct page */
++
++/* Can be used to override the logic in pci_scan_bus for skipping
++ already-configured bus numbers - to be used for buggy BIOSes
++ or architectures with incomplete PCI setup by the loader */
++
++#ifdef CONFIG_PCI
++extern unsigned int pcibios_assign_all_busses(void);
++#else
++#define pcibios_assign_all_busses() 0
++#endif
++#define pcibios_scan_all_fns(a, b) 0
++
++extern unsigned long pci_mem_start;
++#define PCIBIOS_MIN_IO 0x1000
++#define PCIBIOS_MIN_MEM (pci_mem_start)
++
++#define PCIBIOS_MIN_CARDBUS_IO 0x4000
++
++void pcibios_config_init(void);
++struct pci_bus * pcibios_scan_root(int bus);
++extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
++extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
++
++void pcibios_set_master(struct pci_dev *dev);
++void pcibios_penalize_isa_irq(int irq, int active);
++struct irq_routing_table *pcibios_get_irq_routing_table(void);
++int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
++
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <asm/scatterlist.h>
++#include <linux/string.h>
++#include <asm/page.h>
++
++extern void pci_iommu_alloc(void);
++extern int iommu_setup(char *opt);
++
++/* The PCI address space does equal the physical memory
++ * address space. The networking and block device layers use
++ * this boolean for bounce buffer decisions
++ *
++ * On AMD64 it mostly equals, but we set it to zero if a hardware
++ * IOMMU (gart) of sotware IOMMU (swiotlb) is available.
++ */
++#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
++
++#if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
++
++/*
++ * x86-64 always supports DAC, but sometimes it is useful to force
++ * devices through the IOMMU to get automatic sg list merging.
++ * Optional right now.
++ */
++extern int iommu_sac_force;
++#define pci_dac_dma_supported(pci_dev, mask) (!iommu_sac_force)
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
++ dma_addr_t ADDR_NAME;
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
++ __u32 LEN_NAME;
++#define pci_unmap_addr(PTR, ADDR_NAME) \
++ ((PTR)->ADDR_NAME)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
++ (((PTR)->ADDR_NAME) = (VAL))
++#define pci_unmap_len(PTR, LEN_NAME) \
++ ((PTR)->LEN_NAME)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
++ (((PTR)->LEN_NAME) = (VAL))
++
++#elif defined(CONFIG_SWIOTLB)
++
++#define pci_dac_dma_supported(pci_dev, mask) 1
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
++ dma_addr_t ADDR_NAME;
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
++ __u32 LEN_NAME;
++#define pci_unmap_addr(PTR, ADDR_NAME) \
++ ((PTR)->ADDR_NAME)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
++ (((PTR)->ADDR_NAME) = (VAL))
++#define pci_unmap_len(PTR, LEN_NAME) \
++ ((PTR)->LEN_NAME)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
++ (((PTR)->LEN_NAME) = (VAL))
++
++#else
++/* No IOMMU */
++
++#define pci_dac_dma_supported(pci_dev, mask) 1
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
++#define pci_unmap_addr(PTR, ADDR_NAME) (0)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
++#define pci_unmap_len(PTR, LEN_NAME) (0)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
++
++#endif
++
++#include <asm-generic/pci-dma-compat.h>
++
++static inline dma64_addr_t
++pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
++{
++ return ((dma64_addr_t) page_to_phys(page) +
++ (dma64_addr_t) offset);
++}
++
++static inline struct page *
++pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
++{
++ return virt_to_page(__va(dma_addr));
++}
++
++static inline unsigned long
++pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
++{
++ return (dma_addr & ~PAGE_MASK);
++}
++
++static inline void
++pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++{
++}
++
++static inline void
++pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++{
++ flush_write_buffers();
++}
++
++#ifdef CONFIG_PCI
++static inline void pci_dma_burst_advice(struct pci_dev *pdev,
++ enum pci_dma_burst_strategy *strat,
++ unsigned long *strategy_parameter)
++{
++ *strat = PCI_DMA_BURST_INFINITY;
++ *strategy_parameter = ~0UL;
++}
++#endif
++
++#define HAVE_PCI_MMAP
++extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
++ enum pci_mmap_state mmap_state, int write_combine);
++
++static inline void pcibios_add_platform_entries(struct pci_dev *dev)
++{
++}
++
++#endif /* __KERNEL__ */
++
++/* generic pci stuff */
++#ifdef CONFIG_PCI
++#include <asm-generic/pci.h>
++#endif
++
++#endif /* __x8664_PCI_H */
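The DECLARE_PCI_UNMAP_ADDR/pci_unmap_* macros above let drivers carry DMA-unmap bookkeeping that compiles away entirely when no IOMMU is configured. A self-contained sketch of that pattern with hypothetical names (not part of this patch):

#include <stdio.h>

#define HAVE_IOMMU 0	/* flip to 1 to keep the bookkeeping field */

#if HAVE_IOMMU
#define DECLARE_UNMAP_ADDR(name)	unsigned long name;
#define unmap_addr_set(ptr, name, v)	((ptr)->name = (v))
#define unmap_addr(ptr, name)		((ptr)->name)
#else
#define DECLARE_UNMAP_ADDR(name)
#define unmap_addr_set(ptr, name, v)	do { } while (0)
#define unmap_addr(ptr, name)		(0)
#endif

struct dma_desc {
	int id;
	DECLARE_UNMAP_ADDR(mapping)	/* vanishes when HAVE_IOMMU is 0 */
};

int main(void)
{
	struct dma_desc d = { .id = 7 };

	unmap_addr_set(&d, mapping, 0x1000);
	printf("sizeof(desc)=%zu mapping=%#lx\n",
	       sizeof(d), (unsigned long)unmap_addr(&d, mapping));
	return 0;
}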
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/pgalloc.h 2007-08-27 14:02:10.000000000 -0400
+@@ -0,0 +1,204 @@
++#ifndef _X86_64_PGALLOC_H
++#define _X86_64_PGALLOC_H
++
++#include <asm/fixmap.h>
++#include <asm/pda.h>
++#include <linux/threads.h>
++#include <linux/mm.h>
++#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
++
++#include <xen/features.h>
++void make_page_readonly(void *va, unsigned int feature);
++void make_page_writable(void *va, unsigned int feature);
++void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
++void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
++
++#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
++
++static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
++{
++ set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
++}
++
++static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
++{
++ if (unlikely((mm)->context.pinned)) {
++ BUG_ON(HYPERVISOR_update_va_mapping(
++ (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
++ pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
++ set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
++ } else {
++ *(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT));
++ }
++}
++
++static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ if (unlikely((mm)->context.pinned)) {
++ BUG_ON(HYPERVISOR_update_va_mapping(
++ (unsigned long)pmd,
++ pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT,
++ PAGE_KERNEL_RO), 0));
++ set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
++ } else {
++ *(pud) = __pud(_PAGE_TABLE | __pa(pmd));
++ }
++}
++
++/*
++ * We need to use the batch mode here, but pgd_populate() won't
++ * be called frequently.
++ */
++static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
++{
++ if (unlikely((mm)->context.pinned)) {
++ BUG_ON(HYPERVISOR_update_va_mapping(
++ (unsigned long)pud,
++ pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT,
++ PAGE_KERNEL_RO), 0));
++ set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
++ set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
++ } else {
++ *(pgd) = __pgd(_PAGE_TABLE | __pa(pud));
++ *(__user_pgd(pgd)) = *(pgd);
++ }
++}
++
++extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
++extern void pte_free(struct page *pte);
++
++static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
++{
++ struct page *pg;
++
++ pg = pte_alloc_one(mm, addr);
++ return pg ? page_address(pg) : NULL;
++}
++
++static inline void pmd_free(pmd_t *pmd)
++{
++ BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
++ pte_free(virt_to_page(pmd));
++}
++
++static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
++{
++ struct page *pg;
++
++ pg = pte_alloc_one(mm, addr);
++ return pg ? page_address(pg) : NULL;
++}
++
++static inline void pud_free(pud_t *pud)
++{
++ BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
++ pte_free(virt_to_page(pud));
++}
++
++static inline void pgd_list_add(pgd_t *pgd)
++{
++ struct page *page = virt_to_page(pgd);
++
++ spin_lock(&pgd_lock);
++ page->index = (pgoff_t)pgd_list;
++ if (pgd_list)
++ pgd_list->private = (unsigned long)&page->index;
++ pgd_list = page;
++ page->private = (unsigned long)&pgd_list;
++ spin_unlock(&pgd_lock);
++}
++
++static inline void pgd_list_del(pgd_t *pgd)
++{
++ struct page *next, **pprev, *page = virt_to_page(pgd);
++
++ spin_lock(&pgd_lock);
++ next = (struct page *)page->index;
++ pprev = (struct page **)page->private;
++ *pprev = next;
++ if (next)
++ next->private = (unsigned long)pprev;
++ spin_unlock(&pgd_lock);
++}
++
++static inline pgd_t *pgd_alloc(struct mm_struct *mm)
++{
++ /*
++ * We allocate two contiguous pages for kernel and user.
++ */
++ unsigned boundary;
++ pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
++ if (!pgd)
++ return NULL;
++ pgd_list_add(pgd);
++ /*
++ * Copy kernel pointers in from init.
++ * Could keep a freelist or slab cache of those because the kernel
++ * part never changes.
++ */
++ boundary = pgd_index(__PAGE_OFFSET);
++ memset(pgd, 0, boundary * sizeof(pgd_t));
++ memcpy(pgd + boundary,
++ init_level4_pgt + boundary,
++ (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
++
++ memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
++ /*
++ * Set level3_user_pgt for vsyscall area
++ */
++ set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START),
++ mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
++ return pgd;
++}
++
++static inline void pgd_free(pgd_t *pgd)
++{
++ pte_t *ptep = virt_to_ptep(pgd);
++
++ if (!pte_write(*ptep)) {
++ xen_pgd_unpin(__pa(pgd));
++ BUG_ON(HYPERVISOR_update_va_mapping(
++ (unsigned long)pgd,
++ pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL),
++ 0));
++ }
++
++ ptep = virt_to_ptep(__user_pgd(pgd));
++
++ if (!pte_write(*ptep)) {
++ xen_pgd_unpin(__pa(__user_pgd(pgd)));
++ BUG_ON(HYPERVISOR_update_va_mapping(
++ (unsigned long)__user_pgd(pgd),
++ pfn_pte(virt_to_phys(__user_pgd(pgd))>>PAGE_SHIFT,
++ PAGE_KERNEL),
++ 0));
++ }
++
++ pgd_list_del(pgd);
++ free_pages((unsigned long)pgd, 1);
++}
++
++static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
++{
++ pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
++ if (pte)
++ make_page_readonly(pte, XENFEAT_writable_page_tables);
++
++ return pte;
++}
++
++/* Should really implement gc for free page table pages. This could be
++ done with a reference count in struct page. */
++
++static inline void pte_free_kernel(pte_t *pte)
++{
++ BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
++ make_page_writable(pte, XENFEAT_writable_page_tables);
++ free_page((unsigned long)pte);
++}
++
++#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
++#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
++#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
++
++#endif /* _X86_64_PGALLOC_H */
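pgd_list_add()/pgd_list_del() above keep pgd pages on a singly linked list in which every entry also stores the address of the pointer that references it, so an entry can unlink itself without walking the list. A generic user-space sketch of that structure (hypothetical node type, not part of this patch):

#include <stdio.h>

struct node {
	struct node *next;
	struct node **pprev;	/* address of the pointer that points at us */
	int id;
};

static struct node *head;

static void list_add(struct node *n)
{
	n->next = head;
	if (head)
		head->pprev = &n->next;
	head = n;
	n->pprev = &head;
}

static void list_del(struct node *n)
{
	*n->pprev = n->next;	/* fix whoever pointed at us */
	if (n->next)
		n->next->pprev = n->pprev;
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };

	list_add(&a);
	list_add(&b);
	list_del(&a);
	for (struct node *p = head; p; p = p->next)
		printf("node %d\n", p->id);
	return 0;
}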
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/pgtable.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,573 @@
++#ifndef _X86_64_PGTABLE_H
++#define _X86_64_PGTABLE_H
++
++/*
++ * This file contains the functions and defines necessary to modify and use
++ * the x86-64 page table tree.
++ */
++#include <asm/processor.h>
++#include <asm/fixmap.h>
++#include <asm/bitops.h>
++#include <linux/threads.h>
++#include <linux/sched.h>
++#include <asm/pda.h>
++#ifdef CONFIG_XEN
++#include <asm/hypervisor.h>
++
++extern pud_t level3_user_pgt[512];
++extern pud_t init_level4_user_pgt[];
++
++extern void xen_init_pt(void);
++
++#define virt_to_ptep(__va) \
++({ \
++ pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
++ pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va)); \
++ pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va)); \
++ pte_offset_kernel(__pmd, (unsigned long)(__va)); \
++})
++
++#define arbitrary_virt_to_machine(__va) \
++({ \
++ maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
++ m | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
++})
++#endif
++
++extern pud_t level3_kernel_pgt[512];
++extern pud_t level3_physmem_pgt[512];
++extern pud_t level3_ident_pgt[512];
++extern pmd_t level2_kernel_pgt[512];
++extern pgd_t init_level4_pgt[];
++extern pgd_t boot_level4_pgt[];
++extern unsigned long __supported_pte_mask;
++
++#define swapper_pg_dir init_level4_pgt
++
++extern int nonx_setup(char *str);
++extern void paging_init(void);
++extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
++
++extern unsigned long pgkern_mask;
++
++/*
++ * ZERO_PAGE is a global shared page that is always zero: used
++ * for zero-mapped memory areas etc..
++ */
++extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
++#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
++
++/*
++ * PGDIR_SHIFT determines what a top-level page table entry can map
++ */
++#define PGDIR_SHIFT 39
++#define PTRS_PER_PGD 512
++
++/*
++ * 3rd level page
++ */
++#define PUD_SHIFT 30
++#define PTRS_PER_PUD 512
++
++/*
++ * PMD_SHIFT determines the size of the area a middle-level
++ * page table can map
++ */
++#define PMD_SHIFT 21
++#define PTRS_PER_PMD 512
++
++/*
++ * entries per page directory level
++ */
++#define PTRS_PER_PTE 512
++
++#define pte_ERROR(e) \
++ printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
++#define pmd_ERROR(e) \
++ printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
++#define pud_ERROR(e) \
++ printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
++#define pgd_ERROR(e) \
++ printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
++
++#define pgd_none(x) (!pgd_val(x))
++#define pud_none(x) (!pud_val(x))
++
++static inline void set_pte(pte_t *dst, pte_t val)
++{
++ *dst = val;
++}
++
++#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval))
++#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval))
++#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))
++
++static inline void pud_clear (pud_t * pud)
++{
++ set_pud(pud, __pud(0));
++}
++
++#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
++
++static inline void pgd_clear (pgd_t * pgd)
++{
++ set_pgd(pgd, __pgd(0));
++ set_pgd(__user_pgd(pgd), __pgd(0));
++}
++
++#define pud_page(pud) \
++ ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
++
++#define pte_same(a, b) ((a).pte == (b).pte)
++
++#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
++
++#define PMD_SIZE (1UL << PMD_SHIFT)
++#define PMD_MASK (~(PMD_SIZE-1))
++#define PUD_SIZE (1UL << PUD_SHIFT)
++#define PUD_MASK (~(PUD_SIZE-1))
++#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
++#define PGDIR_MASK (~(PGDIR_SIZE-1))
++
++#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
++#define FIRST_USER_ADDRESS 0
++
++#ifndef __ASSEMBLY__
++#define MAXMEM 0x3fffffffffffUL
++#define VMALLOC_START 0xffffc20000000000UL
++#define VMALLOC_END 0xffffe1ffffffffffUL
++#define MODULES_VADDR 0xffffffff88000000UL
++#define MODULES_END 0xfffffffffff00000UL
++#define MODULES_LEN (MODULES_END - MODULES_VADDR)
++
++#define _PAGE_BIT_PRESENT 0
++#define _PAGE_BIT_RW 1
++#define _PAGE_BIT_USER 2
++#define _PAGE_BIT_PWT 3
++#define _PAGE_BIT_PCD 4
++#define _PAGE_BIT_ACCESSED 5
++#define _PAGE_BIT_DIRTY 6
++#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
++#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
++#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
++
++#define _PAGE_PRESENT 0x001
++#define _PAGE_RW 0x002
++#define _PAGE_USER 0x004
++#define _PAGE_PWT 0x008
++#define _PAGE_PCD 0x010
++#define _PAGE_ACCESSED 0x020
++#define _PAGE_DIRTY 0x040
++#define _PAGE_PSE 0x080 /* 2MB page */
++#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
++#define _PAGE_GLOBAL 0x100 /* Global TLB entry */
++
++#define _PAGE_PROTNONE 0x080 /* If not present */
++#define _PAGE_NX (1UL<<_PAGE_BIT_NX)
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++extern unsigned int __kernel_page_user;
++#else
++#define __kernel_page_user 0
++#endif
++
++#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
++#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | __kernel_page_user)
++
++#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
++
++#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
++#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_COPY PAGE_COPY_NOEXEC
++#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define __PAGE_KERNEL \
++ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
++#define __PAGE_KERNEL_EXEC \
++ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | __kernel_page_user)
++#define __PAGE_KERNEL_NOCACHE \
++ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
++#define __PAGE_KERNEL_RO \
++ (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
++#define __PAGE_KERNEL_VSYSCALL \
++ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
++ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
++#define __PAGE_KERNEL_LARGE \
++ (__PAGE_KERNEL | _PAGE_PSE)
++#define __PAGE_KERNEL_LARGE_EXEC \
++ (__PAGE_KERNEL_EXEC | _PAGE_PSE)
++
++/*
++ * We do not support the GLOBAL page bit in xenolinux64.
++ */
++#define MAKE_GLOBAL(x) __pgprot((x))
++
++#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
++#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
++#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
++#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
++#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
++#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
++#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
++#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
++
++/* xwr */
++#define __P000 PAGE_NONE
++#define __P001 PAGE_READONLY
++#define __P010 PAGE_COPY
++#define __P011 PAGE_COPY
++#define __P100 PAGE_READONLY_EXEC
++#define __P101 PAGE_READONLY_EXEC
++#define __P110 PAGE_COPY_EXEC
++#define __P111 PAGE_COPY_EXEC
++
++#define __S000 PAGE_NONE
++#define __S001 PAGE_READONLY
++#define __S010 PAGE_SHARED
++#define __S011 PAGE_SHARED
++#define __S100 PAGE_READONLY_EXEC
++#define __S101 PAGE_READONLY_EXEC
++#define __S110 PAGE_SHARED_EXEC
++#define __S111 PAGE_SHARED_EXEC
++
++static inline unsigned long pgd_bad(pgd_t pgd)
++{
++ unsigned long val = pgd_val(pgd);
++ val &= ~PTE_MASK;
++ val &= ~(_PAGE_USER | _PAGE_DIRTY);
++ return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
++}
++
++static inline unsigned long pud_bad(pud_t pud)
++{
++ unsigned long val = pud_val(pud);
++ val &= ~PTE_MASK;
++ val &= ~(_PAGE_USER | _PAGE_DIRTY);
++ return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
++}
++
++#define set_pte_at(_mm,addr,ptep,pteval) do { \
++ if (((_mm) != current->mm && (_mm) != &init_mm) || \
++ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \
++ set_pte((ptep), (pteval)); \
++} while (0)
++
++#define pte_none(x) (!(x).pte)
++#define pte_present(x) ((x).pte & (_PAGE_PRESENT | _PAGE_PROTNONE))
++#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
++
++#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
++
++#define __pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
++#define pte_mfn(_pte) ((_pte).pte & _PAGE_PRESENT ? \
++ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
++#define pte_pfn(_pte) ((_pte).pte & _PAGE_PRESENT ? \
++ mfn_to_local_pfn(__pte_mfn(_pte)) : __pte_mfn(_pte))
++
++#define pte_page(x) pfn_to_page(pte_pfn(x))
++
++static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
++{
++ unsigned long pte = page_nr << PAGE_SHIFT;
++ pte |= pgprot_val(pgprot);
++ pte &= __supported_pte_mask;
++ return __pte(pte);
++}
++
++static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ pte_t pte = *ptep;
++ if (!pte_none(pte)) {
++ if (mm != &init_mm)
++ pte = __pte_ma(xchg(&ptep->pte, 0));
++ else
++ HYPERVISOR_update_va_mapping(addr, __pte(0), 0);
++ }
++ return pte;
++}
++
++static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
++{
++ if (full) {
++ pte_t pte = *ptep;
++ if (mm->context.pinned)
++ xen_l1_entry_update(ptep, __pte(0));
++ else
++ *ptep = __pte(0);
++ return pte;
++ }
++ return ptep_get_and_clear(mm, addr, ptep);
++}
++
++#define ptep_clear_flush(vma, addr, ptep) \
++({ \
++ pte_t *__ptep = (ptep); \
++ pte_t __res = *__ptep; \
++ if (!pte_none(__res) && \
++ ((vma)->vm_mm != current->mm || \
++ HYPERVISOR_update_va_mapping(addr, __pte(0), \
++ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++ UVMF_INVLPG|UVMF_MULTI))) { \
++ __ptep->pte = 0; \
++ flush_tlb_page(vma, addr); \
++ } \
++ __res; \
++})
++
++/*
++ * The following only work if pte_present() is true.
++ * Undefined behaviour if not..
++ */
++#define __pte_val(x) ((x).pte)
++
++#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
++static inline int pte_user(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
++static inline int pte_read(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
++static inline int pte_exec(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
++static inline int pte_dirty(pte_t pte) { return __pte_val(pte) & _PAGE_DIRTY; }
++static inline int pte_young(pte_t pte) { return __pte_val(pte) & _PAGE_ACCESSED; }
++static inline int pte_write(pte_t pte) { return __pte_val(pte) & _PAGE_RW; }
++static inline int pte_file(pte_t pte) { return __pte_val(pte) & _PAGE_FILE; }
++static inline int pte_huge(pte_t pte) { return __pte_val(pte) & _PAGE_PSE; }
++
++static inline pte_t pte_rdprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_exprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_mkclean(pte_t pte) { __pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkold(pte_t pte) { __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
++static inline pte_t pte_wrprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_RW; return pte; }
++static inline pte_t pte_mkread(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkexec(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkdirty(pte_t pte) { __pte_val(pte) |= _PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkyoung(pte_t pte) { __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
++static inline pte_t pte_mkwrite(pte_t pte) { __pte_val(pte) |= _PAGE_RW; return pte; }
++static inline pte_t pte_mkhuge(pte_t pte) { __pte_val(pte) |= _PAGE_PSE; return pte; }
++
++#define ptep_test_and_clear_dirty(vma, addr, ptep) \
++({ \
++ pte_t __pte = *(ptep); \
++ int __ret = pte_dirty(__pte); \
++ if (__ret) \
++ set_pte_at((vma)->vm_mm, addr, ptep, pte_mkclean(__pte)); \
++ __ret; \
++})
++
++#define ptep_test_and_clear_young(vma, addr, ptep) \
++({ \
++ pte_t __pte = *(ptep); \
++ int __ret = pte_young(__pte); \
++ if (__ret) \
++ set_pte_at((vma)->vm_mm, addr, ptep, pte_mkold(__pte)); \
++ __ret; \
++})
++
++static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ pte_t pte = *ptep;
++ if (pte_write(pte))
++ set_pte_at(mm, addr, ptep, pte_wrprotect(pte));
++}
++
++/*
++ * Macro to mark a page protection value as "uncacheable".
++ */
++#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
++
++static inline int pmd_large(pmd_t pte) {
++ return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
++}
++
++
++/*
++ * Conversion functions: convert a page and protection to a page entry,
++ * and a page entry and page directory to the page they refer to.
++ */
++
++/*
++ * Level 4 access.
++ * Never use these in the common code.
++ */
++#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
++#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
++#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
++#define pgd_offset_k(address) (pgd_t *)(init_level4_pgt + pgd_index(address))
++#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
++#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
++
++/* PUD - Level3 access */
++/* to find an entry in a page-table-directory. */
++#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
++#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
++#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
++
++/* PMD - Level 2 access */
++#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
++#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
++
++#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
++#define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
++ pmd_index(address))
++#define pmd_none(x) (!pmd_val(x))
++#if CONFIG_XEN_COMPAT <= 0x030002
++/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
++ can temporarily clear it. */
++#define pmd_present(x) (pmd_val(x))
++#else
++#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
++#endif
++#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
++#define pmd_bad(x) ((pmd_val(x) & ~(PTE_MASK | _PAGE_USER | _PAGE_PRESENT)) \
++ != (_KERNPG_TABLE & ~(_PAGE_USER | _PAGE_PRESENT)))
++#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
++#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
++
++#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
++#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
++#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
++
++/* PTE - Level 1 access. */
++
++/* page, protection -> pte */
++#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
++#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
++
++/* physical address -> PTE */
++static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
++{
++ unsigned long pteval;
++ pteval = physpage | pgprot_val(pgprot);
++ return __pte(pteval);
++}
++
++/* Change flags of a PTE */
++static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
++{
++ /*
++ * Since this might change the present bit (which controls whether
++ * a pte_t object has undergone p2m translation), we must use
++ * pte_val() on the input pte and __pte() for the return value.
++ */
++ unsigned long pteval = pte_val(pte);
++
++ pteval &= _PAGE_CHG_MASK;
++ pteval |= pgprot_val(newprot);
++ pteval &= __supported_pte_mask;
++ return __pte(pteval);
++}
++
++#define pte_index(address) \
++ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
++#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
++ pte_index(address))
++
++/* x86-64 always has all page tables mapped. */
++#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
++#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
++#define pte_unmap(pte) /* NOP */
++#define pte_unmap_nested(pte) /* NOP */
++
++#define update_mmu_cache(vma,address,pte) do { } while (0)
++
++/*
++ * Rules for using ptep_establish: the pte MUST be a user pte, and
++ * must be a present->present transition.
++ */
++#define __HAVE_ARCH_PTEP_ESTABLISH
++#define ptep_establish(vma, address, ptep, pteval) \
++ do { \
++ if ( likely((vma)->vm_mm == current->mm) ) { \
++ BUG_ON(HYPERVISOR_update_va_mapping(address, \
++ pteval, \
++ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++ UVMF_INVLPG|UVMF_MULTI)); \
++ } else { \
++ xen_l1_entry_update(ptep, pteval); \
++ flush_tlb_page(vma, address); \
++ } \
++ } while (0)
++
++/* We only update the dirty/accessed state if we set
++ * the dirty bit by hand in the kernel, since the hardware
++ * will do the accessed bit for us, and we don't want to
++ * race with other CPUs that might be updating the dirty
++ * bit at the same time. */
++#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
++#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
++ do { \
++ if (dirty) \
++ ptep_establish(vma, address, ptep, entry); \
++ } while (0)
++
++/* Encode and de-code a swap entry */
++#define __swp_type(x) (((x).val >> 1) & 0x3f)
++#define __swp_offset(x) ((x).val >> 8)
++#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
++#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
++#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
++
++extern spinlock_t pgd_lock;
++extern struct page *pgd_list;
++void vmalloc_sync_all(void);
++
++#endif /* !__ASSEMBLY__ */
++
++extern int kern_addr_valid(unsigned long addr);
++
++#define DOMID_LOCAL (0xFFFFU)
++
++struct vm_area_struct;
++
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++ unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid);
++
++int direct_kernel_remap_pfn_range(unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid);
++
++int create_lookup_pte_addr(struct mm_struct *mm,
++ unsigned long address,
++ uint64_t *ptep);
++
++int touch_pte_range(struct mm_struct *mm,
++ unsigned long address,
++ unsigned long size);
++
++#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
++ direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)
++
++#define MK_IOSPACE_PFN(space, pfn) (pfn)
++#define GET_IOSPACE(pfn) 0
++#define GET_PFN(pfn) (pfn)
++
++#define HAVE_ARCH_UNMAPPED_AREA
++
++#define pgtable_cache_init() do { } while (0)
++#define check_pgt_cache() do { } while (0)
++
++#define PAGE_AGP PAGE_KERNEL_NOCACHE
++#define HAVE_PAGE_AGP 1
++
++/* fs/proc/kcore.c */
++#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
++#define kc_offset_to_vaddr(o) \
++ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
++
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
++#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
++#define __HAVE_ARCH_PTEP_SET_WRPROTECT
++#define __HAVE_ARCH_PTE_SAME
++#include <asm-generic/pgtable.h>
++
++#endif /* _X86_64_PGTABLE_H */
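pte_modify() above keeps the page-frame bits plus the accessed/dirty bits and overlays the new protection bits. A stand-alone sketch of the same masking, with constants mirroring the _PAGE_* values defined in the header (illustrative only, not part of this patch):

#include <stdio.h>

#define PAGE_PRESENT	0x001UL
#define PAGE_RW		0x002UL
#define PAGE_ACCESSED	0x020UL
#define PAGE_DIRTY	0x040UL
#define PTE_FRAME_MASK	0x0000fffffffff000UL	/* page-frame bits */
#define CHG_MASK	(PTE_FRAME_MASK | PAGE_ACCESSED | PAGE_DIRTY)

/* Keep frame + accessed/dirty, replace the protection bits. */
static unsigned long pte_modify_bits(unsigned long pte, unsigned long newprot)
{
	return (pte & CHG_MASK) | newprot;
}

int main(void)
{
	unsigned long pte = 0x12345000UL | PAGE_PRESENT | PAGE_RW | PAGE_DIRTY;
	unsigned long ro  = pte_modify_bits(pte, PAGE_PRESENT);	/* drop RW */

	printf("old %#lx new %#lx\n", pte, ro);
	return 0;
}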
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/processor.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,506 @@
++/*
++ * include/asm-x86_64/processor.h
++ *
++ * Copyright (C) 1994 Linus Torvalds
++ */
++
++#ifndef __ASM_X86_64_PROCESSOR_H
++#define __ASM_X86_64_PROCESSOR_H
++
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/types.h>
++#include <asm/sigcontext.h>
++#include <asm/cpufeature.h>
++#include <linux/threads.h>
++#include <asm/msr.h>
++#include <asm/current.h>
++#include <asm/system.h>
++#include <asm/mmsegment.h>
++#include <asm/percpu.h>
++#include <linux/personality.h>
++#include <linux/cpumask.h>
++
++#define TF_MASK 0x00000100
++#define IF_MASK 0x00000200
++#define IOPL_MASK 0x00003000
++#define NT_MASK 0x00004000
++#define VM_MASK 0x00020000
++#define AC_MASK 0x00040000
++#define VIF_MASK 0x00080000 /* virtual interrupt flag */
++#define VIP_MASK 0x00100000 /* virtual interrupt pending */
++#define ID_MASK 0x00200000
++
++#define desc_empty(desc) \
++ (!((desc)->a | (desc)->b))
++
++#define desc_equal(desc1, desc2) \
++ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
++
++/*
++ * Default implementation of macro that returns current
++ * instruction pointer ("program counter").
++ */
++#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
++
++/*
++ * CPU type and hardware bug flags. Kept separately for each CPU.
++ */
++
++struct cpuinfo_x86 {
++ __u8 x86; /* CPU family */
++ __u8 x86_vendor; /* CPU vendor */
++ __u8 x86_model;
++ __u8 x86_mask;
++ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
++ __u32 x86_capability[NCAPINTS];
++ char x86_vendor_id[16];
++ char x86_model_id[64];
++ int x86_cache_size; /* in KB */
++ int x86_clflush_size;
++ int x86_cache_alignment;
++ int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined(in pages)*/
++ __u8 x86_virt_bits, x86_phys_bits;
++ __u8 x86_max_cores; /* cpuid returned max cores value */
++ __u32 x86_power;
++ __u32 extended_cpuid_level; /* Max extended CPUID function supported */
++ unsigned long loops_per_jiffy;
++#ifdef CONFIG_SMP
++ cpumask_t llc_shared_map; /* cpus sharing the last level cache */
++#endif
++ __u8 apicid;
++#ifdef CONFIG_SMP
++ __u8 booted_cores; /* number of cores as seen by OS */
++ __u8 phys_proc_id; /* Physical Processor id. */
++ __u8 cpu_core_id; /* Core id. */
++#endif
++} ____cacheline_aligned;
++
++#define X86_VENDOR_INTEL 0
++#define X86_VENDOR_CYRIX 1
++#define X86_VENDOR_AMD 2
++#define X86_VENDOR_UMC 3
++#define X86_VENDOR_NEXGEN 4
++#define X86_VENDOR_CENTAUR 5
++#define X86_VENDOR_RISE 6
++#define X86_VENDOR_TRANSMETA 7
++#define X86_VENDOR_NUM 8
++#define X86_VENDOR_UNKNOWN 0xff
++
++#ifdef CONFIG_SMP
++extern struct cpuinfo_x86 cpu_data[];
++#define current_cpu_data cpu_data[smp_processor_id()]
++#else
++#define cpu_data (&boot_cpu_data)
++#define current_cpu_data boot_cpu_data
++#endif
++
++extern char ignore_irq13;
++
++extern void identify_cpu(struct cpuinfo_x86 *);
++extern void print_cpu_info(struct cpuinfo_x86 *);
++extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
++extern unsigned short num_cache_leaves;
++
++/*
++ * EFLAGS bits
++ */
++#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
++#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
++#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
++#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
++#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
++#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
++#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
++#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
++#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
++#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
++#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
++#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
++#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
++#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
++#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
++#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
++#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
++
++/*
++ * Intel CPU features in CR4
++ */
++#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
++#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
++#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
++#define X86_CR4_DE 0x0008 /* enable debugging extensions */
++#define X86_CR4_PSE 0x0010 /* enable page size extensions */
++#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
++#define X86_CR4_MCE 0x0040 /* Machine check enable */
++#define X86_CR4_PGE 0x0080 /* enable global pages */
++#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
++#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
++#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
++
++/*
++ * Save the cr4 feature set we're using (ie
++ * Pentium 4MB enable and PPro Global page
++ * enable), so that any CPU's that boot up
++ * after us can get the correct flags.
++ */
++extern unsigned long mmu_cr4_features;
++
++static inline void set_in_cr4 (unsigned long mask)
++{
++ mmu_cr4_features |= mask;
++ __asm__("movq %%cr4,%%rax\n\t"
++ "orq %0,%%rax\n\t"
++ "movq %%rax,%%cr4\n"
++ : : "irg" (mask)
++ :"ax");
++}
++
++static inline void clear_in_cr4 (unsigned long mask)
++{
++ mmu_cr4_features &= ~mask;
++ __asm__("movq %%cr4,%%rax\n\t"
++ "andq %0,%%rax\n\t"
++ "movq %%rax,%%cr4\n"
++ : : "irg" (~mask)
++ :"ax");
++}
++
++
++/*
++ * Bus types
++ */
++#define MCA_bus 0
++#define MCA_bus__is_a_macro
++
++/*
++ * User space process size: 47 bits minus one guard page.
++ */
++#define TASK_SIZE64 (0x800000000000UL - 4096)
++
++/* This decides where the kernel will search for a free chunk of vm
++ * space during mmap's.
++ */
++#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
++
++#define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
++#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
++
++#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3)
++
++/*
++ * Size of io_bitmap.
++ */
++#define IO_BITMAP_BITS 65536
++#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
++#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
++#ifndef CONFIG_X86_NO_TSS
++#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
++#endif
++#define INVALID_IO_BITMAP_OFFSET 0x8000
++
++struct i387_fxsave_struct {
++ u16 cwd;
++ u16 swd;
++ u16 twd;
++ u16 fop;
++ u64 rip;
++ u64 rdp;
++ u32 mxcsr;
++ u32 mxcsr_mask;
++ u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
++ u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 128 bytes */
++ u32 padding[24];
++} __attribute__ ((aligned (16)));
++
++union i387_union {
++ struct i387_fxsave_struct fxsave;
++};
++
++#ifndef CONFIG_X86_NO_TSS
++struct tss_struct {
++ u32 reserved1;
++ u64 rsp0;
++ u64 rsp1;
++ u64 rsp2;
++ u64 reserved2;
++ u64 ist[7];
++ u32 reserved3;
++ u32 reserved4;
++ u16 reserved5;
++ u16 io_bitmap_base;
++ /*
++ * The extra 1 is there because the CPU will access an
++ * additional byte beyond the end of the IO permission
++ * bitmap. The extra byte must be all 1 bits, and must
++ * be within the limit. Thus we have:
++ *
++ * 128 bytes, the bitmap itself, for ports 0..0x3ff
++ * 8 bytes, for an extra "long" of ~0UL
++ */
++ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
++} __attribute__((packed)) ____cacheline_aligned;
++
++DECLARE_PER_CPU(struct tss_struct,init_tss);
++#endif
++
++
++extern struct cpuinfo_x86 boot_cpu_data;
++#ifndef CONFIG_X86_NO_TSS
++/* Save the original ist values for checking stack pointers during debugging */
++struct orig_ist {
++ unsigned long ist[7];
++};
++DECLARE_PER_CPU(struct orig_ist, orig_ist);
++#endif
++
++#ifdef CONFIG_X86_VSMP
++#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
++#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
++#else
++#define ARCH_MIN_TASKALIGN 16
++#define ARCH_MIN_MMSTRUCT_ALIGN 0
++#endif
++
++struct thread_struct {
++ unsigned long rsp0;
++ unsigned long rsp;
++ unsigned long userrsp; /* Copy from PDA */
++ unsigned long fs;
++ unsigned long gs;
++ unsigned short es, ds, fsindex, gsindex;
++/* Hardware debugging registers */
++ unsigned long debugreg0;
++ unsigned long debugreg1;
++ unsigned long debugreg2;
++ unsigned long debugreg3;
++ unsigned long debugreg6;
++ unsigned long debugreg7;
++/* fault info */
++ unsigned long cr2, trap_no, error_code;
++/* floating point info */
++ union i387_union i387 __attribute__((aligned(16)));
++/* IO permissions. The bitmap could be moved into the GDT, which would make
++ switching faster for a limited number of ioperm-using tasks. -AK */
++ int ioperm;
++ unsigned long *io_bitmap_ptr;
++ unsigned io_bitmap_max;
++/* cached TLS descriptors. */
++ u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
++ unsigned int iopl;
++} __attribute__((aligned(16)));
++
++#define INIT_THREAD { \
++ .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++}
++
++#ifndef CONFIG_X86_NO_TSS
++#define INIT_TSS { \
++ .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++}
++#endif
++
++#define INIT_MMAP \
++{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
++
++#define start_thread(regs,new_rip,new_rsp) do { \
++ asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
++ load_gs_index(0); \
++ (regs)->rip = (new_rip); \
++ (regs)->rsp = (new_rsp); \
++ write_pda(oldrsp, (new_rsp)); \
++ (regs)->cs = __USER_CS; \
++ (regs)->ss = __USER_DS; \
++ (regs)->eflags = 0x200; \
++ set_fs(USER_DS); \
++} while(0)
++
++#define get_debugreg(var, register) \
++ var = HYPERVISOR_get_debugreg(register)
++#define set_debugreg(value, register) \
++ HYPERVISOR_set_debugreg(register, value)
++
++struct task_struct;
++struct mm_struct;
++
++/* Free all resources held by a thread. */
++extern void release_thread(struct task_struct *);
++
++/* Prepare to copy thread state - unlazy all lazy status */
++extern void prepare_to_copy(struct task_struct *tsk);
++
++/*
++ * create a kernel thread without removing it from tasklists
++ */
++extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
++
++/*
++ * Return saved PC of a blocked thread.
++ * What is this good for? It will always be the scheduler or ret_from_fork.
++ */
++#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
++
++extern unsigned long get_wchan(struct task_struct *p);
++#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
++#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
++#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
++
++
++struct microcode_header {
++ unsigned int hdrver;
++ unsigned int rev;
++ unsigned int date;
++ unsigned int sig;
++ unsigned int cksum;
++ unsigned int ldrver;
++ unsigned int pf;
++ unsigned int datasize;
++ unsigned int totalsize;
++ unsigned int reserved[3];
++};
++
++struct microcode {
++ struct microcode_header hdr;
++ unsigned int bits[0];
++};
++
++typedef struct microcode microcode_t;
++typedef struct microcode_header microcode_header_t;
++
++/* microcode format is extended from prescott processors */
++struct extended_signature {
++ unsigned int sig;
++ unsigned int pf;
++ unsigned int cksum;
++};
++
++struct extended_sigtable {
++ unsigned int count;
++ unsigned int cksum;
++ unsigned int reserved[3];
++ struct extended_signature sigs[0];
++};
++
++
++#define ASM_NOP1 K8_NOP1
++#define ASM_NOP2 K8_NOP2
++#define ASM_NOP3 K8_NOP3
++#define ASM_NOP4 K8_NOP4
++#define ASM_NOP5 K8_NOP5
++#define ASM_NOP6 K8_NOP6
++#define ASM_NOP7 K8_NOP7
++#define ASM_NOP8 K8_NOP8
++
++/* Opteron nops */
++#define K8_NOP1 ".byte 0x90\n"
++#define K8_NOP2 ".byte 0x66,0x90\n"
++#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
++#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
++#define K8_NOP5 K8_NOP3 K8_NOP2
++#define K8_NOP6 K8_NOP3 K8_NOP3
++#define K8_NOP7 K8_NOP4 K8_NOP3
++#define K8_NOP8 K8_NOP4 K8_NOP4
++
++#define ASM_NOP_MAX 8
++
++/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
++static inline void rep_nop(void)
++{
++ __asm__ __volatile__("rep;nop": : :"memory");
++}
++
++/* Stop speculative execution */
++static inline void sync_core(void)
++{
++ int tmp;
++ asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
++}
++
++#define cpu_has_fpu 1
++
++#define ARCH_HAS_PREFETCH
++static inline void prefetch(void *x)
++{
++ asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
++}
++
++#define ARCH_HAS_PREFETCHW 1
++static inline void prefetchw(void *x)
++{
++ alternative_input("prefetcht0 (%1)",
++ "prefetchw (%1)",
++ X86_FEATURE_3DNOW,
++ "r" (x));
++}
++
++#define ARCH_HAS_SPINLOCK_PREFETCH 1
++
++#define spin_lock_prefetch(x) prefetchw(x)
++
++#define cpu_relax() rep_nop()
++
++/*
++ * NSC/Cyrix CPU configuration register indexes
++ */
++#define CX86_CCR0 0xc0
++#define CX86_CCR1 0xc1
++#define CX86_CCR2 0xc2
++#define CX86_CCR3 0xc3
++#define CX86_CCR4 0xe8
++#define CX86_CCR5 0xe9
++#define CX86_CCR6 0xea
++#define CX86_CCR7 0xeb
++#define CX86_DIR0 0xfe
++#define CX86_DIR1 0xff
++#define CX86_ARR_BASE 0xc4
++#define CX86_RCR_BASE 0xdc
++
++/*
++ * NSC/Cyrix CPU indexed register access macros
++ */
++
++#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
++
++#define setCx86(reg, data) do { \
++ outb((reg), 0x22); \
++ outb((data), 0x23); \
++} while (0)
++
++static inline void serialize_cpu(void)
++{
++ __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
++}
++
++static inline void __monitor(const void *eax, unsigned long ecx,
++ unsigned long edx)
++{
++ /* "monitor %eax,%ecx,%edx;" */
++ asm volatile(
++ ".byte 0x0f,0x01,0xc8;"
++ : :"a" (eax), "c" (ecx), "d"(edx));
++}
++
++static inline void __mwait(unsigned long eax, unsigned long ecx)
++{
++ /* "mwait %eax,%ecx;" */
++ asm volatile(
++ ".byte 0x0f,0x01,0xc9;"
++ : :"a" (eax), "c" (ecx));
++}
++
++#define stack_current() \
++({ \
++ struct thread_info *ti; \
++ asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
++ ti->task; \
++})
++
++#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
++
++extern unsigned long boot_option_idle_override;
++/* Boot loader type from the setup header */
++extern int bootloader_type;
++
++#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
++
++#endif /* __ASM_X86_64_PROCESSOR_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/ptrace.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,127 @@
++#ifndef _X86_64_PTRACE_H
++#define _X86_64_PTRACE_H
++
++#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
++#define R15 0
++#define R14 8
++#define R13 16
++#define R12 24
++#define RBP 32
++#define RBX 40
++/* arguments: interrupts/non-tracing syscalls only save up to here */
++#define R11 48
++#define R10 56
++#define R9 64
++#define R8 72
++#define RAX 80
++#define RCX 88
++#define RDX 96
++#define RSI 104
++#define RDI 112
++#define ORIG_RAX 120 /* = ERROR */
++/* end of arguments */
++/* cpu exception frame or undefined in case of fast syscall. */
++#define RIP 128
++#define CS 136
++#define EFLAGS 144
++#define RSP 152
++#define SS 160
++#define ARGOFFSET R11
++#endif /* __ASSEMBLY__ */
++
++/* top of stack page */
++#define FRAME_SIZE 168
++
++#define PTRACE_OLDSETOPTIONS 21
++
++#ifndef __ASSEMBLY__
++
++struct pt_regs {
++ unsigned long r15;
++ unsigned long r14;
++ unsigned long r13;
++ unsigned long r12;
++ unsigned long rbp;
++ unsigned long rbx;
++/* arguments: non-interrupt/non-tracing syscalls only save up to here */
++ unsigned long r11;
++ unsigned long r10;
++ unsigned long r9;
++ unsigned long r8;
++ unsigned long rax;
++ unsigned long rcx;
++ unsigned long rdx;
++ unsigned long rsi;
++ unsigned long rdi;
++ unsigned long orig_rax;
++/* end of arguments */
++/* cpu exception frame or undefined */
++ unsigned long rip;
++ unsigned long cs;
++ unsigned long eflags;
++ unsigned long rsp;
++ unsigned long ss;
++/* top of stack page */
++};
++
++#endif
++
++/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
++#define PTRACE_GETREGS 12
++#define PTRACE_SETREGS 13
++#define PTRACE_GETFPREGS 14
++#define PTRACE_SETFPREGS 15
++#define PTRACE_GETFPXREGS 18
++#define PTRACE_SETFPXREGS 19
++
++/* only useful for accessing 32-bit programs */
++#define PTRACE_GET_THREAD_AREA 25
++#define PTRACE_SET_THREAD_AREA 26
++
++#define PTRACE_ARCH_PRCTL 30 /* arch_prctl for child */
++
++#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
++#define user_mode(regs) (!!((regs)->cs & 3))
++#define user_mode_vm(regs) user_mode(regs)
++#define instruction_pointer(regs) ((regs)->rip)
++#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
++extern unsigned long profile_pc(struct pt_regs *regs);
++#else
++#define profile_pc(regs) instruction_pointer(regs)
++#endif
++
++#include <linux/compiler.h>
++
++void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
++
++struct task_struct;
++
++extern unsigned long
++convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);
++
++enum {
++ EF_CF = 0x00000001,
++ EF_PF = 0x00000004,
++ EF_AF = 0x00000010,
++ EF_ZF = 0x00000040,
++ EF_SF = 0x00000080,
++ EF_TF = 0x00000100,
++ EF_IE = 0x00000200,
++ EF_DF = 0x00000400,
++ EF_OF = 0x00000800,
++ EF_IOPL = 0x00003000,
++ EF_IOPL_RING0 = 0x00000000,
++ EF_IOPL_RING1 = 0x00001000,
++ EF_IOPL_RING2 = 0x00002000,
++ EF_NT = 0x00004000, /* nested task */
++ EF_RF = 0x00010000, /* resume */
++ EF_VM = 0x00020000, /* virtual mode */
++ EF_AC = 0x00040000, /* alignment */
++ EF_VIF = 0x00080000, /* virtual interrupt */
++ EF_VIP = 0x00100000, /* virtual interrupt pending */
++ EF_ID = 0x00200000, /* id */
++};
++
++#endif
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/smp.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,150 @@
++#ifndef __ASM_SMP_H
++#define __ASM_SMP_H
++
++/*
++ * We need the APIC definitions automatically as part of 'smp.h'
++ */
++#ifndef __ASSEMBLY__
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#include <linux/bitops.h>
++extern int disable_apic;
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++#ifndef __ASSEMBLY__
++#include <asm/fixmap.h>
++#include <asm/mpspec.h>
++#ifdef CONFIG_X86_IO_APIC
++#include <asm/io_apic.h>
++#endif
++#include <asm/apic.h>
++#include <asm/thread_info.h>
++#endif
++#endif
++
++#ifdef CONFIG_SMP
++#ifndef ASSEMBLY
++
++#include <asm/pda.h>
++
++struct pt_regs;
++
++extern cpumask_t cpu_present_mask;
++extern cpumask_t cpu_possible_map;
++extern cpumask_t cpu_online_map;
++extern cpumask_t cpu_initialized;
++
++/*
++ * Private routines/data
++ */
++
++extern void smp_alloc_memory(void);
++extern volatile unsigned long smp_invalidate_needed;
++extern int pic_mode;
++extern void lock_ipi_call_lock(void);
++extern void unlock_ipi_call_lock(void);
++extern int smp_num_siblings;
++extern void smp_send_reschedule(int cpu);
++void smp_stop_cpu(void);
++extern int smp_call_function_single(int cpuid, void (*func) (void *info),
++ void *info, int retry, int wait);
++
++extern cpumask_t cpu_sibling_map[NR_CPUS];
++extern cpumask_t cpu_core_map[NR_CPUS];
++extern u8 cpu_llc_id[NR_CPUS];
++
++#define SMP_TRAMPOLINE_BASE 0x6000
++
++/*
++ * On x86 all CPUs are mapped 1:1 to the APIC space.
++ * This simplifies scheduling and IPI sending and
++ * compresses data structures.
++ */
++
++static inline int num_booting_cpus(void)
++{
++ return cpus_weight(cpu_possible_map);
++}
++
++#define raw_smp_processor_id() read_pda(cpunumber)
++
++#ifdef CONFIG_X86_LOCAL_APIC
++static inline int hard_smp_processor_id(void)
++{
++ /* we don't want to mark this access volatile - bad code generation */
++ return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
++}
++#endif
++
++extern int safe_smp_processor_id(void);
++extern int __cpu_disable(void);
++extern void __cpu_die(unsigned int cpu);
++extern void prefill_possible_map(void);
++extern unsigned num_processors;
++extern unsigned disabled_cpus;
++
++#endif /* !ASSEMBLY */
++
++#define NO_PROC_ID 0xFF /* No processor magic marker */
++
++#endif
++
++#ifndef ASSEMBLY
++/*
++ * Some lowlevel functions might want to know about
++ * the real APIC ID <-> CPU # mapping.
++ */
++extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
++extern u8 x86_cpu_to_log_apicid[NR_CPUS];
++extern u8 bios_cpu_apicid[];
++
++#ifdef CONFIG_X86_LOCAL_APIC
++static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
++{
++ return cpus_addr(cpumask)[0];
++}
++
++static inline int cpu_present_to_apicid(int mps_cpu)
++{
++ if (mps_cpu < NR_CPUS)
++ return (int)bios_cpu_apicid[mps_cpu];
++ else
++ return BAD_APICID;
++}
++#endif
++
++#endif /* !ASSEMBLY */
++
++#ifndef CONFIG_SMP
++#define stack_smp_processor_id() 0
++#define safe_smp_processor_id() 0
++#define cpu_logical_map(x) (x)
++#else
++#include <asm/thread_info.h>
++#define stack_smp_processor_id() \
++({ \
++ struct thread_info *ti; \
++ __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
++ ti->cpu; \
++})
++#endif
++
++#ifndef __ASSEMBLY__
++#ifdef CONFIG_X86_LOCAL_APIC
++static __inline int logical_smp_processor_id(void)
++{
++ /* we don't want to mark this access volatile - bad code generation */
++ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
++}
++#endif
++#endif
++
++#ifdef CONFIG_SMP
++#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
++#else
++#define cpu_physical_id(cpu) boot_cpu_id
++#endif
++
++#endif
++
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/synch_bitops.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,2 @@
++
++#include <asm-i386/mach-xen/asm/synch_bitops.h>
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/system.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,262 @@
++#ifndef __ASM_SYSTEM_H
++#define __ASM_SYSTEM_H
++
++#include <linux/kernel.h>
++#include <asm/segment.h>
++#include <asm/alternative.h>
++
++#include <asm/synch_bitops.h>
++#include <asm/hypervisor.h>
++#include <xen/interface/arch-x86_64.h>
++
++#ifdef __KERNEL__
++
++#ifdef CONFIG_SMP
++#define __vcpu_id smp_processor_id()
++#else
++#define __vcpu_id 0
++#endif
++
++#define __STR(x) #x
++#define STR(x) __STR(x)
++
++#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
++#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
++
++/* frame pointer must be last for get_wchan */
++#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
++#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t"
++
++#define __EXTRA_CLOBBER \
++ ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
++
++#define switch_to(prev,next,last) \
++ asm volatile(SAVE_CONTEXT \
++ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
++ "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
++ "call __switch_to\n\t" \
++ ".globl thread_return\n" \
++ "thread_return:\n\t" \
++ "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
++ "movq %P[thread_info](%%rsi),%%r8\n\t" \
++ LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
++ "movq %%rax,%%rdi\n\t" \
++ "jc ret_from_fork\n\t" \
++ RESTORE_CONTEXT \
++ : "=a" (last) \
++ : [next] "S" (next), [prev] "D" (prev), \
++ [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
++ [ti_flags] "i" (offsetof(struct thread_info, flags)),\
++ [tif_fork] "i" (TIF_FORK), \
++ [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
++ [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
++ : "memory", "cc" __EXTRA_CLOBBER)
++
++extern void load_gs_index(unsigned);
++
++/*
++ * Load a segment. Fall back on loading the zero
++ * segment if something goes wrong..
++ */
++#define loadsegment(seg,value) \
++ asm volatile("\n" \
++ "1:\t" \
++ "movl %k0,%%" #seg "\n" \
++ "2:\n" \
++ ".section .fixup,\"ax\"\n" \
++ "3:\t" \
++ "movl %1,%%" #seg "\n\t" \
++ "jmp 2b\n" \
++ ".previous\n" \
++ ".section __ex_table,\"a\"\n\t" \
++ ".align 8\n\t" \
++ ".quad 1b,3b\n" \
++ ".previous" \
++ : :"r" (value), "r" (0))
++
++/*
++ * Clear and set 'TS' bit respectively
++ */
++#define clts() (HYPERVISOR_fpu_taskswitch(0))
++
++static inline unsigned long read_cr0(void)
++{
++ unsigned long cr0;
++ asm volatile("movq %%cr0,%0" : "=r" (cr0));
++ return cr0;
++}
++
++static inline void write_cr0(unsigned long val)
++{
++ asm volatile("movq %0,%%cr0" :: "r" (val));
++}
++
++#define read_cr3() ({ \
++ unsigned long __dummy; \
++ asm("movq %%cr3,%0" : "=r" (__dummy)); \
++ machine_to_phys(__dummy); \
++})
++
++static inline unsigned long read_cr4(void)
++{
++ unsigned long cr4;
++ asm("movq %%cr4,%0" : "=r" (cr4));
++ return cr4;
++}
++
++static inline void write_cr4(unsigned long val)
++{
++ asm volatile("movq %0,%%cr4" :: "r" (val));
++}
++
++#define stts() (HYPERVISOR_fpu_taskswitch(1))
++
++#define wbinvd() \
++ __asm__ __volatile__ ("wbinvd": : :"memory");
++
++/*
++ * On SMP systems, when the scheduler does migration-cost autodetection,
++ * it needs a way to flush as much of the CPU's caches as possible.
++ */
++static inline void sched_cacheflush(void)
++{
++ wbinvd();
++}
++
++#endif /* __KERNEL__ */
++
++#define nop() __asm__ __volatile__ ("nop")
++
++#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
++
++#define tas(ptr) (xchg((ptr),1))
++
++#define __xg(x) ((volatile long *)(x))
++
++static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
++{
++ *ptr = val;
++}
++
++#define _set_64bit set_64bit
++
++/*
++ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
++ * Note 2: xchg has side effect, so that attribute volatile is necessary,
++ * but generally the primitive is invalid; *ptr is an output argument. --ANK
++ */
++static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
++{
++ switch (size) {
++ case 1:
++ __asm__ __volatile__("xchgb %b0,%1"
++ :"=q" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ case 2:
++ __asm__ __volatile__("xchgw %w0,%1"
++ :"=r" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ case 4:
++ __asm__ __volatile__("xchgl %k0,%1"
++ :"=r" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ case 8:
++ __asm__ __volatile__("xchgq %0,%1"
++ :"=r" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ }
++ return x;
++}
++
++/*
++ * Atomic compare and exchange. Compare OLD with MEM, if identical,
++ * store NEW in MEM. Return the initial value in MEM. Success is
++ * indicated by comparing RETURN with OLD.
++ */
++
++#define __HAVE_ARCH_CMPXCHG 1
++
++static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
++ unsigned long new, int size)
++{
++ unsigned long prev;
++ switch (size) {
++ case 1:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
++ : "=a"(prev)
++ : "q"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 2:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 4:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 8:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ }
++ return old;
++}
++
++#define cmpxchg(ptr,o,n)\
++ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
++ (unsigned long)(n),sizeof(*(ptr))))
++
++#ifdef CONFIG_SMP
++#define smp_mb() mb()
++#define smp_rmb() rmb()
++#define smp_wmb() wmb()
++#define smp_read_barrier_depends() do {} while(0)
++#else
++#define smp_mb() barrier()
++#define smp_rmb() barrier()
++#define smp_wmb() barrier()
++#define smp_read_barrier_depends() do {} while(0)
++#endif
++
++
++/*
++ * Force strict CPU ordering.
++ * And yes, this is required on UP too when we're talking
++ * to devices.
++ */
++#define mb() asm volatile("mfence":::"memory")
++#define rmb() asm volatile("lfence":::"memory")
++
++#ifdef CONFIG_UNORDERED_IO
++#define wmb() asm volatile("sfence" ::: "memory")
++#else
++#define wmb() asm volatile("" ::: "memory")
++#endif
++#define read_barrier_depends() do {} while(0)
++#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
++
++#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
++
++#include <linux/irqflags.h>
++
++void cpu_idle_wait(void);
++
++extern unsigned long arch_align_stack(unsigned long sp);
++extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/timer.h 2007-08-27 14:02:04.000000000 -0400
+@@ -0,0 +1,67 @@
++#ifndef _ASMi386_TIMER_H
++#define _ASMi386_TIMER_H
++#include <linux/init.h>
++
++/**
++ * struct timer_opts - used to define a timer source
++ *
++ * @name: name of the timer.
++ * @init: Probes and initializes the timer. Takes clock= override
++ * string as an argument. Returns 0 on success, anything else
++ * on failure.
++ * @mark_offset: called by the timer interrupt.
++ * @get_offset: called by gettimeofday(). Returns the number of microseconds
++ * since the last timer interrupt.
++ * @monotonic_clock: returns the number of nanoseconds since the init of the
++ * timer.
++ * @delay: delays this many clock cycles.
++ */
++struct timer_opts {
++ char* name;
++ void (*mark_offset)(void);
++ unsigned long (*get_offset)(void);
++ unsigned long long (*monotonic_clock)(void);
++ void (*delay)(unsigned long);
++ unsigned long (*read_timer)(void);
++ int (*suspend)(pm_message_t state);
++ int (*resume)(void);
++};
++
++struct init_timer_opts {
++ int (*init)(char *override);
++ struct timer_opts *opts;
++};
++
++#define TICK_SIZE (tick_nsec / 1000)
++
++extern struct timer_opts* __init select_timer(void);
++extern void clock_fallback(void);
++void setup_pit_timer(void);
++
++/* Modifiers for buggy PIT handling */
++
++extern int pit_latch_buggy;
++
++extern struct timer_opts *cur_timer;
++extern int timer_ack;
++
++/* list of externed timers */
++extern struct timer_opts timer_none;
++extern struct timer_opts timer_pit;
++extern struct init_timer_opts timer_pit_init;
++extern struct init_timer_opts timer_tsc_init;
++#ifdef CONFIG_X86_CYCLONE_TIMER
++extern struct init_timer_opts timer_cyclone_init;
++#endif
++
++extern unsigned long calibrate_tsc(void);
++extern void init_cpu_khz(void);
++#ifdef CONFIG_HPET_TIMER
++extern struct init_timer_opts timer_hpet_init;
++extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
++#endif
++
++#ifdef CONFIG_X86_PM_TIMER
++extern struct init_timer_opts timer_pmtmr_init;
++#endif
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/tlbflush.h 2007-08-27 14:01:58.000000000 -0400
+@@ -0,0 +1,103 @@
++#ifndef _X8664_TLBFLUSH_H
++#define _X8664_TLBFLUSH_H
++
++#include <linux/mm.h>
++#include <asm/processor.h>
++
++#define __flush_tlb() xen_tlb_flush()
++
++/*
++ * Global pages have to be flushed a bit differently. Not a real
++ * performance problem because this does not happen often.
++ */
++#define __flush_tlb_global() xen_tlb_flush()
++
++
++extern unsigned long pgkern_mask;
++
++#define __flush_tlb_all() __flush_tlb_global()
++
++#define __flush_tlb_one(addr) xen_invlpg((unsigned long)addr)
++
++
++/*
++ * TLB flushing:
++ *
++ * - flush_tlb() flushes the current mm struct TLBs
++ * - flush_tlb_all() flushes all processes TLBs
++ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
++ * - flush_tlb_page(vma, vmaddr) flushes one page
++ * - flush_tlb_range(vma, start, end) flushes a range of pages
++ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
++ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
++ *
++ * x86-64 can only flush individual pages or full VMs. For a range flush
++ * we always do the full VM. Might be worth trying if for a small
++ * range a few INVLPGs in a row are a win.
++ */
++
++#ifndef CONFIG_SMP
++
++#define flush_tlb() __flush_tlb()
++#define flush_tlb_all() __flush_tlb_all()
++#define local_flush_tlb() __flush_tlb()
++
++static inline void flush_tlb_mm(struct mm_struct *mm)
++{
++ if (mm == current->active_mm)
++ __flush_tlb();
++}
++
++static inline void flush_tlb_page(struct vm_area_struct *vma,
++ unsigned long addr)
++{
++ if (vma->vm_mm == current->active_mm)
++ __flush_tlb_one(addr);
++}
++
++static inline void flush_tlb_range(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end)
++{
++ if (vma->vm_mm == current->active_mm)
++ __flush_tlb();
++}
++
++#else
++
++#include <asm/smp.h>
++
++#define local_flush_tlb() \
++ __flush_tlb()
++
++extern void flush_tlb_all(void);
++extern void flush_tlb_current_task(void);
++extern void flush_tlb_mm(struct mm_struct *);
++extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
++
++#define flush_tlb() flush_tlb_current_task()
++
++static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
++{
++ flush_tlb_mm(vma->vm_mm);
++}
++
++#define TLBSTATE_OK 1
++#define TLBSTATE_LAZY 2
++
++/* Roughly an IPI every 20MB with 4k pages for freeing page table
++ ranges. Cost is about 42k of memory for each CPU. */
++#define ARCH_FREE_PTE_NR 5350
++
++#endif
++
++#define flush_tlb_kernel_range(start, end) flush_tlb_all()
++
++static inline void flush_tlb_pgtables(struct mm_struct *mm,
++ unsigned long start, unsigned long end)
++{
++ /* x86_64 does not keep any page table caches in a software TLB.
++ The CPUs do in their hardware TLBs, but they are handled
++ by the normal TLB flushing algorithms. */
++}
++
++#endif /* _X8664_TLBFLUSH_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/vga.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,20 @@
++/*
++ * Access to VGA videoram
++ *
++ * (c) 1998 Martin Mares <mj@ucw.cz>
++ */
++
++#ifndef _LINUX_ASM_VGA_H_
++#define _LINUX_ASM_VGA_H_
++
++/*
++ * On the PC, we can just recalculate addresses and then
++ * access the videoram directly without any black magic.
++ */
++
++#define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x)
++
++#define vga_readb(x) (*(x))
++#define vga_writeb(x,y) (*(y) = (x))
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/xenoprof.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1 @@
++#include <asm-i386/mach-xen/asm/xenoprof.h>
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/xor.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,328 @@
++/*
++ * x86-64 changes / gcc fixes from Andi Kleen.
++ * Copyright 2002 Andi Kleen, SuSE Labs.
++ *
++ * This hasn't been optimized for the hammer yet, but there are likely
++ * no advantages to be gotten from x86-64 here anyways.
++ */
++
++typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
++
++/* Doesn't use gcc to save the XMM registers, because there is no easy way to
++ tell it to do a clts before the register saving. */
++#define XMMS_SAVE do { \
++ preempt_disable(); \
++ if (!(current_thread_info()->status & TS_USEDFPU)) \
++ clts(); \
++ __asm__ __volatile__ ( \
++ "movups %%xmm0,(%1) ;\n\t" \
++ "movups %%xmm1,0x10(%1) ;\n\t" \
++ "movups %%xmm2,0x20(%1) ;\n\t" \
++ "movups %%xmm3,0x30(%1) ;\n\t" \
++ : "=&r" (cr0) \
++ : "r" (xmm_save) \
++ : "memory"); \
++} while(0)
++
++#define XMMS_RESTORE do { \
++ asm volatile ( \
++ "sfence ;\n\t" \
++ "movups (%1),%%xmm0 ;\n\t" \
++ "movups 0x10(%1),%%xmm1 ;\n\t" \
++ "movups 0x20(%1),%%xmm2 ;\n\t" \
++ "movups 0x30(%1),%%xmm3 ;\n\t" \
++ : \
++ : "r" (cr0), "r" (xmm_save) \
++ : "memory"); \
++ if (!(current_thread_info()->status & TS_USEDFPU)) \
++ stts(); \
++ preempt_enable(); \
++} while(0)
++
++#define OFFS(x) "16*("#x")"
++#define PF_OFFS(x) "256+16*("#x")"
++#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
++#define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
++#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
++#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
++#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
++#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
++#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
++#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
++#define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
++#define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
++#define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
++#define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
++#define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
++
++
++static void
++xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
++{
++ unsigned int lines = bytes >> 8;
++ unsigned long cr0;
++ xmm_store_t xmm_save[4];
++
++ XMMS_SAVE;
++
++ asm volatile (
++#undef BLOCK
++#define BLOCK(i) \
++ LD(i,0) \
++ LD(i+1,1) \
++ PF1(i) \
++ PF1(i+2) \
++ LD(i+2,2) \
++ LD(i+3,3) \
++ PF0(i+4) \
++ PF0(i+6) \
++ XO1(i,0) \
++ XO1(i+1,1) \
++ XO1(i+2,2) \
++ XO1(i+3,3) \
++ ST(i,0) \
++ ST(i+1,1) \
++ ST(i+2,2) \
++ ST(i+3,3) \
++
++
++ PF0(0)
++ PF0(2)
++
++ " .align 32 ;\n"
++ " 1: ;\n"
++
++ BLOCK(0)
++ BLOCK(4)
++ BLOCK(8)
++ BLOCK(12)
++
++ " addq %[inc], %[p1] ;\n"
++ " addq %[inc], %[p2] ;\n"
++ " decl %[cnt] ; jnz 1b"
++ : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
++ : [inc] "r" (256UL)
++ : "memory");
++
++ XMMS_RESTORE;
++}
++
++static void
++xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
++ unsigned long *p3)
++{
++ unsigned int lines = bytes >> 8;
++ xmm_store_t xmm_save[4];
++ unsigned long cr0;
++
++ XMMS_SAVE;
++
++ __asm__ __volatile__ (
++#undef BLOCK
++#define BLOCK(i) \
++ PF1(i) \
++ PF1(i+2) \
++ LD(i,0) \
++ LD(i+1,1) \
++ LD(i+2,2) \
++ LD(i+3,3) \
++ PF2(i) \
++ PF2(i+2) \
++ PF0(i+4) \
++ PF0(i+6) \
++ XO1(i,0) \
++ XO1(i+1,1) \
++ XO1(i+2,2) \
++ XO1(i+3,3) \
++ XO2(i,0) \
++ XO2(i+1,1) \
++ XO2(i+2,2) \
++ XO2(i+3,3) \
++ ST(i,0) \
++ ST(i+1,1) \
++ ST(i+2,2) \
++ ST(i+3,3) \
++
++
++ PF0(0)
++ PF0(2)
++
++ " .align 32 ;\n"
++ " 1: ;\n"
++
++ BLOCK(0)
++ BLOCK(4)
++ BLOCK(8)
++ BLOCK(12)
++
++ " addq %[inc], %[p1] ;\n"
++ " addq %[inc], %[p2] ;\n"
++ " addq %[inc], %[p3] ;\n"
++ " decl %[cnt] ; jnz 1b"
++ : [cnt] "+r" (lines),
++ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
++ : [inc] "r" (256UL)
++ : "memory");
++ XMMS_RESTORE;
++}
++
++static void
++xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
++ unsigned long *p3, unsigned long *p4)
++{
++ unsigned int lines = bytes >> 8;
++ xmm_store_t xmm_save[4];
++ unsigned long cr0;
++
++ XMMS_SAVE;
++
++ __asm__ __volatile__ (
++#undef BLOCK
++#define BLOCK(i) \
++ PF1(i) \
++ PF1(i+2) \
++ LD(i,0) \
++ LD(i+1,1) \
++ LD(i+2,2) \
++ LD(i+3,3) \
++ PF2(i) \
++ PF2(i+2) \
++ XO1(i,0) \
++ XO1(i+1,1) \
++ XO1(i+2,2) \
++ XO1(i+3,3) \
++ PF3(i) \
++ PF3(i+2) \
++ PF0(i+4) \
++ PF0(i+6) \
++ XO2(i,0) \
++ XO2(i+1,1) \
++ XO2(i+2,2) \
++ XO2(i+3,3) \
++ XO3(i,0) \
++ XO3(i+1,1) \
++ XO3(i+2,2) \
++ XO3(i+3,3) \
++ ST(i,0) \
++ ST(i+1,1) \
++ ST(i+2,2) \
++ ST(i+3,3) \
++
++
++ PF0(0)
++ PF0(2)
++
++ " .align 32 ;\n"
++ " 1: ;\n"
++
++ BLOCK(0)
++ BLOCK(4)
++ BLOCK(8)
++ BLOCK(12)
++
++ " addq %[inc], %[p1] ;\n"
++ " addq %[inc], %[p2] ;\n"
++ " addq %[inc], %[p3] ;\n"
++ " addq %[inc], %[p4] ;\n"
++ " decl %[cnt] ; jnz 1b"
++ : [cnt] "+c" (lines),
++ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
++ : [inc] "r" (256UL)
++ : "memory" );
++
++ XMMS_RESTORE;
++}
++
++static void
++xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
++ unsigned long *p3, unsigned long *p4, unsigned long *p5)
++{
++ unsigned int lines = bytes >> 8;
++ xmm_store_t xmm_save[4];
++ unsigned long cr0;
++
++ XMMS_SAVE;
++
++ __asm__ __volatile__ (
++#undef BLOCK
++#define BLOCK(i) \
++ PF1(i) \
++ PF1(i+2) \
++ LD(i,0) \
++ LD(i+1,1) \
++ LD(i+2,2) \
++ LD(i+3,3) \
++ PF2(i) \
++ PF2(i+2) \
++ XO1(i,0) \
++ XO1(i+1,1) \
++ XO1(i+2,2) \
++ XO1(i+3,3) \
++ PF3(i) \
++ PF3(i+2) \
++ XO2(i,0) \
++ XO2(i+1,1) \
++ XO2(i+2,2) \
++ XO2(i+3,3) \
++ PF4(i) \
++ PF4(i+2) \
++ PF0(i+4) \
++ PF0(i+6) \
++ XO3(i,0) \
++ XO3(i+1,1) \
++ XO3(i+2,2) \
++ XO3(i+3,3) \
++ XO4(i,0) \
++ XO4(i+1,1) \
++ XO4(i+2,2) \
++ XO4(i+3,3) \
++ ST(i,0) \
++ ST(i+1,1) \
++ ST(i+2,2) \
++ ST(i+3,3) \
++
++
++ PF0(0)
++ PF0(2)
++
++ " .align 32 ;\n"
++ " 1: ;\n"
++
++ BLOCK(0)
++ BLOCK(4)
++ BLOCK(8)
++ BLOCK(12)
++
++ " addq %[inc], %[p1] ;\n"
++ " addq %[inc], %[p2] ;\n"
++ " addq %[inc], %[p3] ;\n"
++ " addq %[inc], %[p4] ;\n"
++ " addq %[inc], %[p5] ;\n"
++ " decl %[cnt] ; jnz 1b"
++ : [cnt] "+c" (lines),
++ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
++ [p5] "+r" (p5)
++ : [inc] "r" (256UL)
++ : "memory");
++
++ XMMS_RESTORE;
++}
++
++static struct xor_block_template xor_block_sse = {
++ .name = "generic_sse",
++ .do_2 = xor_sse_2,
++ .do_3 = xor_sse_3,
++ .do_4 = xor_sse_4,
++ .do_5 = xor_sse_5,
++};
++
++#undef XOR_TRY_TEMPLATES
++#define XOR_TRY_TEMPLATES \
++ do { \
++ xor_speed(&xor_block_sse); \
++ } while (0)
++
++/* We force the use of the SSE xor block because it can write around L2.
++ We may also be able to load into the L1 only depending on how the cpu
++ deals with a load to a line that is being prefetched. */
++#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/irq_vectors.h 2007-08-27 14:01:51.000000000 -0400
+@@ -0,0 +1,123 @@
++/*
++ * This file should contain #defines for all of the interrupt vector
++ * numbers used by this architecture.
++ *
++ * In addition, there are some standard defines:
++ *
++ * FIRST_EXTERNAL_VECTOR:
++ * The first free place for external interrupts
++ *
++ * SYSCALL_VECTOR:
++ * The IRQ vector under which a syscall makes the user-to-kernel
++ * transition.
++ *
++ * TIMER_IRQ:
++ * The IRQ number the timer interrupt comes in at.
++ *
++ * NR_IRQS:
++ * The total number of interrupt vectors (including all the
++ * architecture specific interrupts) needed.
++ *
++ */
++#ifndef _ASM_IRQ_VECTORS_H
++#define _ASM_IRQ_VECTORS_H
++
++/*
++ * IDT vectors usable for external interrupt sources start
++ * at 0x20:
++ */
++#define FIRST_EXTERNAL_VECTOR 0x20
++
++#define SYSCALL_VECTOR 0x80
++
++/*
++ * Vectors 0x20-0x2f are used for ISA interrupts.
++ */
++
++#if 0
++/*
++ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
++ *
++ * some of the following vectors are 'rare', they are merged
++ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
++ * TLB, reschedule and local APIC vectors are performance-critical.
++ *
++ * Vectors 0xf0-0xfa are free (reserved for future Linux use).
++ */
++#define INVALIDATE_TLB_VECTOR 0xfd
++#define RESCHEDULE_VECTOR 0xfc
++#define CALL_FUNCTION_VECTOR 0xfb
++
++#define THERMAL_APIC_VECTOR 0xf0
++/*
++ * Local APIC timer IRQ vector is on a different priority level,
++ * to work around the 'lost local interrupt if more than 2 IRQ
++ * sources per level' errata.
++ */
++#define LOCAL_TIMER_VECTOR 0xef
++#endif
++
++#define SPURIOUS_APIC_VECTOR 0xff
++#define ERROR_APIC_VECTOR 0xfe
++
++/*
++ * First APIC vector available to drivers: (vectors 0x30-0xee)
++ * we start at 0x31 to spread out vectors evenly between priority
++ * levels. (0x80 is the syscall vector)
++ */
++#define FIRST_DEVICE_VECTOR 0x31
++#define FIRST_SYSTEM_VECTOR 0xef
++
++/*
++ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
++ * Right now the APIC is mostly only used for SMP.
++ * 256 vectors is an architectural limit. (we can have
++ * more than 256 devices theoretically, but they will
++ * have to use shared interrupts)
++ * Since vectors 0x00-0x1f are used/reserved for the CPU,
++ * the usable vector space is 0x20-0xff (224 vectors)
++ */
++
++#define RESCHEDULE_VECTOR 0
++#define CALL_FUNCTION_VECTOR 1
++#define NR_IPIS 2
++
++/*
++ * The maximum number of vectors supported by i386 processors
++ * is limited to 256. For processors other than i386, NR_VECTORS
++ * should be changed accordingly.
++ */
++#define NR_VECTORS 256
++
++#define FPU_IRQ 13
++
++#define FIRST_VM86_IRQ 3
++#define LAST_VM86_IRQ 15
++#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
++
++/*
++ * The flat IRQ space is divided into two regions:
++ * 1. A one-to-one mapping of real physical IRQs. This space is only used
++ * if we have physical device-access privilege. This region is at the
++ * start of the IRQ space so that existing device drivers do not need
++ * to be modified to translate physical IRQ numbers into our IRQ space.
++ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
++ * are bound using the provided bind/unbind functions.
++ */
++
++#define PIRQ_BASE 0
++#define NR_PIRQS 256
++
++#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
++#define NR_DYNIRQS 256
++
++#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
++#define NR_IRQ_VECTORS NR_IRQS
++
++#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
++#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
++
++#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
++#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
++
++#endif /* _ASM_IRQ_VECTORS_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/mach_time.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,111 @@
++/*
++ * include/asm-i386/mach-default/mach_time.h
++ *
++ * Machine specific set RTC function for generic.
++ * Split out from time.c by Osamu Tomita <tomita@cinet.co.jp>
++ */
++#ifndef _MACH_TIME_H
++#define _MACH_TIME_H
++
++#include <asm-i386/mc146818rtc.h>
++
++/* window (in usec) around the 500 ms mark for calling set_rtc_mmss() */
++/* used in arch/i386/time.c::do_timer_interrupt() */
++#define USEC_AFTER 500000
++#define USEC_BEFORE 500000
++
++/*
++ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
++ * called 500 ms after the second nowtime has started, because when
++ * nowtime is written into the registers of the CMOS clock, it will
++ * jump to the next second precisely 500 ms later. Check the Motorola
++ * MC146818A or Dallas DS12887 data sheet for details.
++ *
++ * BUG: This routine does not handle hour overflow properly; it just
++ * sets the minutes. Usually you'll only notice that after reboot!
++ */
++static inline int mach_set_rtc_mmss(unsigned long nowtime)
++{
++ int retval = 0;
++ int real_seconds, real_minutes, cmos_minutes;
++ unsigned char save_control, save_freq_select;
++
++ save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
++ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
++
++ save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
++ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
++
++ cmos_minutes = CMOS_READ(RTC_MINUTES);
++ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
++ BCD_TO_BIN(cmos_minutes);
++
++ /*
++ * since we're only adjusting minutes and seconds,
++ * don't interfere with hour overflow. This avoids
++ * messing with unknown time zones but requires your
++ * RTC not to be off by more than 15 minutes
++ */
++ real_seconds = nowtime % 60;
++ real_minutes = nowtime / 60;
++ if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
++ real_minutes += 30; /* correct for half hour time zone */
++ real_minutes %= 60;
++
++ if (abs(real_minutes - cmos_minutes) < 30) {
++ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
++ BIN_TO_BCD(real_seconds);
++ BIN_TO_BCD(real_minutes);
++ }
++ CMOS_WRITE(real_seconds,RTC_SECONDS);
++ CMOS_WRITE(real_minutes,RTC_MINUTES);
++ } else {
++ printk(KERN_WARNING
++ "set_rtc_mmss: can't update from %d to %d\n",
++ cmos_minutes, real_minutes);
++ retval = -1;
++ }
++
++ /* The following flags have to be released exactly in this order,
++ * otherwise the DS12887 (popular MC146818A clone with integrated
++ * battery and quartz) will not reset the oscillator and will not
++ * update precisely 500 ms later. You won't find this mentioned in
++ * the Dallas Semiconductor data sheets, but who believes data
++ * sheets anyway ... -- Markus Kuhn
++ */
++ CMOS_WRITE(save_control, RTC_CONTROL);
++ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
++
++ return retval;
++}
++
++static inline unsigned long mach_get_cmos_time(void)
++{
++ unsigned int year, mon, day, hour, min, sec;
++
++ do {
++ sec = CMOS_READ(RTC_SECONDS);
++ min = CMOS_READ(RTC_MINUTES);
++ hour = CMOS_READ(RTC_HOURS);
++ day = CMOS_READ(RTC_DAY_OF_MONTH);
++ mon = CMOS_READ(RTC_MONTH);
++ year = CMOS_READ(RTC_YEAR);
++ } while (sec != CMOS_READ(RTC_SECONDS));
++
++ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
++ BCD_TO_BIN(sec);
++ BCD_TO_BIN(min);
++ BCD_TO_BIN(hour);
++ BCD_TO_BIN(day);
++ BCD_TO_BIN(mon);
++ BCD_TO_BIN(year);
++ }
++
++ year += 1900;
++ if (year < 1970)
++ year += 100;
++
++ return mktime(year, mon, day, hour, min, sec);
++}
++
++#endif /* !_MACH_TIME_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/mach_timer.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,50 @@
++/*
++ * include/asm-i386/mach-default/mach_timer.h
++ *
++ * Machine specific calibrate_tsc() for generic.
++ * Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp>
++ */
++/* ------ Calibrate the TSC -------
++ * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
++ * Too much 64-bit arithmetic here to do this cleanly in C, and for
++ * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
++ * output busy loop as low as possible. We avoid reading the CTC registers
++ * directly because of the awkward 8-bit access mechanism of the 82C54
++ * device.
++ */
++#ifndef _MACH_TIMER_H
++#define _MACH_TIMER_H
++
++#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
++#define CALIBRATE_LATCH \
++ ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
++
++static inline void mach_prepare_counter(void)
++{
++ /* Set the Gate high, disable speaker */
++ outb((inb(0x61) & ~0x02) | 0x01, 0x61);
++
++ /*
++ * Now let's take care of CTC channel 2
++ *
++ * Set the Gate high, program CTC channel 2 for mode 0,
++ * (interrupt on terminal count mode), binary count,
++ * load 5 * LATCH count, (LSB and MSB) to begin countdown.
++ *
++ * Some devices need a delay here.
++ */
++ outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */
++ outb_p(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */
++ outb_p(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */
++}
++
++static inline void mach_countup(unsigned long *count_p)
++{
++ unsigned long count = 0;
++ do {
++ count++;
++ } while ((inb_p(0x61) & 0x20) == 0);
++ *count_p = count;
++}
++
++#endif /* !_MACH_TIMER_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/setup_arch_post.h 2007-08-27 14:01:51.000000000 -0400
+@@ -0,0 +1,63 @@
++/**
++ * machine_specific_* - Hooks for machine specific setup.
++ *
++ * Description:
++ * This is included late in kernel/setup.c so that it can make
++ * use of all of the static functions.
++ **/
++
++#include <xen/interface/callback.h>
++
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void nmi(void);
++
++static void __init machine_specific_arch_setup(void)
++{
++ int ret;
++ static struct callback_register __initdata event = {
++ .type = CALLBACKTYPE_event,
++ .address = (unsigned long) hypervisor_callback,
++ };
++ static struct callback_register __initdata failsafe = {
++ .type = CALLBACKTYPE_failsafe,
++ .address = (unsigned long)failsafe_callback,
++ };
++ static struct callback_register __initdata syscall = {
++ .type = CALLBACKTYPE_syscall,
++ .address = (unsigned long)system_call,
++ };
++#ifdef CONFIG_X86_LOCAL_APIC
++ static struct callback_register __initdata nmi_cb = {
++ .type = CALLBACKTYPE_nmi,
++ .address = (unsigned long)nmi,
++ };
++#endif
++
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
++ if (ret == 0)
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
++ if (ret == 0)
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &syscall);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret == -ENOSYS)
++ ret = HYPERVISOR_set_callbacks(
++ event.address,
++ failsafe.address,
++ syscall.address);
++#endif
++ BUG_ON(ret);
++
++#ifdef CONFIG_X86_LOCAL_APIC
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret == -ENOSYS) {
++ static struct xennmi_callback __initdata cb = {
++ .handler_address = (unsigned long)nmi
++ };
++
++ HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
++ }
++#endif
++#endif
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/setup_arch_pre.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,5 @@
++/* Hook to call BIOS initialisation function */
++
++#define ARCH_SETUP machine_specific_arch_setup();
++
++static void __init machine_specific_arch_setup(void);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/balloon.h 2007-08-27 14:01:52.000000000 -0400
+@@ -0,0 +1,57 @@
++/******************************************************************************
++ * balloon.h
++ *
++ * Xen balloon driver - enables returning/claiming memory to/from Xen.
++ *
++ * Copyright (c) 2003, B Dragovic
++ * Copyright (c) 2003-2004, M Williamson, K Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __ASM_BALLOON_H__
++#define __ASM_BALLOON_H__
++
++/*
++ * Inform the balloon driver that it should allow some slop for device-driver
++ * memory activities.
++ */
++void balloon_update_driver_allowance(long delta);
++
++/* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */
++struct page **alloc_empty_pages_and_pagevec(int nr_pages);
++void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages);
++
++void balloon_release_driver_page(struct page *page);
++
++/*
++ * Prevent the balloon driver from changing the memory reservation during
++ * a driver critical region.
++ */
++extern spinlock_t balloon_lock;
++#define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags)
++#define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
++
++#endif /* __ASM_BALLOON_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/blkif.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,97 @@
++#ifndef __XEN_BLKIF_H__
++#define __XEN_BLKIF_H__
++
++#include <xen/interface/io/ring.h>
++#include <xen/interface/io/blkif.h>
++#include <xen/interface/io/protocols.h>
++
++/* Not a real protocol. Used to generate ring structs which contain
++ * the elements common to all protocols only. This way we get a
++ * compiler-checkable way to use common struct elements, so we can
++ * avoid using switch(protocol) in a number of places. */
++struct blkif_common_request {
++ char dummy;
++};
++struct blkif_common_response {
++ char dummy;
++};
++
++/* i386 protocol version */
++#pragma pack(push, 4)
++struct blkif_x86_32_request {
++ uint8_t operation; /* BLKIF_OP_??? */
++ uint8_t nr_segments; /* number of segments */
++ blkif_vdev_t handle; /* only for read/write requests */
++ uint64_t id; /* private guest value, echoed in resp */
++ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
++ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++};
++struct blkif_x86_32_response {
++ uint64_t id; /* copied from request */
++ uint8_t operation; /* copied from request */
++ int16_t status; /* BLKIF_RSP_??? */
++};
++typedef struct blkif_x86_32_request blkif_x86_32_request_t;
++typedef struct blkif_x86_32_response blkif_x86_32_response_t;
++#pragma pack(pop)
++
++/* x86_64 protocol version */
++struct blkif_x86_64_request {
++ uint8_t operation; /* BLKIF_OP_??? */
++ uint8_t nr_segments; /* number of segments */
++ blkif_vdev_t handle; /* only for read/write requests */
++ uint64_t __attribute__((__aligned__(8))) id;
++ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
++ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++};
++struct blkif_x86_64_response {
++ uint64_t __attribute__((__aligned__(8))) id;
++ uint8_t operation; /* copied from request */
++ int16_t status; /* BLKIF_RSP_??? */
++};
++typedef struct blkif_x86_64_request blkif_x86_64_request_t;
++typedef struct blkif_x86_64_response blkif_x86_64_response_t;
++
++DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response);
++DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response);
++DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response);
++
++union blkif_back_rings {
++ blkif_back_ring_t native;
++ blkif_common_back_ring_t common;
++ blkif_x86_32_back_ring_t x86_32;
++ blkif_x86_64_back_ring_t x86_64;
++};
++typedef union blkif_back_rings blkif_back_rings_t;
++
++enum blkif_protocol {
++ BLKIF_PROTOCOL_NATIVE = 1,
++ BLKIF_PROTOCOL_X86_32 = 2,
++ BLKIF_PROTOCOL_X86_64 = 3,
++};
++
++static inline void blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src)
++{
++ int i;
++ dst->operation = src->operation;
++ dst->nr_segments = src->nr_segments;
++ dst->handle = src->handle;
++ dst->id = src->id;
++ dst->sector_number = src->sector_number;
++ for (i = 0; i < src->nr_segments; i++)
++ dst->seg[i] = src->seg[i];
++}
++
++static inline void blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src)
++{
++ int i;
++ dst->operation = src->operation;
++ dst->nr_segments = src->nr_segments;
++ dst->handle = src->handle;
++ dst->id = src->id;
++ dst->sector_number = src->sector_number;
++ for (i = 0; i < src->nr_segments; i++)
++ dst->seg[i] = src->seg[i];
++}
++
++#endif /* __XEN_BLKIF_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/cpu_hotplug.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,44 @@
++#ifndef __XEN_CPU_HOTPLUG_H__
++#define __XEN_CPU_HOTPLUG_H__
++
++#include <linux/kernel.h>
++#include <linux/cpumask.h>
++
++#if defined(CONFIG_X86) && defined(CONFIG_SMP)
++extern cpumask_t cpu_initialized_map;
++#define cpu_set_initialized(cpu) cpu_set(cpu, cpu_initialized_map)
++#else
++#define cpu_set_initialized(cpu) ((void)0)
++#endif
++
++#if defined(CONFIG_HOTPLUG_CPU)
++
++int cpu_up_check(unsigned int cpu);
++void init_xenbus_allowed_cpumask(void);
++int smp_suspend(void);
++void smp_resume(void);
++
++void cpu_bringup(void);
++
++#else /* !defined(CONFIG_HOTPLUG_CPU) */
++
++#define cpu_up_check(cpu) (0)
++#define init_xenbus_allowed_cpumask() ((void)0)
++
++static inline int smp_suspend(void)
++{
++ if (num_online_cpus() > 1) {
++ printk(KERN_WARNING "Can't suspend SMP guests "
++ "without CONFIG_HOTPLUG_CPU\n");
++ return -EOPNOTSUPP;
++ }
++ return 0;
++}
++
++static inline void smp_resume(void)
++{
++}
++
++#endif /* !defined(CONFIG_HOTPLUG_CPU) */
++
++#endif /* __XEN_CPU_HOTPLUG_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/driver_util.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,14 @@
++
++#ifndef __ASM_XEN_DRIVER_UTIL_H__
++#define __ASM_XEN_DRIVER_UTIL_H__
++
++#include <linux/vmalloc.h>
++#include <linux/device.h>
++
++/* Allocate/destroy a 'vmalloc' VM area. */
++extern struct vm_struct *alloc_vm_area(unsigned long size);
++extern void free_vm_area(struct vm_struct *area);
++
++extern struct class *get_xen_class(void);
++
++#endif /* __ASM_XEN_DRIVER_UTIL_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/evtchn.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,126 @@
++/******************************************************************************
++ * evtchn.h
++ *
++ * Communication via Xen event channels.
++ * Also definitions for the device that demuxes notifications to userspace.
++ *
++ * Copyright (c) 2004-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __ASM_EVTCHN_H__
++#define __ASM_EVTCHN_H__
++
++#include <linux/interrupt.h>
++#include <asm/hypervisor.h>
++#include <asm/ptrace.h>
++#include <asm/synch_bitops.h>
++#include <xen/interface/event_channel.h>
++#include <linux/smp.h>
++
++/*
++ * LOW-LEVEL DEFINITIONS
++ */
++
++/*
++ * Dynamically bind an event source to an IRQ-like callback handler.
++ * On some platforms this may not be implemented via the Linux IRQ subsystem.
++ * The IRQ argument passed to the callback handler is the same as returned
++ * from the bind call. It may not correspond to a Linux IRQ number.
++ * Returns IRQ or negative errno.
++ */
++int bind_caller_port_to_irqhandler(
++ unsigned int caller_port,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id);
++int bind_listening_port_to_irqhandler(
++ unsigned int remote_domain,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id);
++int bind_interdomain_evtchn_to_irqhandler(
++ unsigned int remote_domain,
++ unsigned int remote_port,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id);
++int bind_virq_to_irqhandler(
++ unsigned int virq,
++ unsigned int cpu,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id);
++int bind_ipi_to_irqhandler(
++ unsigned int ipi,
++ unsigned int cpu,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id);
++
++/*
++ * Common unbind function for all event sources. Takes IRQ to unbind from.
++ * Automatically closes the underlying event channel (except for bindings
++ * made with bind_caller_port_to_irqhandler()).
++ */
++void unbind_from_irqhandler(unsigned int irq, void *dev_id);
++
++void irq_resume(void);
++
++/* Entry point for notifications into Linux subsystems. */
++asmlinkage void evtchn_do_upcall(struct pt_regs *regs);
++
++/* Entry point for notifications into the userland character device. */
++void evtchn_device_upcall(int port);
++
++void mask_evtchn(int port);
++void unmask_evtchn(int port);
++
++static inline void clear_evtchn(int port)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ synch_clear_bit(port, s->evtchn_pending);
++}
++
++static inline void notify_remote_via_evtchn(int port)
++{
++ struct evtchn_send send = { .port = port };
++ (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
++}
++
++/*
++ * Use these to access the event channel underlying the IRQ handle returned
++ * by bind_*_to_irqhandler().
++ */
++void notify_remote_via_irq(int irq);
++int irq_to_evtchn_port(int irq);
++
++#endif /* __ASM_EVTCHN_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/features.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,20 @@
++/******************************************************************************
++ * features.h
++ *
++ * Query the features reported by Xen.
++ *
++ * Copyright (c) 2006, Ian Campbell
++ */
++
++#ifndef __ASM_XEN_FEATURES_H__
++#define __ASM_XEN_FEATURES_H__
++
++#include <xen/interface/version.h>
++
++extern void setup_xen_features(void);
++
++extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32];
++
++#define xen_feature(flag) (xen_features[flag])
++
++#endif /* __ASM_XEN_FEATURES_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/gnttab.h 2007-08-27 14:02:10.000000000 -0400
+@@ -0,0 +1,138 @@
++/******************************************************************************
++ * gnttab.h
++ *
++ * Two sets of functionality:
++ * 1. Granting foreign access to our memory reservation.
++ * 2. Accessing others' memory reservations via grant references.
++ * (i.e., mechanisms for both sender and recipient of grant references)
++ *
++ * Copyright (c) 2004-2005, K A Fraser
++ * Copyright (c) 2005, Christopher Clark
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __ASM_GNTTAB_H__
++#define __ASM_GNTTAB_H__
++
++#include <asm/hypervisor.h>
++#include <asm/maddr.h> /* maddr_t */
++#include <xen/interface/grant_table.h>
++#include <xen/features.h>
++
++struct gnttab_free_callback {
++ struct gnttab_free_callback *next;
++ void (*fn)(void *);
++ void *arg;
++ u16 count;
++};
++
++int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
++ int readonly);
++
++/*
++ * End access through the given grant reference, iff the grant entry is no
++ * longer in use. Return 1 if the grant entry was freed, 0 if it is still in
++ * use.
++ */
++int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
++
++/*
++ * Eventually end access through the given grant reference, and once that
++ * access has been ended, free the given page too. Access will be ended
++ * immediately iff the grant entry is not in use, otherwise it will happen
++ * some time later. page may be 0, in which case no freeing will occur.
++ */
++void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
++ unsigned long page);
++
++int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
++
++unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
++unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
++
++int gnttab_query_foreign_access(grant_ref_t ref);
++
++/*
++ * operations on reserved batches of grant references
++ */
++int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
++
++void gnttab_free_grant_reference(grant_ref_t ref);
++
++void gnttab_free_grant_references(grant_ref_t head);
++
++int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);
++
++int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
++
++void gnttab_release_grant_reference(grant_ref_t *private_head,
++ grant_ref_t release);
++
++void gnttab_request_free_callback(struct gnttab_free_callback *callback,
++ void (*fn)(void *), void *arg, u16 count);
++void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
++
++void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
++ unsigned long frame, int readonly);
++
++void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
++ unsigned long pfn);
++
++int gnttab_suspend(void);
++int gnttab_resume(void);
++
++static inline void
++gnttab_set_map_op(struct gnttab_map_grant_ref *map, maddr_t addr,
++ uint32_t flags, grant_ref_t ref, domid_t domid)
++{
++ if (flags & GNTMAP_contains_pte)
++ map->host_addr = addr;
++ else if (xen_feature(XENFEAT_auto_translated_physmap))
++ map->host_addr = __pa(addr);
++ else
++ map->host_addr = addr;
++
++ map->flags = flags;
++ map->ref = ref;
++ map->dom = domid;
++}
++
++static inline void
++gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, maddr_t addr,
++ uint32_t flags, grant_handle_t handle)
++{
++ if (flags & GNTMAP_contains_pte)
++ unmap->host_addr = addr;
++ else if (xen_feature(XENFEAT_auto_translated_physmap))
++ unmap->host_addr = __pa(addr);
++ else
++ unmap->host_addr = addr;
++
++ unmap->handle = handle;
++ unmap->dev_bus_addr = 0;
++}
++
++#endif /* __ASM_GNTTAB_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/hvm.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,23 @@
++/* Simple wrappers around HVM functions */
++#ifndef XEN_HVM_H__
++#define XEN_HVM_H__
++
++#include <xen/interface/hvm/params.h>
++
++static inline unsigned long hvm_get_parameter(int idx)
++{
++ struct xen_hvm_param xhv;
++ int r;
++
++ xhv.domid = DOMID_SELF;
++ xhv.index = idx;
++ r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
++ if (r < 0) {
++ printk(KERN_ERR "cannot get hvm parameter %d: %d.\n",
++ idx, r);
++ return 0;
++ }
++ return xhv.value;
++}
++
++#endif /* XEN_HVM_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/hypercall.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,24 @@
++#ifndef __XEN_HYPERCALL_H__
++#define __XEN_HYPERCALL_H__
++
++#include <asm/hypercall.h>
++
++static inline int
++HYPERVISOR_multicall_check(
++ multicall_entry_t *call_list, int nr_calls,
++ const unsigned long *rc_list)
++{
++ int rc = HYPERVISOR_multicall(call_list, nr_calls);
++
++ if (unlikely(rc < 0))
++ return rc;
++ BUG_ON(rc);
++
++ for ( ; nr_calls > 0; --nr_calls, ++call_list)
++ if (unlikely(call_list->result != (rc_list ? *rc_list++ : 0)))
++ return nr_calls;
++
++ return 0;
++}
++
++#endif /* __XEN_HYPERCALL_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/hypervisor_sysfs.h 2007-08-27 14:02:04.000000000 -0400
+@@ -0,0 +1,32 @@
++/*
++ * copyright (c) 2006 IBM Corporation
++ * Authored by: Mike D. Day <ncmike@us.ibm.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef _HYP_SYSFS_H_
++#define _HYP_SYSFS_H_
++
++#include <linux/kobject.h>
++#include <linux/sysfs.h>
++
++#define HYPERVISOR_ATTR_RO(_name) \
++static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name)
++
++#define HYPERVISOR_ATTR_RW(_name) \
++static struct hyp_sysfs_attr _name##_attr = \
++ __ATTR(_name, 0644, _name##_show, _name##_store)
++
++extern struct subsystem hypervisor_subsys;
++
++struct hyp_sysfs_attr {
++ struct attribute attr;
++ ssize_t (*show)(struct hyp_sysfs_attr *, char *);
++ ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t);
++ void *hyp_attr_data;
++};
++
++#endif /* _HYP_SYSFS_H_ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/pcifront.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,76 @@
++/*
++ * PCI Frontend - arch-dependent declarations
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#ifndef __XEN_ASM_PCIFRONT_H__
++#define __XEN_ASM_PCIFRONT_H__
++
++#include <linux/spinlock.h>
++
++#ifdef __KERNEL__
++
++#ifndef __ia64__
++
++struct pcifront_device;
++struct pci_bus;
++
++struct pcifront_sd {
++ int domain;
++ struct pcifront_device *pdev;
++};
++
++static inline struct pcifront_device *
++pcifront_get_pdev(struct pcifront_sd *sd)
++{
++ return sd->pdev;
++}
++
++static inline void pcifront_init_sd(struct pcifront_sd *sd, int domain,
++ struct pcifront_device *pdev)
++{
++ sd->domain = domain;
++ sd->pdev = pdev;
++}
++
++#if defined(CONFIG_PCI_DOMAINS)
++static inline int pci_domain_nr(struct pci_bus *bus)
++{
++ struct pcifront_sd *sd = bus->sysdata;
++ return sd->domain;
++}
++static inline int pci_proc_domain(struct pci_bus *bus)
++{
++ return pci_domain_nr(bus);
++}
++#endif /* CONFIG_PCI_DOMAINS */
++
++#else /* __ia64__ */
++
++#include <asm/pci.h>
++#define pcifront_sd pci_controller
++
++static inline struct pcifront_device *
++pcifront_get_pdev(struct pcifront_sd *sd)
++{
++ return (struct pcifront_device *)sd->platform_data;
++}
++
++static inline void pcifront_init_sd(struct pcifront_sd *sd, int domain,
++ struct pcifront_device *pdev)
++{
++ sd->segment = domain;
++ sd->acpi_handle = NULL;
++ sd->iommu = NULL;
++ sd->windows = 0;
++ sd->window = NULL;
++ sd->platform_data = pdev;
++}
++
++#endif /* __ia64__ */
++
++extern struct rw_semaphore pci_bus_sem;
++
++#endif /* __KERNEL__ */
++
++#endif /* __XEN_ASM_PCIFRONT_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/public/evtchn.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,88 @@
++/******************************************************************************
++ * evtchn.h
++ *
++ * Interface to /dev/xen/evtchn.
++ *
++ * Copyright (c) 2003-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __LINUX_PUBLIC_EVTCHN_H__
++#define __LINUX_PUBLIC_EVTCHN_H__
++
++/*
++ * Bind a fresh port to VIRQ @virq.
++ * Return allocated port.
++ */
++#define IOCTL_EVTCHN_BIND_VIRQ \
++ _IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq))
++struct ioctl_evtchn_bind_virq {
++ unsigned int virq;
++};
++
++/*
++ * Bind a fresh port to remote <@remote_domain, @remote_port>.
++ * Return allocated port.
++ */
++#define IOCTL_EVTCHN_BIND_INTERDOMAIN \
++ _IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain))
++struct ioctl_evtchn_bind_interdomain {
++ unsigned int remote_domain, remote_port;
++};
++
++/*
++ * Allocate a fresh port for binding to @remote_domain.
++ * Return allocated port.
++ */
++#define IOCTL_EVTCHN_BIND_UNBOUND_PORT \
++ _IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port))
++struct ioctl_evtchn_bind_unbound_port {
++ unsigned int remote_domain;
++};
++
++/*
++ * Unbind previously allocated @port.
++ */
++#define IOCTL_EVTCHN_UNBIND \
++ _IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind))
++struct ioctl_evtchn_unbind {
++ unsigned int port;
++};
++
++/*
++ * Send an event to previously allocated @port.
++ */
++#define IOCTL_EVTCHN_NOTIFY \
++ _IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify))
++struct ioctl_evtchn_notify {
++ unsigned int port;
++};
++
++/* Clear and reinitialise the event buffer. Clear error condition. */
++#define IOCTL_EVTCHN_RESET \
++ _IOC(_IOC_NONE, 'E', 5, 0)
++
++#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/public/gntdev.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,105 @@
++/******************************************************************************
++ * gntdev.h
++ *
++ * Interface to /dev/xen/gntdev.
++ *
++ * Copyright (c) 2007, D G Murray
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __LINUX_PUBLIC_GNTDEV_H__
++#define __LINUX_PUBLIC_GNTDEV_H__
++
++struct ioctl_gntdev_grant_ref {
++ /* The domain ID of the grant to be mapped. */
++ uint32_t domid;
++ /* The grant reference of the grant to be mapped. */
++ uint32_t ref;
++};
++
++/*
++ * Inserts the grant references into the mapping table of an instance
++ * of gntdev. N.B. This does not perform the mapping, which is deferred
++ * until mmap() is called with @index as the offset.
++ */
++#define IOCTL_GNTDEV_MAP_GRANT_REF \
++_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
++struct ioctl_gntdev_map_grant_ref {
++ /* IN parameters */
++ /* The number of grants to be mapped. */
++ uint32_t count;
++ uint32_t pad;
++ /* OUT parameters */
++ /* The offset to be used on a subsequent call to mmap(). */
++ uint64_t index;
++ /* Variable IN parameter. */
++ /* Array of grant references, of size @count. */
++ struct ioctl_gntdev_grant_ref refs[1];
++};
++
++/*
++ * Removes the grant references from the mapping table of an instance of
++ * gntdev. N.B. munmap() must be called on the relevant virtual address(es)
++ * before this ioctl is called, or an error will result.
++ */
++#define IOCTL_GNTDEV_UNMAP_GRANT_REF \
++_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref))
++struct ioctl_gntdev_unmap_grant_ref {
++ /* IN parameters */
++ /* The offset was returned by the corresponding map operation. */
++ uint64_t index;
++ /* The number of pages to be unmapped. */
++ uint32_t count;
++ uint32_t pad;
++};
++
++/*
++ * Returns the offset in the driver's address space that corresponds
++ * to @vaddr. This can be used to perform a munmap(), followed by an
++ * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by
++ * the caller. The number of pages that were allocated at the same time as
++ * @vaddr is returned in @count.
++ *
++ * N.B. Where more than one page has been mapped into a contiguous range, the
++ * supplied @vaddr must correspond to the start of the range; otherwise
++ * an error will result. It is only possible to munmap() the entire
++ * contiguously-allocated range at once, and not any subrange thereof.
++ */
++#define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \
++_IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr))
++struct ioctl_gntdev_get_offset_for_vaddr {
++ /* IN parameters */
++ /* The virtual address of the first mapped page in a range. */
++ uint64_t vaddr;
++ /* OUT parameters */
++ /* The offset that was used in the initial mmap() operation. */
++ uint64_t offset;
++ /* The number of pages mapped in the VM area that begins at @vaddr. */
++ uint32_t count;
++ uint32_t pad;
++};
++
++#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/public/privcmd.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,79 @@
++/******************************************************************************
++ * privcmd.h
++ *
++ * Interface to /proc/xen/privcmd.
++ *
++ * Copyright (c) 2003-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __LINUX_PUBLIC_PRIVCMD_H__
++#define __LINUX_PUBLIC_PRIVCMD_H__
++
++#include <linux/types.h>
++
++#ifndef __user
++#define __user
++#endif
++
++typedef struct privcmd_hypercall
++{
++ __u64 op;
++ __u64 arg[5];
++} privcmd_hypercall_t;
++
++typedef struct privcmd_mmap_entry {
++ __u64 va;
++ __u64 mfn;
++ __u64 npages;
++} privcmd_mmap_entry_t;
++
++typedef struct privcmd_mmap {
++ int num;
++ domid_t dom; /* target domain */
++ privcmd_mmap_entry_t __user *entry;
++} privcmd_mmap_t;
++
++typedef struct privcmd_mmapbatch {
++ int num; /* number of pages to populate */
++ domid_t dom; /* target domain */
++ __u64 addr; /* virtual address */
++ xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */
++} privcmd_mmapbatch_t;
++
++/*
++ * @cmd: IOCTL_PRIVCMD_HYPERCALL
++ * @arg: &privcmd_hypercall_t
++ * Return: Value returned from execution of the specified hypercall.
++ */
++#define IOCTL_PRIVCMD_HYPERCALL \
++ _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
++#define IOCTL_PRIVCMD_MMAP \
++ _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
++#define IOCTL_PRIVCMD_MMAPBATCH \
++ _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
++
++#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/xen_proc.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,12 @@
++
++#ifndef __ASM_XEN_PROC_H__
++#define __ASM_XEN_PROC_H__
++
++#include <linux/proc_fs.h>
++
++extern struct proc_dir_entry *create_xen_proc_entry(
++ const char *name, mode_t mode);
++extern void remove_xen_proc_entry(
++ const char *name);
++
++#endif /* __ASM_XEN_PROC_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/xenbus.h 2007-08-27 14:02:08.000000000 -0400
+@@ -0,0 +1,302 @@
++/******************************************************************************
++ * xenbus.h
++ *
++ * Talks to Xen Store to figure out what devices we have.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * Copyright (C) 2005 XenSource Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef _XEN_XENBUS_H
++#define _XEN_XENBUS_H
++
++#include <linux/device.h>
++#include <linux/notifier.h>
++#include <linux/mutex.h>
++#include <linux/completion.h>
++#include <linux/init.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/xenbus.h>
++#include <xen/interface/io/xs_wire.h>
++
++/* Register callback to watch this node. */
++struct xenbus_watch
++{
++ struct list_head list;
++
++ /* Path being watched. */
++ const char *node;
++
++ /* Callback (executed in a process context with no locks held). */
++ void (*callback)(struct xenbus_watch *,
++ const char **vec, unsigned int len);
++
++ /* See XBWF_ definitions below. */
++ unsigned long flags;
++};
++
++/*
++ * Execute callback in its own kthread. Useful if the callback is long
++ * running or heavily serialised, to avoid taking out the main xenwatch thread
++ * for a long period of time (or even unwittingly causing a deadlock).
++ */
++#define XBWF_new_thread 1
++
++/* A xenbus device. */
++struct xenbus_device {
++ const char *devicetype;
++ const char *nodename;
++ const char *otherend;
++ int otherend_id;
++ struct xenbus_watch otherend_watch;
++ struct device dev;
++ enum xenbus_state state;
++ struct completion down;
++};
++
++static inline struct xenbus_device *to_xenbus_device(struct device *dev)
++{
++ return container_of(dev, struct xenbus_device, dev);
++}
++
++struct xenbus_device_id
++{
++ /* .../device/<device_type>/<identifier> */
++ char devicetype[32]; /* General class of device. */
++};
++
++/* A xenbus driver. */
++struct xenbus_driver {
++ char *name;
++ struct module *owner;
++ const struct xenbus_device_id *ids;
++ int (*probe)(struct xenbus_device *dev,
++ const struct xenbus_device_id *id);
++ void (*otherend_changed)(struct xenbus_device *dev,
++ enum xenbus_state backend_state);
++ int (*remove)(struct xenbus_device *dev);
++ int (*suspend)(struct xenbus_device *dev);
++ int (*suspend_cancel)(struct xenbus_device *dev);
++ int (*resume)(struct xenbus_device *dev);
++ int (*uevent)(struct xenbus_device *, char **, int, char *, int);
++ struct device_driver driver;
++ int (*read_otherend_details)(struct xenbus_device *dev);
++};
++
++static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
++{
++ return container_of(drv, struct xenbus_driver, driver);
++}
++
++int xenbus_register_frontend(struct xenbus_driver *drv);
++int xenbus_register_backend(struct xenbus_driver *drv);
++void xenbus_unregister_driver(struct xenbus_driver *drv);
++
++struct xenbus_transaction
++{
++ u32 id;
++};
++
++/* Nil transaction ID. */
++#define XBT_NIL ((struct xenbus_transaction) { 0 })
++
++char **xenbus_directory(struct xenbus_transaction t,
++ const char *dir, const char *node, unsigned int *num);
++void *xenbus_read(struct xenbus_transaction t,
++ const char *dir, const char *node, unsigned int *len);
++int xenbus_write(struct xenbus_transaction t,
++ const char *dir, const char *node, const char *string);
++int xenbus_mkdir(struct xenbus_transaction t,
++ const char *dir, const char *node);
++int xenbus_exists(struct xenbus_transaction t,
++ const char *dir, const char *node);
++int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node);
++int xenbus_transaction_start(struct xenbus_transaction *t);
++int xenbus_transaction_end(struct xenbus_transaction t, int abort);
++
++/* Single read and scanf: returns -errno or num scanned if > 0. */
++int xenbus_scanf(struct xenbus_transaction t,
++ const char *dir, const char *node, const char *fmt, ...)
++ __attribute__((format(scanf, 4, 5)));
++
++/* Single printf and write: returns -errno or 0. */
++int xenbus_printf(struct xenbus_transaction t,
++ const char *dir, const char *node, const char *fmt, ...)
++ __attribute__((format(printf, 4, 5)));
++
++/* Generic read function: NULL-terminated triples of name,
++ * sprintf-style type string, and pointer. Returns 0 or errno.*/
++int xenbus_gather(struct xenbus_transaction t, const char *dir, ...);
++
++/* notifier routines for when the xenstore comes up */
++int register_xenstore_notifier(struct notifier_block *nb);
++void unregister_xenstore_notifier(struct notifier_block *nb);
++
++int register_xenbus_watch(struct xenbus_watch *watch);
++void unregister_xenbus_watch(struct xenbus_watch *watch);
++void xs_suspend(void);
++void xs_resume(void);
++void xs_suspend_cancel(void);
++
++/* Used by xenbus_dev to borrow kernel's store connection. */
++void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg);
++
++/* Prepare for domain suspend: then resume or cancel the suspend. */
++void xenbus_suspend(void);
++void xenbus_resume(void);
++void xenbus_suspend_cancel(void);
++
++#define XENBUS_IS_ERR_READ(str) ({ \
++ if (!IS_ERR(str) && strlen(str) == 0) { \
++ kfree(str); \
++ str = ERR_PTR(-ERANGE); \
++ } \
++ IS_ERR(str); \
++})
++
++#define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
++
++
++/**
++ * Register a watch on the given path, using the given xenbus_watch structure
++ * for storage, and the given callback function as the callback. Return 0 on
++ * success, or -errno on error. On success, the given path will be saved as
++ * watch->node, and remains the caller's to free. On error, watch->node will
++ * be NULL, the device will switch to XenbusStateClosing, and the error will
++ * be saved in the store.
++ */
++int xenbus_watch_path(struct xenbus_device *dev, const char *path,
++ struct xenbus_watch *watch,
++ void (*callback)(struct xenbus_watch *,
++ const char **, unsigned int));
++
++
++/**
++ * Register a watch on the given path/path2, using the given xenbus_watch
++ * structure for storage, and the given callback function as the callback.
++ * Return 0 on success, or -errno on error. On success, the watched path
++ * (path/path2) will be saved as watch->node, and becomes the caller's to
++ * kfree(). On error, watch->node will be NULL, so the caller has nothing to
++ * free, the device will switch to XenbusStateClosing, and the error will be
++ * saved in the store.
++ */
++int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
++ const char *path2, struct xenbus_watch *watch,
++ void (*callback)(struct xenbus_watch *,
++ const char **, unsigned int));
++
++
++/**
++ * Advertise in the store a change of the given driver to the given new_state.
++ * Return 0 on success, or -errno on error. On error, the device will switch
++ * to XenbusStateClosing, and the error will be saved in the store.
++ */
++int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
++
++
++/**
++ * Grant access to the given ring_mfn to the peer of the given device. Return
++ * 0 on success, or -errno on error. On error, the device will switch to
++ * XenbusStateClosing, and the error will be saved in the store.
++ */
++int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
++
++
++/**
++ * Map a page of memory into this domain from another domain's grant table.
++ * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
++ * page to that address, and sets *vaddr to that address.
++ * xenbus_map_ring does not allocate the virtual address space (you must do
++ * this yourself!). It only maps in the page to the specified address.
++ * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
++ * or -ENOMEM on error. If an error is returned, device will switch to
++ * XenbusStateClosing and the error message will be saved in XenStore.
++ */
++struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev,
++ int gnt_ref);
++int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
++ grant_handle_t *handle, void *vaddr);
++
++
++/**
++ * Unmap a page of memory in this domain that was imported from another domain.
++ * Use xenbus_unmap_ring_vfree if you mapped in your memory with
++ * xenbus_map_ring_valloc (it will free the virtual address space).
++ * Returns 0 on success and returns GNTST_* on error
++ * (see xen/include/interface/grant_table.h).
++ */
++int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *);
++int xenbus_unmap_ring(struct xenbus_device *dev,
++ grant_handle_t handle, void *vaddr);
++
++
++/**
++ * Allocate an event channel for the given xenbus_device, assigning the newly
++ * created local port to *port. Return 0 on success, or -errno on error. On
++ * error, the device will switch to XenbusStateClosing, and the error will be
++ * saved in the store.
++ */
++int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
++
++
++/**
++ * Free an existing event channel. Returns 0 on success or -errno on error.
++ */
++int xenbus_free_evtchn(struct xenbus_device *dev, int port);
++
++
++/**
++ * Return the state of the driver rooted at the given store path, or
++ * XenbusStateUnknown if no state can be read.
++ */
++enum xenbus_state xenbus_read_driver_state(const char *path);
++
++
++/**
++ * Report the given negative errno into the store, along with the given
++ * formatted message.
++ */
++void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
++ ...);
++
++
++/**
++ * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
++ * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
++ * closedown of this driver and its peer.
++ */
++void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
++ ...);
++
++int xenbus_dev_init(void);
++
++const char *xenbus_strstate(enum xenbus_state state);
++int xenbus_dev_is_online(struct xenbus_device *dev);
++int xenbus_frontend_closed(struct xenbus_device *dev);
++
++#endif /* _XEN_XENBUS_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/xencons.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,19 @@
++#ifndef __ASM_XENCONS_H__
++#define __ASM_XENCONS_H__
++
++struct dom0_vga_console_info;
++void dom0_init_screen_info(const struct dom0_vga_console_info *info);
++
++void xencons_force_flush(void);
++void xencons_resume(void);
++
++/* Interrupt work hooks. Receive data, or kick data out. */
++void xencons_rx(char *buf, unsigned len, struct pt_regs *regs);
++void xencons_tx(void);
++
++int xencons_ring_init(void);
++int xencons_ring_send(const char *data, unsigned len);
++
++void xencons_early_setup(void);
++
++#endif /* __ASM_XENCONS_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/xenoprof.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,42 @@
++/******************************************************************************
++ * xen/xenoprof.h
++ *
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#ifndef __XEN_XENOPROF_H__
++#define __XEN_XENOPROF_H__
++#ifdef CONFIG_XEN
++
++#include <asm/xenoprof.h>
++
++struct oprofile_operations;
++int xenoprofile_init(struct oprofile_operations * ops);
++void xenoprofile_exit(void);
++
++struct xenoprof_shared_buffer {
++ char *buffer;
++ struct xenoprof_arch_shared_buffer arch;
++};
++#else
++#define xenoprofile_init(ops) (-ENOSYS)
++#define xenoprofile_exit() do { } while (0)
++
++#endif /* CONFIG_XEN */
++#endif /* __XEN_XENOPROF_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/scripts/Makefile.xen 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,14 @@
++
++# cherrypickxen($1 = allobj)
++cherrypickxen = $(foreach var, $(1), \
++ $(shell o=$(var); \
++ c=$${o%.o}-xen.c; \
++ s=$${o%.o}-xen.S; \
++ oxen=$${o%.o}-xen.o; \
++ [ -f $(srctree)/$(src)/$${c} ] || \
++ [ -f $(srctree)/$(src)/$${s} ] \
++ && echo $$oxen \
++ || echo $(var) ) \
++ )
++# filterxen($1 = allobj, $2 = noobjs)
++filterxen = $(filter-out $(2), $(1))
diff --git a/trunk/2.6.22/20012_xen3-auto-xen-drivers.patch1 b/trunk/2.6.22/20012_xen3-auto-xen-drivers.patch1
new file mode 100644
index 0000000..5b134b5
--- /dev/null
+++ b/trunk/2.6.22/20012_xen3-auto-xen-drivers.patch1
@@ -0,0 +1,28404 @@
+Subject: xen3 xen-drivers
+From: http://xenbits.xensource.com/xen-3.1-testing.hg (tip 15042)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+---
+ drivers/Makefile | 1
+ drivers/xen/Makefile | 20
+ drivers/xen/balloon/Makefile | 2
+ drivers/xen/balloon/balloon.c | 663 +++++++
+ drivers/xen/balloon/common.h | 58
+ drivers/xen/balloon/sysfs.c | 170 +
+ drivers/xen/blkback/Makefile | 3
+ drivers/xen/blkback/blkback.c | 614 ++++++
+ drivers/xen/blkback/common.h | 139 +
+ drivers/xen/blkback/interface.c | 181 ++
+ drivers/xen/blkback/vbd.c | 118 +
+ drivers/xen/blkback/xenbus.c | 533 +++++
+ drivers/xen/blkfront/Makefile | 5
+ drivers/xen/blkfront/blkfront.c | 902 ++++++++++
+ drivers/xen/blkfront/block.h | 142 +
+ drivers/xen/blkfront/vbd.c | 372 ++++
+ drivers/xen/blktap/Makefile | 5
+ drivers/xen/blktap/blktap.c | 1528 +++++++++++++++++
+ drivers/xen/blktap/common.h | 121 +
+ drivers/xen/blktap/interface.c | 174 +
+ drivers/xen/blktap/xenbus.c | 473 +++++
+ drivers/xen/char/Makefile | 2
+ drivers/xen/char/mem.c | 203 ++
+ drivers/xen/console/Makefile | 2
+ drivers/xen/console/console.c | 721 ++++++++
+ drivers/xen/console/xencons_ring.c | 143 +
+ drivers/xen/core/Makefile | 12
+ drivers/xen/core/cpu_hotplug.c | 172 +
+ drivers/xen/core/evtchn.c | 1015 +++++++++++
+ drivers/xen/core/features.c | 34
+ drivers/xen/core/gnttab.c | 631 +++++++
+ drivers/xen/core/hypervisor_sysfs.c | 59
+ drivers/xen/core/machine_kexec.c | 189 ++
+ drivers/xen/core/machine_reboot.c | 241 ++
+ drivers/xen/core/reboot.c | 249 ++
+ drivers/xen/core/smpboot.c | 452 +++++
+ drivers/xen/core/xen_proc.c | 23
+ drivers/xen/core/xen_sysfs.c | 378 ++++
+ drivers/xen/evtchn/Makefile | 2
+ drivers/xen/evtchn/evtchn.c | 469 +++++
+ drivers/xen/fbfront/Makefile | 2
+ drivers/xen/fbfront/xenfb.c | 752 ++++++++
+ drivers/xen/fbfront/xenkbd.c | 333 +++
+ drivers/xen/gntdev/Makefile | 1
+ drivers/xen/gntdev/gntdev.c | 973 ++++++++++
+ drivers/xen/netback/Makefile | 5
+ drivers/xen/netback/common.h | 157 +
+ drivers/xen/netback/interface.c | 336 +++
+ drivers/xen/netback/loopback.c | 320 +++
+ drivers/xen/netback/netback.c | 1496 ++++++++++++++++
+ drivers/xen/netback/xenbus.c | 448 +++++
+ drivers/xen/netfront/Makefile | 4
+ drivers/xen/netfront/netfront.c | 2133 ++++++++++++++++++++++++
+ drivers/xen/pciback/Makefile | 15
+ drivers/xen/pciback/conf_space.c | 426 ++++
+ drivers/xen/pciback/conf_space.h | 126 +
+ drivers/xen/pciback/conf_space_capability.c | 71
+ drivers/xen/pciback/conf_space_capability.h | 23
+ drivers/xen/pciback/conf_space_capability_pm.c | 128 +
+ drivers/xen/pciback/conf_space_capability_vpd.c | 42
+ drivers/xen/pciback/conf_space_header.c | 309 +++
+ drivers/xen/pciback/conf_space_quirks.c | 126 +
+ drivers/xen/pciback/conf_space_quirks.h | 35
+ drivers/xen/pciback/passthrough.c | 157 +
+ drivers/xen/pciback/pci_stub.c | 929 ++++++++++
+ drivers/xen/pciback/pciback.h | 93 +
+ drivers/xen/pciback/pciback_ops.c | 95 +
+ drivers/xen/pciback/slot.c | 151 +
+ drivers/xen/pciback/vpci.c | 204 ++
+ drivers/xen/pciback/xenbus.c | 454 +++++
+ drivers/xen/pcifront/Makefile | 7
+ drivers/xen/pcifront/pci.c | 46
+ drivers/xen/pcifront/pci_op.c | 268 +++
+ drivers/xen/pcifront/pcifront.h | 40
+ drivers/xen/pcifront/xenbus.c | 295 +++
+ drivers/xen/privcmd/Makefile | 2
+ drivers/xen/privcmd/privcmd.c | 284 +++
+ drivers/xen/tpmback/Makefile | 4
+ drivers/xen/tpmback/common.h | 85
+ drivers/xen/tpmback/interface.c | 167 +
+ drivers/xen/tpmback/tpmback.c | 944 ++++++++++
+ drivers/xen/tpmback/xenbus.c | 289 +++
+ drivers/xen/util.c | 70
+ drivers/xen/xenbus/Makefile | 9
+ drivers/xen/xenbus/xenbus_backend_client.c | 147 +
+ drivers/xen/xenbus/xenbus_client.c | 283 +++
+ drivers/xen/xenbus/xenbus_comms.c | 232 ++
+ drivers/xen/xenbus/xenbus_comms.h | 46
+ drivers/xen/xenbus/xenbus_dev.c | 404 ++++
+ drivers/xen/xenbus/xenbus_probe.c | 1086 ++++++++++++
+ drivers/xen/xenbus/xenbus_probe.h | 75
+ drivers/xen/xenbus/xenbus_probe_backend.c | 286 +++
+ drivers/xen/xenbus/xenbus_xs.c | 880 +++++++++
+ drivers/xen/xenoprof/xenoprofile.c | 500 +++++
+ 94 files changed, 28014 insertions(+)
+
+--- a/drivers/Makefile 2007-08-27 14:01:24.000000000 -0400
++++ b/drivers/Makefile 2007-08-27 14:01:25.000000000 -0400
+@@ -31,6 +31,7 @@ obj-y += base/ block/ misc/ mfd/ net/
+ obj-$(CONFIG_NUBUS) += nubus/
+ obj-$(CONFIG_ATM) += atm/
+ obj-y += macintosh/
++obj-$(CONFIG_XEN) += xen/
+ obj-$(CONFIG_IDE) += ide/
+ obj-$(CONFIG_FC4) += fc4/
+ obj-$(CONFIG_SCSI) += scsi/
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/Makefile 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,20 @@
++obj-y += core/
++obj-y += console/
++obj-y += evtchn/
++obj-y += privcmd/
++obj-y += xenbus/
++obj-y += gntdev/
++obj-y += balloon/
++obj-y += char/
++
++obj-y += util.o
++obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/
++obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/
++obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/
++obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmback/
++obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/
++obj-$(CONFIG_XEN_NETDEV_FRONTEND) += netfront/
++obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback/
++obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront/
++obj-$(CONFIG_XEN_FRAMEBUFFER) += fbfront/
++obj-$(CONFIG_XEN_KEYBOARD) += fbfront/
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/balloon/Makefile 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,2 @@
++
++obj-y := balloon.o sysfs.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/balloon/balloon.c 2007-08-27 14:02:09.000000000 -0400
+@@ -0,0 +1,663 @@
++/******************************************************************************
++ * balloon.c
++ *
++ * Xen balloon driver - enables returning/claiming memory to/from Xen.
++ *
++ * Copyright (c) 2003, B Dragovic
++ * Copyright (c) 2003-2004, M Williamson, K Fraser
++ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/smp_lock.h>
++#include <linux/pagemap.h>
++#include <linux/bootmem.h>
++#include <linux/highmem.h>
++#include <linux/vmalloc.h>
++#include <linux/mutex.h>
++#include <xen/xen_proc.h>
++#include <asm/hypervisor.h>
++#include <xen/balloon.h>
++#include <xen/interface/memory.h>
++#include <asm/maddr.h>
++#include <asm/page.h>
++#include <asm/pgalloc.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++#include <asm/tlb.h>
++#include <linux/highmem.h>
++#include <linux/list.h>
++#include <xen/xenbus.h>
++#include "common.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#ifdef CONFIG_PROC_FS
++static struct proc_dir_entry *balloon_pde;
++#endif
++
++static DEFINE_MUTEX(balloon_mutex);
++
++/*
++ * Protects atomic reservation decrease/increase against concurrent increases.
++ * Also protects non-atomic updates of current_pages and driver_pages, and
++ * balloon lists.
++ */
++DEFINE_SPINLOCK(balloon_lock);
++
++struct balloon_stats balloon_stats;
++
++/* We increase/decrease in batches which fit in a page */
++static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
++
++/* VM /proc information for memory */
++extern unsigned long totalram_pages;
++
++/* List of ballooned pages, threaded through the mem_map array. */
++static LIST_HEAD(ballooned_pages);
++
++/* Main work function, always executed in process context. */
++static void balloon_process(void *unused);
++static DECLARE_WORK(balloon_worker, balloon_process, NULL);
++static struct timer_list balloon_timer;
++
++/* When ballooning out (allocating memory to return to Xen) we don't really
++ want the kernel to try too hard since that can trigger the oom killer. */
++#define GFP_BALLOON \
++ (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
++
++#define PAGE_TO_LIST(p) (&(p)->lru)
++#define LIST_TO_PAGE(l) list_entry((l), struct page, lru)
++#define UNLIST_PAGE(p) \
++ do { \
++ list_del(PAGE_TO_LIST(p)); \
++ PAGE_TO_LIST(p)->next = NULL; \
++ PAGE_TO_LIST(p)->prev = NULL; \
++ } while(0)
++
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "xen_mem: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "xen_mem: " fmt, ##args)
++
++/* balloon_append: add the given page to the balloon. */
++static void balloon_append(struct page *page)
++{
++ /* Lowmem is re-populated first, so highmem pages go at list tail. */
++ if (PageHighMem(page)) {
++ list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
++ bs.balloon_high++;
++ } else {
++ list_add(PAGE_TO_LIST(page), &ballooned_pages);
++ bs.balloon_low++;
++ }
++}
++
++/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
++static struct page *balloon_retrieve(void)
++{
++ struct page *page;
++
++ if (list_empty(&ballooned_pages))
++ return NULL;
++
++ page = LIST_TO_PAGE(ballooned_pages.next);
++ UNLIST_PAGE(page);
++
++ if (PageHighMem(page))
++ bs.balloon_high--;
++ else
++ bs.balloon_low--;
++
++ return page;
++}
++
++static struct page *balloon_first_page(void)
++{
++ if (list_empty(&ballooned_pages))
++ return NULL;
++ return LIST_TO_PAGE(ballooned_pages.next);
++}
++
++static struct page *balloon_next_page(struct page *page)
++{
++ struct list_head *next = PAGE_TO_LIST(page)->next;
++ if (next == &ballooned_pages)
++ return NULL;
++ return LIST_TO_PAGE(next);
++}
++
++static void balloon_alarm(unsigned long unused)
++{
++ schedule_work(&balloon_worker);
++}
++
++static unsigned long current_target(void)
++{
++ unsigned long target = min(bs.target_pages, bs.hard_limit);
++ if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high))
++ target = bs.current_pages + bs.balloon_low + bs.balloon_high;
++ return target;
++}
++
++static int increase_reservation(unsigned long nr_pages)
++{
++ unsigned long pfn, i, flags;
++ struct page *page;
++ long rc;
++ struct xen_memory_reservation reservation = {
++ .address_bits = 0,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++
++ if (nr_pages > ARRAY_SIZE(frame_list))
++ nr_pages = ARRAY_SIZE(frame_list);
++
++ balloon_lock(flags);
++
++ page = balloon_first_page();
++ for (i = 0; i < nr_pages; i++) {
++ BUG_ON(page == NULL);
++		frame_list[i] = page_to_pfn(page);
++ page = balloon_next_page(page);
++ }
++
++ set_xen_guest_handle(reservation.extent_start, frame_list);
++ reservation.nr_extents = nr_pages;
++ rc = HYPERVISOR_memory_op(
++ XENMEM_populate_physmap, &reservation);
++ if (rc < nr_pages) {
++ if (rc > 0) {
++ int ret;
++
++ /* We hit the Xen hard limit: reprobe. */
++ reservation.nr_extents = rc;
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &reservation);
++ BUG_ON(ret != rc);
++ }
++ if (rc >= 0)
++ bs.hard_limit = (bs.current_pages + rc -
++ bs.driver_pages);
++ goto out;
++ }
++
++ for (i = 0; i < nr_pages; i++) {
++ page = balloon_retrieve();
++ BUG_ON(page == NULL);
++
++ pfn = page_to_pfn(page);
++ BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
++ phys_to_machine_mapping_valid(pfn));
++
++ set_phys_to_machine(pfn, frame_list[i]);
++
++#ifdef CONFIG_XEN
++ /* Link back into the page tables if not highmem. */
++ if (pfn < max_low_pfn) {
++ int ret;
++ ret = HYPERVISOR_update_va_mapping(
++ (unsigned long)__va(pfn << PAGE_SHIFT),
++ pfn_pte_ma(frame_list[i], PAGE_KERNEL),
++ 0);
++ BUG_ON(ret);
++ }
++#endif
++
++ /* Relinquish the page back to the allocator. */
++ ClearPageReserved(page);
++ init_page_count(page);
++ __free_page(page);
++ }
++
++ bs.current_pages += nr_pages;
++ totalram_pages = bs.current_pages;
++
++ out:
++ balloon_unlock(flags);
++
++ return 0;
++}
++
++static int decrease_reservation(unsigned long nr_pages)
++{
++ unsigned long pfn, i, flags;
++ struct page *page;
++ void *v;
++ int need_sleep = 0;
++ int ret;
++ struct xen_memory_reservation reservation = {
++ .address_bits = 0,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++
++ if (nr_pages > ARRAY_SIZE(frame_list))
++ nr_pages = ARRAY_SIZE(frame_list);
++
++ for (i = 0; i < nr_pages; i++) {
++ if ((page = alloc_page(GFP_BALLOON)) == NULL) {
++ nr_pages = i;
++ need_sleep = 1;
++ break;
++ }
++
++ pfn = page_to_pfn(page);
++ frame_list[i] = pfn_to_mfn(pfn);
++
++ if (!PageHighMem(page)) {
++ v = phys_to_virt(pfn << PAGE_SHIFT);
++ scrub_pages(v, 1);
++#ifdef CONFIG_XEN
++ ret = HYPERVISOR_update_va_mapping(
++ (unsigned long)v, __pte_ma(0), 0);
++ BUG_ON(ret);
++#endif
++ }
++#ifdef CONFIG_XEN_SCRUB_PAGES
++ else {
++ v = kmap(page);
++ scrub_pages(v, 1);
++ kunmap(page);
++ }
++#endif
++ }
++
++#ifdef CONFIG_XEN
++ /* Ensure that ballooned highmem pages don't have kmaps. */
++ kmap_flush_unused();
++ flush_tlb_all();
++#endif
++
++ balloon_lock(flags);
++
++ /* No more mappings: invalidate P2M and add to balloon. */
++ for (i = 0; i < nr_pages; i++) {
++ pfn = mfn_to_pfn(frame_list[i]);
++ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++ balloon_append(pfn_to_page(pfn));
++ }
++
++ set_xen_guest_handle(reservation.extent_start, frame_list);
++ reservation.nr_extents = nr_pages;
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
++ BUG_ON(ret != nr_pages);
++
++ bs.current_pages -= nr_pages;
++ totalram_pages = bs.current_pages;
++
++ balloon_unlock(flags);
++
++ return need_sleep;
++}
++
++/*
++ * We avoid multiple worker processes conflicting via the balloon mutex.
++ * We may of course race updates of the target counts (which are protected
++ * by the balloon lock), or with changes to the Xen hard limit, but we will
++ * recover from these in time.
++ */
++static void balloon_process(void *unused)
++{
++ int need_sleep = 0;
++ long credit;
++
++ mutex_lock(&balloon_mutex);
++
++ do {
++ credit = current_target() - bs.current_pages;
++ if (credit > 0)
++ need_sleep = (increase_reservation(credit) != 0);
++ if (credit < 0)
++ need_sleep = (decrease_reservation(-credit) != 0);
++
++#ifndef CONFIG_PREEMPT
++ if (need_resched())
++ schedule();
++#endif
++ } while ((credit != 0) && !need_sleep);
++
++ /* Schedule more work if there is some still to be done. */
++ if (current_target() != bs.current_pages)
++ mod_timer(&balloon_timer, jiffies + HZ);
++
++ mutex_unlock(&balloon_mutex);
++}
++
++/* Resets the Xen limit, sets new target, and kicks off processing. */
++void balloon_set_new_target(unsigned long target)
++{
++ /* No need for lock. Not read-modify-write updates. */
++ bs.hard_limit = ~0UL;
++ bs.target_pages = target;
++ schedule_work(&balloon_worker);
++}
++
++static struct xenbus_watch target_watch =
++{
++ .node = "memory/target"
++};
++
++/* React to a change in the target key */
++static void watch_target(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ unsigned long long new_target;
++ int err;
++
++ err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
++ if (err != 1) {
++ /* This is ok (for domain0 at least) - so just return */
++ return;
++ }
++
++ /* The given memory/target value is in KiB, so it needs converting to
++ * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
++ */
++ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
++}
++
++static int balloon_init_watcher(struct notifier_block *notifier,
++ unsigned long event,
++ void *data)
++{
++ int err;
++
++ err = register_xenbus_watch(&target_watch);
++ if (err)
++ printk(KERN_ERR "Failed to set balloon watcher\n");
++
++ return NOTIFY_DONE;
++}
++
++#ifdef CONFIG_PROC_FS
++static int balloon_write(struct file *file, const char __user *buffer,
++ unsigned long count, void *data)
++{
++ char memstring[64], *endchar;
++ unsigned long long target_bytes;
++
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ if (count <= 1)
++ return -EBADMSG; /* runt */
++ if (count > sizeof(memstring))
++ return -EFBIG; /* too long */
++
++ if (copy_from_user(memstring, buffer, count))
++ return -EFAULT;
++ memstring[sizeof(memstring)-1] = '\0';
++
++ target_bytes = memparse(memstring, &endchar);
++ balloon_set_new_target(target_bytes >> PAGE_SHIFT);
++
++ return count;
++}
++
++static int balloon_read(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int len;
++
++ len = sprintf(
++ page,
++ "Current allocation: %8lu kB\n"
++ "Requested target: %8lu kB\n"
++ "Low-mem balloon: %8lu kB\n"
++ "High-mem balloon: %8lu kB\n"
++ "Driver pages: %8lu kB\n"
++ "Xen hard limit: ",
++ PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages),
++ PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high),
++ PAGES2KB(bs.driver_pages));
++
++ if (bs.hard_limit != ~0UL)
++ len += sprintf(page + len, "%8lu kB\n",
++ PAGES2KB(bs.hard_limit));
++ else
++ len += sprintf(page + len, " ??? kB\n");
++
++ *eof = 1;
++ return len;
++}
++#endif
++
++static struct notifier_block xenstore_notifier;
++
++static int __init balloon_init(void)
++{
++#if defined(CONFIG_X86) && defined(CONFIG_XEN)
++ unsigned long pfn;
++ struct page *page;
++#endif
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ IPRINTK("Initialising balloon driver.\n");
++
++#ifdef CONFIG_XEN
++ bs.current_pages = min(xen_start_info->nr_pages, max_pfn);
++ totalram_pages = bs.current_pages;
++#else
++ bs.current_pages = totalram_pages;
++#endif
++ bs.target_pages = bs.current_pages;
++ bs.balloon_low = 0;
++ bs.balloon_high = 0;
++ bs.driver_pages = 0UL;
++ bs.hard_limit = ~0UL;
++
++ init_timer(&balloon_timer);
++ balloon_timer.data = 0;
++ balloon_timer.function = balloon_alarm;
++
++#ifdef CONFIG_PROC_FS
++ if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) {
++ WPRINTK("Unable to create /proc/xen/balloon.\n");
++ return -1;
++ }
++
++ balloon_pde->read_proc = balloon_read;
++ balloon_pde->write_proc = balloon_write;
++#endif
++ balloon_sysfs_init();
++
++#if defined(CONFIG_X86) && defined(CONFIG_XEN)
++ /* Initialise the balloon with excess memory space. */
++ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
++ page = pfn_to_page(pfn);
++ if (!PageReserved(page))
++ balloon_append(page);
++ }
++#endif
++
++ target_watch.callback = watch_target;
++ xenstore_notifier.notifier_call = balloon_init_watcher;
++
++ register_xenstore_notifier(&xenstore_notifier);
++
++ return 0;
++}
++
++subsys_initcall(balloon_init);
++
++static void balloon_exit(void)
++{
++ /* XXX - release balloon here */
++ return;
++}
++
++module_exit(balloon_exit);
++
++void balloon_update_driver_allowance(long delta)
++{
++ unsigned long flags;
++
++ balloon_lock(flags);
++ bs.driver_pages += delta;
++ balloon_unlock(flags);
++}
++
++#ifdef CONFIG_XEN
++static int dealloc_pte_fn(
++ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++{
++ unsigned long mfn = pte_mfn(*pte);
++ int ret;
++ struct xen_memory_reservation reservation = {
++ .nr_extents = 1,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++ set_xen_guest_handle(reservation.extent_start, &mfn);
++ set_pte_at(&init_mm, addr, pte, __pte_ma(0));
++ set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
++ BUG_ON(ret != 1);
++ return 0;
++}
++#endif
++
++struct page **alloc_empty_pages_and_pagevec(int nr_pages)
++{
++ unsigned long vaddr, flags;
++ struct page *page, **pagevec;
++ int i, ret;
++
++ pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL);
++ if (pagevec == NULL)
++ return NULL;
++
++ for (i = 0; i < nr_pages; i++) {
++ page = pagevec[i] = alloc_page(GFP_KERNEL);
++ if (page == NULL)
++ goto err;
++
++ vaddr = (unsigned long)page_address(page);
++
++ scrub_pages(vaddr, 1);
++
++ balloon_lock(flags);
++
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ unsigned long gmfn = page_to_pfn(page);
++ struct xen_memory_reservation reservation = {
++ .nr_extents = 1,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++ set_xen_guest_handle(reservation.extent_start, &gmfn);
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &reservation);
++ if (ret == 1)
++ ret = 0; /* success */
++ } else {
++#ifdef CONFIG_XEN
++ ret = apply_to_page_range(&init_mm, vaddr, PAGE_SIZE,
++ dealloc_pte_fn, NULL);
++#else
++ /* Cannot handle non-auto translate mode. */
++ ret = 1;
++#endif
++ }
++
++ if (ret != 0) {
++ balloon_unlock(flags);
++ __free_page(page);
++ goto err;
++ }
++
++ totalram_pages = --bs.current_pages;
++
++ balloon_unlock(flags);
++ }
++
++ out:
++ schedule_work(&balloon_worker);
++#ifdef CONFIG_XEN
++ flush_tlb_all();
++#endif
++ return pagevec;
++
++ err:
++ balloon_lock(flags);
++ while (--i >= 0)
++ balloon_append(pagevec[i]);
++ balloon_unlock(flags);
++ kfree(pagevec);
++ pagevec = NULL;
++ goto out;
++}
++
++void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
++{
++ unsigned long flags;
++ int i;
++
++ if (pagevec == NULL)
++ return;
++
++ balloon_lock(flags);
++ for (i = 0; i < nr_pages; i++) {
++ BUG_ON(page_count(pagevec[i]) != 1);
++ balloon_append(pagevec[i]);
++ }
++ balloon_unlock(flags);
++
++ kfree(pagevec);
++
++ schedule_work(&balloon_worker);
++}
++
++void balloon_release_driver_page(struct page *page)
++{
++ unsigned long flags;
++
++ balloon_lock(flags);
++ balloon_append(page);
++ bs.driver_pages--;
++ balloon_unlock(flags);
++
++ schedule_work(&balloon_worker);
++}
++
++EXPORT_SYMBOL_GPL(balloon_update_driver_allowance);
++EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec);
++EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec);
++EXPORT_SYMBOL_GPL(balloon_release_driver_page);
++
++MODULE_LICENSE("Dual BSD/GPL");
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/balloon/common.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,58 @@
++/******************************************************************************
++ * balloon/common.h
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_BALLOON_COMMON_H__
++#define __XEN_BALLOON_COMMON_H__
++
++#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
++
++struct balloon_stats {
++ /* We aim for 'current allocation' == 'target allocation'. */
++ unsigned long current_pages;
++ unsigned long target_pages;
++ /* We may hit the hard limit in Xen. If we do then we remember it. */
++ unsigned long hard_limit;
++ /*
++ * Drivers may alter the memory reservation independently, but they
++ * must inform the balloon driver so we avoid hitting the hard limit.
++ */
++ unsigned long driver_pages;
++ /* Number of pages in high- and low-memory balloons. */
++ unsigned long balloon_low;
++ unsigned long balloon_high;
++};
++
++extern struct balloon_stats balloon_stats;
++#define bs balloon_stats
++
++int balloon_sysfs_init(void);
++void balloon_sysfs_exit(void);
++
++void balloon_set_new_target(unsigned long target);
++
++#endif /* __XEN_BALLOON_COMMON_H__ */
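PAGES2KB() only makes sense when PAGE_SHIFT is at least 10; as a quick sanity check of the arithmetic, assuming the common 4 KiB page size (PAGE_SHIFT == 12):

	/* PAGES2KB(p) == p << (12 - 10) == p * 4 on 4 KiB pages. */
	unsigned long pages = 131072;		/* 512 MiB worth of 4 KiB pages */
	unsigned long kb = PAGES2KB(pages);	/* == 524288 kB */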
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/balloon/sysfs.c 2007-08-27 14:01:59.000000000 -0400
+@@ -0,0 +1,170 @@
++/******************************************************************************
++ * balloon/sysfs.c
++ *
++ * Xen balloon driver - sysfs interfaces.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/stat.h>
++#include <linux/string.h>
++#include <linux/sysdev.h>
++#include "common.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#define BALLOON_CLASS_NAME "memory"
++
++#define BALLOON_SHOW(name, format, args...) \
++ static ssize_t show_##name(struct sys_device *dev, \
++ char *buf) \
++ { \
++ return sprintf(buf, format, ##args); \
++ } \
++ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
++
++BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages));
++BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low));
++BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high));
++BALLOON_SHOW(hard_limit_kb,
++ (bs.hard_limit!=~0UL) ? "%lu\n" : "???\n",
++ (bs.hard_limit!=~0UL) ? PAGES2KB(bs.hard_limit) : 0);
++BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages));
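BALLOON_SHOW() stamps out one read-only sysdev attribute per statistic; tracing the token pasting, the first invocation above expands to roughly:

	static ssize_t show_current_kb(struct sys_device *dev, char *buf)
	{
		return sprintf(buf, "%lu\n", PAGES2KB(bs.current_pages));
	}
	static SYSDEV_ATTR(current_kb, S_IRUGO, show_current_kb, NULL);

SYSDEV_ATTR in turn defines attr_current_kb, which is what balloon_info_attrs[] collects below.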
++
++static ssize_t show_target_kb(struct sys_device *dev, char *buf)
++{
++ return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages));
++}
++
++static ssize_t store_target_kb(struct sys_device *dev,
++ const char *buf,
++ size_t count)
++{
++ char memstring[64], *endchar;
++ unsigned long long target_bytes;
++
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ if (count <= 1)
++ return -EBADMSG; /* runt */
++ if (count > sizeof(memstring))
++ return -EFBIG; /* too long */
++ strcpy(memstring, buf);
++
++ target_bytes = memparse(memstring, &endchar);
++ balloon_set_new_target(target_bytes >> PAGE_SHIFT);
++
++ return count;
++}
++
++static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
++ show_target_kb, store_target_kb);
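Note that store_target_kb() hands the raw string to memparse(), so the written value is taken as bytes (optionally with a K/M/G suffix) even though the attribute is named target_kb; the conversion to a page count works out as in this sketch, assuming 4 KiB pages:

	/* Illustration only: a write of "512M", assuming PAGE_SHIFT == 12. */
	unsigned long long target_bytes = 512ULL << 20;		/* memparse("512M") */
	unsigned long target_pages = target_bytes >> PAGE_SHIFT;	/* 131072 pages */
	/* balloon_set_new_target(target_pages) then drives the balloon worker. */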
++
++static struct sysdev_attribute *balloon_attrs[] = {
++ &attr_target_kb,
++};
++
++static struct attribute *balloon_info_attrs[] = {
++ &attr_current_kb.attr,
++ &attr_low_kb.attr,
++ &attr_high_kb.attr,
++ &attr_hard_limit_kb.attr,
++ &attr_driver_kb.attr,
++ NULL
++};
++
++static struct attribute_group balloon_info_group = {
++ .name = "info",
++ .attrs = balloon_info_attrs,
++};
++
++static struct sysdev_class balloon_sysdev_class = {
++ set_kset_name(BALLOON_CLASS_NAME),
++};
++
++static struct sys_device balloon_sysdev;
++
++static int register_balloon(struct sys_device *sysdev)
++{
++ int i, error;
++
++ error = sysdev_class_register(&balloon_sysdev_class);
++ if (error)
++ return error;
++
++ sysdev->id = 0;
++ sysdev->cls = &balloon_sysdev_class;
++
++ error = sysdev_register(sysdev);
++ if (error) {
++ sysdev_class_unregister(&balloon_sysdev_class);
++ return error;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
++ error = sysdev_create_file(sysdev, balloon_attrs[i]);
++ if (error)
++ goto fail;
++ }
++
++ error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
++ if (error)
++ goto fail;
++
++ return 0;
++
++ fail:
++ while (--i >= 0)
++ sysdev_remove_file(sysdev, balloon_attrs[i]);
++ sysdev_unregister(sysdev);
++ sysdev_class_unregister(&balloon_sysdev_class);
++ return error;
++}
++
++static void unregister_balloon(struct sys_device *sysdev)
++{
++ int i;
++
++ sysfs_remove_group(&sysdev->kobj, &balloon_info_group);
++ for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++)
++ sysdev_remove_file(sysdev, balloon_attrs[i]);
++ sysdev_unregister(sysdev);
++ sysdev_class_unregister(&balloon_sysdev_class);
++}
++
++int balloon_sysfs_init(void)
++{
++ return register_balloon(&balloon_sysdev);
++}
++
++void balloon_sysfs_exit(void)
++{
++ unregister_balloon(&balloon_sysdev);
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/blkback/Makefile 2007-08-27 14:01:47.000000000 -0400
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_XEN_BLKDEV_BACKEND) := blkbk.o
++
++blkbk-y := blkback.o xenbus.o interface.o vbd.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/blkback/blkback.c 2007-08-27 14:02:10.000000000 -0400
+@@ -0,0 +1,614 @@
++/******************************************************************************
++ * arch/xen/drivers/blkif/backend/main.c
++ *
++ * Back-end of the driver for virtual block devices. This portion of the
++ * driver exports a 'unified' block-device interface that can be accessed
++ * by any operating system that implements a compatible front end. A
++ * reference front-end implementation can be found in:
++ * arch/xen/drivers/blkif/frontend
++ *
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Copyright (c) 2005, Christopher Clark
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/kthread.h>
++#include <linux/list.h>
++#include <xen/balloon.h>
++#include <asm/hypervisor.h>
++#include "common.h"
++
++/*
++ * These are rather arbitrary. They are fairly large because adjacent requests
++ * pulled from a communication ring are quite likely to end up being part of
++ * the same scatter/gather request at the disc.
++ *
++ * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
++ *
++ * This will increase the chances of being able to write whole tracks.
++ * 64 should be enough to keep us competitive with Linux.
++ */
++static int blkif_reqs = 64;
++module_param_named(reqs, blkif_reqs, int, 0);
++MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
++
++/* Run-time switchable: /sys/module/blkback/parameters/ */
++static unsigned int log_stats = 0;
++static unsigned int debug_lvl = 0;
++module_param(log_stats, int, 0644);
++module_param(debug_lvl, int, 0644);
++
++/*
++ * Each outstanding request that we've passed to the lower device layers has a
++ * 'pending_req' allocated to it. Each buffer_head that completes decrements
++ * the pendcnt towards zero. When it hits zero, the specified domain has a
++ * response queued for it, with the saved 'id' passed back.
++ */
++typedef struct {
++ blkif_t *blkif;
++ u64 id;
++ int nr_pages;
++ atomic_t pendcnt;
++ unsigned short operation;
++ int status;
++ struct list_head free_list;
++} pending_req_t;
++
++static pending_req_t *pending_reqs;
++static struct list_head pending_free;
++static DEFINE_SPINLOCK(pending_free_lock);
++static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
++
++#define BLKBACK_INVALID_HANDLE (~0)
++
++static struct page **pending_pages;
++static grant_handle_t *pending_grant_handles;
++
++static inline int vaddr_pagenr(pending_req_t *req, int seg)
++{
++ return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
++}
++
++static inline unsigned long vaddr(pending_req_t *req, int seg)
++{
++ unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
++ return (unsigned long)pfn_to_kaddr(pfn);
++}
++
++#define pending_handle(_req, _seg) \
++ (pending_grant_handles[vaddr_pagenr(_req, _seg)])
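Each pending_req_t owns a contiguous run of BLKIF_MAX_SEGMENTS_PER_REQUEST slots in pending_pages[] and pending_grant_handles[], and vaddr_pagenr() is plain row-major indexing. A worked example, assuming the classic blkif limit of 11 segments per request and the default blkif_reqs of 64:

	/*
	 * mmap_pages == 64 * 11 == 704 reserved page slots in total;
	 * segment 2 of request index 4 lands at slot 4 * 11 + 2 == 46,
	 * i.e. pending_pages[46] and pending_grant_handles[46].
	 */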
++
++
++static int do_block_io_op(blkif_t *blkif);
++static void dispatch_rw_block_io(blkif_t *blkif,
++ blkif_request_t *req,
++ pending_req_t *pending_req);
++static void make_response(blkif_t *blkif, u64 id,
++ unsigned short op, int st);
++
++/******************************************************************
++ * misc small helpers
++ */
++static pending_req_t* alloc_req(void)
++{
++ pending_req_t *req = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++ if (!list_empty(&pending_free)) {
++ req = list_entry(pending_free.next, pending_req_t, free_list);
++ list_del(&req->free_list);
++ }
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++ return req;
++}
++
++static void free_req(pending_req_t *req)
++{
++ unsigned long flags;
++ int was_empty;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++ was_empty = list_empty(&pending_free);
++ list_add(&req->free_list, &pending_free);
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++ if (was_empty)
++ wake_up(&pending_free_wq);
++}
++
++static void unplug_queue(blkif_t *blkif)
++{
++ if (blkif->plug == NULL)
++ return;
++ if (blkif->plug->unplug_fn)
++ blkif->plug->unplug_fn(blkif->plug);
++ blk_put_queue(blkif->plug);
++ blkif->plug = NULL;
++}
++
++static void plug_queue(blkif_t *blkif, struct bio *bio)
++{
++ request_queue_t *q = bdev_get_queue(bio->bi_bdev);
++
++ if (q == blkif->plug)
++ return;
++ unplug_queue(blkif);
++ blk_get_queue(q);
++ blkif->plug = q;
++}
++
++static void fast_flush_area(pending_req_t *req)
++{
++ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++ unsigned int i, invcount = 0;
++ grant_handle_t handle;
++ int ret;
++
++ for (i = 0; i < req->nr_pages; i++) {
++ handle = pending_handle(req, i);
++ if (handle == BLKBACK_INVALID_HANDLE)
++ continue;
++ gnttab_set_unmap_op(&unmap[i], vaddr(req, i), GNTMAP_host_map,
++ handle);
++ pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
++ invcount++;
++ }
++
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, unmap, invcount);
++ BUG_ON(ret);
++}
++
++/******************************************************************
++ * SCHEDULER FUNCTIONS
++ */
++
++static void print_stats(blkif_t *blkif)
++{
++ printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
++ current->comm, blkif->st_oo_req,
++ blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
++ blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
++ blkif->st_rd_req = 0;
++ blkif->st_wr_req = 0;
++ blkif->st_oo_req = 0;
++}
++
++int blkif_schedule(void *arg)
++{
++ blkif_t *blkif = arg;
++
++ blkif_get(blkif);
++
++ if (debug_lvl)
++ printk(KERN_DEBUG "%s: started\n", current->comm);
++
++ while (!kthread_should_stop()) {
++ wait_event_interruptible(
++ blkif->wq,
++ blkif->waiting_reqs || kthread_should_stop());
++ wait_event_interruptible(
++ pending_free_wq,
++ !list_empty(&pending_free) || kthread_should_stop());
++
++ blkif->waiting_reqs = 0;
++ smp_mb(); /* clear flag *before* checking for work */
++
++ if (do_block_io_op(blkif))
++ blkif->waiting_reqs = 1;
++ unplug_queue(blkif);
++
++ if (log_stats && time_after(jiffies, blkif->st_print))
++ print_stats(blkif);
++ }
++
++ if (log_stats)
++ print_stats(blkif);
++ if (debug_lvl)
++ printk(KERN_DEBUG "%s: exiting\n", current->comm);
++
++ blkif->xenblkd = NULL;
++ blkif_put(blkif);
++
++ return 0;
++}
++
++/******************************************************************
++ * COMPLETION CALLBACK -- Called as bh->b_end_io()
++ */
++
++static void __end_block_io_op(pending_req_t *pending_req, int error)
++{
++ /* An error fails the entire request. */
++ if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
++ (error == -EOPNOTSUPP)) {
++ DPRINTK("blkback: write barrier op failed, not supported\n");
++ blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
++ pending_req->status = BLKIF_RSP_EOPNOTSUPP;
++ } else if (error) {
++ DPRINTK("Buffer not up-to-date at end of operation, "
++ "error=%d\n", error);
++ pending_req->status = BLKIF_RSP_ERROR;
++ }
++
++ if (atomic_dec_and_test(&pending_req->pendcnt)) {
++ fast_flush_area(pending_req);
++ make_response(pending_req->blkif, pending_req->id,
++ pending_req->operation, pending_req->status);
++ blkif_put(pending_req->blkif);
++ free_req(pending_req);
++ }
++}
++
++static int end_block_io_op(struct bio *bio, unsigned int done, int error)
++{
++ if (bio->bi_size != 0)
++ return 1;
++ __end_block_io_op(bio->bi_private, error);
++ bio_put(bio);
++ return error;
++}
++
++
++/******************************************************************************
++ * NOTIFICATION FROM GUEST OS.
++ */
++
++static void blkif_notify_work(blkif_t *blkif)
++{
++ blkif->waiting_reqs = 1;
++ wake_up(&blkif->wq);
++}
++
++irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++ blkif_notify_work(dev_id);
++ return IRQ_HANDLED;
++}
++
++
++
++/******************************************************************
++ * DOWNWARD CALLS -- These interface with the block-device layer proper.
++ */
++
++static int do_block_io_op(blkif_t *blkif)
++{
++ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
++ blkif_request_t req;
++ pending_req_t *pending_req;
++ RING_IDX rc, rp;
++ int more_to_do = 0;
++
++ rc = blk_rings->common.req_cons;
++ rp = blk_rings->common.sring->req_prod;
++ rmb(); /* Ensure we see queued requests up to 'rp'. */
++
++ while ((rc != rp)) {
++
++ if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
++ break;
++
++ pending_req = alloc_req();
++ if (NULL == pending_req) {
++ blkif->st_oo_req++;
++ more_to_do = 1;
++ break;
++ }
++
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
++ break;
++ case BLKIF_PROTOCOL_X86_32:
++ blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
++ break;
++ case BLKIF_PROTOCOL_X86_64:
++ blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
++ break;
++ default:
++ BUG();
++ }
++ blk_rings->common.req_cons = ++rc; /* before make_response() */
++
++ switch (req.operation) {
++ case BLKIF_OP_READ:
++ blkif->st_rd_req++;
++ dispatch_rw_block_io(blkif, &req, pending_req);
++ break;
++ case BLKIF_OP_WRITE_BARRIER:
++ blkif->st_br_req++;
++ /* fall through */
++ case BLKIF_OP_WRITE:
++ blkif->st_wr_req++;
++ dispatch_rw_block_io(blkif, &req, pending_req);
++ break;
++ default:
++ DPRINTK("error: unknown block io operation [%d]\n",
++ req.operation);
++ make_response(blkif, req.id, req.operation,
++ BLKIF_RSP_ERROR);
++ free_req(pending_req);
++ break;
++ }
++ }
++ return more_to_do;
++}
++
++static void dispatch_rw_block_io(blkif_t *blkif,
++ blkif_request_t *req,
++ pending_req_t *pending_req)
++{
++ extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
++ struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++ struct phys_req preq;
++ struct {
++ unsigned long buf; unsigned int nsec;
++ } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++ unsigned int nseg;
++ struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++ int ret, i, nbio = 0;
++ int operation;
++
++ switch (req->operation) {
++ case BLKIF_OP_READ:
++ operation = READ;
++ break;
++ case BLKIF_OP_WRITE:
++ operation = WRITE;
++ break;
++ case BLKIF_OP_WRITE_BARRIER:
++ operation = WRITE_BARRIER;
++ break;
++ default:
++ operation = 0; /* make gcc happy */
++ BUG();
++ }
++
++ /* Check that number of segments is sane. */
++ nseg = req->nr_segments;
++ if (unlikely(nseg == 0) ||
++ unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
++ DPRINTK("Bad number of segments in request (%d)\n", nseg);
++ goto fail_response;
++ }
++
++ preq.dev = req->handle;
++ preq.sector_number = req->sector_number;
++ preq.nr_sects = 0;
++
++ pending_req->blkif = blkif;
++ pending_req->id = req->id;
++ pending_req->operation = req->operation;
++ pending_req->status = BLKIF_RSP_OKAY;
++ pending_req->nr_pages = nseg;
++
++ for (i = 0; i < nseg; i++) {
++ uint32_t flags;
++
++ seg[i].nsec = req->seg[i].last_sect -
++ req->seg[i].first_sect + 1;
++
++ if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
++ (req->seg[i].last_sect < req->seg[i].first_sect))
++ goto fail_response;
++ preq.nr_sects += seg[i].nsec;
++
++ flags = GNTMAP_host_map;
++ if (operation != READ)
++ flags |= GNTMAP_readonly;
++ gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
++ req->seg[i].gref, blkif->domid);
++ }
++
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
++ BUG_ON(ret);
++
++ for (i = 0; i < nseg; i++) {
++ if (unlikely(map[i].status != 0)) {
++ DPRINTK("invalid buffer -- could not remap it\n");
++ map[i].handle = BLKBACK_INVALID_HANDLE;
++ ret |= 1;
++ }
++
++ pending_handle(pending_req, i) = map[i].handle;
++
++ if (ret)
++ continue;
++
++ set_phys_to_machine(__pa(vaddr(
++ pending_req, i)) >> PAGE_SHIFT,
++ FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
++ seg[i].buf = map[i].dev_bus_addr |
++ (req->seg[i].first_sect << 9);
++ }
++
++ if (ret)
++ goto fail_flush;
++
++ if (vbd_translate(&preq, blkif, operation) != 0) {
++ DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
++ operation == READ ? "read" : "write",
++ preq.sector_number,
++ preq.sector_number + preq.nr_sects, preq.dev);
++ goto fail_flush;
++ }
++
++ for (i = 0; i < nseg; i++) {
++ if (((int)preq.sector_number|(int)seg[i].nsec) &
++ ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
++ DPRINTK("Misaligned I/O request from domain %d",
++ blkif->domid);
++ goto fail_put_bio;
++ }
++
++ while ((bio == NULL) ||
++ (bio_add_page(bio,
++ virt_to_page(vaddr(pending_req, i)),
++ seg[i].nsec << 9,
++ seg[i].buf & ~PAGE_MASK) == 0)) {
++ bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
++ if (unlikely(bio == NULL))
++ goto fail_put_bio;
++
++ bio->bi_bdev = preq.bdev;
++ bio->bi_private = pending_req;
++ bio->bi_end_io = end_block_io_op;
++ bio->bi_sector = preq.sector_number;
++ }
++
++ preq.sector_number += seg[i].nsec;
++ }
++
++ plug_queue(blkif, bio);
++ atomic_set(&pending_req->pendcnt, nbio);
++ blkif_get(blkif);
++
++ for (i = 0; i < nbio; i++)
++ submit_bio(operation, biolist[i]);
++
++ if (operation == READ)
++ blkif->st_rd_sect += preq.nr_sects;
++ else if (operation == WRITE)
++ blkif->st_wr_sect += preq.nr_sects;
++
++ return;
++
++ fail_put_bio:
++ for (i = 0; i < (nbio-1); i++)
++ bio_put(biolist[i]);
++ fail_flush:
++ fast_flush_area(pending_req);
++ fail_response:
++ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
++ free_req(pending_req);
++}
++
++
++
++/******************************************************************
++ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
++ */
++
++
++static void make_response(blkif_t *blkif, u64 id,
++ unsigned short op, int st)
++{
++ blkif_response_t resp;
++ unsigned long flags;
++ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
++ int more_to_do = 0;
++ int notify;
++
++ resp.id = id;
++ resp.operation = op;
++ resp.status = st;
++
++ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
++ /* Place on the response ring for the relevant domain. */
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ case BLKIF_PROTOCOL_X86_32:
++ memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ case BLKIF_PROTOCOL_X86_64:
++ memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ default:
++ BUG();
++ }
++ blk_rings->common.rsp_prod_pvt++;
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
++ if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
++ /*
++ * Tail check for pending requests. Allows frontend to avoid
++ * notifications if requests are already in flight (lower
++ * overheads and promotes batching).
++ */
++ RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
++
++ } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
++ more_to_do = 1;
++ }
++
++ spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
++
++ if (more_to_do)
++ blkif_notify_work(blkif);
++ if (notify)
++ notify_remote_via_irq(blkif->irq);
++}
++
++static int __init blkif_init(void)
++{
++ int i, mmap_pages;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
++
++ pending_reqs = kmalloc(sizeof(pending_reqs[0]) *
++ blkif_reqs, GFP_KERNEL);
++ pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
++ mmap_pages, GFP_KERNEL);
++ pending_pages = alloc_empty_pages_and_pagevec(mmap_pages);
++
++ if (!pending_reqs || !pending_grant_handles || !pending_pages)
++ goto out_of_memory;
++
++ for (i = 0; i < mmap_pages; i++)
++ pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
++
++ blkif_interface_init();
++
++ memset(pending_reqs, 0, blkif_reqs * sizeof(pending_reqs[0]));
++ INIT_LIST_HEAD(&pending_free);
++
++ for (i = 0; i < blkif_reqs; i++)
++ list_add_tail(&pending_reqs[i].free_list, &pending_free);
++
++ blkif_xenbus_init();
++
++ return 0;
++
++ out_of_memory:
++ kfree(pending_reqs);
++ kfree(pending_grant_handles);
++ free_empty_pages_and_pagevec(pending_pages, mmap_pages);
++ printk("%s: out of memory\n", __FUNCTION__);
++ return -ENOMEM;
++}
++
++module_init(blkif_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/blkback/common.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,139 @@
++/*
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __BLKIF__BACKEND__COMMON_H__
++#define __BLKIF__BACKEND__COMMON_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/vmalloc.h>
++#include <linux/wait.h>
++#include <asm/io.h>
++#include <asm/setup.h>
++#include <asm/pgalloc.h>
++#include <xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <xen/blkif.h>
++#include <xen/gnttab.h>
++#include <xen/driver_util.h>
++#include <xen/xenbus.h>
++
++#define DPRINTK(_f, _a...) \
++ pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__ , __LINE__ , ## _a )
++
++struct vbd {
++ blkif_vdev_t handle; /* what the domain refers to this vbd as */
++ unsigned char readonly; /* Non-zero -> read-only */
++ unsigned char type; /* VDISK_xxx */
++ u32 pdevice; /* phys device that this vbd maps to */
++ struct block_device *bdev;
++};
++
++struct backend_info;
++
++typedef struct blkif_st {
++ /* Unique identifier for this interface. */
++ domid_t domid;
++ unsigned int handle;
++ /* Physical parameters of the comms window. */
++ unsigned int irq;
++ /* Comms information. */
++ enum blkif_protocol blk_protocol;
++ blkif_back_rings_t blk_rings;
++ struct vm_struct *blk_ring_area;
++ /* The VBD attached to this interface. */
++ struct vbd vbd;
++ /* Back pointer to the backend_info. */
++ struct backend_info *be;
++ /* Private fields. */
++ spinlock_t blk_ring_lock;
++ atomic_t refcnt;
++
++ wait_queue_head_t wq;
++ struct task_struct *xenblkd;
++ unsigned int waiting_reqs;
++ request_queue_t *plug;
++
++ /* statistics */
++ unsigned long st_print;
++ int st_rd_req;
++ int st_wr_req;
++ int st_oo_req;
++ int st_br_req;
++ int st_rd_sect;
++ int st_wr_sect;
++
++ wait_queue_head_t waiting_to_free;
++
++ grant_handle_t shmem_handle;
++ grant_ref_t shmem_ref;
++} blkif_t;
++
++blkif_t *blkif_alloc(domid_t domid);
++void blkif_disconnect(blkif_t *blkif);
++void blkif_free(blkif_t *blkif);
++int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
++
++#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define blkif_put(_b) \
++ do { \
++ if (atomic_dec_and_test(&(_b)->refcnt)) \
++ wake_up(&(_b)->waiting_to_free);\
++ } while (0)
++
++/* Create a vbd. */
++int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, unsigned major,
++ unsigned minor, int readonly);
++void vbd_free(struct vbd *vbd);
++
++unsigned long long vbd_size(struct vbd *vbd);
++unsigned int vbd_info(struct vbd *vbd);
++unsigned long vbd_secsize(struct vbd *vbd);
++
++struct phys_req {
++ unsigned short dev;
++ unsigned short nr_sects;
++ struct block_device *bdev;
++ blkif_sector_t sector_number;
++};
++
++int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation);
++
++void blkif_interface_init(void);
++
++void blkif_xenbus_init(void);
++
++irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++int blkif_schedule(void *arg);
++
++int blkback_barrier(struct xenbus_transaction xbt,
++ struct backend_info *be, int state);
++
++#endif /* __BLKIF__BACKEND__COMMON_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/blkback/interface.c 2007-08-27 14:02:01.000000000 -0400
+@@ -0,0 +1,181 @@
++/******************************************************************************
++ * arch/xen/drivers/blkif/backend/interface.c
++ *
++ * Block-device interface management.
++ *
++ * Copyright (c) 2004, Keir Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "common.h"
++#include <xen/evtchn.h>
++#include <linux/kthread.h>
++
++static kmem_cache_t *blkif_cachep;
++
++blkif_t *blkif_alloc(domid_t domid)
++{
++ blkif_t *blkif;
++
++ blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
++ if (!blkif)
++ return ERR_PTR(-ENOMEM);
++
++ memset(blkif, 0, sizeof(*blkif));
++ blkif->domid = domid;
++ spin_lock_init(&blkif->blk_ring_lock);
++ atomic_set(&blkif->refcnt, 1);
++ init_waitqueue_head(&blkif->wq);
++ blkif->st_print = jiffies;
++ init_waitqueue_head(&blkif->waiting_to_free);
++
++ return blkif;
++}
++
++static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
++ GNTMAP_host_map, shared_page, blkif->domid);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++ DPRINTK(" Grant table operation failure !\n");
++ return op.status;
++ }
++
++ blkif->shmem_ref = shared_page;
++ blkif->shmem_handle = op.handle;
++
++ return 0;
++}
++
++static void unmap_frontend_page(blkif_t *blkif)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
++ GNTMAP_host_map, blkif->shmem_handle);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++}
++
++int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
++{
++ int err;
++
++ /* Already connected through? */
++ if (blkif->irq)
++ return 0;
++
++ if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
++ return -ENOMEM;
++
++ err = map_frontend_page(blkif, shared_page);
++ if (err) {
++ free_vm_area(blkif->blk_ring_area);
++ return err;
++ }
++
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ {
++ blkif_sring_t *sring;
++ sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
++ break;
++ }
++ case BLKIF_PROTOCOL_X86_32:
++ {
++ blkif_x86_32_sring_t *sring_x86_32;
++ sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
++ break;
++ }
++ case BLKIF_PROTOCOL_X86_64:
++ {
++ blkif_x86_64_sring_t *sring_x86_64;
++ sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
++ break;
++ }
++ default:
++ BUG();
++ }
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend", blkif);
++ if (err < 0)
++ {
++ unmap_frontend_page(blkif);
++ free_vm_area(blkif->blk_ring_area);
++ blkif->blk_rings.common.sring = NULL;
++ return err;
++ }
++ blkif->irq = err;
++
++ return 0;
++}
++
++void blkif_disconnect(blkif_t *blkif)
++{
++ if (blkif->xenblkd) {
++ kthread_stop(blkif->xenblkd);
++ blkif->xenblkd = NULL;
++ }
++
++ atomic_dec(&blkif->refcnt);
++ wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
++ atomic_inc(&blkif->refcnt);
++
++ if (blkif->irq) {
++ unbind_from_irqhandler(blkif->irq, blkif);
++ blkif->irq = 0;
++ }
++
++ if (blkif->blk_rings.common.sring) {
++ unmap_frontend_page(blkif);
++ free_vm_area(blkif->blk_ring_area);
++ blkif->blk_rings.common.sring = NULL;
++ }
++}
++
++void blkif_free(blkif_t *blkif)
++{
++ if (!atomic_dec_and_test(&blkif->refcnt))
++ BUG();
++ kmem_cache_free(blkif_cachep, blkif);
++}
++
++void __init blkif_interface_init(void)
++{
++ blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t),
++ 0, 0, NULL, NULL);
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/blkback/vbd.c 2007-08-27 14:01:47.000000000 -0400
+@@ -0,0 +1,118 @@
++/******************************************************************************
++ * blkback/vbd.c
++ *
++ * Routines for managing virtual block devices (VBDs).
++ *
++ * Copyright (c) 2003-2005, Keir Fraser & Steve Hand
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "common.h"
++
++#define vbd_sz(_v) ((_v)->bdev->bd_part ? \
++ (_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
++
++unsigned long long vbd_size(struct vbd *vbd)
++{
++ return vbd_sz(vbd);
++}
++
++unsigned int vbd_info(struct vbd *vbd)
++{
++ return vbd->type | (vbd->readonly?VDISK_READONLY:0);
++}
++
++unsigned long vbd_secsize(struct vbd *vbd)
++{
++ return bdev_hardsect_size(vbd->bdev);
++}
++
++int vbd_create(blkif_t *blkif, blkif_vdev_t handle, unsigned major,
++ unsigned minor, int readonly)
++{
++ struct vbd *vbd;
++ struct block_device *bdev;
++
++ vbd = &blkif->vbd;
++ vbd->handle = handle;
++ vbd->readonly = readonly;
++ vbd->type = 0;
++
++ vbd->pdevice = MKDEV(major, minor);
++
++ bdev = open_by_devnum(vbd->pdevice,
++ vbd->readonly ? FMODE_READ : FMODE_WRITE);
++
++ if (IS_ERR(bdev)) {
++ DPRINTK("vbd_create: device %08x could not be opened.\n",
++ vbd->pdevice);
++ return -ENOENT;
++ }
++
++ vbd->bdev = bdev;
++
++ if (vbd->bdev->bd_disk == NULL) {
++ DPRINTK("vbd_create: device %08x doesn't exist.\n",
++ vbd->pdevice);
++ vbd_free(vbd);
++ return -ENOENT;
++ }
++
++ if (vbd->bdev->bd_disk->flags & GENHD_FL_CD)
++ vbd->type |= VDISK_CDROM;
++ if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
++ vbd->type |= VDISK_REMOVABLE;
++
++ DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
++ handle, blkif->domid);
++ return 0;
++}
++
++void vbd_free(struct vbd *vbd)
++{
++ if (vbd->bdev)
++ blkdev_put(vbd->bdev);
++ vbd->bdev = NULL;
++}
++
++int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
++{
++ struct vbd *vbd = &blkif->vbd;
++ int rc = -EACCES;
++
++ if ((operation != READ) && vbd->readonly)
++ goto out;
++
++ if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
++ goto out;
++
++ req->dev = vbd->pdevice;
++ req->bdev = vbd->bdev;
++ rc = 0;
++
++ out:
++ return rc;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/blkback/xenbus.c 2007-08-27 14:02:09.000000000 -0400
+@@ -0,0 +1,533 @@
++/* Xenbus code for blkif backend
++ Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
++ Copyright (C) 2005 XenSource Ltd
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++*/
++
++#include <stdarg.h>
++#include <linux/module.h>
++#include <linux/kthread.h>
++#include "common.h"
++
++#undef DPRINTK
++#define DPRINTK(fmt, args...) \
++ pr_debug("blkback/xenbus (%s:%d) " fmt ".\n", \
++ __FUNCTION__, __LINE__, ##args)
++
++struct backend_info
++{
++ struct xenbus_device *dev;
++ blkif_t *blkif;
++ struct xenbus_watch backend_watch;
++ unsigned major;
++ unsigned minor;
++ char *mode;
++};
++
++static void connect(struct backend_info *);
++static int connect_ring(struct backend_info *);
++static void backend_changed(struct xenbus_watch *, const char **,
++ unsigned int);
++
++static int blkback_name(blkif_t *blkif, char *buf)
++{
++ char *devpath, *devname;
++ struct xenbus_device *dev = blkif->be->dev;
++
++ devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
++ if (IS_ERR(devpath))
++ return PTR_ERR(devpath);
++
++ if ((devname = strstr(devpath, "/dev/")) != NULL)
++ devname += strlen("/dev/");
++ else
++ devname = devpath;
++
++ snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
++ kfree(devpath);
++
++ return 0;
++}
++
++static void update_blkif_status(blkif_t *blkif)
++{
++ int err;
++ char name[TASK_COMM_LEN];
++
++ /* Not ready to connect? */
++ if (!blkif->irq || !blkif->vbd.bdev)
++ return;
++
++ /* Already connected? */
++ if (blkif->be->dev->state == XenbusStateConnected)
++ return;
++
++ /* Attempt to connect: exit if we fail to. */
++ connect(blkif->be);
++ if (blkif->be->dev->state != XenbusStateConnected)
++ return;
++
++ err = blkback_name(blkif, name);
++ if (err) {
++ xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
++ return;
++ }
++
++ blkif->xenblkd = kthread_run(blkif_schedule, blkif, name);
++ if (IS_ERR(blkif->xenblkd)) {
++ err = PTR_ERR(blkif->xenblkd);
++ blkif->xenblkd = NULL;
++ xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
++ }
++}
++
++
++/****************************************************************
++ * sysfs interface for VBD I/O requests
++ */
++
++#define VBD_SHOW(name, format, args...) \
++ static ssize_t show_##name(struct device *_dev, \
++ struct device_attribute *attr, \
++ char *buf) \
++ { \
++ struct xenbus_device *dev = to_xenbus_device(_dev); \
++ struct backend_info *be = dev->dev.driver_data; \
++ \
++ return sprintf(buf, format, ##args); \
++ } \
++ DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
++
++VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req);
++VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req);
++VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req);
++VBD_SHOW(br_req, "%d\n", be->blkif->st_br_req);
++VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
++VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
++
++static struct attribute *vbdstat_attrs[] = {
++ &dev_attr_oo_req.attr,
++ &dev_attr_rd_req.attr,
++ &dev_attr_wr_req.attr,
++ &dev_attr_br_req.attr,
++ &dev_attr_rd_sect.attr,
++ &dev_attr_wr_sect.attr,
++ NULL
++};
++
++static struct attribute_group vbdstat_group = {
++ .name = "statistics",
++ .attrs = vbdstat_attrs,
++};
++
++VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
++VBD_SHOW(mode, "%s\n", be->mode);
++
++int xenvbd_sysfs_addif(struct xenbus_device *dev)
++{
++ int error;
++
++ error = device_create_file(&dev->dev, &dev_attr_physical_device);
++ if (error)
++ goto fail1;
++
++ error = device_create_file(&dev->dev, &dev_attr_mode);
++ if (error)
++ goto fail2;
++
++ error = sysfs_create_group(&dev->dev.kobj, &vbdstat_group);
++ if (error)
++ goto fail3;
++
++ return 0;
++
++fail3: sysfs_remove_group(&dev->dev.kobj, &vbdstat_group);
++fail2: device_remove_file(&dev->dev, &dev_attr_mode);
++fail1: device_remove_file(&dev->dev, &dev_attr_physical_device);
++ return error;
++}
++
++void xenvbd_sysfs_delif(struct xenbus_device *dev)
++{
++ sysfs_remove_group(&dev->dev.kobj, &vbdstat_group);
++ device_remove_file(&dev->dev, &dev_attr_mode);
++ device_remove_file(&dev->dev, &dev_attr_physical_device);
++}
++
++static int blkback_remove(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++ DPRINTK("");
++
++ if (be->backend_watch.node) {
++ unregister_xenbus_watch(&be->backend_watch);
++ kfree(be->backend_watch.node);
++ be->backend_watch.node = NULL;
++ }
++
++ if (be->blkif) {
++ blkif_disconnect(be->blkif);
++ vbd_free(&be->blkif->vbd);
++ blkif_free(be->blkif);
++ be->blkif = NULL;
++ }
++
++ if (be->major || be->minor)
++ xenvbd_sysfs_delif(dev);
++
++ kfree(be);
++ dev->dev.driver_data = NULL;
++ return 0;
++}
++
++int blkback_barrier(struct xenbus_transaction xbt,
++ struct backend_info *be, int state)
++{
++ struct xenbus_device *dev = be->dev;
++ int err;
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
++ "%d", state);
++ if (err)
++ xenbus_dev_fatal(dev, err, "writing feature-barrier");
++
++ return err;
++}
++
++/**
++ * Entry point to this code when a new device is created. Allocate the basic
++ * structures, and watch the store waiting for the hotplug scripts to tell us
++ * the device's physical major and minor numbers. Switch to InitWait.
++ */
++static int blkback_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++ struct backend_info *be = kzalloc(sizeof(struct backend_info),
++ GFP_KERNEL);
++ if (!be) {
++ xenbus_dev_fatal(dev, -ENOMEM,
++ "allocating backend structure");
++ return -ENOMEM;
++ }
++ be->dev = dev;
++ dev->dev.driver_data = be;
++
++ be->blkif = blkif_alloc(dev->otherend_id);
++ if (IS_ERR(be->blkif)) {
++ err = PTR_ERR(be->blkif);
++ be->blkif = NULL;
++ xenbus_dev_fatal(dev, err, "creating block interface");
++ goto fail;
++ }
++
++ /* setup back pointer */
++ be->blkif->be = be;
++
++ err = xenbus_watch_path2(dev, dev->nodename, "physical-device",
++ &be->backend_watch, backend_changed);
++ if (err)
++ goto fail;
++
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err)
++ goto fail;
++
++ return 0;
++
++fail:
++ DPRINTK("failed");
++ blkback_remove(dev);
++ return err;
++}
++
++
++/**
++ * Callback received when the hotplug scripts have placed the physical-device
++ * node. Read it and the mode node, and create a vbd. If the frontend is
++ * ready, connect.
++ */
++static void backend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ int err;
++ unsigned major;
++ unsigned minor;
++ struct backend_info *be
++ = container_of(watch, struct backend_info, backend_watch);
++ struct xenbus_device *dev = be->dev;
++
++ DPRINTK("");
++
++ err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
++ &major, &minor);
++ if (XENBUS_EXIST_ERR(err)) {
++ /* Since this watch will fire once immediately after it is
++ registered, we expect this. Ignore it, and wait for the
++ hotplug scripts. */
++ return;
++ }
++ if (err != 2) {
++ xenbus_dev_fatal(dev, err, "reading physical-device");
++ return;
++ }
++
++ if ((be->major || be->minor) &&
++ ((be->major != major) || (be->minor != minor))) {
++ printk(KERN_WARNING
++ "blkback: changing physical device (from %x:%x to "
++ "%x:%x) not supported.\n", be->major, be->minor,
++ major, minor);
++ return;
++ }
++
++ be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
++ if (IS_ERR(be->mode)) {
++ err = PTR_ERR(be->mode);
++ be->mode = NULL;
++ xenbus_dev_fatal(dev, err, "reading mode");
++ return;
++ }
++
++ if (be->major == 0 && be->minor == 0) {
++ /* Front end dir is a number, which is used as the handle. */
++
++ char *p = strrchr(dev->otherend, '/') + 1;
++ long handle = simple_strtoul(p, NULL, 0);
++
++ be->major = major;
++ be->minor = minor;
++
++ err = vbd_create(be->blkif, handle, major, minor,
++ (NULL == strchr(be->mode, 'w')));
++ if (err) {
++ be->major = be->minor = 0;
++ xenbus_dev_fatal(dev, err, "creating vbd structure");
++ return;
++ }
++
++ err = xenvbd_sysfs_addif(dev);
++ if (err) {
++ vbd_free(&be->blkif->vbd);
++ be->major = be->minor = 0;
++ xenbus_dev_fatal(dev, err, "creating sysfs entries");
++ return;
++ }
++
++ /* We're potentially connected now */
++ update_blkif_status(be->blkif);
++ }
++}
++
++
++/**
++ * Callback received when the frontend's state changes.
++ */
++static void frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state)
++{
++ struct backend_info *be = dev->dev.driver_data;
++ int err;
++
++ DPRINTK("%s", xenbus_strstate(frontend_state));
++
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ if (dev->state == XenbusStateClosed) {
++ printk(KERN_INFO "%s: %s: prepare for reconnect\n",
++ __FUNCTION__, dev->nodename);
++ xenbus_switch_state(dev, XenbusStateInitWait);
++ }
++ break;
++
++ case XenbusStateInitialised:
++ case XenbusStateConnected:
++ /* Ensure we connect even when two watches fire in
++ close succession and we miss the intermediate value
++ of frontend_state. */
++ if (dev->state == XenbusStateConnected)
++ break;
++
++ err = connect_ring(be);
++ if (err)
++ break;
++ update_blkif_status(be->blkif);
++ break;
++
++ case XenbusStateClosing:
++ blkif_disconnect(be->blkif);
++ xenbus_switch_state(dev, XenbusStateClosing);
++ break;
++
++ case XenbusStateClosed:
++ xenbus_switch_state(dev, XenbusStateClosed);
++ if (xenbus_dev_is_online(dev))
++ break;
++ /* fall through if not online */
++ case XenbusStateUnknown:
++ device_unregister(&dev->dev);
++ break;
++
++ default:
++ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++ frontend_state);
++ break;
++ }
++}
++
++
++/* ** Connection ** */
++
++
++/**
++ * Write the physical details regarding the block device to the store, and
++ * switch to Connected state.
++ */
++static void connect(struct backend_info *be)
++{
++ struct xenbus_transaction xbt;
++ int err;
++ struct xenbus_device *dev = be->dev;
++
++ DPRINTK("%s", dev->otherend);
++
++ /* Supply the information about the device the frontend needs */
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ return;
++ }
++
++ err = blkback_barrier(xbt, be, 1);
++ if (err)
++ goto abort;
++
++ err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
++ vbd_size(&be->blkif->vbd));
++ if (err) {
++ xenbus_dev_fatal(dev, err, "writing %s/sectors",
++ dev->nodename);
++ goto abort;
++ }
++
++ /* FIXME: use a typename instead */
++ err = xenbus_printf(xbt, dev->nodename, "info", "%u",
++ vbd_info(&be->blkif->vbd));
++ if (err) {
++ xenbus_dev_fatal(dev, err, "writing %s/info",
++ dev->nodename);
++ goto abort;
++ }
++ err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
++ vbd_secsize(&be->blkif->vbd));
++ if (err) {
++ xenbus_dev_fatal(dev, err, "writing %s/sector-size",
++ dev->nodename);
++ goto abort;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN)
++ goto again;
++ if (err)
++ xenbus_dev_fatal(dev, err, "ending transaction");
++
++ err = xenbus_switch_state(dev, XenbusStateConnected);
++ if (err)
++ xenbus_dev_fatal(dev, err, "switching to Connected state",
++ dev->nodename);
++
++ return;
++ abort:
++ xenbus_transaction_end(xbt, 1);
++}
++
++
++static int connect_ring(struct backend_info *be)
++{
++ struct xenbus_device *dev = be->dev;
++ unsigned long ring_ref;
++ unsigned int evtchn;
++ char protocol[64] = "";
++ int err;
++
++ DPRINTK("%s", dev->otherend);
++
++ err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu", &ring_ref,
++ "event-channel", "%u", &evtchn, NULL);
++ if (err) {
++ xenbus_dev_fatal(dev, err,
++ "reading %s/ring-ref and event-channel",
++ dev->otherend);
++ return err;
++ }
++
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
++ err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
++ "%63s", protocol, NULL);
++ if (err)
++ strcpy(protocol, "unspecified, assuming native");
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
++ else {
++ xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
++ return -1;
++ }
++ printk(KERN_INFO
++ "blkback: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
++ ring_ref, evtchn, be->blkif->blk_protocol, protocol);
++
++ /* Map the shared frame, irq etc. */
++ err = blkif_map(be->blkif, ring_ref, evtchn);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
++ ring_ref, evtchn);
++ return err;
++ }
++
++ return 0;
++}
++
++
++/* ** Driver Registration ** */
++
++
++static struct xenbus_device_id blkback_ids[] = {
++ { "vbd" },
++ { "" }
++};
++
++
++static struct xenbus_driver blkback = {
++ .name = "vbd",
++ .owner = THIS_MODULE,
++ .ids = blkback_ids,
++ .probe = blkback_probe,
++ .remove = blkback_remove,
++ .otherend_changed = frontend_changed
++};
++
++
++void blkif_xenbus_init(void)
++{
++ xenbus_register_backend(&blkback);
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/blkfront/Makefile 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,5 @@
++
++obj-$(CONFIG_XEN_BLKDEV_FRONTEND) := xenblk.o
++
++xenblk-objs := blkfront.o vbd.o
++
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/blkfront/blkfront.c 2007-08-27 14:02:08.000000000 -0400
+@@ -0,0 +1,902 @@
++/******************************************************************************
++ * blkfront.c
++ *
++ * XenLinux virtual block-device driver.
++ *
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
++ * Copyright (c) 2004, Christian Limpach
++ * Copyright (c) 2004, Andrew Warfield
++ * Copyright (c) 2005, Christopher Clark
++ * Copyright (c) 2005, XenSource Ltd
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/version.h>
++#include "block.h"
++#include <linux/cdrom.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <scsi/scsi.h>
++#include <xen/evtchn.h>
++#include <xen/xenbus.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/protocols.h>
++#include <xen/gnttab.h>
++#include <asm/hypervisor.h>
++#include <asm/maddr.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#define BLKIF_STATE_DISCONNECTED 0
++#define BLKIF_STATE_CONNECTED 1
++#define BLKIF_STATE_SUSPENDED 2
++
++#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
++ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
++#define GRANT_INVALID_REF 0
++
++static void connect(struct blkfront_info *);
++static void blkfront_closing(struct xenbus_device *);
++static int blkfront_remove(struct xenbus_device *);
++static int talk_to_backend(struct xenbus_device *, struct blkfront_info *);
++static int setup_blkring(struct xenbus_device *, struct blkfront_info *);
++
++static void kick_pending_request_queues(struct blkfront_info *);
++
++static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs);
++static void blkif_restart_queue(void *arg);
++static void blkif_recover(struct blkfront_info *);
++static void blkif_completion(struct blk_shadow *);
++static void blkif_free(struct blkfront_info *, int);
++
++
++/**
++ * Entry point to this code when a new device is created. Allocate the basic
++ * structures and the ring buffer for communication with the backend, and
++ * inform the backend of the appropriate details for those. Switch to
++ * Initialised state.
++ */
++static int blkfront_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err, vdevice, i;
++ struct blkfront_info *info;
++
++ /* FIXME: Use dynamic device id if this is not set. */
++ err = xenbus_scanf(XBT_NIL, dev->nodename,
++ "virtual-device", "%i", &vdevice);
++ if (err != 1) {
++ xenbus_dev_fatal(dev, err, "reading virtual-device");
++ return err;
++ }
++
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (!info) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
++ return -ENOMEM;
++ }
++
++ info->xbdev = dev;
++ info->vdevice = vdevice;
++ info->connected = BLKIF_STATE_DISCONNECTED;
++ INIT_WORK(&info->work, blkif_restart_queue, (void *)info);
++
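++	/* Chain the shadow entries into a free list via req.id; the last
++	 * entry gets an out-of-range sentinel to mark the end. */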
++ for (i = 0; i < BLK_RING_SIZE; i++)
++ info->shadow[i].req.id = i+1;
++ info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
++
++ /* Front end dir is a number, which is used as the id. */
++ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0);
++ dev->dev.driver_data = info;
++
++ err = talk_to_backend(dev, info);
++ if (err) {
++ kfree(info);
++ dev->dev.driver_data = NULL;
++ return err;
++ }
++
++ return 0;
++}
++
++
++/**
++ * We are reconnecting to the backend, due to a suspend/resume, or a backend
++ * driver restart. We tear down our blkif structure and recreate it, but
++ * leave the device-layer structures intact so that this is transparent to the
++ * rest of the kernel.
++ */
++static int blkfront_resume(struct xenbus_device *dev)
++{
++ struct blkfront_info *info = dev->dev.driver_data;
++ int err;
++
++ DPRINTK("blkfront_resume: %s\n", dev->nodename);
++
++ blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
++
++ err = talk_to_backend(dev, info);
++ if (info->connected == BLKIF_STATE_SUSPENDED && !err)
++ blkif_recover(info);
++
++ return err;
++}
++
++
++/* Common code used when first setting up, and when resuming. */
++static int talk_to_backend(struct xenbus_device *dev,
++ struct blkfront_info *info)
++{
++ const char *message = NULL;
++ struct xenbus_transaction xbt;
++ int err;
++
++ /* Create shared ring, alloc event channel. */
++ err = setup_blkring(dev, info);
++ if (err)
++ goto out;
++
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ goto destroy_blkring;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename,
++ "ring-ref","%u", info->ring_ref);
++ if (err) {
++ message = "writing ring-ref";
++ goto abort_transaction;
++ }
++ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++ irq_to_evtchn_port(info->irq));
++ if (err) {
++ message = "writing event-channel";
++ goto abort_transaction;
++ }
++ err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
++ XEN_IO_PROTO_ABI_NATIVE);
++ if (err) {
++ message = "writing protocol";
++ goto abort_transaction;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err) {
++ if (err == -EAGAIN)
++ goto again;
++ xenbus_dev_fatal(dev, err, "completing transaction");
++ goto destroy_blkring;
++ }
++
++ xenbus_switch_state(dev, XenbusStateInitialised);
++
++ return 0;
++
++ abort_transaction:
++ xenbus_transaction_end(xbt, 1);
++ if (message)
++ xenbus_dev_fatal(dev, err, "%s", message);
++ destroy_blkring:
++ blkif_free(info, 0);
++ out:
++ return err;
++}
++
++
++static int setup_blkring(struct xenbus_device *dev,
++ struct blkfront_info *info)
++{
++ blkif_sring_t *sring;
++ int err;
++
++ info->ring_ref = GRANT_INVALID_REF;
++
++ sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
++ if (!sring) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
++ return -ENOMEM;
++ }
++ SHARED_RING_INIT(sring);
++ FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
++
++ err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
++ if (err < 0) {
++ free_page((unsigned long)sring);
++ info->ring.sring = NULL;
++ goto fail;
++ }
++ info->ring_ref = err;
++
++ err = bind_listening_port_to_irqhandler(
++ dev->otherend_id, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
++ if (err <= 0) {
++ xenbus_dev_fatal(dev, err,
++ "bind_listening_port_to_irqhandler");
++ goto fail;
++ }
++ info->irq = err;
++
++ return 0;
++fail:
++ blkif_free(info, 0);
++ return err;
++}
++
++
++/**
++ * Callback received when the backend's state changes.
++ */
++static void backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct blkfront_info *info = dev->dev.driver_data;
++ struct block_device *bd;
++
++ DPRINTK("blkfront:backend_changed.\n");
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitWait:
++ case XenbusStateInitialised:
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ break;
++
++ case XenbusStateConnected:
++ connect(info);
++ break;
++
++ case XenbusStateClosing:
++ bd = bdget(info->dev);
++ if (bd == NULL)
++ xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++ down(&bd->bd_sem);
++#else
++ mutex_lock(&bd->bd_mutex);
++#endif
++ if (info->users > 0)
++ xenbus_dev_error(dev, -EBUSY,
++ "Device in use; refusing to close");
++ else
++ blkfront_closing(dev);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++ up(&bd->bd_sem);
++#else
++ mutex_unlock(&bd->bd_mutex);
++#endif
++ bdput(bd);
++ break;
++ }
++}
++
++
++/* ** Connection ** */
++
++
++/*
++ * Invoked when the backend is finally 'ready' (and has provided the
++ * details of the physical device - #sectors, size, etc).
++ */
++static void connect(struct blkfront_info *info)
++{
++ unsigned long long sectors;
++ unsigned long sector_size;
++ unsigned int binfo;
++ int err;
++
++ if ((info->connected == BLKIF_STATE_CONNECTED) ||
++ (info->connected == BLKIF_STATE_SUSPENDED) )
++ return;
++
++ DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend);
++
++ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
++ "sectors", "%Lu", &sectors,
++ "info", "%u", &binfo,
++ "sector-size", "%lu", &sector_size,
++ NULL);
++ if (err) {
++ xenbus_dev_fatal(info->xbdev, err,
++ "reading backend fields at %s",
++ info->xbdev->otherend);
++ return;
++ }
++
++ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
++ "feature-barrier", "%lu", &info->feature_barrier,
++ NULL);
++ if (err)
++ info->feature_barrier = 0;
++
++ err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
++ if (err) {
++ xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
++ info->xbdev->otherend);
++ return;
++ }
++
++ (void)xenbus_switch_state(info->xbdev, XenbusStateConnected);
++
++ /* Kick pending requests. */
++ spin_lock_irq(&blkif_io_lock);
++ info->connected = BLKIF_STATE_CONNECTED;
++ kick_pending_request_queues(info);
++ spin_unlock_irq(&blkif_io_lock);
++
++ add_disk(info->gd);
++}
++
++/**
++ * Handle the change of state of the backend to Closing. We must delete our
++ * device-layer structures now, to ensure that writes are flushed through to
++ * the backend. Once this is done, we can switch to Closed in
++ * acknowledgement.
++ */
++static void blkfront_closing(struct xenbus_device *dev)
++{
++ struct blkfront_info *info = dev->dev.driver_data;
++ unsigned long flags;
++
++ DPRINTK("blkfront_closing: %s removed\n", dev->nodename);
++
++ if (info->rq == NULL)
++ goto out;
++
++ spin_lock_irqsave(&blkif_io_lock, flags);
++ /* No more blkif_request(). */
++ blk_stop_queue(info->rq);
++ /* No more gnttab callback work. */
++ gnttab_cancel_free_callback(&info->callback);
++ spin_unlock_irqrestore(&blkif_io_lock, flags);
++
++ /* Flush gnttab callback work. Must be done with no locks held. */
++ flush_scheduled_work();
++
++ xlvbd_del(info);
++
++ out:
++ xenbus_frontend_closed(dev);
++}
++
++
++static int blkfront_remove(struct xenbus_device *dev)
++{
++ struct blkfront_info *info = dev->dev.driver_data;
++
++ DPRINTK("blkfront_remove: %s removed\n", dev->nodename);
++
++ blkif_free(info, 0);
++
++ kfree(info);
++
++ return 0;
++}
++
++
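++/*
++ * Shadow free-list helpers: a free shadow slot stores the index of the
++ * next free slot in req.id, so allocation and release are O(1).
++ */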
++static inline int GET_ID_FROM_FREELIST(
++ struct blkfront_info *info)
++{
++ unsigned long free = info->shadow_free;
++ BUG_ON(free > BLK_RING_SIZE);
++ info->shadow_free = info->shadow[free].req.id;
++ info->shadow[free].req.id = 0x0fffffee; /* debug */
++ return free;
++}
++
++static inline void ADD_ID_TO_FREELIST(
++ struct blkfront_info *info, unsigned long id)
++{
++ info->shadow[id].req.id = info->shadow_free;
++ info->shadow[id].request = 0;
++ info->shadow_free = id;
++}
++
++static inline void flush_requests(struct blkfront_info *info)
++{
++ int notify;
++
++ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
++
++ if (notify)
++ notify_remote_via_irq(info->irq);
++}
++
++static void kick_pending_request_queues(struct blkfront_info *info)
++{
++ if (!RING_FULL(&info->ring)) {
++ /* Re-enable calldowns. */
++ blk_start_queue(info->rq);
++ /* Kick things off immediately. */
++ do_blkif_request(info->rq);
++ }
++}
++
++static void blkif_restart_queue(void *arg)
++{
++ struct blkfront_info *info = (struct blkfront_info *)arg;
++ spin_lock_irq(&blkif_io_lock);
++ if (info->connected == BLKIF_STATE_CONNECTED)
++ kick_pending_request_queues(info);
++ spin_unlock_irq(&blkif_io_lock);
++}
++
++static void blkif_restart_queue_callback(void *arg)
++{
++ struct blkfront_info *info = (struct blkfront_info *)arg;
++ schedule_work(&info->work);
++}
++
++int blkif_open(struct inode *inode, struct file *filep)
++{
++ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
++ info->users++;
++ return 0;
++}
++
++
++int blkif_release(struct inode *inode, struct file *filep)
++{
++ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
++ info->users--;
++ if (info->users == 0) {
++ /* Check whether we have been instructed to close. We will
++ have ignored this request initially, as the device was
++ still mounted. */
++ struct xenbus_device * dev = info->xbdev;
++ enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
++
++ if (state == XenbusStateClosing)
++ blkfront_closing(dev);
++ }
++ return 0;
++}
++
++
++int blkif_ioctl(struct inode *inode, struct file *filep,
++ unsigned command, unsigned long argument)
++{
++ int i;
++
++ DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
++ command, (long)argument, inode->i_rdev);
++
++ switch (command) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++ case HDIO_GETGEO: {
++ struct block_device *bd = inode->i_bdev;
++ struct hd_geometry geo;
++ int ret;
++
++ if (!argument)
++ return -EINVAL;
++
++ geo.start = get_start_sect(bd);
++ ret = blkif_getgeo(bd, &geo);
++ if (ret)
++ return ret;
++
++ if (copy_to_user((struct hd_geometry __user *)argument, &geo,
++ sizeof(geo)))
++ return -EFAULT;
++
++ return 0;
++ }
++#endif
++ case CDROMMULTISESSION:
++ DPRINTK("FIXME: support multisession CDs later\n");
++ for (i = 0; i < sizeof(struct cdrom_multisession); i++)
++ if (put_user(0, (char __user *)(argument + i)))
++ return -EFAULT;
++ return 0;
++
++ default:
++ /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
++ command);*/
++ return -EINVAL; /* same return as native Linux */
++ }
++
++ return 0;
++}
++
++
++int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
++{
++ /* We don't have real geometry info, but let's at least return
++ values consistent with the size of the device */
++ sector_t nsect = get_capacity(bd->bd_disk);
++ sector_t cylinders = nsect;
++
++ hg->heads = 0xff;
++ hg->sectors = 0x3f;
++ sector_div(cylinders, hg->heads * hg->sectors);
++ hg->cylinders = cylinders;
++ if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
++ hg->cylinders = 0xffff;
++ return 0;
++}
++
++
++/*
++ * blkif_queue_request
++ *
++ * request block io
++ *
++ * id: for guest use only.
++ * operation: BLKIF_OP_{READ,WRITE,PROBE}
++ * buffer: buffer to read/write into. this should be a
++ * virtual address in the guest os.
++ */
++static int blkif_queue_request(struct request *req)
++{
++ struct blkfront_info *info = req->rq_disk->private_data;
++ unsigned long buffer_mfn;
++ blkif_request_t *ring_req;
++ struct bio *bio;
++ struct bio_vec *bvec;
++ int idx;
++ unsigned long id;
++ unsigned int fsect, lsect;
++ int ref;
++ grant_ref_t gref_head;
++
++ if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
++ return 1;
++
++ if (gnttab_alloc_grant_references(
++ BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
++ gnttab_request_free_callback(
++ &info->callback,
++ blkif_restart_queue_callback,
++ info,
++ BLKIF_MAX_SEGMENTS_PER_REQUEST);
++ return 1;
++ }
++
++ /* Fill out a communications ring structure. */
++ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
++ id = GET_ID_FROM_FREELIST(info);
++ info->shadow[id].request = (unsigned long)req;
++
++ ring_req->id = id;
++ ring_req->sector_number = (blkif_sector_t)req->sector;
++ ring_req->handle = info->handle;
++
++ ring_req->operation = rq_data_dir(req) ?
++ BLKIF_OP_WRITE : BLKIF_OP_READ;
++ if (blk_barrier_rq(req))
++ ring_req->operation = BLKIF_OP_WRITE_BARRIER;
++
++ ring_req->nr_segments = 0;
++ rq_for_each_bio (bio, req) {
++ bio_for_each_segment (bvec, bio, idx) {
++ BUG_ON(ring_req->nr_segments
++ == BLKIF_MAX_SEGMENTS_PER_REQUEST);
++ buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
++ fsect = bvec->bv_offset >> 9;
++ lsect = fsect + (bvec->bv_len >> 9) - 1;
++ /* install a grant reference. */
++ ref = gnttab_claim_grant_reference(&gref_head);
++ BUG_ON(ref == -ENOSPC);
++
++ gnttab_grant_foreign_access_ref(
++ ref,
++ info->xbdev->otherend_id,
++ buffer_mfn,
++ rq_data_dir(req) );
++
++ info->shadow[id].frame[ring_req->nr_segments] =
++ mfn_to_pfn(buffer_mfn);
++
++ ring_req->seg[ring_req->nr_segments] =
++ (struct blkif_request_segment) {
++ .gref = ref,
++ .first_sect = fsect,
++ .last_sect = lsect };
++
++ ring_req->nr_segments++;
++ }
++ }
++
++ info->ring.req_prod_pvt++;
++
++ /* Keep a private copy so we can reissue requests when recovering. */
++ info->shadow[id].req = *ring_req;
++
++ gnttab_free_grant_references(gref_head);
++
++ return 0;
++}
++
++/*
++ * do_blkif_request
++ * read a block; request is in a request queue
++ */
++void do_blkif_request(request_queue_t *rq)
++{
++ struct blkfront_info *info = NULL;
++ struct request *req;
++ int queued;
++
++ DPRINTK("Entered do_blkif_request\n");
++
++ queued = 0;
++
++ while ((req = elv_next_request(rq)) != NULL) {
++ info = req->rq_disk->private_data;
++ if (!blk_fs_request(req)) {
++ end_request(req, 0);
++ continue;
++ }
++
++ if (RING_FULL(&info->ring))
++ goto wait;
++
++ DPRINTK("do_blk_req %p: cmd %p, sec %llx, "
++ "(%u/%li) buffer:%p [%s]\n",
++ req, req->cmd, (long long)req->sector,
++ req->current_nr_sectors,
++ req->nr_sectors, req->buffer,
++ rq_data_dir(req) ? "write" : "read");
++
++
++ blkdev_dequeue_request(req);
++ if (blkif_queue_request(req)) {
++ blk_requeue_request(rq, req);
++ wait:
++ /* Avoid pointless unplugs. */
++ blk_stop_queue(rq);
++ break;
++ }
++
++ queued++;
++ }
++
++ if (queued != 0)
++ flush_requests(info);
++}
++
++
++static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
++{
++ struct request *req;
++ blkif_response_t *bret;
++ RING_IDX i, rp;
++ unsigned long flags;
++ struct blkfront_info *info = (struct blkfront_info *)dev_id;
++ int uptodate;
++
++ spin_lock_irqsave(&blkif_io_lock, flags);
++
++ if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
++ spin_unlock_irqrestore(&blkif_io_lock, flags);
++ return IRQ_HANDLED;
++ }
++
++ again:
++ rp = info->ring.sring->rsp_prod;
++ rmb(); /* Ensure we see queued responses up to 'rp'. */
++
++ for (i = info->ring.rsp_cons; i != rp; i++) {
++ unsigned long id;
++ int ret;
++
++ bret = RING_GET_RESPONSE(&info->ring, i);
++ id = bret->id;
++ req = (struct request *)info->shadow[id].request;
++
++ blkif_completion(&info->shadow[id]);
++
++ ADD_ID_TO_FREELIST(info, id);
++
++ uptodate = (bret->status == BLKIF_RSP_OKAY);
++ switch (bret->operation) {
++ case BLKIF_OP_WRITE_BARRIER:
++ if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
++ printk("blkfront: %s: write barrier op failed\n",
++ info->gd->disk_name);
++ uptodate = -EOPNOTSUPP;
++ info->feature_barrier = 0;
++ xlvbd_barrier(info);
++ }
++ /* fall through */
++ case BLKIF_OP_READ:
++ case BLKIF_OP_WRITE:
++ if (unlikely(bret->status != BLKIF_RSP_OKAY))
++ DPRINTK("Bad return from blkdev data "
++ "request: %x\n", bret->status);
++
++ ret = end_that_request_first(req, uptodate,
++ req->hard_nr_sectors);
++ BUG_ON(ret);
++ end_that_request_last(req, uptodate);
++ break;
++ default:
++ BUG();
++ }
++ }
++
++ info->ring.rsp_cons = i;
++
++ if (i != info->ring.req_prod_pvt) {
++ int more_to_do;
++ RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
++ if (more_to_do)
++ goto again;
++ } else
++ info->ring.sring->rsp_event = i + 1;
++
++ kick_pending_request_queues(info);
++
++ spin_unlock_irqrestore(&blkif_io_lock, flags);
++
++ return IRQ_HANDLED;
++}
++
++static void blkif_free(struct blkfront_info *info, int suspend)
++{
++ /* Prevent new requests being issued until we fix things up. */
++ spin_lock_irq(&blkif_io_lock);
++ info->connected = suspend ?
++ BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
++ /* No more blkif_request(). */
++ if (info->rq)
++ blk_stop_queue(info->rq);
++ /* No more gnttab callback work. */
++ gnttab_cancel_free_callback(&info->callback);
++ spin_unlock_irq(&blkif_io_lock);
++
++ /* Flush gnttab callback work. Must be done with no locks held. */
++ flush_scheduled_work();
++
++ /* Free resources associated with old device channel. */
++ if (info->ring_ref != GRANT_INVALID_REF) {
++ gnttab_end_foreign_access(info->ring_ref, 0,
++ (unsigned long)info->ring.sring);
++ info->ring_ref = GRANT_INVALID_REF;
++ info->ring.sring = NULL;
++ }
++ if (info->irq)
++ unbind_from_irqhandler(info->irq, info);
++ info->irq = 0;
++}
++
++static void blkif_completion(struct blk_shadow *s)
++{
++ int i;
++ for (i = 0; i < s->req.nr_segments; i++)
++ gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
++}
++
++static void blkif_recover(struct blkfront_info *info)
++{
++ int i;
++ blkif_request_t *req;
++ struct blk_shadow *copy;
++ int j;
++
++ /* Stage 1: Make a safe copy of the shadow state. */
++ copy = kmalloc(sizeof(info->shadow), GFP_KERNEL | __GFP_NOFAIL);
++ memcpy(copy, info->shadow, sizeof(info->shadow));
++
++ /* Stage 2: Set up free list. */
++ memset(&info->shadow, 0, sizeof(info->shadow));
++ for (i = 0; i < BLK_RING_SIZE; i++)
++ info->shadow[i].req.id = i+1;
++ info->shadow_free = info->ring.req_prod_pvt;
++ info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
++
++ /* Stage 3: Find pending requests and requeue them. */
++ for (i = 0; i < BLK_RING_SIZE; i++) {
++ /* Not in use? */
++ if (copy[i].request == 0)
++ continue;
++
++ /* Grab a request slot and copy shadow state into it. */
++ req = RING_GET_REQUEST(
++ &info->ring, info->ring.req_prod_pvt);
++ *req = copy[i].req;
++
++ /* We get a new request id, and must reset the shadow state. */
++ req->id = GET_ID_FROM_FREELIST(info);
++ memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
++
++ /* Rewrite any grant references invalidated by susp/resume. */
++ for (j = 0; j < req->nr_segments; j++)
++ gnttab_grant_foreign_access_ref(
++ req->seg[j].gref,
++ info->xbdev->otherend_id,
++ pfn_to_mfn(info->shadow[req->id].frame[j]),
++ rq_data_dir(
++ (struct request *)
++ info->shadow[req->id].request));
++ info->shadow[req->id].req = *req;
++
++ info->ring.req_prod_pvt++;
++ }
++
++ kfree(copy);
++
++ (void)xenbus_switch_state(info->xbdev, XenbusStateConnected);
++
++ spin_lock_irq(&blkif_io_lock);
++
++ /* Now safe for us to use the shared ring */
++ info->connected = BLKIF_STATE_CONNECTED;
++
++ /* Send off requeued requests */
++ flush_requests(info);
++
++ /* Kick any other new requests queued since we resumed */
++ kick_pending_request_queues(info);
++
++ spin_unlock_irq(&blkif_io_lock);
++}
++
++
++/* ** Driver Registration ** */
++
++
++static struct xenbus_device_id blkfront_ids[] = {
++ { "vbd" },
++ { "" }
++};
++
++
++static struct xenbus_driver blkfront = {
++ .name = "vbd",
++ .owner = THIS_MODULE,
++ .ids = blkfront_ids,
++ .probe = blkfront_probe,
++ .remove = blkfront_remove,
++ .resume = blkfront_resume,
++ .otherend_changed = backend_changed,
++};
++
++
++static int __init xlblk_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ return xenbus_register_frontend(&blkfront);
++}
++module_init(xlblk_init);
++
++
++static void xlblk_exit(void)
++{
++ return xenbus_unregister_driver(&blkfront);
++}
++module_exit(xlblk_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/blkfront/block.h 2007-08-27 14:02:08.000000000 -0400
+@@ -0,0 +1,142 @@
++/******************************************************************************
++ * block.h
++ *
++ * Shared definitions between all levels of XenLinux Virtual block devices.
++ *
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
++ * Copyright (c) 2004-2005, Christian Limpach
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_DRIVERS_BLOCK_H__
++#define __XEN_DRIVERS_BLOCK_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/hdreg.h>
++#include <linux/blkdev.h>
++#include <linux/major.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/gnttab.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/io/blkif.h>
++#include <xen/interface/io/ring.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/uaccess.h>
++
++#define DPRINTK(_f, _a...) pr_debug(_f, ## _a)
++
++#if 0
++#define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a)
++#else
++#define DPRINTK_IOCTL(_f, _a...) ((void)0)
++#endif
++
++struct xlbd_type_info
++{
++ int partn_shift;
++ int disks_per_major;
++ char *devname;
++ char *diskname;
++};
++
++struct xlbd_major_info
++{
++ int major;
++ int index;
++ int usage;
++ struct xlbd_type_info *type;
++};
++
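++/*
++ * Per-request shadow state: a copy of the ring request plus the pfns
++ * granted for it, used to release grants on completion and to replay
++ * in-flight requests after suspend/resume.
++ */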
++struct blk_shadow {
++ blkif_request_t req;
++ unsigned long request;
++ unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++};
++
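++/* Number of request/response slots that fit in the one-page shared ring. */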
++#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
++
++/*
++ * We have one of these per vbd, whether ide, scsi or 'other'. They
++ * hang in private_data off the gendisk structure. We may end up
++ * putting all kinds of interesting stuff here :-)
++ */
++struct blkfront_info
++{
++ struct xenbus_device *xbdev;
++ dev_t dev;
++ struct gendisk *gd;
++ int vdevice;
++ blkif_vdev_t handle;
++ int connected;
++ int ring_ref;
++ blkif_front_ring_t ring;
++ unsigned int irq;
++ struct xlbd_major_info *mi;
++ request_queue_t *rq;
++ struct work_struct work;
++ struct gnttab_free_callback callback;
++ struct blk_shadow shadow[BLK_RING_SIZE];
++ unsigned long shadow_free;
++ int feature_barrier;
++
++ /**
++ * The number of people holding this device open. We won't allow a
++ * hot-unplug unless this is 0.
++ */
++ int users;
++};
++
++extern spinlock_t blkif_io_lock;
++
++extern int blkif_open(struct inode *inode, struct file *filep);
++extern int blkif_release(struct inode *inode, struct file *filep);
++extern int blkif_ioctl(struct inode *inode, struct file *filep,
++ unsigned command, unsigned long argument);
++extern int blkif_getgeo(struct block_device *, struct hd_geometry *);
++extern int blkif_check(dev_t dev);
++extern int blkif_revalidate(dev_t dev);
++extern void do_blkif_request (request_queue_t *rq);
++
++/* Virtual block-device subsystem. */
++/* Note that xlvbd_add doesn't call add_disk for you: you're expected
++ to call add_disk on info->gd once the disk is properly connected
++ up. */
++int xlvbd_add(blkif_sector_t capacity, int device,
++ u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
++void xlvbd_del(struct blkfront_info *info);
++int xlvbd_barrier(struct blkfront_info *info);
++
++#endif /* __XEN_DRIVERS_BLOCK_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/blkfront/vbd.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,372 @@
++/******************************************************************************
++ * vbd.c
++ *
++ * XenLinux virtual block-device driver (xvd).
++ *
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
++ * Copyright (c) 2004-2005, Christian Limpach
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "block.h"
++#include <linux/blkdev.h>
++#include <linux/list.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#define BLKIF_MAJOR(dev) ((dev)>>8)
++#define BLKIF_MINOR(dev) ((dev) & 0xff)
++
++/*
++ * For convenience we distinguish between ide, scsi and 'other' (i.e.,
++ * potentially combinations of the two) in the naming scheme and in a few other
++ * places.
++ */
++
++#define NUM_IDE_MAJORS 10
++#define NUM_SCSI_MAJORS 17
++#define NUM_VBD_MAJORS 1
++
++static struct xlbd_type_info xlbd_ide_type = {
++ .partn_shift = 6,
++ .disks_per_major = 2,
++ .devname = "ide",
++ .diskname = "hd",
++};
++
++static struct xlbd_type_info xlbd_scsi_type = {
++ .partn_shift = 4,
++ .disks_per_major = 16,
++ .devname = "sd",
++ .diskname = "sd",
++};
++
++static struct xlbd_type_info xlbd_vbd_type = {
++ .partn_shift = 4,
++ .disks_per_major = 16,
++ .devname = "xvd",
++ .diskname = "xvd",
++};
++
++static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
++ NUM_VBD_MAJORS];
++
++#define XLBD_MAJOR_IDE_START 0
++#define XLBD_MAJOR_SCSI_START (NUM_IDE_MAJORS)
++#define XLBD_MAJOR_VBD_START (NUM_IDE_MAJORS + NUM_SCSI_MAJORS)
++
++#define XLBD_MAJOR_IDE_RANGE XLBD_MAJOR_IDE_START ... XLBD_MAJOR_SCSI_START - 1
++#define XLBD_MAJOR_SCSI_RANGE XLBD_MAJOR_SCSI_START ... XLBD_MAJOR_VBD_START - 1
++#define XLBD_MAJOR_VBD_RANGE XLBD_MAJOR_VBD_START ... XLBD_MAJOR_VBD_START + NUM_VBD_MAJORS - 1
++
++/* Information about our VBDs. */
++#define MAX_VBDS 64
++static LIST_HEAD(vbds_list);
++
++static struct block_device_operations xlvbd_block_fops =
++{
++ .owner = THIS_MODULE,
++ .open = blkif_open,
++ .release = blkif_release,
++ .ioctl = blkif_ioctl,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++ .getgeo = blkif_getgeo
++#endif
++};
++
++DEFINE_SPINLOCK(blkif_io_lock);
++
++static struct xlbd_major_info *
++xlbd_alloc_major_info(int major, int minor, int index)
++{
++ struct xlbd_major_info *ptr;
++
++ ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL);
++ if (ptr == NULL)
++ return NULL;
++
++ ptr->major = major;
++
++ switch (index) {
++ case XLBD_MAJOR_IDE_RANGE:
++ ptr->type = &xlbd_ide_type;
++ ptr->index = index - XLBD_MAJOR_IDE_START;
++ break;
++ case XLBD_MAJOR_SCSI_RANGE:
++ ptr->type = &xlbd_scsi_type;
++ ptr->index = index - XLBD_MAJOR_SCSI_START;
++ break;
++ case XLBD_MAJOR_VBD_RANGE:
++ ptr->type = &xlbd_vbd_type;
++ ptr->index = index - XLBD_MAJOR_VBD_START;
++ break;
++ }
++
++ if (register_blkdev(ptr->major, ptr->type->devname)) {
++ kfree(ptr);
++ return NULL;
++ }
++
++ printk("xen-vbd: registered block device major %i\n", ptr->major);
++ major_info[index] = ptr;
++ return ptr;
++}
++
++static struct xlbd_major_info *
++xlbd_get_major_info(int vdevice)
++{
++ struct xlbd_major_info *mi;
++ int major, minor, index;
++
++ major = BLKIF_MAJOR(vdevice);
++ minor = BLKIF_MINOR(vdevice);
++
++ switch (major) {
++ case IDE0_MAJOR: index = 0; break;
++ case IDE1_MAJOR: index = 1; break;
++ case IDE2_MAJOR: index = 2; break;
++ case IDE3_MAJOR: index = 3; break;
++ case IDE4_MAJOR: index = 4; break;
++ case IDE5_MAJOR: index = 5; break;
++ case IDE6_MAJOR: index = 6; break;
++ case IDE7_MAJOR: index = 7; break;
++ case IDE8_MAJOR: index = 8; break;
++ case IDE9_MAJOR: index = 9; break;
++ case SCSI_DISK0_MAJOR: index = 10; break;
++ case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
++ index = 11 + major - SCSI_DISK1_MAJOR;
++ break;
++ case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR:
++ index = 18 + major - SCSI_DISK8_MAJOR;
++ break;
++ case SCSI_CDROM_MAJOR: index = 26; break;
++ default: index = 27; break;
++ }
++
++ mi = ((major_info[index] != NULL) ? major_info[index] :
++ xlbd_alloc_major_info(major, minor, index));
++ if (mi)
++ mi->usage++;
++ return mi;
++}
++
++static void
++xlbd_put_major_info(struct xlbd_major_info *mi)
++{
++ mi->usage--;
++ /* XXX: release major if 0 */
++}
++
++static int
++xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
++{
++ request_queue_t *rq;
++
++ rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
++ if (rq == NULL)
++ return -1;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
++ elevator_init(rq, "noop");
++#else
++ elevator_init(rq, &elevator_noop);
++#endif
++
++ /* Hard sector size and max sectors impersonate the equiv. hardware. */
++ blk_queue_hardsect_size(rq, sector_size);
++ blk_queue_max_sectors(rq, 512);
++
++ /* Each segment in a request is up to an aligned page in size. */
++ blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
++ blk_queue_max_segment_size(rq, PAGE_SIZE);
++
++ /* Ensure a merged request will fit in a single I/O ring slot. */
++ blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
++ blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
++
++ /* Make sure buffer addresses are sector-aligned. */
++ blk_queue_dma_alignment(rq, 511);
++
++ gd->queue = rq;
++
++ return 0;
++}
++
++static int
++xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity, int vdevice,
++ u16 vdisk_info, u16 sector_size,
++ struct blkfront_info *info)
++{
++ struct gendisk *gd;
++ struct xlbd_major_info *mi;
++ int nr_minors = 1;
++ int err = -ENODEV;
++ unsigned int offset;
++
++ BUG_ON(info->gd != NULL);
++ BUG_ON(info->mi != NULL);
++ BUG_ON(info->rq != NULL);
++
++ mi = xlbd_get_major_info(vdevice);
++ if (mi == NULL)
++ goto out;
++ info->mi = mi;
++
++ if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0)
++ nr_minors = 1 << mi->type->partn_shift;
++
++ gd = alloc_disk(nr_minors);
++ if (gd == NULL)
++ goto out;
++
++ offset = mi->index * mi->type->disks_per_major +
++ (minor >> mi->type->partn_shift);
++ if (nr_minors > 1) {
++ if (offset < 26) {
++ sprintf(gd->disk_name, "%s%c",
++ mi->type->diskname, 'a' + offset );
++ }
++ else {
++ sprintf(gd->disk_name, "%s%c%c",
++ mi->type->diskname,
++ 'a' + ((offset/26)-1), 'a' + (offset%26) );
++ }
++ }
++ else {
++ if (offset < 26) {
++ sprintf(gd->disk_name, "%s%c%d",
++ mi->type->diskname,
++ 'a' + offset,
++ minor & ((1 << mi->type->partn_shift) - 1));
++ }
++ else {
++ sprintf(gd->disk_name, "%s%c%c%d",
++ mi->type->diskname,
++ 'a' + ((offset/26)-1), 'a' + (offset%26),
++ minor & ((1 << mi->type->partn_shift) - 1));
++ }
++ }
++
++ gd->major = mi->major;
++ gd->first_minor = minor;
++ gd->fops = &xlvbd_block_fops;
++ gd->private_data = info;
++ gd->driverfs_dev = &(info->xbdev->dev);
++ set_capacity(gd, capacity);
++
++ if (xlvbd_init_blk_queue(gd, sector_size)) {
++ del_gendisk(gd);
++ goto out;
++ }
++
++ info->rq = gd->queue;
++ info->gd = gd;
++
++ if (info->feature_barrier)
++ xlvbd_barrier(info);
++
++ if (vdisk_info & VDISK_READONLY)
++ set_disk_ro(gd, 1);
++
++ if (vdisk_info & VDISK_REMOVABLE)
++ gd->flags |= GENHD_FL_REMOVABLE;
++
++ if (vdisk_info & VDISK_CDROM)
++ gd->flags |= GENHD_FL_CD;
++
++ return 0;
++
++ out:
++ if (mi)
++ xlbd_put_major_info(mi);
++ info->mi = NULL;
++ return err;
++}
++
++int
++xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info,
++ u16 sector_size, struct blkfront_info *info)
++{
++ struct block_device *bd;
++ int err = 0;
++
++ info->dev = MKDEV(BLKIF_MAJOR(vdevice), BLKIF_MINOR(vdevice));
++
++ bd = bdget(info->dev);
++ if (bd == NULL)
++ return -ENODEV;
++
++ err = xlvbd_alloc_gendisk(BLKIF_MINOR(vdevice), capacity, vdevice,
++ vdisk_info, sector_size, info);
++
++ bdput(bd);
++ return err;
++}
++
++void
++xlvbd_del(struct blkfront_info *info)
++{
++ if (info->mi == NULL)
++ return;
++
++ BUG_ON(info->gd == NULL);
++ del_gendisk(info->gd);
++ put_disk(info->gd);
++ info->gd = NULL;
++
++ xlbd_put_major_info(info->mi);
++ info->mi = NULL;
++
++ BUG_ON(info->rq == NULL);
++ blk_cleanup_queue(info->rq);
++ info->rq = NULL;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++int
++xlvbd_barrier(struct blkfront_info *info)
++{
++ int err;
++
++ err = blk_queue_ordered(info->rq,
++ info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL);
++ if (err)
++ return err;
++ printk("blkfront: %s: barriers %s\n",
++ info->gd->disk_name, info->feature_barrier ? "enabled" : "disabled");
++ return 0;
++}
++#else
++int
++xlvbd_barrier(struct blkfront_info *info)
++{
++ printk("blkfront: %s: barriers disabled\n", info->gd->disk_name);
++ return -ENOSYS;
++}
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/blktap/Makefile 2007-08-27 14:01:54.000000000 -0400
+@@ -0,0 +1,5 @@
++LINUXINCLUDE += -I../xen/include/public/io
++
++obj-$(CONFIG_XEN_BLKDEV_TAP) := xenblktap.o
++
++xenblktap-y := xenbus.o interface.o blktap.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/blktap/blktap.c 2007-08-27 14:02:10.000000000 -0400
+@@ -0,0 +1,1528 @@
++/******************************************************************************
++ * drivers/xen/blktap/blktap.c
++ *
++ * Back-end driver for user level virtual block devices. This portion of the
++ * driver exports a 'unified' block-device interface that can be accessed
++ * by any operating system that implements a compatible front end. Requests
++ * are remapped to a user-space memory region.
++ *
++ * Based on the blkback driver code.
++ *
++ * Copyright (c) 2004-2005, Andrew Warfield and Julian Chesterfield
++ *
++ * Clean ups and fix ups:
++ * Copyright (c) 2006, Steven Rostedt - Red Hat, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/kthread.h>
++#include <linux/list.h>
++#include <asm/hypervisor.h>
++#include "common.h"
++#include <xen/balloon.h>
++#include <xen/driver_util.h>
++#include <linux/kernel.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/errno.h>
++#include <linux/major.h>
++#include <linux/gfp.h>
++#include <linux/poll.h>
++#include <asm/tlbflush.h>
++
++#define MAX_TAP_DEV 256 /*the maximum number of tapdisk ring devices */
++#define MAX_DEV_NAME 100 /*the max tapdisk ring device name e.g. blktap0 */
++
++/*
++ * The maximum number of requests that can be outstanding at any time
++ * is determined by
++ *
++ * [mmap_alloc * MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST]
++ *
++ * where mmap_alloc < MAX_DYNAMIC_MEM.
++ *
++ * TODO:
++ * mmap_alloc is initialised to 2 and should be adjustable on the fly via
++ * sysfs.
++ */
++#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
++#define MAX_DYNAMIC_MEM BLK_RING_SIZE
++#define MAX_PENDING_REQS BLK_RING_SIZE
++#define MMAP_PAGES (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
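++/*
++ * The tapdisk mmap region is RING_PAGES of shared ring followed by one
++ * page per segment of each pending request; MMAP_VADDR computes the
++ * address of segment _seg of request _req within the area at _start.
++ */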
++#define MMAP_VADDR(_start, _req,_seg) \
++ (_start + \
++ ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \
++ ((_seg) * PAGE_SIZE))
++static int blkif_reqs = MAX_PENDING_REQS;
++static int mmap_pages = MMAP_PAGES;
++
++#define RING_PAGES 1 /* BLKTAP - immediately before the mmap area, we
++ * have a bunch of pages reserved for shared
++ * memory rings.
++ */
++
++/*Data struct handed back to userspace for tapdisk device to VBD mapping*/
++typedef struct domid_translate {
++ unsigned short domid;
++ unsigned short busid;
++} domid_translate_t ;
++
++/*Data struct associated with each of the tapdisk devices*/
++typedef struct tap_blkif {
++ struct vm_area_struct *vma; /*Shared memory area */
++ unsigned long rings_vstart; /*Kernel memory mapping */
++ unsigned long user_vstart; /*User memory mapping */
++ unsigned long dev_inuse; /*One process opens device at a time. */
++ unsigned long dev_pending; /*In process of being opened */
++ unsigned long ring_ok; /*make this ring->state */
++ blkif_front_ring_t ufe_ring; /*Rings up to user space. */
++ wait_queue_head_t wait; /*for poll */
++ unsigned long mode; /*current switching mode */
++ int minor; /*Minor number for tapdisk device */
++ pid_t pid; /*tapdisk process id */
++ enum { RUNNING, CLEANSHUTDOWN } status; /*Detect a clean userspace
++ shutdown */
++ unsigned long *idx_map; /*Record the user ring id to kern
++ [req id, idx] tuple */
++ blkif_t *blkif; /*Associate blkif with tapdev */
++ struct domid_translate trans; /*Translation from domid to bus. */
++} tap_blkif_t;
++
++static struct tap_blkif *tapfds[MAX_TAP_DEV];
++static int blktap_next_minor;
++
++static int __init set_blkif_reqs(char *str)
++{
++ get_option(&str, &blkif_reqs);
++ return 1;
++}
++__setup("blkif_reqs=", set_blkif_reqs);
++
++/* Run-time switchable: /sys/module/blktap/parameters/ */
++static unsigned int log_stats = 0;
++static unsigned int debug_lvl = 0;
++module_param(log_stats, int, 0644);
++module_param(debug_lvl, int, 0644);
++
++/*
++ * Each outstanding request that we've passed to the lower device layers has a
++ * 'pending_req' allocated to it. Each buffer_head that completes decrements
++ * the pendcnt towards zero. When it hits zero, the specified domain has a
++ * response queued for it, with the saved 'id' passed back.
++ */
++typedef struct {
++ blkif_t *blkif;
++ u64 id;
++ unsigned short mem_idx;
++ int nr_pages;
++ atomic_t pendcnt;
++ unsigned short operation;
++ int status;
++ struct list_head free_list;
++ int inuse;
++} pending_req_t;
++
++static pending_req_t *pending_reqs[MAX_PENDING_REQS];
++static struct list_head pending_free;
++static DEFINE_SPINLOCK(pending_free_lock);
++static DECLARE_WAIT_QUEUE_HEAD (pending_free_wq);
++static int alloc_pending_reqs;
++
++typedef unsigned int PEND_RING_IDX;
++
++static inline int MASK_PEND_IDX(int i) {
++ return (i & (MAX_PENDING_REQS-1));
++}
++
++static inline unsigned int RTN_PEND_IDX(pending_req_t *req, int idx) {
++ return (req - pending_reqs[idx]);
++}
++
++#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
++
++#define BLKBACK_INVALID_HANDLE (~0)
++
++static struct page **foreign_pages[MAX_DYNAMIC_MEM];
++static inline unsigned long idx_to_kaddr(
++ unsigned int mmap_idx, unsigned int req_idx, unsigned int sg_idx)
++{
++ unsigned int arr_idx = req_idx*BLKIF_MAX_SEGMENTS_PER_REQUEST + sg_idx;
++ unsigned long pfn = page_to_pfn(foreign_pages[mmap_idx][arr_idx]);
++ return (unsigned long)pfn_to_kaddr(pfn);
++}
++
++static unsigned short mmap_alloc = 0;
++static unsigned short mmap_lock = 0;
++static unsigned short mmap_inuse = 0;
++
++/******************************************************************
++ * GRANT HANDLES
++ */
++
++/* When using grant tables to map a frame for device access, the
++ * handle returned must be used to unmap the frame. This is needed to
++ * drop the ref count on the frame.
++ */
++struct grant_handle_pair
++{
++ grant_handle_t kernel;
++ grant_handle_t user;
++};
++#define INVALID_GRANT_HANDLE 0xFFFF
++
++static struct grant_handle_pair
++ pending_grant_handles[MAX_DYNAMIC_MEM][MMAP_PAGES];
++#define pending_handle(_id, _idx, _i) \
++ (pending_grant_handles[_id][((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) \
++ + (_i)])
++
++
++static int blktap_read_ufe_ring(tap_blkif_t *info); /*local prototypes*/
++
++#define BLKTAP_MINOR 0 /*/dev/xen/blktap has a dynamic major */
++#define BLKTAP_DEV_DIR "/dev/xen"
++
++static int blktap_major;
++
++/* blktap IOCTLs: */
++#define BLKTAP_IOCTL_KICK_FE 1
++#define BLKTAP_IOCTL_KICK_BE 2 /* currently unused */
++#define BLKTAP_IOCTL_SETMODE 3
++#define BLKTAP_IOCTL_SENDPID 4
++#define BLKTAP_IOCTL_NEWINTF 5
++#define BLKTAP_IOCTL_MINOR 6
++#define BLKTAP_IOCTL_MAJOR 7
++#define BLKTAP_QUERY_ALLOC_REQS 8
++#define BLKTAP_IOCTL_FREEINTF 9
++#define BLKTAP_IOCTL_PRINT_IDXS 100
++
++/* blktap switching modes: (Set with BLKTAP_IOCTL_SETMODE) */
++#define BLKTAP_MODE_PASSTHROUGH 0x00000000 /* default */
++#define BLKTAP_MODE_INTERCEPT_FE 0x00000001
++#define BLKTAP_MODE_INTERCEPT_BE 0x00000002 /* unimp. */
++
++#define BLKTAP_MODE_INTERPOSE \
++ (BLKTAP_MODE_INTERCEPT_FE | BLKTAP_MODE_INTERCEPT_BE)
++
++
++static inline int BLKTAP_MODE_VALID(unsigned long arg)
++{
++ return ((arg == BLKTAP_MODE_PASSTHROUGH ) ||
++ (arg == BLKTAP_MODE_INTERCEPT_FE) ||
++ (arg == BLKTAP_MODE_INTERPOSE ));
++}
++
++/* Requests passing through the tap to userspace are re-assigned an ID.
++ * We must record a mapping between the BE [IDX,ID] tuple and the userspace
++ * ring ID.
++ */
++
++static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx)
++{
++ return ((fe_dom << 16) | MASK_PEND_IDX(idx));
++}
++
++extern inline PEND_RING_IDX ID_TO_IDX(unsigned long id)
++{
++ return (PEND_RING_IDX)(id & 0x0000ffff);
++}
++
++extern inline int ID_TO_MIDX(unsigned long id)
++{
++ return (int)(id >> 16);
++}
++
++#define INVALID_REQ 0xdead0000
++
++/*TODO: Convert to a free list*/
++static inline int GET_NEXT_REQ(unsigned long *idx_map)
++{
++ int i;
++ for (i = 0; i < MAX_PENDING_REQS; i++)
++ if (idx_map[i] == INVALID_REQ)
++ return i;
++
++ return INVALID_REQ;
++}
++
++
++#define BLKTAP_INVALID_HANDLE(_g) \
++ (((_g->kernel) == INVALID_GRANT_HANDLE) && \
++ ((_g->user) == INVALID_GRANT_HANDLE))
++
++#define BLKTAP_INVALIDATE_HANDLE(_g) do { \
++ (_g)->kernel = INVALID_GRANT_HANDLE; (_g)->user = INVALID_GRANT_HANDLE; \
++ } while(0)
++
++
++/******************************************************************
++ * BLKTAP VM OPS
++ */
++
++static struct page *blktap_nopage(struct vm_area_struct *vma,
++ unsigned long address,
++ int *type)
++{
++ /*
++ * if the page has not been mapped in by the driver then return
++ * NOPAGE_SIGBUS to the domain.
++ */
++
++ return NOPAGE_SIGBUS;
++}
++
++struct vm_operations_struct blktap_vm_ops = {
++ nopage: blktap_nopage,
++};
++
++/******************************************************************
++ * BLKTAP FILE OPS
++ */
++
++/*Function Declarations*/
++static tap_blkif_t *get_next_free_dev(void);
++static int blktap_open(struct inode *inode, struct file *filp);
++static int blktap_release(struct inode *inode, struct file *filp);
++static int blktap_mmap(struct file *filp, struct vm_area_struct *vma);
++static int blktap_ioctl(struct inode *inode, struct file *filp,
++ unsigned int cmd, unsigned long arg);
++static unsigned int blktap_poll(struct file *file, poll_table *wait);
++
++static const struct file_operations blktap_fops = {
++ .owner = THIS_MODULE,
++ .poll = blktap_poll,
++ .ioctl = blktap_ioctl,
++ .open = blktap_open,
++ .release = blktap_release,
++ .mmap = blktap_mmap,
++};
++
++
++static tap_blkif_t *get_next_free_dev(void)
++{
++ struct class *class;
++ tap_blkif_t *info;
++ int minor;
++
++ /*
++ * This is called only from the ioctl, which
++ * means we should always have interrupts enabled.
++ */
++ BUG_ON(irqs_disabled());
++
++ spin_lock_irq(&pending_free_lock);
++
++ /* tapfds[0] is always NULL */
++
++ for (minor = 1; minor < blktap_next_minor; minor++) {
++ info = tapfds[minor];
++ /* we could have failed a previous attempt. */
++ if (!info ||
++ ((info->dev_inuse == 0) &&
++ (info->dev_pending == 0)) ) {
++ info->dev_pending = 1;
++ goto found;
++ }
++ }
++ info = NULL;
++ minor = -1;
++
++ /*
++	 * We didn't find a free device. If we can still allocate
++ * more, then we grab the next device minor that is
++ * available. This is done while we are still under
++ * the protection of the pending_free_lock.
++ */
++ if (blktap_next_minor < MAX_TAP_DEV)
++ minor = blktap_next_minor++;
++found:
++ spin_unlock_irq(&pending_free_lock);
++
++ if (!info && minor > 0) {
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (unlikely(!info)) {
++ /*
++ * If we failed here, try to put back
++ * the next minor number. But if one
++ * was just taken, then we just lose this
++ * minor. We can try to allocate this
++ * minor again later.
++ */
++ spin_lock_irq(&pending_free_lock);
++ if (blktap_next_minor == minor+1)
++ blktap_next_minor--;
++ spin_unlock_irq(&pending_free_lock);
++ goto out;
++ }
++
++ info->minor = minor;
++ /*
++ * Make sure that we have a minor before others can
++ * see us.
++ */
++ wmb();
++ tapfds[minor] = info;
++
++ if ((class = get_xen_class()) != NULL)
++ class_device_create(class, NULL,
++ MKDEV(blktap_major, minor), NULL,
++ "blktap%d", minor);
++ }
++
++out:
++ return info;
++}
++
++int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif)
++{
++ tap_blkif_t *info;
++ int i;
++
++ for (i = 1; i < blktap_next_minor; i++) {
++ info = tapfds[i];
++ if ( info &&
++ (info->trans.domid == domid) &&
++ (info->trans.busid == xenbus_id) ) {
++ info->blkif = blkif;
++ info->status = RUNNING;
++ return i;
++ }
++ }
++ return -1;
++}
++
++void signal_tapdisk(int idx)
++{
++ tap_blkif_t *info;
++ struct task_struct *ptask;
++
++ info = tapfds[idx];
++ if ((idx < 0) || (idx > MAX_TAP_DEV) || !info)
++ return;
++
++ if (info->pid > 0) {
++ ptask = find_task_by_pid(info->pid);
++ if (ptask)
++ info->status = CLEANSHUTDOWN;
++ }
++ info->blkif = NULL;
++
++ return;
++}
++
++static int blktap_open(struct inode *inode, struct file *filp)
++{
++ blkif_sring_t *sring;
++ int idx = iminor(inode) - BLKTAP_MINOR;
++ tap_blkif_t *info;
++ int i;
++
++ /* ctrl device, treat differently */
++ if (!idx)
++ return 0;
++
++ info = tapfds[idx];
++
++ if ((idx < 0) || (idx > MAX_TAP_DEV) || !info) {
++ WPRINTK("Unable to open device /dev/xen/blktap%d\n",
++ idx);
++ return -ENODEV;
++ }
++
++ DPRINTK("Opening device /dev/xen/blktap%d\n",idx);
++
++ /*Only one process can access device at a time*/
++ if (test_and_set_bit(0, &info->dev_inuse))
++ return -EBUSY;
++
++ info->dev_pending = 0;
++
++ /* Allocate the fe ring. */
++ sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
++ if (sring == NULL)
++ goto fail_nomem;
++
++ SetPageReserved(virt_to_page(sring));
++
++ SHARED_RING_INIT(sring);
++ FRONT_RING_INIT(&info->ufe_ring, sring, PAGE_SIZE);
++
++ filp->private_data = info;
++ info->vma = NULL;
++
++ info->idx_map = kmalloc(sizeof(unsigned long) * MAX_PENDING_REQS,
++ GFP_KERNEL);
++
++ if (idx > 0) {
++ init_waitqueue_head(&info->wait);
++ for (i = 0; i < MAX_PENDING_REQS; i++)
++ info->idx_map[i] = INVALID_REQ;
++ }
++
++ DPRINTK("Tap open: device /dev/xen/blktap%d\n",idx);
++ return 0;
++
++ fail_nomem:
++ return -ENOMEM;
++}
++
++static int blktap_release(struct inode *inode, struct file *filp)
++{
++ tap_blkif_t *info = filp->private_data;
++
++ /* check for control device */
++ if (!info)
++ return 0;
++
++ info->dev_inuse = 0;
++ DPRINTK("Freeing device [/dev/xen/blktap%d]\n",info->minor);
++
++ /* Free the ring page. */
++ ClearPageReserved(virt_to_page(info->ufe_ring.sring));
++ free_page((unsigned long) info->ufe_ring.sring);
++
++ /* Clear any active mappings and free foreign map table */
++ if (info->vma) {
++ zap_page_range(
++ info->vma, info->vma->vm_start,
++ info->vma->vm_end - info->vma->vm_start, NULL);
++ info->vma = NULL;
++ }
++
++ if ( (info->status != CLEANSHUTDOWN) && (info->blkif != NULL) ) {
++ if (info->blkif->xenblkd != NULL) {
++ kthread_stop(info->blkif->xenblkd);
++ info->blkif->xenblkd = NULL;
++ }
++ info->status = CLEANSHUTDOWN;
++ }
++ return 0;
++}
++
++
++/* Note on mmap:
++ * We need to map pages to user space in a way that will allow the block
++ * subsystem to set up direct IO to them. This couldn't be done before, because
++ * there isn't really a sane way to translate a user virtual address down to a
++ * physical address when the page belongs to another domain.
++ *
++ * My first approach was to map the page in to kernel memory, add an entry
++ * for it in the physical frame list (using alloc_lomem_region as in blkback)
++ * and then attempt to map that page up to user space. This is disallowed
++ * by xen though, which realizes that we don't really own the machine frame
++ * underlying the physical page.
++ *
++ * The new approach is to provide explicit support for this in xen linux.
++ * The VMA now has a flag, VM_FOREIGN, to indicate that it contains pages
++ * mapped from other vms. vma->vm_private_data is set up as a mapping
++ * from pages to actual page structs. There is a new clause in get_user_pages
++ * that does the right thing for this sort of mapping.
++ */
++static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++ int size;
++ struct page **map;
++ int i;
++ tap_blkif_t *info = filp->private_data;
++
++ if (info == NULL) {
++ WPRINTK("blktap: mmap, retrieving idx failed\n");
++ return -ENOMEM;
++ }
++
++ vma->vm_flags |= VM_RESERVED;
++ vma->vm_ops = &blktap_vm_ops;
++
++ size = vma->vm_end - vma->vm_start;
++ if (size != ((mmap_pages + RING_PAGES) << PAGE_SHIFT)) {
++ WPRINTK("you _must_ map exactly %d pages!\n",
++ mmap_pages + RING_PAGES);
++ return -EAGAIN;
++ }
++
++ size >>= PAGE_SHIFT;
++ info->rings_vstart = vma->vm_start;
++ info->user_vstart = info->rings_vstart + (RING_PAGES << PAGE_SHIFT);
++
++ /* Map the ring pages to the start of the region and reserve it. */
++ if (remap_pfn_range(vma, vma->vm_start,
++ __pa(info->ufe_ring.sring) >> PAGE_SHIFT,
++ PAGE_SIZE, vma->vm_page_prot)) {
++ WPRINTK("Mapping user ring failed!\n");
++ goto fail;
++ }
++
++ /* Mark this VM as containing foreign pages, and set up mappings. */
++ map = kzalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
++ * sizeof(struct page_struct*),
++ GFP_KERNEL);
++ if (map == NULL) {
++ WPRINTK("Couldn't alloc VM_FOREIGN map.\n");
++ goto fail;
++ }
++
++ for (i = 0; i < ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
++ map[i] = NULL;
++
++ vma->vm_private_data = map;
++ vma->vm_flags |= VM_FOREIGN;
++
++ info->vma = vma;
++ info->ring_ok = 1;
++ return 0;
++ fail:
++ /* Clear any active mappings. */
++ zap_page_range(vma, vma->vm_start,
++ vma->vm_end - vma->vm_start, NULL);
++
++ return -ENOMEM;
++}
++
++
++static int blktap_ioctl(struct inode *inode, struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ tap_blkif_t *info = filp->private_data;
++
++ switch(cmd) {
++ case BLKTAP_IOCTL_KICK_FE:
++ {
++ /* There are fe messages to process. */
++ return blktap_read_ufe_ring(info);
++ }
++ case BLKTAP_IOCTL_SETMODE:
++ {
++ if (info) {
++ if (BLKTAP_MODE_VALID(arg)) {
++ info->mode = arg;
++ /* XXX: may need to flush rings here. */
++ DPRINTK("blktap: set mode to %lx\n",
++ arg);
++ return 0;
++ }
++ }
++ return 0;
++ }
++ case BLKTAP_IOCTL_PRINT_IDXS:
++ {
++ if (info) {
++ printk("User Rings: \n-----------\n");
++ printk("UF: rsp_cons: %2d, req_prod_prv: %2d "
++ "| req_prod: %2d, rsp_prod: %2d\n",
++ info->ufe_ring.rsp_cons,
++ info->ufe_ring.req_prod_pvt,
++ info->ufe_ring.sring->req_prod,
++ info->ufe_ring.sring->rsp_prod);
++ }
++ return 0;
++ }
++ case BLKTAP_IOCTL_SENDPID:
++ {
++ if (info) {
++ info->pid = (pid_t)arg;
++ DPRINTK("blktap: pid received %d\n",
++ info->pid);
++ }
++ return 0;
++ }
++ case BLKTAP_IOCTL_NEWINTF:
++ {
++ uint64_t val = (uint64_t)arg;
++ domid_translate_t *tr = (domid_translate_t *)&val;
++
++ DPRINTK("NEWINTF Req for domid %d and bus id %d\n",
++ tr->domid, tr->busid);
++ info = get_next_free_dev();
++ if (!info) {
++ WPRINTK("Error initialising /dev/xen/blktap - "
++ "No more devices\n");
++ return -1;
++ }
++ info->trans.domid = tr->domid;
++ info->trans.busid = tr->busid;
++ return info->minor;
++ }
++ case BLKTAP_IOCTL_FREEINTF:
++ {
++ unsigned long dev = arg;
++ unsigned long flags;
++
++ info = tapfds[dev];
++
++ if ((dev > MAX_TAP_DEV) || !info)
++ return 0; /* should this be an error? */
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++ if (info->dev_pending)
++ info->dev_pending = 0;
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++
++ return 0;
++ }
++ case BLKTAP_IOCTL_MINOR:
++ {
++ unsigned long dev = arg;
++
++ info = tapfds[dev];
++
++ if ((dev > MAX_TAP_DEV) || !info)
++ return -EINVAL;
++
++ return info->minor;
++ }
++ case BLKTAP_IOCTL_MAJOR:
++ return blktap_major;
++
++ case BLKTAP_QUERY_ALLOC_REQS:
++ {
++ WPRINTK("BLKTAP_QUERY_ALLOC_REQS ioctl: %d/%d\n",
++ alloc_pending_reqs, blkif_reqs);
++ return (alloc_pending_reqs/blkif_reqs) * 100;
++ }
++ }
++ return -ENOIOCTLCMD;
++}
++
++static unsigned int blktap_poll(struct file *filp, poll_table *wait)
++{
++ tap_blkif_t *info = filp->private_data;
++
++ /* do not work on the control device */
++ if (!info)
++ return 0;
++
++ poll_wait(filp, &info->wait, wait);
++ if (info->ufe_ring.req_prod_pvt != info->ufe_ring.sring->req_prod) {
++ RING_PUSH_REQUESTS(&info->ufe_ring);
++ return POLLIN | POLLRDNORM;
++ }
++ return 0;
++}
++
++void blktap_kick_user(int idx)
++{
++ tap_blkif_t *info;
++
++ info = tapfds[idx];
++
++ if ((idx < 0) || (idx > MAX_TAP_DEV) || !info)
++ return;
++
++ wake_up_interruptible(&info->wait);
++
++ return;
++}
++
++static int do_block_io_op(blkif_t *blkif);
++static void dispatch_rw_block_io(blkif_t *blkif,
++ blkif_request_t *req,
++ pending_req_t *pending_req);
++static void make_response(blkif_t *blkif, u64 id,
++ unsigned short op, int st);
++
++/******************************************************************
++ * misc small helpers
++ */
++static int req_increase(void)
++{
++ int i, j;
++
++ if (mmap_alloc >= MAX_PENDING_REQS || mmap_lock)
++ return -EINVAL;
++
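++	/*
++	 * Pending requests are allocated in batches: each call adds one
++	 * more batch of blkif_reqs request descriptors plus a matching
++	 * vector of empty foreign pages, both indexed by mmap_alloc.
++	 */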
++ pending_reqs[mmap_alloc] = kzalloc(sizeof(pending_req_t)
++ * blkif_reqs, GFP_KERNEL);
++ foreign_pages[mmap_alloc] = alloc_empty_pages_and_pagevec(mmap_pages);
++
++ if (!pending_reqs[mmap_alloc] || !foreign_pages[mmap_alloc])
++ goto out_of_memory;
++
++ DPRINTK("%s: reqs=%d, pages=%d\n",
++ __FUNCTION__, blkif_reqs, mmap_pages);
++
++ for (i = 0; i < MAX_PENDING_REQS; i++) {
++ list_add_tail(&pending_reqs[mmap_alloc][i].free_list,
++ &pending_free);
++ pending_reqs[mmap_alloc][i].mem_idx = mmap_alloc;
++ for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
++ BLKTAP_INVALIDATE_HANDLE(&pending_handle(mmap_alloc,
++ i, j));
++ }
++
++ mmap_alloc++;
++ DPRINTK("# MMAPs increased to %d\n",mmap_alloc);
++ return 0;
++
++ out_of_memory:
++ free_empty_pages_and_pagevec(foreign_pages[mmap_alloc], mmap_pages);
++ kfree(pending_reqs[mmap_alloc]);
++ WPRINTK("%s: out of memory\n", __FUNCTION__);
++ return -ENOMEM;
++}
++
++static void mmap_req_del(int mmap)
++{
++ BUG_ON(!spin_is_locked(&pending_free_lock));
++
++ kfree(pending_reqs[mmap]);
++ pending_reqs[mmap] = NULL;
++
++	free_empty_pages_and_pagevec(foreign_pages[mmap], mmap_pages);
++ foreign_pages[mmap] = NULL;
++
++	mmap_lock = 0;
++	mmap_alloc--;
++	DPRINTK("# MMAPs decreased to %d\n", mmap_alloc);
++}
++
++static pending_req_t* alloc_req(void)
++{
++ pending_req_t *req = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++
++ if (!list_empty(&pending_free)) {
++ req = list_entry(pending_free.next, pending_req_t, free_list);
++ list_del(&req->free_list);
++ }
++
++ if (req) {
++ req->inuse = 1;
++ alloc_pending_reqs++;
++ }
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++
++ return req;
++}
++
++static void free_req(pending_req_t *req)
++{
++ unsigned long flags;
++ int was_empty;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++
++ alloc_pending_reqs--;
++ req->inuse = 0;
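++	/*
++	 * If a batch teardown is in progress (mmap_lock) and this request
++	 * belongs to the newest batch, don't recycle it; just drain
++	 * mmap_inuse and release the whole batch once it reaches zero.
++	 */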
++ if (mmap_lock && (req->mem_idx == mmap_alloc-1)) {
++ mmap_inuse--;
++ if (mmap_inuse == 0) mmap_req_del(mmap_alloc-1);
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++ return;
++ }
++ was_empty = list_empty(&pending_free);
++ list_add(&req->free_list, &pending_free);
++
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++
++ if (was_empty)
++ wake_up(&pending_free_wq);
++}
++
++static void fast_flush_area(pending_req_t *req, int k_idx, int u_idx,
++ int tapidx)
++{
++ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
++ unsigned int i, invcount = 0;
++ struct grant_handle_pair *khandle;
++ uint64_t ptep;
++ int ret, mmap_idx;
++ unsigned long kvaddr, uvaddr;
++ tap_blkif_t *info;
++
++
++	if ((tapidx < 0) || (tapidx > MAX_TAP_DEV)) {
++		WPRINTK("fast_flush: Couldn't get info!\n");
++		return;
++	}
++
++	info = tapfds[tapidx];
++
++	if (!info) {
++ WPRINTK("fast_flush: Couldn't get info!\n");
++ return;
++ }
++
++ if (info->vma != NULL &&
++ xen_feature(XENFEAT_auto_translated_physmap)) {
++ down_write(&info->vma->vm_mm->mmap_sem);
++ zap_page_range(info->vma,
++ MMAP_VADDR(info->user_vstart, u_idx, 0),
++ req->nr_pages << PAGE_SHIFT, NULL);
++ up_write(&info->vma->vm_mm->mmap_sem);
++ }
++
++ mmap_idx = req->mem_idx;
++
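++	/*
++	 * Each page may carry up to two grant handles -- one for the
++	 * kernel mapping and one for the user-space mapping -- so build
++	 * an unmap op for whichever handles are still valid.
++	 */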
++ for (i = 0; i < req->nr_pages; i++) {
++ kvaddr = idx_to_kaddr(mmap_idx, k_idx, i);
++ uvaddr = MMAP_VADDR(info->user_vstart, u_idx, i);
++
++ khandle = &pending_handle(mmap_idx, k_idx, i);
++
++ if (khandle->kernel != INVALID_GRANT_HANDLE) {
++ gnttab_set_unmap_op(&unmap[invcount],
++ idx_to_kaddr(mmap_idx, k_idx, i),
++ GNTMAP_host_map, khandle->kernel);
++ invcount++;
++ }
++
++ if (khandle->user != INVALID_GRANT_HANDLE) {
++ BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
++ if (create_lookup_pte_addr(
++ info->vma->vm_mm,
++ MMAP_VADDR(info->user_vstart, u_idx, i),
++ &ptep) !=0) {
++ WPRINTK("Couldn't get a pte addr!\n");
++ return;
++ }
++
++ gnttab_set_unmap_op(&unmap[invcount], ptep,
++ GNTMAP_host_map
++ | GNTMAP_application_map
++ | GNTMAP_contains_pte,
++ khandle->user);
++ invcount++;
++ }
++
++ BLKTAP_INVALIDATE_HANDLE(khandle);
++ }
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, unmap, invcount);
++ BUG_ON(ret);
++
++ if (info->vma != NULL && !xen_feature(XENFEAT_auto_translated_physmap))
++ zap_page_range(info->vma,
++ MMAP_VADDR(info->user_vstart, u_idx, 0),
++ req->nr_pages << PAGE_SHIFT, NULL);
++}
++
++/******************************************************************
++ * SCHEDULER FUNCTIONS
++ */
++
++static void print_stats(blkif_t *blkif)
++{
++ printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d\n",
++ current->comm, blkif->st_oo_req,
++ blkif->st_rd_req, blkif->st_wr_req);
++ blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
++ blkif->st_rd_req = 0;
++ blkif->st_wr_req = 0;
++ blkif->st_oo_req = 0;
++}
++
++int tap_blkif_schedule(void *arg)
++{
++ blkif_t *blkif = arg;
++
++ blkif_get(blkif);
++
++ if (debug_lvl)
++ printk(KERN_DEBUG "%s: started\n", current->comm);
++
++ while (!kthread_should_stop()) {
++ wait_event_interruptible(
++ blkif->wq,
++ blkif->waiting_reqs || kthread_should_stop());
++ wait_event_interruptible(
++ pending_free_wq,
++ !list_empty(&pending_free) || kthread_should_stop());
++
++ blkif->waiting_reqs = 0;
++ smp_mb(); /* clear flag *before* checking for work */
++
++ if (do_block_io_op(blkif))
++ blkif->waiting_reqs = 1;
++
++ if (log_stats && time_after(jiffies, blkif->st_print))
++ print_stats(blkif);
++ }
++
++ if (log_stats)
++ print_stats(blkif);
++ if (debug_lvl)
++ printk(KERN_DEBUG "%s: exiting\n", current->comm);
++
++ blkif->xenblkd = NULL;
++ blkif_put(blkif);
++
++ return 0;
++}
++
++/******************************************************************
++ * COMPLETION CALLBACK -- Called by user level ioctl()
++ */
++
++static int blktap_read_ufe_ring(tap_blkif_t *info)
++{
++ /* This is called to read responses from the UFE ring. */
++ RING_IDX i, j, rp;
++ blkif_response_t *resp;
++ blkif_t *blkif=NULL;
++ int pending_idx, usr_idx, mmap_idx;
++ pending_req_t *pending_req;
++
++ if (!info)
++ return 0;
++
++ /* We currently only forward packets in INTERCEPT_FE mode. */
++ if (!(info->mode & BLKTAP_MODE_INTERCEPT_FE))
++ return 0;
++
++ /* for each outstanding message on the UFEring */
++ rp = info->ufe_ring.sring->rsp_prod;
++ rmb();
++
++ for (i = info->ufe_ring.rsp_cons; i != rp; i++) {
++ blkif_response_t res;
++ resp = RING_GET_RESPONSE(&info->ufe_ring, i);
++ memcpy(&res, resp, sizeof(res));
++ mb(); /* rsp_cons read by RING_FULL() in do_block_io_op(). */
++ ++info->ufe_ring.rsp_cons;
++
++ /*retrieve [usr_idx] to [mmap_idx,pending_idx] mapping*/
++ usr_idx = (int)res.id;
++ pending_idx = MASK_PEND_IDX(ID_TO_IDX(info->idx_map[usr_idx]));
++ mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]);
++
++ if ( (mmap_idx >= mmap_alloc) ||
++ (ID_TO_IDX(info->idx_map[usr_idx]) >= MAX_PENDING_REQS) )
++ WPRINTK("Incorrect req map"
++ "[%d], internal map [%d,%d (%d)]\n",
++ usr_idx, mmap_idx,
++ ID_TO_IDX(info->idx_map[usr_idx]),
++ MASK_PEND_IDX(
++ ID_TO_IDX(info->idx_map[usr_idx])));
++
++ pending_req = &pending_reqs[mmap_idx][pending_idx];
++ blkif = pending_req->blkif;
++
++ for (j = 0; j < pending_req->nr_pages; j++) {
++
++ unsigned long kvaddr, uvaddr;
++ struct page **map = info->vma->vm_private_data;
++ struct page *pg;
++ int offset;
++
++ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, j);
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, j);
++
++ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++ ClearPageReserved(pg);
++ offset = (uvaddr - info->vma->vm_start)
++ >> PAGE_SHIFT;
++ map[offset] = NULL;
++ }
++ fast_flush_area(pending_req, pending_idx, usr_idx, info->minor);
++ info->idx_map[usr_idx] = INVALID_REQ;
++ make_response(blkif, pending_req->id, res.operation,
++ res.status);
++ blkif_put(pending_req->blkif);
++ free_req(pending_req);
++ }
++
++ return 0;
++}
++
++
++/******************************************************************************
++ * NOTIFICATION FROM GUEST OS.
++ */
++
++static void blkif_notify_work(blkif_t *blkif)
++{
++ blkif->waiting_reqs = 1;
++ wake_up(&blkif->wq);
++}
++
++irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++ blkif_notify_work(dev_id);
++ return IRQ_HANDLED;
++}
++
++
++
++/******************************************************************
++ * DOWNWARD CALLS -- These interface with the block-device layer proper.
++ */
++static int print_dbug = 1;
++static int do_block_io_op(blkif_t *blkif)
++{
++ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
++ blkif_request_t req;
++ pending_req_t *pending_req;
++ RING_IDX rc, rp;
++ int more_to_do = 0;
++ tap_blkif_t *info;
++
++ rc = blk_rings->common.req_cons;
++ rp = blk_rings->common.sring->req_prod;
++ rmb(); /* Ensure we see queued requests up to 'rp'. */
++
++ /*Check blkif has corresponding UE ring*/
++ if (blkif->dev_num < 0) {
++ /*oops*/
++ if (print_dbug) {
++ WPRINTK("Corresponding UE "
++ "ring does not exist!\n");
++ print_dbug = 0; /*We only print this message once*/
++ }
++ return 0;
++ }
++
++	info = (blkif->dev_num > MAX_TAP_DEV) ? NULL : tapfds[blkif->dev_num];
++
++	if (!info || !info->dev_inuse) {
++ if (print_dbug) {
++ WPRINTK("Can't get UE info!\n");
++ print_dbug = 0;
++ }
++ return 0;
++ }
++
++ while (rc != rp) {
++
++ if (RING_FULL(&info->ufe_ring)) {
++ WPRINTK("RING_FULL! More to do\n");
++ more_to_do = 1;
++ break;
++ }
++
++ if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) {
++ WPRINTK("RING_REQUEST_CONS_OVERFLOW!"
++ " More to do\n");
++ more_to_do = 1;
++ break;
++ }
++
++ pending_req = alloc_req();
++ if (NULL == pending_req) {
++ blkif->st_oo_req++;
++ more_to_do = 1;
++ break;
++ }
++
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
++ sizeof(req));
++ break;
++ case BLKIF_PROTOCOL_X86_32:
++ blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
++ break;
++ case BLKIF_PROTOCOL_X86_64:
++ blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
++ break;
++ default:
++ BUG();
++ }
++ blk_rings->common.req_cons = ++rc; /* before make_response() */
++
++ switch (req.operation) {
++ case BLKIF_OP_READ:
++ blkif->st_rd_req++;
++ dispatch_rw_block_io(blkif, &req, pending_req);
++ break;
++
++ case BLKIF_OP_WRITE:
++ blkif->st_wr_req++;
++ dispatch_rw_block_io(blkif, &req, pending_req);
++ break;
++
++ default:
++ WPRINTK("unknown operation [%d]\n",
++ req.operation);
++ make_response(blkif, req.id, req.operation,
++ BLKIF_RSP_ERROR);
++ free_req(pending_req);
++ break;
++ }
++ }
++
++ blktap_kick_user(blkif->dev_num);
++
++ return more_to_do;
++}
++
++static void dispatch_rw_block_io(blkif_t *blkif,
++ blkif_request_t *req,
++ pending_req_t *pending_req)
++{
++ extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
++ int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
++ struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
++ unsigned int nseg;
++ int ret, i, nr_sects = 0;
++ tap_blkif_t *info;
++ uint64_t sector;
++ blkif_request_t *target;
++ int pending_idx = RTN_PEND_IDX(pending_req,pending_req->mem_idx);
++ int usr_idx;
++ uint16_t mmap_idx = pending_req->mem_idx;
++
++ if (blkif->dev_num < 0 || blkif->dev_num > MAX_TAP_DEV)
++ goto fail_response;
++
++ info = tapfds[blkif->dev_num];
++ if (info == NULL)
++ goto fail_response;
++
++ /* Check we have space on user ring - should never fail. */
++ usr_idx = GET_NEXT_REQ(info->idx_map);
++ if (usr_idx == INVALID_REQ) {
++ BUG();
++ goto fail_response;
++ }
++
++ /* Check that number of segments is sane. */
++ nseg = req->nr_segments;
++ if ( unlikely(nseg == 0) ||
++ unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) ) {
++ WPRINTK("Bad number of segments in request (%d)\n", nseg);
++ goto fail_response;
++ }
++
++ /* Make sure userspace is ready. */
++ if (!info->ring_ok) {
++ WPRINTK("blktap: ring not ready for requests!\n");
++ goto fail_response;
++ }
++
++ if (RING_FULL(&info->ufe_ring)) {
++		WPRINTK("blktap: fe_ring is full, can't add; "
++			"IO request will be dropped. %d %d\n",
++ RING_SIZE(&info->ufe_ring),
++ RING_SIZE(&blkif->blk_rings.common));
++ goto fail_response;
++ }
++
++ pending_req->blkif = blkif;
++ pending_req->id = req->id;
++ pending_req->operation = operation;
++ pending_req->status = BLKIF_RSP_OKAY;
++ pending_req->nr_pages = nseg;
++ op = 0;
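++	/*
++	 * For each segment queue a grant-map op for the kernel mapping;
++	 * on non-auto-translated guests also queue a second op that maps
++	 * the same grant into the user-space PTE, so map[] ends up with
++	 * up to two entries per segment.
++	 */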
++ for (i = 0; i < nseg; i++) {
++ unsigned long uvaddr;
++ unsigned long kvaddr;
++ uint64_t ptep;
++ uint32_t flags;
++
++ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
++
++ sector = req->sector_number + ((PAGE_SIZE / 512) * i);
++ if( (blkif->sectors > 0) && (sector >= blkif->sectors) ) {
++			WPRINTK("BLKTAP: Sector request greater "
++				"than size\n");
++			WPRINTK("BLKTAP: %s request sector "
++				"[%llu,%llu], Total [%llu]\n",
++ (req->operation ==
++ BLKIF_OP_WRITE ? "WRITE" : "READ"),
++ (long long unsigned) sector,
++ (long long unsigned) sector>>9,
++ (long long unsigned) blkif->sectors);
++ }
++
++ flags = GNTMAP_host_map;
++ if (operation == WRITE)
++ flags |= GNTMAP_readonly;
++ gnttab_set_map_op(&map[op], kvaddr, flags,
++ req->seg[i].gref, blkif->domid);
++ op++;
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Now map it to user. */
++ ret = create_lookup_pte_addr(info->vma->vm_mm,
++ uvaddr, &ptep);
++ if (ret) {
++ WPRINTK("Couldn't get a pte addr!\n");
++ goto fail_flush;
++ }
++
++ flags = GNTMAP_host_map | GNTMAP_application_map
++ | GNTMAP_contains_pte;
++ if (operation == WRITE)
++ flags |= GNTMAP_readonly;
++ gnttab_set_map_op(&map[op], ptep, flags,
++ req->seg[i].gref, blkif->domid);
++ op++;
++ }
++
++ nr_sects += (req->seg[i].last_sect -
++ req->seg[i].first_sect + 1);
++ }
++
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, op);
++ BUG_ON(ret);
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
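++		/*
++		 * map[] entries come in kernel/user pairs: even indices
++		 * are the kernel mappings, odd indices the user mappings.
++		 */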
++ for (i = 0; i < (nseg*2); i+=2) {
++ unsigned long uvaddr;
++ unsigned long kvaddr;
++ unsigned long offset;
++ struct page *pg;
++
++ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i/2);
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i/2);
++
++ if (unlikely(map[i].status != 0)) {
++ WPRINTK("invalid kernel buffer -- "
++ "could not remap it\n");
++ ret |= 1;
++ map[i].handle = INVALID_GRANT_HANDLE;
++ }
++
++ if (unlikely(map[i+1].status != 0)) {
++ WPRINTK("invalid user buffer -- "
++ "could not remap it\n");
++ ret |= 1;
++ map[i+1].handle = INVALID_GRANT_HANDLE;
++ }
++
++ pending_handle(mmap_idx, pending_idx, i/2).kernel
++ = map[i].handle;
++ pending_handle(mmap_idx, pending_idx, i/2).user
++ = map[i+1].handle;
++
++ if (ret)
++ continue;
++
++ set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
++ FOREIGN_FRAME(map[i].dev_bus_addr
++ >> PAGE_SHIFT));
++ offset = (uvaddr - info->vma->vm_start) >> PAGE_SHIFT;
++ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++ ((struct page **)info->vma->vm_private_data)[offset] =
++ pg;
++ }
++ } else {
++ for (i = 0; i < nseg; i++) {
++ unsigned long uvaddr;
++ unsigned long kvaddr;
++ unsigned long offset;
++ struct page *pg;
++
++ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
++
++ if (unlikely(map[i].status != 0)) {
++ WPRINTK("invalid kernel buffer -- "
++ "could not remap it\n");
++ ret |= 1;
++ map[i].handle = INVALID_GRANT_HANDLE;
++ }
++
++ pending_handle(mmap_idx, pending_idx, i).kernel
++ = map[i].handle;
++
++ if (ret)
++ continue;
++
++ offset = (uvaddr - info->vma->vm_start) >> PAGE_SHIFT;
++ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++ ((struct page **)info->vma->vm_private_data)[offset] =
++ pg;
++ }
++ }
++
++ if (ret)
++ goto fail_flush;
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ down_write(&info->vma->vm_mm->mmap_sem);
++ /* Mark mapped pages as reserved: */
++ for (i = 0; i < req->nr_segments; i++) {
++ unsigned long kvaddr;
++ struct page *pg;
++
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
++ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++ SetPageReserved(pg);
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ ret = vm_insert_page(info->vma,
++ MMAP_VADDR(info->user_vstart,
++ usr_idx, i), pg);
++ if (ret) {
++ up_write(&info->vma->vm_mm->mmap_sem);
++ goto fail_flush;
++ }
++ }
++ }
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ up_write(&info->vma->vm_mm->mmap_sem);
++
++ /*record [mmap_idx,pending_idx] to [usr_idx] mapping*/
++ info->idx_map[usr_idx] = MAKE_ID(mmap_idx, pending_idx);
++
++ blkif_get(blkif);
++ /* Finally, write the request message to the user ring. */
++ target = RING_GET_REQUEST(&info->ufe_ring,
++ info->ufe_ring.req_prod_pvt);
++ memcpy(target, req, sizeof(*req));
++ target->id = usr_idx;
++ wmb(); /* blktap_poll() reads req_prod_pvt asynchronously */
++ info->ufe_ring.req_prod_pvt++;
++
++ if (operation == READ)
++ blkif->st_rd_sect += nr_sects;
++ else if (operation == WRITE)
++ blkif->st_wr_sect += nr_sects;
++
++ return;
++
++ fail_flush:
++ WPRINTK("Reached Fail_flush\n");
++ fast_flush_area(pending_req, pending_idx, usr_idx, blkif->dev_num);
++ fail_response:
++ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
++ free_req(pending_req);
++}
++
++
++
++/******************************************************************
++ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
++ */
++
++
++static void make_response(blkif_t *blkif, u64 id,
++ unsigned short op, int st)
++{
++ blkif_response_t resp;
++ unsigned long flags;
++ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
++ int more_to_do = 0;
++ int notify;
++
++ resp.id = id;
++ resp.operation = op;
++ resp.status = st;
++
++ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
++ /* Place on the response ring for the relevant domain. */
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ memcpy(RING_GET_RESPONSE(&blk_rings->native,
++ blk_rings->native.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ case BLKIF_PROTOCOL_X86_32:
++ memcpy(RING_GET_RESPONSE(&blk_rings->x86_32,
++ blk_rings->x86_32.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ case BLKIF_PROTOCOL_X86_64:
++ memcpy(RING_GET_RESPONSE(&blk_rings->x86_64,
++ blk_rings->x86_64.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ default:
++ BUG();
++ }
++ blk_rings->common.rsp_prod_pvt++;
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
++
++ if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
++ /*
++ * Tail check for pending requests. Allows frontend to avoid
++ * notifications if requests are already in flight (lower
++ * overheads and promotes batching).
++ */
++ RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
++ } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
++ more_to_do = 1;
++ }
++
++ spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
++ if (more_to_do)
++ blkif_notify_work(blkif);
++ if (notify)
++ notify_remote_via_irq(blkif->irq);
++}
++
++static int __init blkif_init(void)
++{
++ int i, ret;
++ struct class *class;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ INIT_LIST_HEAD(&pending_free);
++ for(i = 0; i < 2; i++) {
++ ret = req_increase();
++ if (ret)
++ break;
++ }
++ if (i == 0)
++ return ret;
++
++ tap_blkif_interface_init();
++
++ alloc_pending_reqs = 0;
++
++ tap_blkif_xenbus_init();
++
++ /* Dynamically allocate a major for this device */
++ ret = register_chrdev(0, "blktap", &blktap_fops);
++
++ if (ret < 0) {
++ WPRINTK("Couldn't register /dev/xen/blktap\n");
++ return -ENOMEM;
++ }
++
++ blktap_major = ret;
++
++ /* tapfds[0] is always NULL */
++ blktap_next_minor++;
++
++ DPRINTK("Created misc_dev [/dev/xen/blktap%d]\n",i);
++
++ /* Make sure the xen class exists */
++ if ((class = get_xen_class()) != NULL) {
++ /*
++ * This will allow udev to create the blktap ctrl device.
++ * We only want to create blktap0 first. We don't want
++ * to flood the sysfs system with needless blktap devices.
++		 * We only create the device when a request for a new device is
++ * made.
++ */
++ class_device_create(class, NULL,
++ MKDEV(blktap_major, 0), NULL,
++ "blktap0");
++ } else {
++ /* this is bad, but not fatal */
++ WPRINTK("blktap: sysfs xen_class not created\n");
++ }
++
++ DPRINTK("Blktap device successfully created\n");
++
++ return 0;
++}
++
++module_init(blkif_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/blktap/common.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,121 @@
++/*
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __BLKIF__BACKEND__COMMON_H__
++#define __BLKIF__BACKEND__COMMON_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/vmalloc.h>
++#include <asm/io.h>
++#include <asm/setup.h>
++#include <asm/pgalloc.h>
++#include <xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <xen/blkif.h>
++#include <xen/gnttab.h>
++#include <xen/driver_util.h>
++
++#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__ , __LINE__ , ## _a )
++
++#define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
++
++struct backend_info;
++
++typedef struct blkif_st {
++ /* Unique identifier for this interface. */
++ domid_t domid;
++ unsigned int handle;
++ /* Physical parameters of the comms window. */
++ unsigned int irq;
++ /* Comms information. */
++ enum blkif_protocol blk_protocol;
++ blkif_back_rings_t blk_rings;
++ struct vm_struct *blk_ring_area;
++ /* Back pointer to the backend_info. */
++ struct backend_info *be;
++ /* Private fields. */
++ spinlock_t blk_ring_lock;
++ atomic_t refcnt;
++
++ wait_queue_head_t wq;
++ struct task_struct *xenblkd;
++ unsigned int waiting_reqs;
++ request_queue_t *plug;
++
++ /* statistics */
++ unsigned long st_print;
++ int st_rd_req;
++ int st_wr_req;
++ int st_oo_req;
++ int st_rd_sect;
++ int st_wr_sect;
++
++ wait_queue_head_t waiting_to_free;
++
++ grant_handle_t shmem_handle;
++ grant_ref_t shmem_ref;
++
++ int dev_num;
++ uint64_t sectors;
++} blkif_t;
++
++blkif_t *tap_alloc_blkif(domid_t domid);
++void tap_blkif_free(blkif_t *blkif);
++int tap_blkif_map(blkif_t *blkif, unsigned long shared_page,
++ unsigned int evtchn);
++void tap_blkif_unmap(blkif_t *blkif);
++
++#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define blkif_put(_b) \
++ do { \
++ if (atomic_dec_and_test(&(_b)->refcnt)) \
++ wake_up(&(_b)->waiting_to_free);\
++ } while (0)
++
++
++struct phys_req {
++ unsigned short dev;
++ unsigned short nr_sects;
++ struct block_device *bdev;
++ blkif_sector_t sector_number;
++};
++
++void tap_blkif_interface_init(void);
++
++void tap_blkif_xenbus_init(void);
++
++irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++int tap_blkif_schedule(void *arg);
++
++int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif);
++void signal_tapdisk(int idx);
++
++#endif /* __BLKIF__BACKEND__COMMON_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/blktap/interface.c 2007-08-27 14:02:01.000000000 -0400
+@@ -0,0 +1,174 @@
++/******************************************************************************
++ * drivers/xen/blktap/interface.c
++ *
++ * Block-device interface management.
++ *
++ * Copyright (c) 2004, Keir Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++
++ */
++
++#include "common.h"
++#include <xen/evtchn.h>
++
++static kmem_cache_t *blkif_cachep;
++
++blkif_t *tap_alloc_blkif(domid_t domid)
++{
++ blkif_t *blkif;
++
++ blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
++ if (!blkif)
++ return ERR_PTR(-ENOMEM);
++
++ memset(blkif, 0, sizeof(*blkif));
++ blkif->domid = domid;
++ spin_lock_init(&blkif->blk_ring_lock);
++ atomic_set(&blkif->refcnt, 1);
++ init_waitqueue_head(&blkif->wq);
++ blkif->st_print = jiffies;
++ init_waitqueue_head(&blkif->waiting_to_free);
++
++ return blkif;
++}
++
++static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
++ GNTMAP_host_map, shared_page, blkif->domid);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++ DPRINTK(" Grant table operation failure !\n");
++ return op.status;
++ }
++
++ blkif->shmem_ref = shared_page;
++ blkif->shmem_handle = op.handle;
++
++ return 0;
++}
++
++static void unmap_frontend_page(blkif_t *blkif)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
++ GNTMAP_host_map, blkif->shmem_handle);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++}
++
++int tap_blkif_map(blkif_t *blkif, unsigned long shared_page,
++ unsigned int evtchn)
++{
++ int err;
++
++ /* Already connected through? */
++ if (blkif->irq)
++ return 0;
++
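++	/*
++	 * Reserve a page of kernel virtual address space and map the
++	 * frontend's shared ring page into it via the grant reference.
++	 */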
++ if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
++ return -ENOMEM;
++
++ err = map_frontend_page(blkif, shared_page);
++ if (err) {
++ free_vm_area(blkif->blk_ring_area);
++ return err;
++ }
++
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ {
++ blkif_sring_t *sring;
++ sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
++ break;
++ }
++ case BLKIF_PROTOCOL_X86_32:
++ {
++ blkif_x86_32_sring_t *sring_x86_32;
++ sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
++ break;
++ }
++ case BLKIF_PROTOCOL_X86_64:
++ {
++ blkif_x86_64_sring_t *sring_x86_64;
++ sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
++ break;
++ }
++ default:
++ BUG();
++ }
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ blkif->domid, evtchn, tap_blkif_be_int,
++ 0, "blkif-backend", blkif);
++ if (err < 0) {
++ unmap_frontend_page(blkif);
++ free_vm_area(blkif->blk_ring_area);
++ blkif->blk_rings.common.sring = NULL;
++ return err;
++ }
++ blkif->irq = err;
++
++ return 0;
++}
++
++void tap_blkif_unmap(blkif_t *blkif)
++{
++ if (blkif->irq) {
++ unbind_from_irqhandler(blkif->irq, blkif);
++ blkif->irq = 0;
++ }
++ if (blkif->blk_rings.common.sring) {
++ unmap_frontend_page(blkif);
++ free_vm_area(blkif->blk_ring_area);
++ blkif->blk_rings.common.sring = NULL;
++ }
++}
++
++void tap_blkif_free(blkif_t *blkif)
++{
++ atomic_dec(&blkif->refcnt);
++ wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
++
++ tap_blkif_unmap(blkif);
++ kmem_cache_free(blkif_cachep, blkif);
++}
++
++void __init tap_blkif_interface_init(void)
++{
++ blkif_cachep = kmem_cache_create("blktapif_cache", sizeof(blkif_t),
++ 0, 0, NULL, NULL);
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/blktap/xenbus.c 2007-08-27 14:02:09.000000000 -0400
+@@ -0,0 +1,473 @@
++/* drivers/xen/blktap/xenbus.c
++ *
++ * Xenbus code for blktap
++ *
++ * Copyright (c) 2004-2005, Andrew Warfield and Julian Chesterfield
++ *
++ * Based on the blkback xenbus code:
++ *
++ * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
++ * Copyright (C) 2005 XenSource Ltd
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <stdarg.h>
++#include <linux/module.h>
++#include <linux/kthread.h>
++#include <xen/xenbus.h>
++#include "common.h"
++
++
++struct backend_info
++{
++ struct xenbus_device *dev;
++ blkif_t *blkif;
++ struct xenbus_watch backend_watch;
++ int xenbus_id;
++ int group_added;
++};
++
++
++static void connect(struct backend_info *);
++static int connect_ring(struct backend_info *);
++static int blktap_remove(struct xenbus_device *dev);
++static int blktap_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id);
++static void tap_backend_changed(struct xenbus_watch *, const char **,
++ unsigned int);
++static void tap_frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state);
++
++static int strsep_len(const char *str, char c, unsigned int len)
++{
++ unsigned int i;
++
++ for (i = 0; str[i]; i++)
++ if (str[i] == c) {
++ if (len == 0)
++ return i;
++ len--;
++ }
++ return (len == 0) ? i : -ERANGE;
++}
++
++static long get_id(const char *str)
++{
++ int len,end;
++ const char *ptr;
++ char *tptr, num[10];
++
++ len = strsep_len(str, '/', 2);
++ end = strlen(str);
++ if ( (len < 0) || (end < 0) ) return -1;
++
++ ptr = str + len + 1;
++ strncpy(num,ptr,end - len);
++ tptr = num + (end - (len + 1));
++ *tptr = '\0';
++ DPRINTK("Get_id called for %s (%s)\n",str,num);
++
++ return simple_strtol(num, NULL, 10);
++}
++
++static int blktap_name(blkif_t *blkif, char *buf)
++{
++ char *devpath, *devname;
++ struct xenbus_device *dev = blkif->be->dev;
++
++ devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
++ if (IS_ERR(devpath))
++ return PTR_ERR(devpath);
++
++ if ((devname = strstr(devpath, "/dev/")) != NULL)
++ devname += strlen("/dev/");
++ else
++ devname = devpath;
++
++ snprintf(buf, TASK_COMM_LEN, "blktap.%d.%s", blkif->domid, devname);
++ kfree(devpath);
++
++ return 0;
++}
++
++/****************************************************************
++ * sysfs interface for VBD I/O requests
++ */
++
++#define VBD_SHOW(name, format, args...) \
++ static ssize_t show_##name(struct device *_dev, \
++ struct device_attribute *attr, \
++ char *buf) \
++ { \
++ struct xenbus_device *dev = to_xenbus_device(_dev); \
++ struct backend_info *be = dev->dev.driver_data; \
++ \
++ return sprintf(buf, format, ##args); \
++ } \
++ DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
++
++VBD_SHOW(tap_oo_req, "%d\n", be->blkif->st_oo_req);
++VBD_SHOW(tap_rd_req, "%d\n", be->blkif->st_rd_req);
++VBD_SHOW(tap_wr_req, "%d\n", be->blkif->st_wr_req);
++VBD_SHOW(tap_rd_sect, "%d\n", be->blkif->st_rd_sect);
++VBD_SHOW(tap_wr_sect, "%d\n", be->blkif->st_wr_sect);
++
++static struct attribute *tapstat_attrs[] = {
++ &dev_attr_tap_oo_req.attr,
++ &dev_attr_tap_rd_req.attr,
++ &dev_attr_tap_wr_req.attr,
++ &dev_attr_tap_rd_sect.attr,
++ &dev_attr_tap_wr_sect.attr,
++ NULL
++};
++
++static struct attribute_group tapstat_group = {
++ .name = "statistics",
++ .attrs = tapstat_attrs,
++};
++
++int xentap_sysfs_addif(struct xenbus_device *dev)
++{
++ int err;
++ struct backend_info *be = dev->dev.driver_data;
++ err = sysfs_create_group(&dev->dev.kobj, &tapstat_group);
++ if (!err)
++ be->group_added = 1;
++ return err;
++}
++
++void xentap_sysfs_delif(struct xenbus_device *dev)
++{
++ sysfs_remove_group(&dev->dev.kobj, &tapstat_group);
++}
++
++static int blktap_remove(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++ if (be->backend_watch.node) {
++ unregister_xenbus_watch(&be->backend_watch);
++ kfree(be->backend_watch.node);
++ be->backend_watch.node = NULL;
++ }
++ if (be->blkif) {
++ if (be->blkif->xenblkd)
++ kthread_stop(be->blkif->xenblkd);
++ signal_tapdisk(be->blkif->dev_num);
++ tap_blkif_free(be->blkif);
++ be->blkif = NULL;
++ }
++ if (be->group_added)
++ xentap_sysfs_delif(be->dev);
++ kfree(be);
++ dev->dev.driver_data = NULL;
++ return 0;
++}
++
++static void tap_update_blkif_status(blkif_t *blkif)
++{
++ int err;
++ char name[TASK_COMM_LEN];
++
++ /* Not ready to connect? */
++ if(!blkif->irq || !blkif->sectors) {
++ return;
++ }
++
++ /* Already connected? */
++ if (blkif->be->dev->state == XenbusStateConnected)
++ return;
++
++ /* Attempt to connect: exit if we fail to. */
++ connect(blkif->be);
++ if (blkif->be->dev->state != XenbusStateConnected)
++ return;
++
++ err = blktap_name(blkif, name);
++ if (err) {
++ xenbus_dev_error(blkif->be->dev, err, "get blktap dev name");
++ return;
++ }
++
++ err = xentap_sysfs_addif(blkif->be->dev);
++ if (err) {
++ xenbus_dev_fatal(blkif->be->dev, err,
++ "creating sysfs entries");
++ return;
++ }
++
++ blkif->xenblkd = kthread_run(tap_blkif_schedule, blkif, name);
++ if (IS_ERR(blkif->xenblkd)) {
++ err = PTR_ERR(blkif->xenblkd);
++ blkif->xenblkd = NULL;
++ xenbus_dev_fatal(blkif->be->dev, err, "start xenblkd");
++ WPRINTK("Error starting thread\n");
++ }
++}
++
++/**
++ * Entry point to this code when a new device is created. Allocate
++ * the basic structures, and watch the store waiting for the
++ * user-space program to tell us the physical device info. Switch to
++ * InitWait.
++ */
++static int blktap_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++ struct backend_info *be = kzalloc(sizeof(struct backend_info),
++ GFP_KERNEL);
++ if (!be) {
++ xenbus_dev_fatal(dev, -ENOMEM,
++ "allocating backend structure");
++ return -ENOMEM;
++ }
++
++ be->dev = dev;
++ dev->dev.driver_data = be;
++ be->xenbus_id = get_id(dev->nodename);
++
++ be->blkif = tap_alloc_blkif(dev->otherend_id);
++ if (IS_ERR(be->blkif)) {
++ err = PTR_ERR(be->blkif);
++ be->blkif = NULL;
++ xenbus_dev_fatal(dev, err, "creating block interface");
++ goto fail;
++ }
++
++ /* setup back pointer */
++ be->blkif->be = be;
++ be->blkif->sectors = 0;
++
++ /* set a watch on disk info, waiting for userspace to update details*/
++ err = xenbus_watch_path2(dev, dev->nodename, "info",
++ &be->backend_watch, tap_backend_changed);
++ if (err)
++ goto fail;
++
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err)
++ goto fail;
++ return 0;
++
++fail:
++ DPRINTK("blktap probe failed\n");
++ blktap_remove(dev);
++ return err;
++}
++
++
++/**
++ * Callback received when the user space code has placed the device
++ * information in xenstore.
++ */
++static void tap_backend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ int err;
++ unsigned long info;
++ struct backend_info *be
++ = container_of(watch, struct backend_info, backend_watch);
++ struct xenbus_device *dev = be->dev;
++
++	/*
++	 * Check to see whether the userspace code has opened the image
++	 * and written the sector and disk info to xenstore.
++	 */
++ err = xenbus_gather(XBT_NIL, dev->nodename, "info", "%lu", &info,
++ NULL);
++ if (XENBUS_EXIST_ERR(err))
++ return;
++ if (err) {
++ xenbus_dev_error(dev, err, "getting info");
++ return;
++ }
++
++ DPRINTK("Userspace update on disk info, %lu\n",info);
++
++ err = xenbus_gather(XBT_NIL, dev->nodename, "sectors", "%llu",
++ &be->blkif->sectors, NULL);
++
++ /* Associate tap dev with domid*/
++ be->blkif->dev_num = dom_to_devid(be->blkif->domid, be->xenbus_id,
++ be->blkif);
++ DPRINTK("Thread started for domid [%d], connecting disk\n",
++ be->blkif->dev_num);
++
++ tap_update_blkif_status(be->blkif);
++}
++
++/**
++ * Callback received when the frontend's state changes.
++ */
++static void tap_frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state)
++{
++ struct backend_info *be = dev->dev.driver_data;
++ int err;
++
++ DPRINTK("\n");
++
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ if (dev->state == XenbusStateClosed) {
++ printk(KERN_INFO "%s: %s: prepare for reconnect\n",
++ __FUNCTION__, dev->nodename);
++ xenbus_switch_state(dev, XenbusStateInitWait);
++ }
++ break;
++
++ case XenbusStateInitialised:
++ case XenbusStateConnected:
++ /* Ensure we connect even when two watches fire in
++		   close succession and we miss the intermediate value
++ of frontend_state. */
++ if (dev->state == XenbusStateConnected)
++ break;
++
++ err = connect_ring(be);
++ if (err)
++ break;
++ tap_update_blkif_status(be->blkif);
++ break;
++
++ case XenbusStateClosing:
++ if (be->blkif->xenblkd) {
++ kthread_stop(be->blkif->xenblkd);
++ be->blkif->xenblkd = NULL;
++ }
++ xenbus_switch_state(dev, XenbusStateClosing);
++ break;
++
++ case XenbusStateClosed:
++ xenbus_switch_state(dev, XenbusStateClosed);
++ if (xenbus_dev_is_online(dev))
++ break;
++ /* fall through if not online */
++ case XenbusStateUnknown:
++ device_unregister(&dev->dev);
++ break;
++
++ default:
++ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++ frontend_state);
++ break;
++ }
++}
++
++
++/**
++ * Switch to Connected state.
++ */
++static void connect(struct backend_info *be)
++{
++ int err;
++
++ struct xenbus_device *dev = be->dev;
++
++ err = xenbus_switch_state(dev, XenbusStateConnected);
++ if (err)
++ xenbus_dev_fatal(dev, err, "switching to Connected state",
++ dev->nodename);
++
++ return;
++}
++
++
++static int connect_ring(struct backend_info *be)
++{
++ struct xenbus_device *dev = be->dev;
++ unsigned long ring_ref;
++ unsigned int evtchn;
++ char protocol[64];
++ int err;
++
++ DPRINTK("%s\n", dev->otherend);
++
++ err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
++ &ring_ref, "event-channel", "%u", &evtchn, NULL);
++ if (err) {
++ xenbus_dev_fatal(dev, err,
++ "reading %s/ring-ref and event-channel",
++ dev->otherend);
++ return err;
++ }
++
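++	/*
++	 * Default to the native protocol; honour the frontend's
++	 * "protocol" node if it names one of the known ABIs.
++	 */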
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
++ err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
++ "%63s", protocol, NULL);
++ if (err)
++ strcpy(protocol, "unspecified, assuming native");
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
++ else {
++ xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
++ return -1;
++ }
++ printk(KERN_INFO
++ "blktap: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
++ ring_ref, evtchn, be->blkif->blk_protocol, protocol);
++
++ /* Map the shared frame, irq etc. */
++ err = tap_blkif_map(be->blkif, ring_ref, evtchn);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
++ ring_ref, evtchn);
++ return err;
++ }
++
++ return 0;
++}
++
++
++/* ** Driver Registration ** */
++
++
++static struct xenbus_device_id blktap_ids[] = {
++ { "tap" },
++ { "" }
++};
++
++
++static struct xenbus_driver blktap = {
++ .name = "tap",
++ .owner = THIS_MODULE,
++ .ids = blktap_ids,
++ .probe = blktap_probe,
++ .remove = blktap_remove,
++ .otherend_changed = tap_frontend_changed
++};
++
++
++void tap_blkif_xenbus_init(void)
++{
++ xenbus_register_backend(&blktap);
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/char/Makefile 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,2 @@
++
++obj-y := mem.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/char/mem.c 2007-08-27 14:02:01.000000000 -0400
+@@ -0,0 +1,203 @@
++/*
++ * Originally from linux/drivers/char/mem.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ *
++ * Added devfs support.
++ * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
++ * Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
++ */
++
++#include <linux/mm.h>
++#include <linux/miscdevice.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/mman.h>
++#include <linux/random.h>
++#include <linux/init.h>
++#include <linux/raw.h>
++#include <linux/tty.h>
++#include <linux/capability.h>
++#include <linux/smp_lock.h>
++#include <linux/ptrace.h>
++#include <linux/device.h>
++#include <asm/pgalloc.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/hypervisor.h>
++
++#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
++static inline int valid_phys_addr_range(unsigned long addr, size_t count)
++{
++ return 1;
++}
++#endif
++
++/*
++ * This function reads the *physical* memory. The f_pos points directly to the
++ * memory location.
++ */
++static ssize_t read_mem(struct file * file, char __user * buf,
++ size_t count, loff_t *ppos)
++{
++ unsigned long p = *ppos, ignored;
++ ssize_t read = 0, sz;
++ void __iomem *v;
++
++ if (!valid_phys_addr_range(p, count))
++ return -EFAULT;
++
++ while (count > 0) {
++		/*
++		 * Handle the first page in case it's not aligned:
++		 * -p & (PAGE_SIZE - 1) is the number of bytes up to
++		 * the next page boundary.
++		 */
++ if (-p & (PAGE_SIZE - 1))
++ sz = -p & (PAGE_SIZE - 1);
++ else
++ sz = PAGE_SIZE;
++
++ sz = min_t(unsigned long, sz, count);
++
++ v = xlate_dev_mem_ptr(p, sz);
++ if (IS_ERR(v) || v == NULL) {
++ /*
++ * Some programs (e.g., dmidecode) groove off into
++ * weird RAM areas where no tables can possibly exist
++ * (because Xen will have stomped on them!). These
++ * programs get rather upset if we let them know that
++ * Xen failed their access, so we fake out a read of
++ * all zeroes.
++ */
++ if (clear_user(buf, count))
++ return -EFAULT;
++ read += count;
++ break;
++ }
++
++ ignored = copy_to_user(buf, v, sz);
++ xlate_dev_mem_ptr_unmap(v);
++ if (ignored)
++ return -EFAULT;
++ buf += sz;
++ p += sz;
++ count -= sz;
++ read += sz;
++ }
++
++ *ppos += read;
++ return read;
++}
++
++static ssize_t write_mem(struct file * file, const char __user * buf,
++ size_t count, loff_t *ppos)
++{
++ unsigned long p = *ppos, ignored;
++ ssize_t written = 0, sz;
++ void __iomem *v;
++
++ if (!valid_phys_addr_range(p, count))
++ return -EFAULT;
++
++ while (count > 0) {
++ /*
++ * Handle first page in case it's not aligned
++ */
++ if (-p & (PAGE_SIZE - 1))
++ sz = -p & (PAGE_SIZE - 1);
++ else
++ sz = PAGE_SIZE;
++
++ sz = min_t(unsigned long, sz, count);
++
++ v = xlate_dev_mem_ptr(p, sz);
++ if (v == NULL)
++ break;
++ if (IS_ERR(v)) {
++ if (written == 0)
++ return PTR_ERR(v);
++ break;
++ }
++
++ ignored = copy_from_user(v, buf, sz);
++ xlate_dev_mem_ptr_unmap(v);
++ if (ignored) {
++ written += sz - ignored;
++ if (written)
++ break;
++ return -EFAULT;
++ }
++ buf += sz;
++ p += sz;
++ count -= sz;
++ written += sz;
++ }
++
++ *ppos += written;
++ return written;
++}
++
++#ifndef ARCH_HAS_DEV_MEM_MMAP_MEM
++static inline int uncached_access(struct file *file)
++{
++ if (file->f_flags & O_SYNC)
++ return 1;
++ /* Xen sets correct MTRR type on non-RAM for us. */
++ return 0;
++}
++
++static int xen_mmap_mem(struct file * file, struct vm_area_struct * vma)
++{
++ size_t size = vma->vm_end - vma->vm_start;
++
++ if (uncached_access(file))
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++ /* We want to return the real error code, not EAGAIN. */
++ return direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ size, vma->vm_page_prot, DOMID_IO);
++}
++#endif
++
++/*
++ * The memory devices use the full 32/64 bits of the offset, and so we cannot
++ * check against negative addresses: they are ok. The return value is weird,
++ * though, in that case (0).
++ *
++ * also note that seeking relative to the "end of file" isn't supported:
++ * it has no meaning, so it returns -EINVAL.
++ */
++static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
++{
++ loff_t ret;
++
++ mutex_lock(&file->f_dentry->d_inode->i_mutex);
++ switch (orig) {
++ case 0:
++ file->f_pos = offset;
++ ret = file->f_pos;
++ force_successful_syscall_return();
++ break;
++ case 1:
++ file->f_pos += offset;
++ ret = file->f_pos;
++ force_successful_syscall_return();
++ break;
++ default:
++ ret = -EINVAL;
++ }
++ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
++ return ret;
++}
++
++static int open_mem(struct inode * inode, struct file * filp)
++{
++ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
++
++const struct file_operations mem_fops = {
++ .llseek = memory_lseek,
++ .read = read_mem,
++ .write = write_mem,
++ .mmap = xen_mmap_mem,
++ .open = open_mem,
++};
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/console/Makefile 2007-08-27 14:01:53.000000000 -0400
+@@ -0,0 +1,2 @@
++
++obj-y := console.o xencons_ring.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/console/console.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,721 @@
++/******************************************************************************
++ * console.c
++ *
++ * Virtual console driver.
++ *
++ * Copyright (c) 2002-2004, K A Fraser.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <linux/serial.h>
++#include <linux/major.h>
++#include <linux/ptrace.h>
++#include <linux/ioport.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/console.h>
++#include <linux/bootmem.h>
++#include <linux/sysrq.h>
++#include <linux/screen_info.h>
++#include <linux/vt.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/uaccess.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/event_channel.h>
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/xenbus.h>
++#include <xen/xencons.h>
++
++/*
++ * Modes:
++ * 'xencons=off' [XC_OFF]: Console is disabled.
++ * 'xencons=tty' [XC_TTY]: Console attached to '/dev/tty[0-9]+'.
++ * 'xencons=ttyS' [XC_SERIAL]: Console attached to '/dev/ttyS[0-9]+'.
++ * 'xencons=xvc' [XC_XVC]: Console attached to '/dev/xvc0'.
++ * default: DOM0 -> XC_SERIAL ; all others -> XC_TTY.
++ *
++ * NB. In mode XC_TTY, we create dummy consoles for tty2-63. This suppresses
++ * warnings from standard distro startup scripts.
++ */
++static enum {
++ XC_OFF, XC_TTY, XC_SERIAL, XC_XVC
++} xc_mode;
++static int xc_num = -1;
++
++/* /dev/xvc0 device number allocated by lanana.org. */
++#define XEN_XVC_MAJOR 204
++#define XEN_XVC_MINOR 191
++
++#ifdef CONFIG_MAGIC_SYSRQ
++static unsigned long sysrq_requested;
++extern int sysrq_enabled;
++#endif
++
++void xencons_early_setup(void)
++{
++ extern int console_use_vt;
++
++ if (is_initial_xendomain()) {
++ xc_mode = XC_SERIAL;
++ } else {
++ xc_mode = XC_TTY;
++ console_use_vt = 0;
++ }
++}
++
++static int __init xencons_setup(char *str)
++{
++ char *q;
++ int n;
++ extern int console_use_vt;
++
++ console_use_vt = 1;
++ if (!strncmp(str, "ttyS", 4)) {
++ xc_mode = XC_SERIAL;
++ str += 4;
++ } else if (!strncmp(str, "tty", 3)) {
++ xc_mode = XC_TTY;
++ str += 3;
++ console_use_vt = 0;
++ } else if (!strncmp(str, "xvc", 3)) {
++ xc_mode = XC_XVC;
++ str += 3;
++ } else if (!strncmp(str, "off", 3)) {
++ xc_mode = XC_OFF;
++ str += 3;
++ }
++
++ n = simple_strtol(str, &q, 10);
++ if (q != str)
++ xc_num = n;
++
++ return 1;
++}
++__setup("xencons=", xencons_setup);
++
++/* The kernel and user-land drivers share a common transmit buffer. */
++static unsigned int wbuf_size = 4096;
++#define WBUF_MASK(_i) ((_i)&(wbuf_size-1))
++static char *wbuf;
++static unsigned int wc, wp; /* write_cons, write_prod */
++
++static int __init xencons_bufsz_setup(char *str)
++{
++ unsigned int goal;
++ goal = simple_strtoul(str, NULL, 0);
++ if (goal) {
++ goal = roundup_pow_of_two(goal);
++ if (wbuf_size < goal)
++ wbuf_size = goal;
++ }
++ return 1;
++}
++__setup("xencons_bufsz=", xencons_bufsz_setup);
++
++/* This lock protects accesses to the common transmit buffer. */
++static DEFINE_SPINLOCK(xencons_lock);
++
++/* Common transmit-kick routine. */
++static void __xencons_tx_flush(void);
++
++static struct tty_driver *xencons_driver;
++
++/******************** Kernel console driver ********************************/
++
++static void kcons_write(struct console *c, const char *s, unsigned int count)
++{
++ int i = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++
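++	/*
++	 * Copy into the shared transmit buffer, expanding '\n' to
++	 * '\n\r', and kick a flush whenever the buffer fills up.
++	 */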
++ while (i < count) {
++ for (; i < count; i++) {
++ if ((wp - wc) >= (wbuf_size - 1))
++ break;
++ if ((wbuf[WBUF_MASK(wp++)] = s[i]) == '\n')
++ wbuf[WBUF_MASK(wp++)] = '\r';
++ }
++
++ __xencons_tx_flush();
++ }
++
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void kcons_write_dom0(struct console *c, const char *s, unsigned int count)
++{
++
++ while (count > 0) {
++ int rc;
++ rc = HYPERVISOR_console_io( CONSOLEIO_write, count, (char *)s);
++ if (rc <= 0)
++ break;
++ count -= rc;
++ s += rc;
++ }
++}
++
++static struct tty_driver *kcons_device(struct console *c, int *index)
++{
++ *index = 0;
++ return xencons_driver;
++}
++
++static struct console kcons_info = {
++ .device = kcons_device,
++ .flags = CON_PRINTBUFFER | CON_ENABLED,
++ .index = -1,
++};
++
++static int __init xen_console_init(void)
++{
++ if (!is_running_on_xen())
++ goto out;
++
++ if (is_initial_xendomain()) {
++ kcons_info.write = kcons_write_dom0;
++ } else {
++ if (!xen_start_info->console.domU.evtchn)
++ goto out;
++ kcons_info.write = kcons_write;
++ }
++
++ switch (xc_mode) {
++ case XC_XVC:
++ strcpy(kcons_info.name, "xvc");
++ if (xc_num == -1)
++ xc_num = 0;
++ break;
++
++ case XC_SERIAL:
++ strcpy(kcons_info.name, "ttyS");
++ if (xc_num == -1)
++ xc_num = 0;
++ break;
++
++ case XC_TTY:
++ strcpy(kcons_info.name, "tty");
++ if (xc_num == -1)
++ xc_num = 1;
++ break;
++
++ default:
++ goto out;
++ }
++
++ wbuf = alloc_bootmem(wbuf_size);
++
++ register_console(&kcons_info);
++
++ out:
++ return 0;
++}
++console_initcall(xen_console_init);
++
++/*** Useful function for console debugging -- goes straight to Xen. ***/
++asmlinkage int xprintk(const char *fmt, ...)
++{
++ va_list args;
++ int printk_len;
++ static char printk_buf[1024];
++
++ /* Emit the output into the temporary buffer */
++ va_start(args, fmt);
++ printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
++ va_end(args);
++
++ /* Send the processed output directly to Xen. */
++ kcons_write_dom0(NULL, printk_buf, printk_len);
++
++ return 0;
++}
++
++/*** Forcibly flush console data before dying. ***/
++void xencons_force_flush(void)
++{
++ int sz;
++
++ /* Emergency console is synchronous, so there's nothing to flush. */
++ if (!is_running_on_xen() ||
++ is_initial_xendomain() ||
++ !xen_start_info->console.domU.evtchn)
++ return;
++
++ /* Spin until console data is flushed through to the daemon. */
++ while (wc != wp) {
++ int sent = 0;
++ if ((sz = wp - wc) == 0)
++ continue;
++ sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
++ if (sent > 0)
++ wc += sent;
++ }
++}
++
++
++void dom0_init_screen_info(const struct dom0_vga_console_info *info)
++{
++ switch (info->video_type) {
++ case XEN_VGATYPE_TEXT_MODE_3:
++ screen_info.orig_video_mode = 3;
++ screen_info.orig_video_ega_bx = 3;
++ screen_info.orig_video_isVGA = 1;
++ screen_info.orig_video_lines = info->u.text_mode_3.rows;
++ screen_info.orig_video_cols = info->u.text_mode_3.columns;
++ screen_info.orig_x = info->u.text_mode_3.cursor_x;
++ screen_info.orig_y = info->u.text_mode_3.cursor_y;
++ screen_info.orig_video_points =
++ info->u.text_mode_3.font_height;
++ break;
++ case XEN_VGATYPE_VESA_LFB:
++ screen_info.orig_video_isVGA = VIDEO_TYPE_VLFB;
++ screen_info.lfb_width = info->u.vesa_lfb.width;
++ screen_info.lfb_height = info->u.vesa_lfb.height;
++ screen_info.lfb_depth = info->u.vesa_lfb.bits_per_pixel;
++ screen_info.lfb_base = info->u.vesa_lfb.lfb_base;
++ screen_info.lfb_size = info->u.vesa_lfb.lfb_size;
++ screen_info.lfb_linelength = info->u.vesa_lfb.bytes_per_line;
++ screen_info.red_size = info->u.vesa_lfb.red_size;
++ screen_info.red_pos = info->u.vesa_lfb.red_pos;
++ screen_info.green_size = info->u.vesa_lfb.green_size;
++ screen_info.green_pos = info->u.vesa_lfb.green_pos;
++ screen_info.blue_size = info->u.vesa_lfb.blue_size;
++ screen_info.blue_pos = info->u.vesa_lfb.blue_pos;
++ screen_info.rsvd_size = info->u.vesa_lfb.rsvd_size;
++ screen_info.rsvd_pos = info->u.vesa_lfb.rsvd_pos;
++ break;
++ }
++}
++
++
++/******************** User-space console driver (/dev/console) ************/
++
++#define DRV(_d) (_d)
++#define DUMMY_TTY(_tty) ((xc_mode == XC_TTY) && \
++ ((_tty)->index != (xc_num - 1)))
++
++static struct termios *xencons_termios[MAX_NR_CONSOLES];
++static struct termios *xencons_termios_locked[MAX_NR_CONSOLES];
++static struct tty_struct *xencons_tty;
++static int xencons_priv_irq;
++static char x_char;
++
++void xencons_rx(char *buf, unsigned len, struct pt_regs *regs)
++{
++ int i;
++ unsigned long flags;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ if (xencons_tty == NULL)
++ goto out;
++
++ for (i = 0; i < len; i++) {
++#ifdef CONFIG_MAGIC_SYSRQ
++ if (sysrq_enabled) {
++ if (buf[i] == '\x0f') { /* ^O */
++ if (!sysrq_requested) {
++ sysrq_requested = jiffies;
++ continue; /* don't print sysrq key */
++ }
++ sysrq_requested = 0;
++ } else if (sysrq_requested) {
++ unsigned long sysrq_timeout =
++ sysrq_requested + HZ*2;
++ sysrq_requested = 0;
++ if (time_before(jiffies, sysrq_timeout)) {
++ spin_unlock_irqrestore(
++ &xencons_lock, flags);
++ handle_sysrq(
++ buf[i], regs, xencons_tty);
++ spin_lock_irqsave(
++ &xencons_lock, flags);
++ continue;
++ }
++ }
++ }
++#endif
++ tty_insert_flip_char(xencons_tty, buf[i], 0);
++ }
++ tty_flip_buffer_push(xencons_tty);
++
++ out:
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void __xencons_tx_flush(void)
++{
++ int sent, sz, work_done = 0;
++
++ if (x_char) {
++ if (is_initial_xendomain())
++ kcons_write_dom0(NULL, &x_char, 1);
++ else
++ while (x_char)
++ if (xencons_ring_send(&x_char, 1) == 1)
++ break;
++ x_char = 0;
++ work_done = 1;
++ }
++
++ while (wc != wp) {
++ sz = wp - wc;
++ if (sz > (wbuf_size - WBUF_MASK(wc)))
++ sz = wbuf_size - WBUF_MASK(wc);
++ if (is_initial_xendomain()) {
++ kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
++ wc += sz;
++ } else {
++ sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
++ if (sent == 0)
++ break;
++ wc += sent;
++ }
++ work_done = 1;
++ }
++
++ if (work_done && (xencons_tty != NULL)) {
++ wake_up_interruptible(&xencons_tty->write_wait);
++ if ((xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
++ (xencons_tty->ldisc.write_wakeup != NULL))
++ (xencons_tty->ldisc.write_wakeup)(xencons_tty);
++ }
++}
++
++void xencons_tx(void)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ __xencons_tx_flush();
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++/* Privileged receive callback and transmit kicker. */
++static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ static char rbuf[16];
++ int l;
++
++ while ((l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0)
++ xencons_rx(rbuf, l, regs);
++
++ xencons_tx();
++
++ return IRQ_HANDLED;
++}
++
++static int xencons_write_room(struct tty_struct *tty)
++{
++ return wbuf_size - (wp - wc);
++}
++
++static int xencons_chars_in_buffer(struct tty_struct *tty)
++{
++ return wp - wc;
++}
++
++static void xencons_send_xchar(struct tty_struct *tty, char ch)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ x_char = ch;
++ __xencons_tx_flush();
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void xencons_throttle(struct tty_struct *tty)
++{
++ if (DUMMY_TTY(tty))
++ return;
++
++ if (I_IXOFF(tty))
++ xencons_send_xchar(tty, STOP_CHAR(tty));
++}
++
++static void xencons_unthrottle(struct tty_struct *tty)
++{
++ if (DUMMY_TTY(tty))
++ return;
++
++ if (I_IXOFF(tty)) {
++ if (x_char != 0)
++ x_char = 0;
++ else
++ xencons_send_xchar(tty, START_CHAR(tty));
++ }
++}
++
++static void xencons_flush_buffer(struct tty_struct *tty)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ wc = wp = 0;
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static inline int __xencons_put_char(int ch)
++{
++ char _ch = (char)ch;
++ if ((wp - wc) == wbuf_size)
++ return 0;
++ wbuf[WBUF_MASK(wp++)] = _ch;
++ return 1;
++}
++
++static int xencons_write(
++ struct tty_struct *tty,
++ const unsigned char *buf,
++ int count)
++{
++ int i;
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return count;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++
++ for (i = 0; i < count; i++)
++ if (!__xencons_put_char(buf[i]))
++ break;
++
++ if (i != 0)
++ __xencons_tx_flush();
++
++ spin_unlock_irqrestore(&xencons_lock, flags);
++
++ return i;
++}
++
++static void xencons_put_char(struct tty_struct *tty, u_char ch)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ (void)__xencons_put_char(ch);
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void xencons_flush_chars(struct tty_struct *tty)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ __xencons_tx_flush();
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
++{
++ unsigned long orig_jiffies = jiffies;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ while (DRV(tty->driver)->chars_in_buffer(tty)) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule_timeout(1);
++ if (signal_pending(current))
++ break;
++ if (timeout && time_after(jiffies, orig_jiffies + timeout))
++ break;
++ }
++
++ set_current_state(TASK_RUNNING);
++}
++
++static int xencons_open(struct tty_struct *tty, struct file *filp)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return 0;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ tty->driver_data = NULL;
++ if (xencons_tty == NULL)
++ xencons_tty = tty;
++ __xencons_tx_flush();
++ spin_unlock_irqrestore(&xencons_lock, flags);
++
++ return 0;
++}
++
++static void xencons_close(struct tty_struct *tty, struct file *filp)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ mutex_lock(&tty_mutex);
++
++ if (tty->count != 1) {
++ mutex_unlock(&tty_mutex);
++ return;
++ }
++
++ /* Prevent other threads from re-opening this tty. */
++ set_bit(TTY_CLOSING, &tty->flags);
++ mutex_unlock(&tty_mutex);
++
++ tty->closing = 1;
++ tty_wait_until_sent(tty, 0);
++ if (DRV(tty->driver)->flush_buffer != NULL)
++ DRV(tty->driver)->flush_buffer(tty);
++ if (tty->ldisc.flush_buffer != NULL)
++ tty->ldisc.flush_buffer(tty);
++ tty->closing = 0;
++ spin_lock_irqsave(&xencons_lock, flags);
++ xencons_tty = NULL;
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static struct tty_operations xencons_ops = {
++ .open = xencons_open,
++ .close = xencons_close,
++ .write = xencons_write,
++ .write_room = xencons_write_room,
++ .put_char = xencons_put_char,
++ .flush_chars = xencons_flush_chars,
++ .chars_in_buffer = xencons_chars_in_buffer,
++ .send_xchar = xencons_send_xchar,
++ .flush_buffer = xencons_flush_buffer,
++ .throttle = xencons_throttle,
++ .unthrottle = xencons_unthrottle,
++ .wait_until_sent = xencons_wait_until_sent,
++};
++
++static int __init xencons_init(void)
++{
++ int rc;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ if (xc_mode == XC_OFF)
++ return 0;
++
++ if (!is_initial_xendomain()) {
++ rc = xencons_ring_init();
++ if (rc)
++ return rc;
++ }
++
++ xencons_driver = alloc_tty_driver((xc_mode == XC_TTY) ?
++ MAX_NR_CONSOLES : 1);
++ if (xencons_driver == NULL)
++ return -ENOMEM;
++
++ DRV(xencons_driver)->name = "xencons";
++ DRV(xencons_driver)->major = TTY_MAJOR;
++ DRV(xencons_driver)->type = TTY_DRIVER_TYPE_SERIAL;
++ DRV(xencons_driver)->subtype = SERIAL_TYPE_NORMAL;
++ DRV(xencons_driver)->init_termios = tty_std_termios;
++ DRV(xencons_driver)->flags =
++ TTY_DRIVER_REAL_RAW |
++ TTY_DRIVER_RESET_TERMIOS;
++ DRV(xencons_driver)->termios = xencons_termios;
++ DRV(xencons_driver)->termios_locked = xencons_termios_locked;
++
++ switch (xc_mode) {
++ case XC_XVC:
++ DRV(xencons_driver)->name = "xvc";
++ DRV(xencons_driver)->major = XEN_XVC_MAJOR;
++ DRV(xencons_driver)->minor_start = XEN_XVC_MINOR;
++ DRV(xencons_driver)->name_base = xc_num;
++ break;
++ case XC_SERIAL:
++ DRV(xencons_driver)->name = "ttyS";
++ DRV(xencons_driver)->minor_start = 64 + xc_num;
++ DRV(xencons_driver)->name_base = xc_num;
++ break;
++ default:
++ DRV(xencons_driver)->name = "tty";
++ DRV(xencons_driver)->minor_start = 1;
++ DRV(xencons_driver)->name_base = 1;
++ break;
++ }
++
++ tty_set_operations(xencons_driver, &xencons_ops);
++
++ if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
++ printk("WARNING: Failed to register Xen virtual "
++ "console driver as '%s%d'\n",
++ DRV(xencons_driver)->name,
++ DRV(xencons_driver)->name_base);
++ put_tty_driver(xencons_driver);
++ xencons_driver = NULL;
++ return rc;
++ }
++
++ if (is_initial_xendomain()) {
++ xencons_priv_irq = bind_virq_to_irqhandler(
++ VIRQ_CONSOLE,
++ 0,
++ xencons_priv_interrupt,
++ 0,
++ "console",
++ NULL);
++ BUG_ON(xencons_priv_irq < 0);
++ }
++
++ printk("Xen virtual console successfully installed as %s%d\n",
++ DRV(xencons_driver)->name, xc_num);
++
++ return 0;
++}
++
++module_init(xencons_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/console/xencons_ring.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,143 @@
++/*
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <linux/serial.h>
++#include <linux/major.h>
++#include <linux/ptrace.h>
++#include <linux/ioport.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/xencons.h>
++#include <linux/wait.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/err.h>
++#include <xen/interface/io/console.h>
++
++static int xencons_irq;
++
++static inline struct xencons_interface *xencons_interface(void)
++{
++ return mfn_to_virt(xen_start_info->console.domU.mfn);
++}
++
++static inline void notify_daemon(void)
++{
++ /* Use evtchn: this is called early, before irq is set up. */
++ notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
++}
++
++int xencons_ring_send(const char *data, unsigned len)
++{
++ int sent = 0;
++ struct xencons_interface *intf = xencons_interface();
++ XENCONS_RING_IDX cons, prod;
++
++ cons = intf->out_cons;
++ prod = intf->out_prod;
++ mb();
++ BUG_ON((prod - cons) > sizeof(intf->out));
++
++ while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
++ intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
++
++ wmb();
++ intf->out_prod = prod;
++
++ notify_daemon();
++
++ return sent;
++}
++
++static irqreturn_t handle_input(int irq, void *unused, struct pt_regs *regs)
++{
++ struct xencons_interface *intf = xencons_interface();
++ XENCONS_RING_IDX cons, prod;
++
++ cons = intf->in_cons;
++ prod = intf->in_prod;
++ mb();
++ BUG_ON((prod - cons) > sizeof(intf->in));
++
++ while (cons != prod) {
++ xencons_rx(intf->in+MASK_XENCONS_IDX(cons,intf->in), 1, regs);
++ cons++;
++ }
++
++ mb();
++ intf->in_cons = cons;
++
++ notify_daemon();
++
++ xencons_tx();
++
++ return IRQ_HANDLED;
++}
++
++int xencons_ring_init(void)
++{
++ int irq;
++
++ if (xencons_irq)
++ unbind_from_irqhandler(xencons_irq, NULL);
++ xencons_irq = 0;
++
++ if (!is_running_on_xen() ||
++ is_initial_xendomain() ||
++ !xen_start_info->console.domU.evtchn)
++ return -ENODEV;
++
++ irq = bind_caller_port_to_irqhandler(
++ xen_start_info->console.domU.evtchn,
++ handle_input, 0, "xencons", NULL);
++ if (irq < 0) {
++ printk(KERN_ERR "XEN console request irq failed %i\n", irq);
++ return irq;
++ }
++
++ xencons_irq = irq;
++
++ /* In case we have in-flight data after save/restore... */
++ notify_daemon();
++
++ return 0;
++}
++
++void xencons_resume(void)
++{
++ (void)xencons_ring_init();
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/core/Makefile 2007-08-27 14:02:09.000000000 -0400
+@@ -0,0 +1,12 @@
++#
++# Makefile for the linux kernel.
++#
++
++obj-y := evtchn.o gnttab.o features.o reboot.o machine_reboot.o
++
++obj-$(CONFIG_PROC_FS) += xen_proc.o
++obj-$(CONFIG_SYSFS) += hypervisor_sysfs.o
++obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
++obj-$(CONFIG_XEN_SYSFS) += xen_sysfs.o
++obj-$(CONFIG_XEN_SMPBOOT) += smpboot.o
++obj-$(CONFIG_KEXEC) += machine_kexec.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/core/cpu_hotplug.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,172 @@
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <xen/cpu_hotplug.h>
++#include <xen/xenbus.h>
++
++/*
++ * Set of CPUs that remote admin software will allow us to bring online.
++ * Notified to us via xenbus.
++ */
++static cpumask_t xenbus_allowed_cpumask;
++
++/* Set of CPUs that local admin will allow us to bring online. */
++static cpumask_t local_allowed_cpumask = CPU_MASK_ALL;
++
++static int local_cpu_hotplug_request(void)
++{
++ /*
++ * We assume a CPU hotplug request comes from local admin if it is made
++ * via a userspace process (i.e., one with a real mm_struct).
++ */
++ return (current->mm != NULL);
++}
++
++static void vcpu_hotplug(unsigned int cpu)
++{
++ int err;
++ char dir[32], state[32];
++
++ if ((cpu >= NR_CPUS) || !cpu_possible(cpu))
++ return;
++
++ sprintf(dir, "cpu/%d", cpu);
++ err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
++ if (err != 1) {
++ printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
++ return;
++ }
++
++ if (strcmp(state, "online") == 0) {
++ cpu_set(cpu, xenbus_allowed_cpumask);
++ (void)cpu_up(cpu);
++ } else if (strcmp(state, "offline") == 0) {
++ cpu_clear(cpu, xenbus_allowed_cpumask);
++ (void)cpu_down(cpu);
++ } else {
++ printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
++ state, cpu);
++ }
++}
++
++static void handle_vcpu_hotplug_event(
++ struct xenbus_watch *watch, const char **vec, unsigned int len)
++{
++ int cpu;
++ char *cpustr;
++ const char *node = vec[XS_WATCH_PATH];
++
++ if ((cpustr = strstr(node, "cpu/")) != NULL) {
++ sscanf(cpustr, "cpu/%d", &cpu);
++ vcpu_hotplug(cpu);
++ }
++}
++
++static int smpboot_cpu_notify(struct notifier_block *notifier,
++ unsigned long action, void *hcpu)
++{
++ int cpu = (long)hcpu;
++
++ /*
++ * We do this in a callback notifier rather than __cpu_disable()
++ * because local_cpu_hotplug_request() does not work in the latter
++ * as it's always executed from within a stopmachine kthread.
++ */
++ if ((action == CPU_DOWN_PREPARE) && local_cpu_hotplug_request())
++ cpu_clear(cpu, local_allowed_cpumask);
++
++ return NOTIFY_OK;
++}
++
++static int setup_cpu_watcher(struct notifier_block *notifier,
++ unsigned long event, void *data)
++{
++ int i;
++
++ static struct xenbus_watch cpu_watch = {
++ .node = "cpu",
++ .callback = handle_vcpu_hotplug_event,
++ .flags = XBWF_new_thread };
++ (void)register_xenbus_watch(&cpu_watch);
++
++ if (!is_initial_xendomain()) {
++ for_each_possible_cpu(i)
++ vcpu_hotplug(i);
++ printk(KERN_INFO "Brought up %ld CPUs\n",
++ (long)num_online_cpus());
++ }
++
++ return NOTIFY_DONE;
++}
++
++static int __init setup_vcpu_hotplug_event(void)
++{
++ static struct notifier_block hotplug_cpu = {
++ .notifier_call = smpboot_cpu_notify };
++ static struct notifier_block xsn_cpu = {
++ .notifier_call = setup_cpu_watcher };
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ register_cpu_notifier(&hotplug_cpu);
++ register_xenstore_notifier(&xsn_cpu);
++
++ return 0;
++}
++
++arch_initcall(setup_vcpu_hotplug_event);
++
++int smp_suspend(void)
++{
++ int cpu, err;
++
++ for_each_online_cpu(cpu) {
++ if (cpu == 0)
++ continue;
++ err = cpu_down(cpu);
++ if (err) {
++ printk(KERN_CRIT "Failed to take all CPUs "
++ "down: %d.\n", err);
++ for_each_possible_cpu(cpu)
++ vcpu_hotplug(cpu);
++ return err;
++ }
++ }
++
++ return 0;
++}
++
++void smp_resume(void)
++{
++ int cpu;
++
++ for_each_possible_cpu(cpu)
++ vcpu_hotplug(cpu);
++}
++
++int cpu_up_check(unsigned int cpu)
++{
++ int rc = 0;
++
++ if (local_cpu_hotplug_request()) {
++ cpu_set(cpu, local_allowed_cpumask);
++ if (!cpu_isset(cpu, xenbus_allowed_cpumask)) {
++ printk("%s: attempt to bring up CPU %u disallowed by "
++ "remote admin.\n", __FUNCTION__, cpu);
++ rc = -EBUSY;
++ }
++ } else if (!cpu_isset(cpu, local_allowed_cpumask) ||
++ !cpu_isset(cpu, xenbus_allowed_cpumask)) {
++ rc = -EBUSY;
++ }
++
++ return rc;
++}
++
++void init_xenbus_allowed_cpumask(void)
++{
++ xenbus_allowed_cpumask = cpu_present_map;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/core/evtchn.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,1015 @@
++/******************************************************************************
++ * evtchn.c
++ *
++ * Communication via Xen event channels.
++ *
++ * Copyright (c) 2002-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/kernel_stat.h>
++#include <linux/version.h>
++#include <asm/atomic.h>
++#include <asm/system.h>
++#include <asm/ptrace.h>
++#include <asm/synch_bitops.h>
++#include <xen/evtchn.h>
++#include <xen/interface/event_channel.h>
++#include <xen/interface/physdev.h>
++#include <asm/hypervisor.h>
++#include <linux/mc146818rtc.h> /* RTC_IRQ */
++
++/*
++ * This lock protects updates to the following mapping and reference-count
++ * arrays. The lock does not need to be acquired to read the mapping tables.
++ */
++static DEFINE_SPINLOCK(irq_mapping_update_lock);
++
++/* IRQ <-> event-channel mappings. */
++static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
++ [0 ... NR_EVENT_CHANNELS-1] = -1 };
++
++/* Packed IRQ information: binding type, sub-type index, and event channel. */
++static u32 irq_info[NR_IRQS];
++
++/* Binding types. */
++enum {
++ IRQT_UNBOUND,
++ IRQT_PIRQ,
++ IRQT_VIRQ,
++ IRQT_IPI,
++ IRQT_LOCAL_PORT,
++ IRQT_CALLER_PORT
++};
++
++/* Constructor for packed IRQ information. */
++static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
++{
++ return ((type << 24) | (index << 16) | evtchn);
++}
++
++/* Convenient shorthand for packed representation of an unbound IRQ. */
++#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
++
++/*
++ * Accessors for packed IRQ information.
++ */
++
++static inline unsigned int evtchn_from_irq(int irq)
++{
++ return (u16)(irq_info[irq]);
++}
++
++static inline unsigned int index_from_irq(int irq)
++{
++ return (u8)(irq_info[irq] >> 16);
++}
++
++static inline unsigned int type_from_irq(int irq)
++{
++ return (u8)(irq_info[irq] >> 24);
++}
++
++/* IRQ <-> VIRQ mapping. */
++DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
++
++/* IRQ <-> IPI mapping. */
++#ifndef NR_IPIS
++#define NR_IPIS 1
++#endif
++DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};
++
++/* Reference counts for bindings to IRQs. */
++static int irq_bindcount[NR_IRQS];
++
++/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
++static DECLARE_BITMAP(pirq_needs_eoi, NR_PIRQS);
++
++#ifdef CONFIG_SMP
++
++static u8 cpu_evtchn[NR_EVENT_CHANNELS];
++static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
++
++static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
++ unsigned int idx)
++{
++ return (sh->evtchn_pending[idx] &
++ cpu_evtchn_mask[cpu][idx] &
++ ~sh->evtchn_mask[idx]);
++}
++
++static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
++{
++ int irq = evtchn_to_irq[chn];
++
++ BUG_ON(irq == -1);
++ set_native_irq_info(irq, cpumask_of_cpu(cpu));
++
++ clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
++ set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
++ cpu_evtchn[chn] = cpu;
++}
++
++static void init_evtchn_cpu_bindings(void)
++{
++ int i;
++
++ /* By default all event channels notify CPU#0. */
++ for (i = 0; i < NR_IRQS; i++)
++ set_native_irq_info(i, cpumask_of_cpu(0));
++
++ memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
++ memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
++}
++
++static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
++{
++ return cpu_evtchn[evtchn];
++}
++
++#else
++
++static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
++ unsigned int idx)
++{
++ return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
++}
++
++static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
++{
++}
++
++static void init_evtchn_cpu_bindings(void)
++{
++}
++
++static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
++{
++ return 0;
++}
++
++#endif
++
++/* Upcall to generic IRQ layer. */
++#ifdef CONFIG_X86
++extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
++void __init xen_init_IRQ(void);
++void __init init_IRQ(void)
++{
++ irq_ctx_init(0);
++ xen_init_IRQ();
++}
++#if defined (__i386__)
++static inline void exit_idle(void) {}
++#define IRQ_REG orig_eax
++#elif defined (__x86_64__)
++#include <asm/idle.h>
++#define IRQ_REG orig_rax
++#endif
++#define do_IRQ(irq, regs) do { \
++ (regs)->IRQ_REG = ~(irq); \
++ do_IRQ((regs)); \
++} while (0)
++#endif
++
++/* Xen will never allocate port zero for any purpose. */
++#define VALID_EVTCHN(chn) ((chn) != 0)
++
++/*
++ * Force a proper event-channel callback from Xen after clearing the
++ * callback mask. We do this in a very simple manner, by making a call
++ * down into Xen. The pending flag will be checked by Xen on return.
++ */
++void force_evtchn_callback(void)
++{
++ (void)HYPERVISOR_xen_version(0, NULL);
++}
++/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
++EXPORT_SYMBOL(force_evtchn_callback);
++
++static DEFINE_PER_CPU(unsigned int, upcall_count) = { 0 };
++
++/* NB. Interrupts are disabled on entry. */
++asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
++{
++ unsigned long l1, l2;
++ unsigned int l1i, l2i, port, count;
++ int irq, cpu = smp_processor_id();
++ shared_info_t *s = HYPERVISOR_shared_info;
++ vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
++
++ do {
++ /* Avoid a callback storm when we reenable delivery. */
++ vcpu_info->evtchn_upcall_pending = 0;
++
++ /* Nested invocations bail immediately. */
++ if (unlikely(per_cpu(upcall_count, cpu)++))
++ return;
++
++#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
++ /* Clear master flag /before/ clearing selector flag. */
++ rmb();
++#endif
++ l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
++ while (l1 != 0) {
++ l1i = __ffs(l1);
++ l1 &= ~(1UL << l1i);
++
++ while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
++ l2i = __ffs(l2);
++
++ port = (l1i * BITS_PER_LONG) + l2i;
++ if ((irq = evtchn_to_irq[port]) != -1)
++ do_IRQ(irq, regs);
++ else {
++ exit_idle();
++ evtchn_device_upcall(port);
++ }
++ }
++ }
++
++ /* If there were nested callbacks then we have more to do. */
++ count = per_cpu(upcall_count, cpu);
++ per_cpu(upcall_count, cpu) = 0;
++ } while (unlikely(count != 1));
++}
++
++static int find_unbound_irq(void)
++{
++ static int warned;
++ int dynirq, irq;
++
++ for (dynirq = 0; dynirq < NR_DYNIRQS; dynirq++) {
++ irq = dynirq_to_irq(dynirq);
++ if (irq_bindcount[irq] == 0)
++ return irq;
++ }
++
++ if (!warned) {
++ warned = 1;
++ printk(KERN_WARNING "No available IRQ to bind to: "
++ "increase NR_DYNIRQS.\n");
++ }
++
++ return -ENOSPC;
++}
++
++static int bind_caller_port_to_irq(unsigned int caller_port)
++{
++ int irq;
++
++ spin_lock(&irq_mapping_update_lock);
++
++ if ((irq = evtchn_to_irq[caller_port]) == -1) {
++ if ((irq = find_unbound_irq()) < 0)
++ goto out;
++
++ evtchn_to_irq[caller_port] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
++ }
++
++ irq_bindcount[irq]++;
++
++ out:
++ spin_unlock(&irq_mapping_update_lock);
++ return irq;
++}
++
++static int bind_local_port_to_irq(unsigned int local_port)
++{
++ int irq;
++
++ spin_lock(&irq_mapping_update_lock);
++
++ BUG_ON(evtchn_to_irq[local_port] != -1);
++
++ if ((irq = find_unbound_irq()) < 0) {
++ struct evtchn_close close = { .port = local_port };
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
++ BUG();
++ goto out;
++ }
++
++ evtchn_to_irq[local_port] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
++ irq_bindcount[irq]++;
++
++ out:
++ spin_unlock(&irq_mapping_update_lock);
++ return irq;
++}
++
++static int bind_listening_port_to_irq(unsigned int remote_domain)
++{
++ struct evtchn_alloc_unbound alloc_unbound;
++ int err;
++
++ alloc_unbound.dom = DOMID_SELF;
++ alloc_unbound.remote_dom = remote_domain;
++
++ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
++ &alloc_unbound);
++
++ return err ? : bind_local_port_to_irq(alloc_unbound.port);
++}
++
++static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
++ unsigned int remote_port)
++{
++ struct evtchn_bind_interdomain bind_interdomain;
++ int err;
++
++ bind_interdomain.remote_dom = remote_domain;
++ bind_interdomain.remote_port = remote_port;
++
++ err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
++ &bind_interdomain);
++
++ return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
++}
++
++static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
++{
++ struct evtchn_bind_virq bind_virq;
++ int evtchn, irq;
++
++ spin_lock(&irq_mapping_update_lock);
++
++ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
++ if ((irq = find_unbound_irq()) < 0)
++ goto out;
++
++ bind_virq.virq = virq;
++ bind_virq.vcpu = cpu;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
++ &bind_virq) != 0)
++ BUG();
++ evtchn = bind_virq.port;
++
++ evtchn_to_irq[evtchn] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
++
++ per_cpu(virq_to_irq, cpu)[virq] = irq;
++
++ bind_evtchn_to_cpu(evtchn, cpu);
++ }
++
++ irq_bindcount[irq]++;
++
++ out:
++ spin_unlock(&irq_mapping_update_lock);
++ return irq;
++}
++
++static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
++{
++ struct evtchn_bind_ipi bind_ipi;
++ int evtchn, irq;
++
++ spin_lock(&irq_mapping_update_lock);
++
++ if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
++ if ((irq = find_unbound_irq()) < 0)
++ goto out;
++
++ bind_ipi.vcpu = cpu;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
++ &bind_ipi) != 0)
++ BUG();
++ evtchn = bind_ipi.port;
++
++ evtchn_to_irq[evtchn] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
++
++ per_cpu(ipi_to_irq, cpu)[ipi] = irq;
++
++ bind_evtchn_to_cpu(evtchn, cpu);
++ }
++
++ irq_bindcount[irq]++;
++
++ out:
++ spin_unlock(&irq_mapping_update_lock);
++ return irq;
++}
++
++static void unbind_from_irq(unsigned int irq)
++{
++ struct evtchn_close close;
++ int cpu, evtchn = evtchn_from_irq(irq);
++
++ spin_lock(&irq_mapping_update_lock);
++
++ if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
++ close.port = evtchn;
++ if ((type_from_irq(irq) != IRQT_CALLER_PORT) &&
++ HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
++ BUG();
++
++ switch (type_from_irq(irq)) {
++ case IRQT_VIRQ:
++ per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
++ [index_from_irq(irq)] = -1;
++ break;
++ case IRQT_IPI:
++ per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
++ [index_from_irq(irq)] = -1;
++ break;
++ default:
++ break;
++ }
++
++ /* Closed ports are implicitly re-bound to VCPU0. */
++ bind_evtchn_to_cpu(evtchn, 0);
++
++ evtchn_to_irq[evtchn] = -1;
++ irq_info[irq] = IRQ_UNBOUND;
++
++ /* Zap stats across IRQ changes of use. */
++ for_each_possible_cpu(cpu)
++ kstat_cpu(cpu).irqs[irq] = 0;
++ }
++
++ spin_unlock(&irq_mapping_update_lock);
++}
++
++int bind_caller_port_to_irqhandler(
++ unsigned int caller_port,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id)
++{
++ int irq, retval;
++
++ irq = bind_caller_port_to_irq(caller_port);
++ if (irq < 0)
++ return irq;
++
++ retval = request_irq(irq, handler, irqflags, devname, dev_id);
++ if (retval != 0) {
++ unbind_from_irq(irq);
++ return retval;
++ }
++
++ return irq;
++}
++EXPORT_SYMBOL_GPL(bind_caller_port_to_irqhandler);
++
++int bind_listening_port_to_irqhandler(
++ unsigned int remote_domain,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id)
++{
++ int irq, retval;
++
++ irq = bind_listening_port_to_irq(remote_domain);
++ if (irq < 0)
++ return irq;
++
++ retval = request_irq(irq, handler, irqflags, devname, dev_id);
++ if (retval != 0) {
++ unbind_from_irq(irq);
++ return retval;
++ }
++
++ return irq;
++}
++EXPORT_SYMBOL_GPL(bind_listening_port_to_irqhandler);
++
++int bind_interdomain_evtchn_to_irqhandler(
++ unsigned int remote_domain,
++ unsigned int remote_port,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id)
++{
++ int irq, retval;
++
++ irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
++ if (irq < 0)
++ return irq;
++
++ retval = request_irq(irq, handler, irqflags, devname, dev_id);
++ if (retval != 0) {
++ unbind_from_irq(irq);
++ return retval;
++ }
++
++ return irq;
++}
++EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
++
++int bind_virq_to_irqhandler(
++ unsigned int virq,
++ unsigned int cpu,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id)
++{
++ int irq, retval;
++
++ irq = bind_virq_to_irq(virq, cpu);
++ if (irq < 0)
++ return irq;
++
++ retval = request_irq(irq, handler, irqflags, devname, dev_id);
++ if (retval != 0) {
++ unbind_from_irq(irq);
++ return retval;
++ }
++
++ return irq;
++}
++EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
++
++int bind_ipi_to_irqhandler(
++ unsigned int ipi,
++ unsigned int cpu,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id)
++{
++ int irq, retval;
++
++ irq = bind_ipi_to_irq(ipi, cpu);
++ if (irq < 0)
++ return irq;
++
++ retval = request_irq(irq, handler, irqflags, devname, dev_id);
++ if (retval != 0) {
++ unbind_from_irq(irq);
++ return retval;
++ }
++
++ return irq;
++}
++EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);
++
++void unbind_from_irqhandler(unsigned int irq, void *dev_id)
++{
++ free_irq(irq, dev_id);
++ unbind_from_irq(irq);
++}
++EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
++
++#ifdef CONFIG_SMP
++/* Rebind an evtchn so that it gets delivered to a specific cpu */
++static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
++{
++ struct evtchn_bind_vcpu bind_vcpu;
++ int evtchn = evtchn_from_irq(irq);
++
++ if (!VALID_EVTCHN(evtchn))
++ return;
++
++ /* Send future instances of this interrupt to other vcpu. */
++ bind_vcpu.port = evtchn;
++ bind_vcpu.vcpu = tcpu;
++
++ /*
++ * If this fails, it usually just indicates that we're dealing with a
++ * virq or IPI channel, which don't actually need to be rebound. Ignore
++ * it, but don't do the xenlinux-level rebind in that case.
++ */
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
++ bind_evtchn_to_cpu(evtchn, tcpu);
++}
++
++static void set_affinity_irq(unsigned irq, cpumask_t dest)
++{
++ unsigned tcpu = first_cpu(dest);
++ rebind_irq_to_cpu(irq, tcpu);
++}
++#endif
++
++int resend_irq_on_evtchn(unsigned int irq)
++{
++ int masked, evtchn = evtchn_from_irq(irq);
++ shared_info_t *s = HYPERVISOR_shared_info;
++
++ if (!VALID_EVTCHN(evtchn))
++ return 1;
++
++ masked = synch_test_and_set_bit(evtchn, s->evtchn_mask);
++ synch_set_bit(evtchn, s->evtchn_pending);
++ if (!masked)
++ unmask_evtchn(evtchn);
++
++ return 1;
++}
++
++/*
++ * Interface to generic handling in irq.c
++ */
++
++static unsigned int startup_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ unmask_evtchn(evtchn);
++ return 0;
++}
++
++static void shutdown_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ mask_evtchn(evtchn);
++}
++
++static void enable_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ unmask_evtchn(evtchn);
++}
++
++static void disable_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ mask_evtchn(evtchn);
++}
++
++static void ack_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ move_native_irq(irq);
++
++ if (VALID_EVTCHN(evtchn)) {
++ mask_evtchn(evtchn);
++ clear_evtchn(evtchn);
++ }
++}
++
++static void end_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
++ unmask_evtchn(evtchn);
++}
++
++static struct hw_interrupt_type dynirq_type = {
++ .typename = "Dynamic-irq",
++ .startup = startup_dynirq,
++ .shutdown = shutdown_dynirq,
++ .enable = enable_dynirq,
++ .disable = disable_dynirq,
++ .ack = ack_dynirq,
++ .end = end_dynirq,
++#ifdef CONFIG_SMP
++ .set_affinity = set_affinity_irq,
++#endif
++ .retrigger = resend_irq_on_evtchn,
++};
++
++static inline void pirq_unmask_notify(int pirq)
++{
++ struct physdev_eoi eoi = { .irq = pirq };
++ if (unlikely(test_bit(pirq, pirq_needs_eoi)))
++ (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
++}
++
++static inline void pirq_query_unmask(int pirq)
++{
++ struct physdev_irq_status_query irq_status;
++ irq_status.irq = pirq;
++ (void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
++ clear_bit(pirq, pirq_needs_eoi);
++ if (irq_status.flags & XENIRQSTAT_needs_eoi)
++ set_bit(pirq, pirq_needs_eoi);
++}
++
++/*
++ * On startup, if there is no action associated with the IRQ then we are
++ * probing. In this case we should not share with others as it will confuse us.
++ */
++#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
++
++static unsigned int startup_pirq(unsigned int irq)
++{
++ struct evtchn_bind_pirq bind_pirq;
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ goto out;
++
++ bind_pirq.pirq = irq;
++ /* NB. We are happy to share unless we are probing. */
++ bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
++ if (!probing_irq(irq))
++ printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
++ irq);
++ return 0;
++ }
++ evtchn = bind_pirq.port;
++
++ pirq_query_unmask(irq_to_pirq(irq));
++
++ evtchn_to_irq[evtchn] = irq;
++ bind_evtchn_to_cpu(evtchn, 0);
++ irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
++
++ out:
++ unmask_evtchn(evtchn);
++ pirq_unmask_notify(irq_to_pirq(irq));
++
++ return 0;
++}
++
++static void shutdown_pirq(unsigned int irq)
++{
++ struct evtchn_close close;
++ int evtchn = evtchn_from_irq(irq);
++
++ if (!VALID_EVTCHN(evtchn))
++ return;
++
++ mask_evtchn(evtchn);
++
++ close.port = evtchn;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
++ BUG();
++
++ bind_evtchn_to_cpu(evtchn, 0);
++ evtchn_to_irq[evtchn] = -1;
++ irq_info[irq] = IRQ_UNBOUND;
++}
++
++static void enable_pirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn)) {
++ unmask_evtchn(evtchn);
++ pirq_unmask_notify(irq_to_pirq(irq));
++ }
++}
++
++static void disable_pirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ mask_evtchn(evtchn);
++}
++
++static void ack_pirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ move_native_irq(irq);
++
++ if (VALID_EVTCHN(evtchn)) {
++ mask_evtchn(evtchn);
++ clear_evtchn(evtchn);
++ }
++}
++
++static void end_pirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
++ unmask_evtchn(evtchn);
++ pirq_unmask_notify(irq_to_pirq(irq));
++ }
++}
++
++static struct hw_interrupt_type pirq_type = {
++ .typename = "Phys-irq",
++ .startup = startup_pirq,
++ .shutdown = shutdown_pirq,
++ .enable = enable_pirq,
++ .disable = disable_pirq,
++ .ack = ack_pirq,
++ .end = end_pirq,
++#ifdef CONFIG_SMP
++ .set_affinity = set_affinity_irq,
++#endif
++ .retrigger = resend_irq_on_evtchn,
++};
++
++int irq_ignore_unhandled(unsigned int irq)
++{
++ struct physdev_irq_status_query irq_status = { .irq = irq };
++
++ if (!is_running_on_xen())
++ return 0;
++
++ (void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
++ return !!(irq_status.flags & XENIRQSTAT_shared);
++}
++
++void notify_remote_via_irq(int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ notify_remote_via_evtchn(evtchn);
++}
++EXPORT_SYMBOL_GPL(notify_remote_via_irq);
++
++int irq_to_evtchn_port(int irq)
++{
++ return evtchn_from_irq(irq);
++}
++EXPORT_SYMBOL_GPL(irq_to_evtchn_port);
++
++void mask_evtchn(int port)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ synch_set_bit(port, s->evtchn_mask);
++}
++EXPORT_SYMBOL_GPL(mask_evtchn);
++
++void unmask_evtchn(int port)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ unsigned int cpu = smp_processor_id();
++ vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
++
++ BUG_ON(!irqs_disabled());
++
++ /* Slow path (hypercall) if this is a non-local port. */
++ if (unlikely(cpu != cpu_from_evtchn(port))) {
++ struct evtchn_unmask unmask = { .port = port };
++ (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
++ return;
++ }
++
++ synch_clear_bit(port, s->evtchn_mask);
++
++ /* Did we miss an interrupt 'edge'? Re-fire if so. */
++ if (synch_test_bit(port, s->evtchn_pending) &&
++ !synch_test_and_set_bit(port / BITS_PER_LONG,
++ &vcpu_info->evtchn_pending_sel))
++ vcpu_info->evtchn_upcall_pending = 1;
++}
++EXPORT_SYMBOL_GPL(unmask_evtchn);
++
++static void restore_cpu_virqs(int cpu)
++{
++ struct evtchn_bind_virq bind_virq;
++ int virq, irq, evtchn;
++
++ for (virq = 0; virq < NR_VIRQS; virq++) {
++ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
++ continue;
++
++ BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
++
++ /* Get a new binding from Xen. */
++ bind_virq.virq = virq;
++ bind_virq.vcpu = cpu;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
++ &bind_virq) != 0)
++ BUG();
++ evtchn = bind_virq.port;
++
++ /* Record the new mapping. */
++ evtchn_to_irq[evtchn] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
++ bind_evtchn_to_cpu(evtchn, cpu);
++
++ /* Ready for use. */
++ unmask_evtchn(evtchn);
++ }
++}
++
++static void restore_cpu_ipis(int cpu)
++{
++ struct evtchn_bind_ipi bind_ipi;
++ int ipi, irq, evtchn;
++
++ for (ipi = 0; ipi < NR_IPIS; ipi++) {
++ if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
++ continue;
++
++ BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
++
++ /* Get a new binding from Xen. */
++ bind_ipi.vcpu = cpu;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
++ &bind_ipi) != 0)
++ BUG();
++ evtchn = bind_ipi.port;
++
++ /* Record the new mapping. */
++ evtchn_to_irq[evtchn] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
++ bind_evtchn_to_cpu(evtchn, cpu);
++
++ /* Ready for use. */
++ unmask_evtchn(evtchn);
++
++ }
++}
++
++void irq_resume(void)
++{
++ int cpu, pirq, irq, evtchn;
++
++ init_evtchn_cpu_bindings();
++
++ /* New event-channel space is not 'live' yet. */
++ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
++ mask_evtchn(evtchn);
++
++ /* Check that no PIRQs are still bound. */
++ for (pirq = 0; pirq < NR_PIRQS; pirq++)
++ BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);
++
++ /* No IRQ <-> event-channel mappings. */
++ for (irq = 0; irq < NR_IRQS; irq++)
++ irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
++ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
++ evtchn_to_irq[evtchn] = -1;
++
++ for_each_possible_cpu(cpu) {
++ restore_cpu_virqs(cpu);
++ restore_cpu_ipis(cpu);
++ }
++
++}
++
++void __init xen_init_IRQ(void)
++{
++ int i;
++
++ init_evtchn_cpu_bindings();
++
++ /* No event channels are 'live' right now. */
++ for (i = 0; i < NR_EVENT_CHANNELS; i++)
++ mask_evtchn(i);
++
++ /* No IRQ -> event-channel mappings. */
++ for (i = 0; i < NR_IRQS; i++)
++ irq_info[i] = IRQ_UNBOUND;
++
++ /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
++ for (i = 0; i < NR_DYNIRQS; i++) {
++ irq_bindcount[dynirq_to_irq(i)] = 0;
++
++ irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
++ irq_desc[dynirq_to_irq(i)].action = NULL;
++ irq_desc[dynirq_to_irq(i)].depth = 1;
++ irq_desc[dynirq_to_irq(i)].chip = &dynirq_type;
++ }
++
++ /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
++ for (i = 0; i < NR_PIRQS; i++) {
++ irq_bindcount[pirq_to_irq(i)] = 1;
++
++#ifdef RTC_IRQ
++ /* If not domain 0, force our RTC driver to fail its probe. */
++ if ((i == RTC_IRQ) && !is_initial_xendomain())
++ continue;
++#endif
++
++ irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
++ irq_desc[pirq_to_irq(i)].action = NULL;
++ irq_desc[pirq_to_irq(i)].depth = 1;
++ irq_desc[pirq_to_irq(i)].chip = &pirq_type;
++ }
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/core/features.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,34 @@
++/******************************************************************************
++ * features.c
++ *
++ * Xen feature flags.
++ *
++ * Copyright (c) 2006, Ian Campbell, XenSource Inc.
++ */
++#include <linux/types.h>
++#include <linux/cache.h>
++#include <linux/module.h>
++#include <asm/hypervisor.h>
++#include <xen/features.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly;
++/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
++EXPORT_SYMBOL(xen_features);
++
++void setup_xen_features(void)
++{
++ xen_feature_info_t fi;
++ int i, j;
++
++ for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) {
++ fi.submap_idx = i;
++ if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
++ break;
++ for (j=0; j<32; j++)
++ xen_features[i*32+j] = !!(fi.submap & 1<<j);
++ }
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/core/gnttab.c 2007-08-27 14:02:10.000000000 -0400
+@@ -0,0 +1,631 @@
++/******************************************************************************
++ * gnttab.c
++ *
++ * Granting foreign access to our memory reservation.
++ *
++ * Copyright (c) 2005-2006, Christopher Clark
++ * Copyright (c) 2004-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <xen/interface/xen.h>
++#include <xen/gnttab.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++#include <asm/synch_bitops.h>
++#include <asm/io.h>
++#include <xen/interface/memory.h>
++#include <xen/driver_util.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++/* External tools reserve first few grant table entries. */
++#define NR_RESERVED_ENTRIES 8
++#define GNTTAB_LIST_END 0xffffffff
++#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t))
++
++static grant_ref_t **gnttab_list;
++static unsigned int nr_grant_frames;
++static unsigned int boot_max_nr_grant_frames;
++static int gnttab_free_count;
++static grant_ref_t gnttab_free_head;
++static DEFINE_SPINLOCK(gnttab_list_lock);
++
++static struct grant_entry *shared;
++
++static struct gnttab_free_callback *gnttab_free_callback_list;
++
++static int gnttab_expand(unsigned int req_entries);
++
++#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
++#define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP])
++
++static int get_free_entries(int count)
++{
++ unsigned long flags;
++ int ref, rc;
++ grant_ref_t head;
++
++ spin_lock_irqsave(&gnttab_list_lock, flags);
++
++ if ((gnttab_free_count < count) &&
++ ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++ return rc;
++ }
++
++ ref = head = gnttab_free_head;
++ gnttab_free_count -= count;
++ while (count-- > 1)
++ head = gnttab_entry(head);
++ gnttab_free_head = gnttab_entry(head);
++ gnttab_entry(head) = GNTTAB_LIST_END;
++
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++
++ return ref;
++}
++
++#define get_free_entry() get_free_entries(1)
++
++static void do_free_callbacks(void)
++{
++ struct gnttab_free_callback *callback, *next;
++
++ callback = gnttab_free_callback_list;
++ gnttab_free_callback_list = NULL;
++
++ while (callback != NULL) {
++ next = callback->next;
++ if (gnttab_free_count >= callback->count) {
++ callback->next = NULL;
++ callback->fn(callback->arg);
++ } else {
++ callback->next = gnttab_free_callback_list;
++ gnttab_free_callback_list = callback;
++ }
++ callback = next;
++ }
++}
++
++static inline void check_free_callbacks(void)
++{
++ if (unlikely(gnttab_free_callback_list))
++ do_free_callbacks();
++}
++
++static void put_free_entry(grant_ref_t ref)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&gnttab_list_lock, flags);
++ gnttab_entry(ref) = gnttab_free_head;
++ gnttab_free_head = ref;
++ gnttab_free_count++;
++ check_free_callbacks();
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++
++/*
++ * Public grant-issuing interface functions
++ */
++
++int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
++ int readonly)
++{
++ int ref;
++
++ if (unlikely((ref = get_free_entry()) < 0))
++ return -ENOSPC;
++
++ shared[ref].frame = frame;
++ shared[ref].domid = domid;
++ wmb();
++ shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
++
++ return ref;
++}
++EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
++
++void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
++ unsigned long frame, int readonly)
++{
++ shared[ref].frame = frame;
++ shared[ref].domid = domid;
++ wmb();
++ shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
++}
++EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
++
++
++int gnttab_query_foreign_access(grant_ref_t ref)
++{
++ u16 nflags;
++
++ nflags = shared[ref].flags;
++
++ return (nflags & (GTF_reading|GTF_writing));
++}
++EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
++
++int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
++{
++ u16 flags, nflags;
++
++ nflags = shared[ref].flags;
++ do {
++ if ((flags = nflags) & (GTF_reading|GTF_writing)) {
++ printk(KERN_ALERT "WARNING: g.e. still in use!\n");
++ return 0;
++ }
++ } while ((nflags = synch_cmpxchg_subword(&shared[ref].flags, flags, 0)) !=
++ flags);
++
++ return 1;
++}
++EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
++
++void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
++ unsigned long page)
++{
++ if (gnttab_end_foreign_access_ref(ref, readonly)) {
++ put_free_entry(ref);
++ if (page != 0)
++ free_page(page);
++ } else {
++ /* XXX This needs to be fixed so that the ref and page are
++ placed on a list to be freed up later. */
++ printk(KERN_WARNING
++ "WARNING: leaking g.e. and page still in use!\n");
++ }
++}
++EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
++
++int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
++{
++ int ref;
++
++ if (unlikely((ref = get_free_entry()) < 0))
++ return -ENOSPC;
++ gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
++
++ return ref;
++}
++EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
++
++void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
++ unsigned long pfn)
++{
++ shared[ref].frame = pfn;
++ shared[ref].domid = domid;
++ wmb();
++ shared[ref].flags = GTF_accept_transfer;
++}
++EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
++
++unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
++{
++ unsigned long frame;
++ u16 flags;
++
++ /*
++ * If a transfer is not even yet started, try to reclaim the grant
++ * reference and return failure (== 0).
++ */
++ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
++ if (synch_cmpxchg_subword(&shared[ref].flags, flags, 0) == flags)
++ return 0;
++ cpu_relax();
++ }
++
++ /* If a transfer is in progress then wait until it is completed. */
++ while (!(flags & GTF_transfer_completed)) {
++ flags = shared[ref].flags;
++ cpu_relax();
++ }
++
++ /* Read the frame number /after/ reading completion status. */
++ rmb();
++ frame = shared[ref].frame;
++ BUG_ON(frame == 0);
++
++ return frame;
++}
++EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
++
++unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
++{
++ unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
++ put_free_entry(ref);
++ return frame;
++}
++EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
++
++void gnttab_free_grant_reference(grant_ref_t ref)
++{
++ put_free_entry(ref);
++}
++EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
++
++void gnttab_free_grant_references(grant_ref_t head)
++{
++ grant_ref_t ref;
++ unsigned long flags;
++ int count = 1;
++ if (head == GNTTAB_LIST_END)
++ return;
++ spin_lock_irqsave(&gnttab_list_lock, flags);
++ ref = head;
++ while (gnttab_entry(ref) != GNTTAB_LIST_END) {
++ ref = gnttab_entry(ref);
++ count++;
++ }
++ gnttab_entry(ref) = gnttab_free_head;
++ gnttab_free_head = head;
++ gnttab_free_count += count;
++ check_free_callbacks();
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
++
++int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
++{
++ int h = get_free_entries(count);
++
++ if (h < 0)
++ return -ENOSPC;
++
++ *head = h;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
++
++int gnttab_empty_grant_references(const grant_ref_t *private_head)
++{
++ return (*private_head == GNTTAB_LIST_END);
++}
++EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
++
++int gnttab_claim_grant_reference(grant_ref_t *private_head)
++{
++ grant_ref_t g = *private_head;
++ if (unlikely(g == GNTTAB_LIST_END))
++ return -ENOSPC;
++ *private_head = gnttab_entry(g);
++ return g;
++}
++EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
++
++void gnttab_release_grant_reference(grant_ref_t *private_head,
++ grant_ref_t release)
++{
++ gnttab_entry(release) = *private_head;
++ *private_head = release;
++}
++EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
++
++void gnttab_request_free_callback(struct gnttab_free_callback *callback,
++ void (*fn)(void *), void *arg, u16 count)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&gnttab_list_lock, flags);
++ if (callback->next)
++ goto out;
++ callback->fn = fn;
++ callback->arg = arg;
++ callback->count = count;
++ callback->next = gnttab_free_callback_list;
++ gnttab_free_callback_list = callback;
++ check_free_callbacks();
++out:
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
++
++void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
++{
++ struct gnttab_free_callback **pcb;
++ unsigned long flags;
++
++ spin_lock_irqsave(&gnttab_list_lock, flags);
++ for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
++ if (*pcb == callback) {
++ *pcb = callback->next;
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
++
++static int grow_gnttab_list(unsigned int more_frames)
++{
++ unsigned int new_nr_grant_frames, extra_entries, i;
++
++ new_nr_grant_frames = nr_grant_frames + more_frames;
++ extra_entries = more_frames * GREFS_PER_GRANT_FRAME;
++
++ for (i = nr_grant_frames; i < new_nr_grant_frames; i++)
++ {
++ gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
++ if (!gnttab_list[i])
++ goto grow_nomem;
++ }
++
++
++ for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
++ i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
++ gnttab_entry(i) = i + 1;
++
++ gnttab_entry(i) = gnttab_free_head;
++ gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
++ gnttab_free_count += extra_entries;
++
++ nr_grant_frames = new_nr_grant_frames;
++
++ check_free_callbacks();
++
++ return 0;
++
++grow_nomem:
++ for ( ; i >= nr_grant_frames; i--)
++ free_page((unsigned long) gnttab_list[i]);
++ return -ENOMEM;
++}
++
++static unsigned int __max_nr_grant_frames(void)
++{
++ struct gnttab_query_size query;
++ int rc;
++
++ query.dom = DOMID_SELF;
++
++ rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
++ if ((rc < 0) || (query.status != GNTST_okay))
++ return 4; /* Legacy max supported number of frames */
++
++ return query.max_nr_frames;
++}
++
++static inline unsigned int max_nr_grant_frames(void)
++{
++ unsigned int xen_max = __max_nr_grant_frames();
++
++ if (xen_max > boot_max_nr_grant_frames)
++ return boot_max_nr_grant_frames;
++ return xen_max;
++}
++
++#ifdef CONFIG_XEN
++
++#ifndef __ia64__
++static int map_pte_fn(pte_t *pte, struct page *pmd_page,
++ unsigned long addr, void *data)
++{
++ unsigned long **frames = (unsigned long **)data;
++
++ set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL));
++ (*frames)++;
++ return 0;
++}
++
++static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
++ unsigned long addr, void *data)
++{
++
++ set_pte_at(&init_mm, addr, pte, __pte(0));
++ return 0;
++}
++#endif
++
++static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
++{
++ struct gnttab_setup_table setup;
++ unsigned long *frames;
++ unsigned int nr_gframes = end_idx + 1;
++ int rc;
++
++ frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
++ if (!frames)
++ return -ENOMEM;
++
++ setup.dom = DOMID_SELF;
++ setup.nr_frames = nr_gframes;
++ set_xen_guest_handle(setup.frame_list, frames);
++
++ rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
++ if (rc == -ENOSYS) {
++ kfree(frames);
++ return -ENOSYS;
++ }
++
++ BUG_ON(rc || setup.status);
++
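++ /*
++ * Map the machine frames returned by the hypervisor: on x86 they are
++ * mapped into a vmalloc area via apply_to_page_range(), while on ia64
++ * the first frame is simply addressed through the direct mapping.
++ */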
++#ifndef __ia64__
++ if (shared == NULL) {
++ struct vm_struct *area;
++ area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames());
++ BUG_ON(area == NULL);
++ shared = area->addr;
++ }
++ rc = apply_to_page_range(&init_mm, (unsigned long)shared,
++ PAGE_SIZE * nr_gframes,
++ map_pte_fn, &frames);
++ BUG_ON(rc);
++ frames -= nr_gframes; /* adjust after map_pte_fn() */
++#else
++ shared = __va(frames[0] << PAGE_SHIFT);
++#endif
++
++ kfree(frames);
++
++ return 0;
++}
++
++int gnttab_resume(void)
++{
++ if (max_nr_grant_frames() < nr_grant_frames)
++ return -ENOSYS;
++ return gnttab_map(0, nr_grant_frames - 1);
++}
++
++int gnttab_suspend(void)
++{
++#ifndef __ia64__
++ apply_to_page_range(&init_mm, (unsigned long)shared,
++ PAGE_SIZE * nr_grant_frames,
++ unmap_pte_fn, NULL);
++#endif
++ return 0;
++}
++
++#else /* !CONFIG_XEN */
++
++#include <platform-pci.h>
++
++static unsigned long resume_frames;
++
++static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
++{
++ struct xen_add_to_physmap xatp;
++ unsigned int i = end_idx;
++
++ /* Loop backwards, so that the first hypercall has the largest index,
++ * ensuring that the table will grow only once.
++ */
++ do {
++ xatp.domid = DOMID_SELF;
++ xatp.idx = i;
++ xatp.space = XENMAPSPACE_grant_table;
++ xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i;
++ if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
++ BUG();
++ } while (i-- > start_idx);
++
++ return 0;
++}
++
++int gnttab_resume(void)
++{
++ unsigned int max_nr_gframes, nr_gframes;
++
++ nr_gframes = nr_grant_frames;
++ max_nr_gframes = max_nr_grant_frames();
++ if (max_nr_gframes < nr_gframes)
++ return -ENOSYS;
++
++ if (!resume_frames) {
++ resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
++ shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes);
++ if (shared == NULL) {
++ printk("error to ioremap gnttab share frames\n");
++ return -1;
++ }
++ }
++
++ gnttab_map(0, nr_gframes - 1);
++
++ return 0;
++}
++
++#endif /* !CONFIG_XEN */
++
++static int gnttab_expand(unsigned int req_entries)
++{
++ int rc;
++ unsigned int cur, extra;
++
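++ /*
++ * Round the request up to whole grant-table frames, then grow both
++ * the mapping and the free list.
++ */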
++ cur = nr_grant_frames;
++ extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
++ GREFS_PER_GRANT_FRAME);
++ if (cur + extra > max_nr_grant_frames())
++ return -ENOSPC;
++
++ if ((rc = gnttab_map(cur, cur + extra - 1)) == 0)
++ rc = grow_gnttab_list(extra);
++
++ return rc;
++}
++
++int __devinit gnttab_init(void)
++{
++ int i;
++ unsigned int max_nr_glist_frames;
++ unsigned int nr_init_grefs;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ nr_grant_frames = 1;
++ boot_max_nr_grant_frames = __max_nr_grant_frames();
++
++ /* Determine the maximum number of frames required for the
++ * grant reference free list on the current hypervisor.
++ */
++ max_nr_glist_frames = (boot_max_nr_grant_frames *
++ GREFS_PER_GRANT_FRAME /
++ (PAGE_SIZE / sizeof(grant_ref_t)));
++
++ gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
++ GFP_KERNEL);
++ if (gnttab_list == NULL)
++ return -ENOMEM;
++
++ for (i = 0; i < nr_grant_frames; i++) {
++ gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
++ if (gnttab_list[i] == NULL)
++ goto ini_nomem;
++ }
++
++ if (gnttab_resume() < 0)
++ return -ENODEV;
++
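++ /* Build the initial free list, skipping the reserved entries. */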
++ nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
++
++ for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
++ gnttab_entry(i) = i + 1;
++
++ gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
++ gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
++ gnttab_free_head = NR_RESERVED_ENTRIES;
++
++ return 0;
++
++ ini_nomem:
++ for (i--; i >= 0; i--)
++ free_page((unsigned long)gnttab_list[i]);
++ kfree(gnttab_list);
++ return -ENOMEM;
++}
++
++#ifdef CONFIG_XEN
++core_initcall(gnttab_init);
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/core/hypervisor_sysfs.c 2007-08-27 14:02:04.000000000 -0400
+@@ -0,0 +1,59 @@
++/*
++ * copyright (c) 2006 IBM Corporation
++ * Authored by: Mike D. Day <ncmike@us.ibm.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/kobject.h>
++#include <xen/hypervisor_sysfs.h>
++
++decl_subsys(hypervisor, NULL, NULL);
++
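++/*
++ * Generic show/store methods: recover the hyp_sysfs_attr from the plain
++ * struct attribute and forward to the per-attribute handler, if any.
++ */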
++static ssize_t hyp_sysfs_show(struct kobject *kobj,
++ struct attribute *attr,
++ char *buffer)
++{
++ struct hyp_sysfs_attr *hyp_attr;
++ hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
++ if (hyp_attr->show)
++ return hyp_attr->show(hyp_attr, buffer);
++ return 0;
++}
++
++static ssize_t hyp_sysfs_store(struct kobject *kobj,
++ struct attribute *attr,
++ const char *buffer,
++ size_t len)
++{
++ struct hyp_sysfs_attr *hyp_attr;
++ hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
++ if (hyp_attr->store)
++ return hyp_attr->store(hyp_attr, buffer, len);
++ return 0;
++}
++
++struct sysfs_ops hyp_sysfs_ops = {
++ .show = hyp_sysfs_show,
++ .store = hyp_sysfs_store,
++};
++
++static struct kobj_type hyp_sysfs_kobj_type = {
++ .sysfs_ops = &hyp_sysfs_ops,
++};
++
++static int __init hypervisor_subsys_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ hypervisor_subsys.kset.kobj.ktype = &hyp_sysfs_kobj_type;
++ return subsystem_register(&hypervisor_subsys);
++}
++
++device_initcall(hypervisor_subsys_init);
++EXPORT_SYMBOL_GPL(hypervisor_subsys);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/core/machine_kexec.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,189 @@
++/*
++ * drivers/xen/core/machine_kexec.c
++ * handle transition of Linux booting another kernel
++ */
++
++#include <linux/kexec.h>
++#include <xen/interface/kexec.h>
++#include <linux/mm.h>
++#include <linux/bootmem.h>
++
++extern void machine_kexec_setup_load_arg(xen_kexec_image_t *xki,
++ struct kimage *image);
++
++int xen_max_nr_phys_cpus;
++struct resource xen_hypervisor_res;
++struct resource *xen_phys_cpus;
++
++void xen_machine_kexec_setup_resources(void)
++{
++ xen_kexec_range_t range;
++ struct resource *res;
++ int k = 0;
++
++ if (!is_initial_xendomain())
++ return;
++
++ /* determine maximum number of physical cpus */
++
++ while (1) {
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_CPU;
++ range.nr = k;
++
++ if(HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++ break;
++
++ k++;
++ }
++
++ if (k == 0)
++ return;
++
++ xen_max_nr_phys_cpus = k;
++
++ /* allocate xen_phys_cpus */
++
++ xen_phys_cpus = alloc_bootmem_low(k * sizeof(struct resource));
++ BUG_ON(xen_phys_cpus == NULL);
++
++ /* fill in xen_phys_cpus with per-cpu crash note information */
++
++ for (k = 0; k < xen_max_nr_phys_cpus; k++) {
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_CPU;
++ range.nr = k;
++
++ if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++ goto err;
++
++ res = xen_phys_cpus + k;
++
++ memset(res, 0, sizeof(*res));
++ res->name = "Crash note";
++ res->start = range.start;
++ res->end = range.start + range.size - 1;
++ res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
++ }
++
++ /* fill in xen_hypervisor_res with hypervisor machine address range */
++
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_XEN;
++
++ if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++ goto err;
++
++ xen_hypervisor_res.name = "Hypervisor code and data";
++ xen_hypervisor_res.start = range.start;
++ xen_hypervisor_res.end = range.start + range.size - 1;
++ xen_hypervisor_res.flags = IORESOURCE_BUSY | IORESOURCE_MEM;
++
++ /* fill in crashk_res if range is reserved by hypervisor */
++
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_CRASH;
++
++ if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++ return;
++
++ if (range.size) {
++ crashk_res.start = range.start;
++ crashk_res.end = range.start + range.size - 1;
++ }
++
++ return;
++
++ err:
++ /*
++ * It isn't possible to free xen_phys_cpus this early in the
++ * boot. Failure at this stage is unexpected and the amount of
++ * memory is small, therefore we tolerate the potential leak.
++ */
++ xen_max_nr_phys_cpus = 0;
++ return;
++}
++
++void xen_machine_kexec_register_resources(struct resource *res)
++{
++ int k;
++
++ request_resource(res, &xen_hypervisor_res);
++
++ for (k = 0; k < xen_max_nr_phys_cpus; k++)
++ request_resource(&xen_hypervisor_res, xen_phys_cpus + k);
++
++}
++
++static void setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
++{
++ machine_kexec_setup_load_arg(xki, image);
++
++ xki->indirection_page = image->head;
++ xki->start_address = image->start;
++}
++
++/*
++ * Load the image into xen so xen can kdump itself
++ * This might have been done in prepare, but prepare
++ * is currently called too early. It might make sense
++ * to move prepare, but for now, just add an extra hook.
++ */
++int xen_machine_kexec_load(struct kimage *image)
++{
++ xen_kexec_load_t xkl;
++
++ memset(&xkl, 0, sizeof(xkl));
++ xkl.type = image->type;
++ setup_load_arg(&xkl.image, image);
++ return HYPERVISOR_kexec_op(KEXEC_CMD_kexec_load, &xkl);
++}
++
++/*
++ * Unload the image that was stored by machine_kexec_load()
++ * This might have been done in machine_kexec_cleanup() but it
++ * is called too late, and it's possible Xen could try to kdump
++ * using resources that have been freed.
++ */
++void xen_machine_kexec_unload(struct kimage *image)
++{
++ xen_kexec_load_t xkl;
++
++ memset(&xkl, 0, sizeof(xkl));
++ xkl.type = image->type;
++ HYPERVISOR_kexec_op(KEXEC_CMD_kexec_unload, &xkl);
++}
++
++/*
++ * Do not allocate memory (or fail in any way) in machine_kexec().
++ * We are past the point of no return, committed to rebooting now.
++ *
++ * This has the hypervisor move to the preferred reboot CPU,
++ * stop all CPUs and kexec. That is, it combines machine_shutdown()
++ * and machine_kexec() in Linux kexec terms.
++ */
++NORET_TYPE void machine_kexec(struct kimage *image)
++{
++ xen_kexec_exec_t xke;
++
++ memset(&xke, 0, sizeof(xke));
++ xke.type = image->type;
++ HYPERVISOR_kexec_op(KEXEC_CMD_kexec, &xke);
++ panic("KEXEC_CMD_kexec hypercall should not return\n");
++}
++
++void machine_shutdown(void)
++{
++ /* do nothing */
++}
++
++
++/*
++ * Local variables:
++ * c-file-style: "linux"
++ * indent-tabs-mode: t
++ * c-indent-level: 8
++ * c-basic-offset: 8
++ * tab-width: 8
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/core/machine_reboot.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,241 @@
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/unistd.h>
++#include <linux/module.h>
++#include <linux/reboot.h>
++#include <linux/sysrq.h>
++#include <linux/stringify.h>
++#include <linux/stop_machine.h>
++#include <asm/irq.h>
++#include <asm/mmu_context.h>
++#include <xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <linux/cpu.h>
++#include <linux/kthread.h>
++#include <xen/gnttab.h>
++#include <xen/xencons.h>
++#include <xen/cpu_hotplug.h>
++#include <xen/interface/vcpu.h>
++
++#if defined(__i386__) || defined(__x86_64__)
++
++/*
++ * Power off function, if any
++ */
++void (*pm_power_off)(void);
++EXPORT_SYMBOL(pm_power_off);
++
++void machine_emergency_restart(void)
++{
++ /* We really want to get pending console data out before we die. */
++ xencons_force_flush();
++ HYPERVISOR_shutdown(SHUTDOWN_reboot);
++}
++
++void machine_restart(char * __unused)
++{
++ machine_emergency_restart();
++}
++
++void machine_halt(void)
++{
++ machine_power_off();
++}
++
++void machine_power_off(void)
++{
++ /* We really want to get pending console data out before we die. */
++ xencons_force_flush();
++ if (pm_power_off)
++ pm_power_off();
++ HYPERVISOR_shutdown(SHUTDOWN_poweroff);
++}
++
++int reboot_thru_bios = 0; /* for dmi_scan.c */
++EXPORT_SYMBOL(machine_restart);
++EXPORT_SYMBOL(machine_halt);
++EXPORT_SYMBOL(machine_power_off);
++
++static void pre_suspend(void)
++{
++ HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++ HYPERVISOR_update_va_mapping(fix_to_virt(FIX_SHARED_INFO),
++ __pte_ma(0), 0);
++
++ xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
++ xen_start_info->console.domU.mfn =
++ mfn_to_pfn(xen_start_info->console.domU.mfn);
++}
++
++static void post_suspend(int suspend_cancelled)
++{
++ int i, j, k, fpp;
++ unsigned long shinfo_mfn;
++ extern unsigned long max_pfn;
++ extern unsigned long *pfn_to_mfn_frame_list_list;
++ extern unsigned long *pfn_to_mfn_frame_list[];
++
++ if (suspend_cancelled) {
++ xen_start_info->store_mfn =
++ pfn_to_mfn(xen_start_info->store_mfn);
++ xen_start_info->console.domU.mfn =
++ pfn_to_mfn(xen_start_info->console.domU.mfn);
++ } else {
++#ifdef CONFIG_SMP
++ cpu_initialized_map = cpu_online_map;
++#endif
++ }
++
++ shinfo_mfn = xen_start_info->shared_info >> PAGE_SHIFT;
++ HYPERVISOR_update_va_mapping(fix_to_virt(FIX_SHARED_INFO),
++ pfn_pte_ma(shinfo_mfn, PAGE_KERNEL), 0);
++ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++
++ memset(empty_zero_page, 0, PAGE_SIZE);
++
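++ /*
++ * Rebuild the pfn-to-mfn frame lists and re-register them with the
++ * hypervisor via shared_info, so it can locate the guest's P2M table
++ * again after the suspend/resume cycle.
++ */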
++ fpp = PAGE_SIZE/sizeof(unsigned long);
++ for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
++ if ((j % fpp) == 0) {
++ k++;
++ pfn_to_mfn_frame_list_list[k] =
++ virt_to_mfn(pfn_to_mfn_frame_list[k]);
++ j = 0;
++ }
++ pfn_to_mfn_frame_list[k][j] =
++ virt_to_mfn(&phys_to_machine_mapping[i]);
++ }
++ HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
++ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++ virt_to_mfn(pfn_to_mfn_frame_list_list);
++}
++
++#else /* !(defined(__i386__) || defined(__x86_64__)) */
++
++#ifndef HAVE_XEN_PRE_SUSPEND
++#define xen_pre_suspend() ((void)0)
++#endif
++
++#ifndef HAVE_XEN_POST_SUSPEND
++#define xen_post_suspend(x) ((void)0)
++#endif
++
++#define switch_idle_mm() ((void)0)
++#define mm_pin_all() ((void)0)
++#define pre_suspend() xen_pre_suspend()
++#define post_suspend(x) xen_post_suspend(x)
++
++#endif
++
++static int take_machine_down(void *p_fast_suspend)
++{
++ int fast_suspend = *(int *)p_fast_suspend;
++ int suspend_cancelled, err;
++ extern void time_resume(void);
++
++ if (fast_suspend) {
++ BUG_ON(!irqs_disabled());
++ } else {
++ BUG_ON(irqs_disabled());
++
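++ /*
++ * Slow path: take the other CPUs offline and suspend xenbus,
++ * retrying until this CPU is the only one left online.
++ */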
++ for (;;) {
++ err = smp_suspend();
++ if (err)
++ return err;
++
++ xenbus_suspend();
++ preempt_disable();
++
++ if (num_online_cpus() == 1)
++ break;
++
++ preempt_enable();
++ xenbus_suspend_cancel();
++ }
++
++ local_irq_disable();
++ }
++
++ mm_pin_all();
++ gnttab_suspend();
++ pre_suspend();
++
++ /*
++ * This hypercall returns 1 if suspend was cancelled or the domain was
++ * merely checkpointed, and 0 if it is resuming in a new domain.
++ */
++ suspend_cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
++
++ post_suspend(suspend_cancelled);
++ gnttab_resume();
++ if (!suspend_cancelled) {
++ irq_resume();
++#ifdef __x86_64__
++ /*
++ * Older versions of Xen do not save/restore the user %cr3.
++ * We do it here just in case, but there's no need if we are
++ * in fast-suspend mode as that implies a new enough Xen.
++ */
++ if (!fast_suspend) {
++ struct mmuext_op op;
++ op.cmd = MMUEXT_NEW_USER_BASEPTR;
++ op.arg1.mfn = pfn_to_mfn(__pa(__user_pgd(
++ current->active_mm->pgd)) >> PAGE_SHIFT);
++ if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
++ BUG();
++ }
++#endif
++ }
++ time_resume();
++
++ if (!fast_suspend)
++ local_irq_enable();
++
++ return suspend_cancelled;
++}
++
++int __xen_suspend(int fast_suspend)
++{
++ int err, suspend_cancelled;
++
++ BUG_ON(smp_processor_id() != 0);
++ BUG_ON(in_interrupt());
++
++#if defined(__i386__) || defined(__x86_64__)
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ printk(KERN_WARNING "Cannot suspend in "
++ "auto_translated_physmap mode.\n");
++ return -EOPNOTSUPP;
++ }
++#endif
++
++ /* If we are definitely UP then 'slow mode' is actually faster. */
++ if (num_possible_cpus() == 1)
++ fast_suspend = 0;
++
++ if (fast_suspend) {
++ xenbus_suspend();
++ err = stop_machine_run(take_machine_down, &fast_suspend, 0);
++ if (err < 0)
++ xenbus_suspend_cancel();
++ } else {
++ err = take_machine_down(&fast_suspend);
++ }
++
++ if (err < 0)
++ return err;
++
++ suspend_cancelled = err;
++ if (!suspend_cancelled) {
++ xencons_resume();
++ xenbus_resume();
++ } else {
++ xenbus_suspend_cancel();
++ }
++
++ if (!fast_suspend)
++ smp_resume();
++
++ return 0;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/core/reboot.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,249 @@
++#define __KERNEL_SYSCALLS__
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/unistd.h>
++#include <linux/module.h>
++#include <linux/reboot.h>
++#include <linux/sysrq.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <linux/kthread.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++MODULE_LICENSE("Dual BSD/GPL");
++
++#define SHUTDOWN_INVALID -1
++#define SHUTDOWN_POWEROFF 0
++#define SHUTDOWN_SUSPEND 2
++/* Code 3 is SHUTDOWN_CRASH, which we don't use because the domain can only
++ * report a crash, not be instructed to crash!
++ * HALT is the same as POWEROFF, as far as we're concerned. The tools use
++ * the distinction when we return the reason code to them.
++ */
++#define SHUTDOWN_HALT 4
++
++/* Ignore multiple shutdown requests. */
++static int shutting_down = SHUTDOWN_INVALID;
++
++/* Can we leave APs online when we suspend? */
++static int fast_suspend;
++
++static void __shutdown_handler(void *unused);
++static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
++
++int __xen_suspend(int fast_suspend);
++
++static int shutdown_process(void *__unused)
++{
++ static char *envp[] = { "HOME=/", "TERM=linux",
++ "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
++ static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
++
++ extern asmlinkage long sys_reboot(int magic1, int magic2,
++ unsigned int cmd, void *arg);
++
++ if ((shutting_down == SHUTDOWN_POWEROFF) ||
++ (shutting_down == SHUTDOWN_HALT)) {
++ if (call_usermodehelper("/sbin/poweroff", poweroff_argv,
++ envp, 0) < 0) {
++#ifdef CONFIG_XEN
++ sys_reboot(LINUX_REBOOT_MAGIC1,
++ LINUX_REBOOT_MAGIC2,
++ LINUX_REBOOT_CMD_POWER_OFF,
++ NULL);
++#endif /* CONFIG_XEN */
++ }
++ }
++
++ shutting_down = SHUTDOWN_INVALID; /* could try again */
++
++ return 0;
++}
++
++static int xen_suspend(void *__unused)
++{
++ int err = __xen_suspend(fast_suspend);
++ if (err)
++ printk(KERN_ERR "Xen suspend failed (%d)\n", err);
++ shutting_down = SHUTDOWN_INVALID;
++ return 0;
++}
++
++static int kthread_create_on_cpu(int (*f)(void *arg),
++ void *arg,
++ const char *name,
++ int cpu)
++{
++ struct task_struct *p;
++ p = kthread_create(f, arg, name);
++ if (IS_ERR(p))
++ return PTR_ERR(p);
++ kthread_bind(p, cpu);
++ wake_up_process(p);
++ return 0;
++}
++
++static void __shutdown_handler(void *unused)
++{
++ int err;
++
++ if (shutting_down != SHUTDOWN_SUSPEND)
++ err = kernel_thread(shutdown_process, NULL,
++ CLONE_FS | CLONE_FILES);
++ else
++ err = kthread_create_on_cpu(xen_suspend, NULL, "suspend", 0);
++
++ if (err < 0) {
++ printk(KERN_WARNING "Error creating shutdown process (%d): "
++ "retrying...\n", -err);
++ schedule_delayed_work(&shutdown_work, HZ/2);
++ }
++}
++
++static void shutdown_handler(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ extern void ctrl_alt_del(void);
++ char *str;
++ struct xenbus_transaction xbt;
++ int err;
++
++ if (shutting_down != SHUTDOWN_INVALID)
++ return;
++
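++ /*
++ * Read and clear control/shutdown within one xenbus transaction,
++ * retrying if the transaction ends with -EAGAIN.
++ */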
++ again:
++ err = xenbus_transaction_start(&xbt);
++ if (err)
++ return;
++
++ str = (char *)xenbus_read(xbt, "control", "shutdown", NULL);
++ /* Ignore read errors and empty reads. */
++ if (XENBUS_IS_ERR_READ(str)) {
++ xenbus_transaction_end(xbt, 1);
++ return;
++ }
++
++ xenbus_write(xbt, "control", "shutdown", "");
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN) {
++ kfree(str);
++ goto again;
++ }
++
++ if (strcmp(str, "poweroff") == 0)
++ shutting_down = SHUTDOWN_POWEROFF;
++ else if (strcmp(str, "reboot") == 0)
++ ctrl_alt_del();
++ else if (strcmp(str, "suspend") == 0)
++ shutting_down = SHUTDOWN_SUSPEND;
++ else if (strcmp(str, "halt") == 0)
++ shutting_down = SHUTDOWN_HALT;
++ else {
++ printk("Ignoring shutdown request: %s\n", str);
++ shutting_down = SHUTDOWN_INVALID;
++ }
++
++ if (shutting_down != SHUTDOWN_INVALID)
++ schedule_work(&shutdown_work);
++
++ kfree(str);
++}
++
++static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
++ unsigned int len)
++{
++ char sysrq_key = '\0';
++ struct xenbus_transaction xbt;
++ int err;
++
++ again:
++ err = xenbus_transaction_start(&xbt);
++ if (err)
++ return;
++ if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
++ printk(KERN_ERR "Unable to read sysrq code in "
++ "control/sysrq\n");
++ xenbus_transaction_end(xbt, 1);
++ return;
++ }
++
++ if (sysrq_key != '\0')
++ xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN)
++ goto again;
++
++#ifdef CONFIG_MAGIC_SYSRQ
++ if (sysrq_key != '\0')
++ handle_sysrq(sysrq_key, NULL, NULL);
++#endif
++}
++
++static struct xenbus_watch shutdown_watch = {
++ .node = "control/shutdown",
++ .callback = shutdown_handler
++};
++
++static struct xenbus_watch sysrq_watch = {
++ .node = "control/sysrq",
++ .callback = sysrq_handler
++};
++
++static int setup_shutdown_watcher(void)
++{
++ int err;
++
++ xenbus_scanf(XBT_NIL, "control",
++ "platform-feature-multiprocessor-suspend",
++ "%d", &fast_suspend);
++
++ err = register_xenbus_watch(&shutdown_watch);
++ if (err) {
++ printk(KERN_ERR "Failed to set shutdown watcher\n");
++ return err;
++ }
++
++ err = register_xenbus_watch(&sysrq_watch);
++ if (err) {
++ printk(KERN_ERR "Failed to set sysrq watcher\n");
++ return err;
++ }
++
++ return 0;
++}
++
++#ifdef CONFIG_XEN
++
++static int shutdown_event(struct notifier_block *notifier,
++ unsigned long event,
++ void *data)
++{
++ setup_shutdown_watcher();
++ return NOTIFY_DONE;
++}
++
++static int __init setup_shutdown_event(void)
++{
++ static struct notifier_block xenstore_notifier = {
++ .notifier_call = shutdown_event
++ };
++ register_xenstore_notifier(&xenstore_notifier);
++
++ return 0;
++}
++
++subsys_initcall(setup_shutdown_event);
++
++#else /* !defined(CONFIG_XEN) */
++
++int xen_reboot_init(void)
++{
++ return setup_shutdown_watcher();
++}
++
++#endif /* !defined(CONFIG_XEN) */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/core/smpboot.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,452 @@
++/*
++ * Xen SMP booting functions
++ *
++ * See arch/i386/kernel/smpboot.c for copyright and credits for derived
++ * portions of this file.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/kernel_stat.h>
++#include <linux/smp_lock.h>
++#include <linux/irq.h>
++#include <linux/bootmem.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <linux/percpu.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/pgalloc.h>
++#include <xen/evtchn.h>
++#include <xen/interface/vcpu.h>
++#include <xen/cpu_hotplug.h>
++#include <xen/xenbus.h>
++
++extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
++extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
++
++extern int local_setup_timer(unsigned int cpu);
++extern void local_teardown_timer(unsigned int cpu);
++
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void system_call(void);
++extern void smp_trap_init(trap_info_t *);
++
++/* Number of siblings per CPU package */
++int smp_num_siblings = 1;
++int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
++EXPORT_SYMBOL(phys_proc_id);
++int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
++EXPORT_SYMBOL(cpu_core_id);
++
++cpumask_t cpu_online_map;
++EXPORT_SYMBOL(cpu_online_map);
++cpumask_t cpu_possible_map;
++EXPORT_SYMBOL(cpu_possible_map);
++cpumask_t cpu_initialized_map;
++
++struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
++EXPORT_SYMBOL(cpu_data);
++
++#ifdef CONFIG_HOTPLUG_CPU
++DEFINE_PER_CPU(int, cpu_state) = { 0 };
++#endif
++
++static DEFINE_PER_CPU(int, resched_irq);
++static DEFINE_PER_CPU(int, callfunc_irq);
++static char resched_name[NR_CPUS][15];
++static char callfunc_name[NR_CPUS][15];
++
++u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++void *xquad_portio;
++
++cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
++cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
++EXPORT_SYMBOL(cpu_core_map);
++
++#if defined(__i386__)
++u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
++EXPORT_SYMBOL(x86_cpu_to_apicid);
++#elif !defined(CONFIG_X86_IO_APIC)
++unsigned int maxcpus = NR_CPUS;
++#endif
++
++void __init prefill_possible_map(void)
++{
++ int i, rc;
++
++ for_each_possible_cpu(i)
++ if (i != smp_processor_id())
++ return;
++
++ for (i = 0; i < NR_CPUS; i++) {
++ rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
++ if (rc >= 0)
++ cpu_set(i, cpu_possible_map);
++ }
++}
++
++void __init smp_alloc_memory(void)
++{
++}
++
++static inline void
++set_cpu_sibling_map(int cpu)
++{
++ phys_proc_id[cpu] = cpu;
++ cpu_core_id[cpu] = 0;
++
++ cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
++ cpu_core_map[cpu] = cpumask_of_cpu(cpu);
++
++ cpu_data[cpu].booted_cores = 1;
++}
++
++static void
++remove_siblinginfo(int cpu)
++{
++ phys_proc_id[cpu] = BAD_APICID;
++ cpu_core_id[cpu] = BAD_APICID;
++
++ cpus_clear(cpu_sibling_map[cpu]);
++ cpus_clear(cpu_core_map[cpu]);
++
++ cpu_data[cpu].booted_cores = 0;
++}
++
++static int xen_smp_intr_init(unsigned int cpu)
++{
++ int rc;
++
++ per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;
++
++ sprintf(resched_name[cpu], "resched%d", cpu);
++ rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
++ cpu,
++ smp_reschedule_interrupt,
++ SA_INTERRUPT,
++ resched_name[cpu],
++ NULL);
++ if (rc < 0)
++ goto fail;
++ per_cpu(resched_irq, cpu) = rc;
++
++ sprintf(callfunc_name[cpu], "callfunc%d", cpu);
++ rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
++ cpu,
++ smp_call_function_interrupt,
++ SA_INTERRUPT,
++ callfunc_name[cpu],
++ NULL);
++ if (rc < 0)
++ goto fail;
++ per_cpu(callfunc_irq, cpu) = rc;
++
++ if ((cpu != 0) && ((rc = local_setup_timer(cpu)) != 0))
++ goto fail;
++
++ return 0;
++
++ fail:
++ if (per_cpu(resched_irq, cpu) >= 0)
++ unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
++ if (per_cpu(callfunc_irq, cpu) >= 0)
++ unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
++ return rc;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static void xen_smp_intr_exit(unsigned int cpu)
++{
++ if (cpu != 0)
++ local_teardown_timer(cpu);
++
++ unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
++ unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
++}
++#endif
++
++void cpu_bringup(void)
++{
++ cpu_init();
++ touch_softlockup_watchdog();
++ preempt_disable();
++ local_irq_enable();
++}
++
++static void cpu_bringup_and_idle(void)
++{
++ cpu_bringup();
++ cpu_idle();
++}
++
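++/*
++ * Build the initial vcpu_guest_context (registers, trap table, GDT frame,
++ * event callbacks and page-table base) used to start this virtual CPU.
++ */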
++static void cpu_initialize_context(unsigned int cpu)
++{
++ vcpu_guest_context_t ctxt;
++ struct task_struct *idle = idle_task(cpu);
++#ifdef __x86_64__
++ struct desc_ptr *gdt_descr = &cpu_gdt_descr[cpu];
++#else
++ struct Xgt_desc_struct *gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++#endif
++
++ if (cpu_test_and_set(cpu, cpu_initialized_map))
++ return;
++
++ memset(&ctxt, 0, sizeof(ctxt));
++
++ ctxt.flags = VGCF_IN_KERNEL;
++ ctxt.user_regs.ds = __USER_DS;
++ ctxt.user_regs.es = __USER_DS;
++ ctxt.user_regs.fs = 0;
++ ctxt.user_regs.gs = 0;
++ ctxt.user_regs.ss = __KERNEL_DS;
++ ctxt.user_regs.eip = (unsigned long)cpu_bringup_and_idle;
++ ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000; /* IOPL_RING1 */
++
++ memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
++
++ smp_trap_init(ctxt.trap_ctxt);
++
++ ctxt.ldt_ents = 0;
++
++ ctxt.gdt_frames[0] = virt_to_mfn(gdt_descr->address);
++ ctxt.gdt_ents = gdt_descr->size / 8;
++
++#ifdef __i386__
++ ctxt.user_regs.cs = __KERNEL_CS;
++ ctxt.user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
++
++ ctxt.kernel_ss = __KERNEL_DS;
++ ctxt.kernel_sp = idle->thread.esp0;
++
++ ctxt.event_callback_cs = __KERNEL_CS;
++ ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
++ ctxt.failsafe_callback_cs = __KERNEL_CS;
++ ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
++
++ ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
++#else /* __x86_64__ */
++ ctxt.user_regs.cs = __KERNEL_CS;
++ ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
++
++ ctxt.kernel_ss = __KERNEL_DS;
++ ctxt.kernel_sp = idle->thread.rsp0;
++
++ ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
++ ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
++ ctxt.syscall_callback_eip = (unsigned long)system_call;
++
++ ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt));
++
++ ctxt.gs_base_kernel = (unsigned long)(cpu_pda(cpu));
++#endif
++
++ BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt));
++}
++
++void __init smp_prepare_cpus(unsigned int max_cpus)
++{
++ int cpu;
++ struct task_struct *idle;
++#ifdef __x86_64__
++ struct desc_ptr *gdt_descr;
++#else
++ struct Xgt_desc_struct *gdt_descr;
++#endif
++
++ boot_cpu_data.apicid = 0;
++ cpu_data[0] = boot_cpu_data;
++
++ cpu_2_logical_apicid[0] = 0;
++ x86_cpu_to_apicid[0] = 0;
++
++ current_thread_info()->cpu = 0;
++
++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
++ cpus_clear(cpu_sibling_map[cpu]);
++ cpus_clear(cpu_core_map[cpu]);
++ }
++
++ set_cpu_sibling_map(0);
++
++ if (xen_smp_intr_init(0))
++ BUG();
++
++ cpu_initialized_map = cpumask_of_cpu(0);
++
++ /* Restrict the possible_map according to max_cpus. */
++ while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
++ for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--)
++ continue;
++ cpu_clear(cpu, cpu_possible_map);
++ }
++
++ for_each_possible_cpu (cpu) {
++ if (cpu == 0)
++ continue;
++
++#ifdef __x86_64__
++ gdt_descr = &cpu_gdt_descr[cpu];
++#else
++ gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++#endif
++ gdt_descr->address = get_zeroed_page(GFP_KERNEL);
++ if (unlikely(!gdt_descr->address)) {
++ printk(KERN_CRIT "CPU%d failed to allocate GDT\n",
++ cpu);
++ continue;
++ }
++ gdt_descr->size = GDT_SIZE;
++ memcpy((void *)gdt_descr->address, cpu_gdt_table, GDT_SIZE);
++ make_page_readonly(
++ (void *)gdt_descr->address,
++ XENFEAT_writable_descriptor_tables);
++
++ cpu_data[cpu] = boot_cpu_data;
++ cpu_data[cpu].apicid = cpu;
++
++ cpu_2_logical_apicid[cpu] = cpu;
++ x86_cpu_to_apicid[cpu] = cpu;
++
++ idle = fork_idle(cpu);
++ if (IS_ERR(idle))
++ panic("failed fork for CPU %d", cpu);
++
++#ifdef __x86_64__
++ cpu_pda(cpu)->pcurrent = idle;
++ cpu_pda(cpu)->cpunumber = cpu;
++ clear_ti_thread_flag(idle->thread_info, TIF_FORK);
++#endif
++
++ irq_ctx_init(cpu);
++
++#ifdef CONFIG_HOTPLUG_CPU
++ if (is_initial_xendomain())
++ cpu_set(cpu, cpu_present_map);
++#else
++ cpu_set(cpu, cpu_present_map);
++#endif
++ }
++
++ init_xenbus_allowed_cpumask();
++
++#ifdef CONFIG_X86_IO_APIC
++ /*
++ * Here we can be sure that there is an IO-APIC in the system. Let's
++ * go and set it up:
++ */
++ if (!skip_ioapic_setup && nr_ioapics)
++ setup_IO_APIC();
++#endif
++}
++
++void __devinit smp_prepare_boot_cpu(void)
++{
++ prefill_possible_map();
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/*
++ * Initialize cpu_present_map late to skip SMP boot code in init/main.c.
++ * But do it early enough to catch critical for_each_present_cpu() loops
++ * in i386-specific code.
++ */
++static int __init initialize_cpu_present_map(void)
++{
++ cpu_present_map = cpu_possible_map;
++ return 0;
++}
++core_initcall(initialize_cpu_present_map);
++
++int __cpu_disable(void)
++{
++ cpumask_t map = cpu_online_map;
++ int cpu = smp_processor_id();
++
++ if (cpu == 0)
++ return -EBUSY;
++
++ remove_siblinginfo(cpu);
++
++ cpu_clear(cpu, map);
++ fixup_irqs(map);
++ cpu_clear(cpu, cpu_online_map);
++
++ return 0;
++}
++
++void __cpu_die(unsigned int cpu)
++{
++ while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
++ current->state = TASK_UNINTERRUPTIBLE;
++ schedule_timeout(HZ/10);
++ }
++
++ xen_smp_intr_exit(cpu);
++
++ if (num_online_cpus() == 1)
++ alternatives_smp_switch(0);
++}
++
++#else /* !CONFIG_HOTPLUG_CPU */
++
++int __cpu_disable(void)
++{
++ return -ENOSYS;
++}
++
++void __cpu_die(unsigned int cpu)
++{
++ BUG();
++}
++
++#endif /* CONFIG_HOTPLUG_CPU */
++
++int __devinit __cpu_up(unsigned int cpu)
++{
++ int rc;
++
++ rc = cpu_up_check(cpu);
++ if (rc)
++ return rc;
++
++ cpu_initialize_context(cpu);
++
++ if (num_online_cpus() == 1)
++ alternatives_smp_switch(1);
++
++ /* This must be done before setting cpu_online_map */
++ set_cpu_sibling_map(cpu);
++ wmb();
++
++ rc = xen_smp_intr_init(cpu);
++ if (rc) {
++ remove_siblinginfo(cpu);
++ return rc;
++ }
++
++ cpu_set(cpu, cpu_online_map);
++
++ rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
++ BUG_ON(rc);
++
++ return 0;
++}
++
++void __init smp_cpus_done(unsigned int max_cpus)
++{
++}
++
++#ifndef CONFIG_X86_LOCAL_APIC
++int setup_profiling_timer(unsigned int multiplier)
++{
++ return -EINVAL;
++}
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/core/xen_proc.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,23 @@
++
++#include <linux/module.h>
++#include <linux/proc_fs.h>
++#include <xen/xen_proc.h>
++
++static struct proc_dir_entry *xen_base;
++
++struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
++{
++ if ( xen_base == NULL )
++ if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
++ panic("Couldn't create /proc/xen");
++ return create_proc_entry(name, mode, xen_base);
++}
++
++EXPORT_SYMBOL_GPL(create_xen_proc_entry);
++
++void remove_xen_proc_entry(const char *name)
++{
++ remove_proc_entry(name, xen_base);
++}
++
++EXPORT_SYMBOL_GPL(remove_xen_proc_entry);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/core/xen_sysfs.c 2007-08-27 14:01:58.000000000 -0400
+@@ -0,0 +1,378 @@
++/*
++ * copyright (c) 2006 IBM Corporation
++ * Authored by: Mike D. Day <ncmike@us.ibm.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/err.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <asm/hypervisor.h>
++#include <xen/features.h>
++#include <xen/hypervisor_sysfs.h>
++#include <xen/xenbus.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Mike D. Day <ncmike@us.ibm.com>");
++
++static ssize_t type_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ return sprintf(buffer, "xen\n");
++}
++
++HYPERVISOR_ATTR_RO(type);
++
++static int __init xen_sysfs_type_init(void)
++{
++ return sysfs_create_file(&hypervisor_subsys.kset.kobj, &type_attr.attr);
++}
++
++static void xen_sysfs_type_destroy(void)
++{
++ sysfs_remove_file(&hypervisor_subsys.kset.kobj, &type_attr.attr);
++}
++
++/* xen version attributes */
++static ssize_t major_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int version = HYPERVISOR_xen_version(XENVER_version, NULL);
++ if (version)
++ return sprintf(buffer, "%d\n", version >> 16);
++ return -ENODEV;
++}
++
++HYPERVISOR_ATTR_RO(major);
++
++static ssize_t minor_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int version = HYPERVISOR_xen_version(XENVER_version, NULL);
++ if (version)
++ return sprintf(buffer, "%d\n", version & 0xff);
++ return -ENODEV;
++}
++
++HYPERVISOR_ATTR_RO(minor);
++
++static ssize_t extra_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ char *extra;
++
++ extra = kmalloc(XEN_EXTRAVERSION_LEN, GFP_KERNEL);
++ if (extra) {
++ ret = HYPERVISOR_xen_version(XENVER_extraversion, extra);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", extra);
++ kfree(extra);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(extra);
++
++static struct attribute *version_attrs[] = {
++ &major_attr.attr,
++ &minor_attr.attr,
++ &extra_attr.attr,
++ NULL
++};
++
++static struct attribute_group version_group = {
++ .name = "version",
++ .attrs = version_attrs,
++};
++
++static int __init xen_sysfs_version_init(void)
++{
++ return sysfs_create_group(&hypervisor_subsys.kset.kobj,
++ &version_group);
++}
++
++static void xen_sysfs_version_destroy(void)
++{
++ sysfs_remove_group(&hypervisor_subsys.kset.kobj, &version_group);
++}
++
++/* UUID */
++
++static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ char *vm, *val;
++ int ret;
++
++ vm = xenbus_read(XBT_NIL, "vm", "", NULL);
++ if (IS_ERR(vm))
++ return PTR_ERR(vm);
++ val = xenbus_read(XBT_NIL, vm, "uuid", NULL);
++ kfree(vm);
++ if (IS_ERR(val))
++ return PTR_ERR(val);
++ ret = sprintf(buffer, "%s\n", val);
++ kfree(val);
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(uuid);
++
++static int __init xen_sysfs_uuid_init(void)
++{
++ return sysfs_create_file(&hypervisor_subsys.kset.kobj, &uuid_attr.attr);
++}
++
++static void xen_sysfs_uuid_destroy(void)
++{
++ sysfs_remove_file(&hypervisor_subsys.kset.kobj, &uuid_attr.attr);
++}
++
++/* xen compilation attributes */
++
++static ssize_t compiler_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ struct xen_compile_info *info;
++
++ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
++ if (info) {
++ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", info->compiler);
++ kfree(info);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(compiler);
++
++static ssize_t compiled_by_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ struct xen_compile_info *info;
++
++ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
++ if (info) {
++ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", info->compile_by);
++ kfree(info);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(compiled_by);
++
++static ssize_t compile_date_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ struct xen_compile_info *info;
++
++ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
++ if (info) {
++ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", info->compile_date);
++ kfree(info);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(compile_date);
++
++static struct attribute *xen_compile_attrs[] = {
++ &compiler_attr.attr,
++ &compiled_by_attr.attr,
++ &compile_date_attr.attr,
++ NULL
++};
++
++static struct attribute_group xen_compilation_group = {
++ .name = "compilation",
++ .attrs = xen_compile_attrs,
++};
++
++static int __init xen_compilation_init(void)
++{
++ return sysfs_create_group(&hypervisor_subsys.kset.kobj,
++ &xen_compilation_group);
++}
++
++static void xen_compilation_destroy(void)
++{
++ sysfs_remove_group(&hypervisor_subsys.kset.kobj,
++ &xen_compilation_group);
++}
++
++/* xen properties info */
++
++static ssize_t capabilities_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ char *caps;
++
++ caps = kmalloc(XEN_CAPABILITIES_INFO_LEN, GFP_KERNEL);
++ if (caps) {
++ ret = HYPERVISOR_xen_version(XENVER_capabilities, caps);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", caps);
++ kfree(caps);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(capabilities);
++
++static ssize_t changeset_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ char *cset;
++
++ cset = kmalloc(XEN_CHANGESET_INFO_LEN, GFP_KERNEL);
++ if (cset) {
++ ret = HYPERVISOR_xen_version(XENVER_changeset, cset);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", cset);
++ kfree(cset);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(changeset);
++
++static ssize_t virtual_start_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ struct xen_platform_parameters *parms;
++
++ parms = kmalloc(sizeof(struct xen_platform_parameters), GFP_KERNEL);
++ if (parms) {
++ ret = HYPERVISOR_xen_version(XENVER_platform_parameters,
++ parms);
++ if (!ret)
++ ret = sprintf(buffer, "%lx\n", parms->virt_start);
++ kfree(parms);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(virtual_start);
++
++static ssize_t pagesize_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret;
++
++ ret = HYPERVISOR_xen_version(XENVER_pagesize, NULL);
++ if (ret > 0)
++ ret = sprintf(buffer, "%x\n", ret);
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(pagesize);
++
++/* eventually there will be several more features to export */
++static ssize_t xen_feature_show(int index, char *buffer)
++{
++ int ret = -ENOMEM;
++ struct xen_feature_info *info;
++
++ info = kmalloc(sizeof(struct xen_feature_info), GFP_KERNEL);
++ if (info) {
++ info->submap_idx = index;
++ ret = HYPERVISOR_xen_version(XENVER_get_features, info);
++ if (!ret)
++ ret = sprintf(buffer, "%d\n", info->submap);
++ kfree(info);
++ }
++
++ return ret;
++}
++
++static ssize_t writable_pt_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ return xen_feature_show(XENFEAT_writable_page_tables, buffer);
++}
++
++HYPERVISOR_ATTR_RO(writable_pt);
++
++static struct attribute *xen_properties_attrs[] = {
++ &capabilities_attr.attr,
++ &changeset_attr.attr,
++ &virtual_start_attr.attr,
++ &pagesize_attr.attr,
++ &writable_pt_attr.attr,
++ NULL
++};
++
++static struct attribute_group xen_properties_group = {
++ .name = "properties",
++ .attrs = xen_properties_attrs,
++};
++
++static int __init xen_properties_init(void)
++{
++ return sysfs_create_group(&hypervisor_subsys.kset.kobj,
++ &xen_properties_group);
++}
++
++static void xen_properties_destroy(void)
++{
++ sysfs_remove_group(&hypervisor_subsys.kset.kobj,
++ &xen_properties_group);
++}
++
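++/*
++ * Register the attribute groups in turn, unwinding the ones already
++ * registered on failure.  Note the inverted test after
++ * xen_properties_init(): success jumps straight to 'out'.
++ */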
++static int __init hyper_sysfs_init(void)
++{
++ int ret;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ ret = xen_sysfs_type_init();
++ if (ret)
++ goto out;
++ ret = xen_sysfs_version_init();
++ if (ret)
++ goto version_out;
++ ret = xen_compilation_init();
++ if (ret)
++ goto comp_out;
++ ret = xen_sysfs_uuid_init();
++ if (ret)
++ goto uuid_out;
++ ret = xen_properties_init();
++ if (!ret)
++ goto out;
++
++ xen_sysfs_uuid_destroy();
++uuid_out:
++ xen_compilation_destroy();
++comp_out:
++ xen_sysfs_version_destroy();
++version_out:
++ xen_sysfs_type_destroy();
++out:
++ return ret;
++}
++
++static void hyper_sysfs_exit(void)
++{
++ xen_properties_destroy();
++ xen_compilation_destroy();
++ xen_sysfs_uuid_destroy();
++ xen_sysfs_version_destroy();
++ xen_sysfs_type_destroy();
++
++}
++
++module_init(hyper_sysfs_init);
++module_exit(hyper_sysfs_exit);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/evtchn/Makefile 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,2 @@
++
++obj-y := evtchn.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/evtchn/evtchn.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,469 @@
++/******************************************************************************
++ * evtchn.c
++ *
++ * Driver for receiving and demuxing event-channel signals.
++ *
++ * Copyright (c) 2004-2005, K A Fraser
++ * Multi-process extensions Copyright (c) 2004, Steven Smith
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/errno.h>
++#include <linux/miscdevice.h>
++#include <linux/major.h>
++#include <linux/proc_fs.h>
++#include <linux/stat.h>
++#include <linux/poll.h>
++#include <linux/irq.h>
++#include <linux/init.h>
++#include <linux/gfp.h>
++#include <linux/mutex.h>
++#include <xen/evtchn.h>
++#include <xen/public/evtchn.h>
++
++struct per_user_data {
++ /* Notification ring, accessed via /dev/xen/evtchn. */
++#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t))
++#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
++ evtchn_port_t *ring;
++ unsigned int ring_cons, ring_prod, ring_overflow;
++ struct mutex ring_cons_mutex; /* protect against concurrent readers */
++
++ /* Processes wait on this queue when ring is empty. */
++ wait_queue_head_t evtchn_wait;
++ struct fasync_struct *evtchn_async_queue;
++};
++
++/* Who's bound to each port? */
++static struct per_user_data *port_user[NR_EVENT_CHANNELS];
++static spinlock_t port_user_lock;
++
++void evtchn_device_upcall(int port)
++{
++ struct per_user_data *u;
++
++ spin_lock(&port_user_lock);
++
++ mask_evtchn(port);
++ clear_evtchn(port);
++
++ if ((u = port_user[port]) != NULL) {
++ if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
++ u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
++ if (u->ring_cons == u->ring_prod++) {
++ wake_up_interruptible(&u->evtchn_wait);
++ kill_fasync(&u->evtchn_async_queue,
++ SIGIO, POLL_IN);
++ }
++ } else {
++ u->ring_overflow = 1;
++ }
++ }
++
++ spin_unlock(&port_user_lock);
++}
++
++static ssize_t evtchn_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ int rc;
++ unsigned int c, p, bytes1 = 0, bytes2 = 0;
++ struct per_user_data *u = file->private_data;
++
++ /* Whole number of ports. */
++ count &= ~(sizeof(evtchn_port_t)-1);
++
++ if (count == 0)
++ return 0;
++
++ if (count > PAGE_SIZE)
++ count = PAGE_SIZE;
++
++ for (;;) {
++ mutex_lock(&u->ring_cons_mutex);
++
++ rc = -EFBIG;
++ if (u->ring_overflow)
++ goto unlock_out;
++
++ if ((c = u->ring_cons) != (p = u->ring_prod))
++ break;
++
++ mutex_unlock(&u->ring_cons_mutex);
++
++ if (file->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++
++ rc = wait_event_interruptible(
++ u->evtchn_wait, u->ring_cons != u->ring_prod);
++ if (rc)
++ return rc;
++ }
++
++ /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
++ if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
++ bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
++ sizeof(evtchn_port_t);
++ bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
++ } else {
++ bytes1 = (p - c) * sizeof(evtchn_port_t);
++ bytes2 = 0;
++ }
++
++ /* Truncate chunks according to caller's maximum byte count. */
++ if (bytes1 > count) {
++ bytes1 = count;
++ bytes2 = 0;
++ } else if ((bytes1 + bytes2) > count) {
++ bytes2 = count - bytes1;
++ }
++
++ rc = -EFAULT;
++ if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
++ ((bytes2 != 0) &&
++ copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
++ goto unlock_out;
++
++ u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
++ rc = bytes1 + bytes2;
++
++ unlock_out:
++ mutex_unlock(&u->ring_cons_mutex);
++ return rc;
++}
++
++static ssize_t evtchn_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ int rc, i;
++ evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
++ struct per_user_data *u = file->private_data;
++
++ if (kbuf == NULL)
++ return -ENOMEM;
++
++ /* Whole number of ports. */
++ count &= ~(sizeof(evtchn_port_t)-1);
++
++ rc = 0;
++ if (count == 0)
++ goto out;
++
++ if (count > PAGE_SIZE)
++ count = PAGE_SIZE;
++
++ rc = -EFAULT;
++ if (copy_from_user(kbuf, buf, count) != 0)
++ goto out;
++
++ spin_lock_irq(&port_user_lock);
++ for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
++ if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
++ unmask_evtchn(kbuf[i]);
++ spin_unlock_irq(&port_user_lock);
++
++ rc = count;
++
++ out:
++ free_page((unsigned long)kbuf);
++ return rc;
++}
++
++static void evtchn_bind_to_user(struct per_user_data *u, int port)
++{
++ spin_lock_irq(&port_user_lock);
++ BUG_ON(port_user[port] != NULL);
++ port_user[port] = u;
++ unmask_evtchn(port);
++ spin_unlock_irq(&port_user_lock);
++}
++
++static int evtchn_ioctl(struct inode *inode, struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ int rc;
++ struct per_user_data *u = file->private_data;
++ void __user *uarg = (void __user *) arg;
++
++ switch (cmd) {
++ case IOCTL_EVTCHN_BIND_VIRQ: {
++ struct ioctl_evtchn_bind_virq bind;
++ struct evtchn_bind_virq bind_virq;
++
++ rc = -EFAULT;
++ if (copy_from_user(&bind, uarg, sizeof(bind)))
++ break;
++
++ bind_virq.virq = bind.virq;
++ bind_virq.vcpu = 0;
++ rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
++ &bind_virq);
++ if (rc != 0)
++ break;
++
++ rc = bind_virq.port;
++ evtchn_bind_to_user(u, rc);
++ break;
++ }
++
++ case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
++ struct ioctl_evtchn_bind_interdomain bind;
++ struct evtchn_bind_interdomain bind_interdomain;
++
++ rc = -EFAULT;
++ if (copy_from_user(&bind, uarg, sizeof(bind)))
++ break;
++
++ bind_interdomain.remote_dom = bind.remote_domain;
++ bind_interdomain.remote_port = bind.remote_port;
++ rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
++ &bind_interdomain);
++ if (rc != 0)
++ break;
++
++ rc = bind_interdomain.local_port;
++ evtchn_bind_to_user(u, rc);
++ break;
++ }
++
++ case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
++ struct ioctl_evtchn_bind_unbound_port bind;
++ struct evtchn_alloc_unbound alloc_unbound;
++
++ rc = -EFAULT;
++ if (copy_from_user(&bind, uarg, sizeof(bind)))
++ break;
++
++ alloc_unbound.dom = DOMID_SELF;
++ alloc_unbound.remote_dom = bind.remote_domain;
++ rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
++ &alloc_unbound);
++ if (rc != 0)
++ break;
++
++ rc = alloc_unbound.port;
++ evtchn_bind_to_user(u, rc);
++ break;
++ }
++
++ case IOCTL_EVTCHN_UNBIND: {
++ struct ioctl_evtchn_unbind unbind;
++ struct evtchn_close close;
++ int ret;
++
++ rc = -EFAULT;
++ if (copy_from_user(&unbind, uarg, sizeof(unbind)))
++ break;
++
++ rc = -EINVAL;
++ if (unbind.port >= NR_EVENT_CHANNELS)
++ break;
++
++ spin_lock_irq(&port_user_lock);
++
++ rc = -ENOTCONN;
++ if (port_user[unbind.port] != u) {
++ spin_unlock_irq(&port_user_lock);
++ break;
++ }
++
++ port_user[unbind.port] = NULL;
++ mask_evtchn(unbind.port);
++
++ spin_unlock_irq(&port_user_lock);
++
++ close.port = unbind.port;
++ ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
++ BUG_ON(ret);
++
++ rc = 0;
++ break;
++ }
++
++ case IOCTL_EVTCHN_NOTIFY: {
++ struct ioctl_evtchn_notify notify;
++
++ rc = -EFAULT;
++ if (copy_from_user(&notify, uarg, sizeof(notify)))
++ break;
++
++ if (notify.port >= NR_EVENT_CHANNELS) {
++ rc = -EINVAL;
++ } else if (port_user[notify.port] != u) {
++ rc = -ENOTCONN;
++ } else {
++ notify_remote_via_evtchn(notify.port);
++ rc = 0;
++ }
++ break;
++ }
++
++ case IOCTL_EVTCHN_RESET: {
++ /* Initialise the ring to empty. Clear errors. */
++ mutex_lock(&u->ring_cons_mutex);
++ spin_lock_irq(&port_user_lock);
++ u->ring_cons = u->ring_prod = u->ring_overflow = 0;
++ spin_unlock_irq(&port_user_lock);
++ mutex_unlock(&u->ring_cons_mutex);
++ rc = 0;
++ break;
++ }
++
++ default:
++ rc = -ENOSYS;
++ break;
++ }
++
++ return rc;
++}
++
++static unsigned int evtchn_poll(struct file *file, poll_table *wait)
++{
++ unsigned int mask = POLLOUT | POLLWRNORM;
++ struct per_user_data *u = file->private_data;
++
++ poll_wait(file, &u->evtchn_wait, wait);
++ if (u->ring_cons != u->ring_prod)
++ mask |= POLLIN | POLLRDNORM;
++ if (u->ring_overflow)
++ mask = POLLERR;
++ return mask;
++}
++
++static int evtchn_fasync(int fd, struct file *filp, int on)
++{
++ struct per_user_data *u = filp->private_data;
++ return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
++}
++
++static int evtchn_open(struct inode *inode, struct file *filp)
++{
++ struct per_user_data *u;
++
++ if ((u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL)
++ return -ENOMEM;
++
++ memset(u, 0, sizeof(*u));
++ init_waitqueue_head(&u->evtchn_wait);
++
++ u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
++ if (u->ring == NULL) {
++ kfree(u);
++ return -ENOMEM;
++ }
++
++ mutex_init(&u->ring_cons_mutex);
++
++ filp->private_data = u;
++
++ return 0;
++}
++
++static int evtchn_release(struct inode *inode, struct file *filp)
++{
++ int i;
++ struct per_user_data *u = filp->private_data;
++ struct evtchn_close close;
++
++ spin_lock_irq(&port_user_lock);
++
++ free_page((unsigned long)u->ring);
++
++ for (i = 0; i < NR_EVENT_CHANNELS; i++) {
++ int ret;
++ if (port_user[i] != u)
++ continue;
++
++ port_user[i] = NULL;
++ mask_evtchn(i);
++
++ close.port = i;
++ ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
++ BUG_ON(ret);
++ }
++
++ spin_unlock_irq(&port_user_lock);
++
++ kfree(u);
++
++ return 0;
++}
++
++static const struct file_operations evtchn_fops = {
++ .owner = THIS_MODULE,
++ .read = evtchn_read,
++ .write = evtchn_write,
++ .ioctl = evtchn_ioctl,
++ .poll = evtchn_poll,
++ .fasync = evtchn_fasync,
++ .open = evtchn_open,
++ .release = evtchn_release,
++};
++
++static struct miscdevice evtchn_miscdev = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "evtchn",
++ .fops = &evtchn_fops,
++};
++
++static int __init evtchn_init(void)
++{
++ int err;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ spin_lock_init(&port_user_lock);
++ memset(port_user, 0, sizeof(port_user));
++
++ /* Create '/dev/misc/evtchn'. */
++ err = misc_register(&evtchn_miscdev);
++ if (err != 0) {
++ printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
++ return err;
++ }
++
++ printk("Event-channel device installed.\n");
++
++ return 0;
++}
++
++static void evtchn_cleanup(void)
++{
++ misc_deregister(&evtchn_miscdev);
++}
++
++module_init(evtchn_init);
++module_exit(evtchn_cleanup);
++
++MODULE_LICENSE("Dual BSD/GPL");
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/fbfront/Makefile 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,2 @@
++obj-$(CONFIG_XEN_FRAMEBUFFER) := xenfb.o
++obj-$(CONFIG_XEN_KEYBOARD) += xenkbd.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/fbfront/xenfb.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,752 @@
++/*
++ * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
++ *
++ * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
++ *
++ * Based on linux/drivers/video/q40fb.c
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++
++/*
++ * TODO:
++ *
++ * Switch to grant tables when they become capable of dealing with the
++ * frame buffer.
++ */
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/fb.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/mm.h>
++#include <linux/mutex.h>
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/interface/io/fbif.h>
++#include <xen/interface/io/protocols.h>
++#include <xen/xenbus.h>
++#include <linux/kthread.h>
++
++struct xenfb_mapping
++{
++ struct list_head link;
++ struct vm_area_struct *vma;
++ atomic_t map_refs;
++ int faults;
++ struct xenfb_info *info;
++};
++
++struct xenfb_info
++{
++ struct task_struct *kthread;
++ wait_queue_head_t wq;
++
++ unsigned char *fb;
++ struct fb_info *fb_info;
++ struct timer_list refresh;
++ int dirty;
++ int x1, y1, x2, y2; /* dirty rectangle,
++ protected by dirty_lock */
++ spinlock_t dirty_lock;
++ struct mutex mm_lock;
++ int nr_pages;
++ struct page **pages;
++ struct list_head mappings; /* protected by mm_lock */
++
++ int irq;
++ struct xenfb_page *page;
++ unsigned long *mfns;
++ int update_wanted; /* XENFB_TYPE_UPDATE wanted */
++
++ struct xenbus_device *xbdev;
++};
++
++/*
++ * How the locks work together
++ *
++ * There are two locks: spinlock dirty_lock protecting the dirty
++ * rectangle, and mutex mm_lock protecting mappings.
++ *
++ * The problem is that dirty rectangle and mappings aren't
++ * independent: the dirty rectangle must cover all faulted pages in
++ * mappings. We need to prove that our locking maintains this
++ * invariant.
++ *
++ * There are several kinds of critical regions:
++ *
++ * 1. Holding only dirty_lock: xenfb_refresh(). May run in
++ * interrupts. Extends the dirty rectangle. Trivially preserves
++ * invariant.
++ *
++ * 2. Holding only mm_lock: xenfb_mmap() and xenfb_vm_close(). Touch
++ * only mappings. The former creates unfaulted pages. Preserves
++ * invariant. The latter removes pages. Preserves invariant.
++ *
++ * 3. Holding both locks: xenfb_vm_nopage(). Extends the dirty
++ * rectangle and updates mappings consistently. Preserves
++ * invariant.
++ *
++ * 4. The ugliest one: xenfb_update_screen(). Clear the dirty
++ * rectangle and update mappings consistently.
++ *
++ * We can't simply hold both locks, because zap_page_range() cannot
++ * be called with a spinlock held.
++ *
++ * Therefore, we first clear the dirty rectangle with both locks
++ * held. Then we unlock dirty_lock and update the mappings.
++ * Critical regions that hold only dirty_lock may interfere with
++ * that. This can only be region 1: xenfb_refresh(). But that
++ * just extends the dirty rectangle, which can't harm the
++ * invariant.
++ *
++ * But FIXME: the invariant is too weak. It misses that the fault
++ * record in mappings must be consistent with the mapping of pages in
++ * the associated address space! do_no_page() updates the PTE after
++ * xenfb_vm_nopage() returns, i.e. outside the critical region. This
++ * allows the following race:
++ *
++ * X writes to some address in the Xen frame buffer
++ * Fault - call do_no_page()
++ * call xenfb_vm_nopage()
++ * grab mm_lock
++ * map->faults++;
++ * release mm_lock
++ * return back to do_no_page()
++ * (preempted, or SMP)
++ * Xen worker thread runs.
++ * grab mm_lock
++ * look at mappings
++ * find this mapping, zaps its pages (but page not in pte yet)
++ * clear map->faults
++ * releases mm_lock
++ * (back to X process)
++ * put page in X's pte
++ *
++ * Oh well, we won't be updating the writes to this page anytime soon.
++ */
++
++static int xenfb_fps = 20;
++static unsigned long xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;
++
++static int xenfb_remove(struct xenbus_device *);
++static void xenfb_init_shared_page(struct xenfb_info *);
++static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
++static void xenfb_disconnect_backend(struct xenfb_info *);
++
++static void xenfb_do_update(struct xenfb_info *info,
++ int x, int y, int w, int h)
++{
++ union xenfb_out_event event;
++ __u32 prod;
++
++ event.type = XENFB_TYPE_UPDATE;
++ event.update.x = x;
++ event.update.y = y;
++ event.update.width = w;
++ event.update.height = h;
++
++ prod = info->page->out_prod;
++ /* caller ensures !xenfb_queue_full() */
++ mb(); /* ensure ring space available */
++ XENFB_OUT_RING_REF(info->page, prod) = event;
++ wmb(); /* ensure ring contents visible */
++ info->page->out_prod = prod + 1;
++
++ notify_remote_via_irq(info->irq);
++}
++
++static int xenfb_queue_full(struct xenfb_info *info)
++{
++ __u32 cons, prod;
++
++ prod = info->page->out_prod;
++ cons = info->page->out_cons;
++ return prod - cons == XENFB_OUT_RING_LEN;
++}
++
++static void xenfb_update_screen(struct xenfb_info *info)
++{
++ unsigned long flags;
++ int y1, y2, x1, x2;
++ struct xenfb_mapping *map;
++
++ if (!info->update_wanted)
++ return;
++ if (xenfb_queue_full(info))
++ return;
++
++ mutex_lock(&info->mm_lock);
++
++ spin_lock_irqsave(&info->dirty_lock, flags);
++ y1 = info->y1;
++ y2 = info->y2;
++ x1 = info->x1;
++ x2 = info->x2;
++ info->x1 = info->y1 = INT_MAX;
++ info->x2 = info->y2 = 0;
++ spin_unlock_irqrestore(&info->dirty_lock, flags);
++
++ list_for_each_entry(map, &info->mappings, link) {
++ if (!map->faults)
++ continue;
++ zap_page_range(map->vma, map->vma->vm_start,
++ map->vma->vm_end - map->vma->vm_start, NULL);
++ map->faults = 0;
++ }
++
++ mutex_unlock(&info->mm_lock);
++
++ xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
++}
++
++static int xenfb_thread(void *data)
++{
++ struct xenfb_info *info = data;
++
++ while (!kthread_should_stop()) {
++ if (info->dirty) {
++ info->dirty = 0;
++ xenfb_update_screen(info);
++ }
++ wait_event_interruptible(info->wq,
++ kthread_should_stop() || info->dirty);
++ try_to_freeze();
++ }
++ return 0;
++}
++
++static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
++ unsigned blue, unsigned transp,
++ struct fb_info *info)
++{
++ u32 v;
++
++ if (regno > info->cmap.len)
++ return 1;
++
++ red >>= (16 - info->var.red.length);
++ green >>= (16 - info->var.green.length);
++ blue >>= (16 - info->var.blue.length);
++
++ v = (red << info->var.red.offset) |
++ (green << info->var.green.offset) |
++ (blue << info->var.blue.offset);
++
++ /* FIXME is this sane? check against xxxfb_setcolreg()! */
++ switch (info->var.bits_per_pixel) {
++ case 16:
++ case 24:
++ case 32:
++ ((u32 *)info->pseudo_palette)[regno] = v;
++ break;
++ }
++
++ return 0;
++}
++
++static void xenfb_timer(unsigned long data)
++{
++ struct xenfb_info *info = (struct xenfb_info *)data;
++ info->dirty = 1;
++ wake_up(&info->wq);
++}
++
++static void __xenfb_refresh(struct xenfb_info *info,
++ int x1, int y1, int w, int h)
++{
++ int y2, x2;
++
++ y2 = y1 + h;
++ x2 = x1 + w;
++
++ if (info->y1 > y1)
++ info->y1 = y1;
++ if (info->y2 < y2)
++ info->y2 = y2;
++ if (info->x1 > x1)
++ info->x1 = x1;
++ if (info->x2 < x2)
++ info->x2 = x2;
++
++ if (timer_pending(&info->refresh))
++ return;
++
++ mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
++}
++
++static void xenfb_refresh(struct xenfb_info *info,
++ int x1, int y1, int w, int h)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->dirty_lock, flags);
++ __xenfb_refresh(info, x1, y1, w, h);
++ spin_unlock_irqrestore(&info->dirty_lock, flags);
++}
++
++static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
++{
++ struct xenfb_info *info = p->par;
++
++ cfb_fillrect(p, rect);
++ xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
++}
++
++static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
++{
++ struct xenfb_info *info = p->par;
++
++ cfb_imageblit(p, image);
++ xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
++}
++
++static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
++{
++ struct xenfb_info *info = p->par;
++
++ cfb_copyarea(p, area);
++ xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
++}
++
++static void xenfb_vm_open(struct vm_area_struct *vma)
++{
++ struct xenfb_mapping *map = vma->vm_private_data;
++ atomic_inc(&map->map_refs);
++}
++
++static void xenfb_vm_close(struct vm_area_struct *vma)
++{
++ struct xenfb_mapping *map = vma->vm_private_data;
++ struct xenfb_info *info = map->info;
++
++ mutex_lock(&info->mm_lock);
++ if (atomic_dec_and_test(&map->map_refs)) {
++ list_del(&map->link);
++ kfree(map);
++ }
++ mutex_unlock(&info->mm_lock);
++}
++
++static struct page *xenfb_vm_nopage(struct vm_area_struct *vma,
++ unsigned long vaddr, int *type)
++{
++ struct xenfb_mapping *map = vma->vm_private_data;
++ struct xenfb_info *info = map->info;
++ int pgnr = (vaddr - vma->vm_start) >> PAGE_SHIFT;
++ unsigned long flags;
++ struct page *page;
++ int y1, y2;
++
++ if (pgnr >= info->nr_pages)
++ return NOPAGE_SIGBUS;
++
++ mutex_lock(&info->mm_lock);
++ spin_lock_irqsave(&info->dirty_lock, flags);
++ page = info->pages[pgnr];
++ get_page(page);
++ map->faults++;
++
++ y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
++ y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
++ if (y2 > info->fb_info->var.yres)
++ y2 = info->fb_info->var.yres;
++ __xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
++ spin_unlock_irqrestore(&info->dirty_lock, flags);
++ mutex_unlock(&info->mm_lock);
++
++ if (type)
++ *type = VM_FAULT_MINOR;
++
++ return page;
++}
++
++static struct vm_operations_struct xenfb_vm_ops = {
++ .open = xenfb_vm_open,
++ .close = xenfb_vm_close,
++ .nopage = xenfb_vm_nopage,
++};
++
++static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
++{
++ struct xenfb_info *info = fb_info->par;
++ struct xenfb_mapping *map;
++ int map_pages;
++
++ if (!(vma->vm_flags & VM_WRITE))
++ return -EINVAL;
++ if (!(vma->vm_flags & VM_SHARED))
++ return -EINVAL;
++ if (vma->vm_pgoff != 0)
++ return -EINVAL;
++
++ map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
++ if (map_pages > info->nr_pages)
++ return -EINVAL;
++
++ map = kzalloc(sizeof(*map), GFP_KERNEL);
++ if (map == NULL)
++ return -ENOMEM;
++
++ map->vma = vma;
++ map->faults = 0;
++ map->info = info;
++ atomic_set(&map->map_refs, 1);
++
++ mutex_lock(&info->mm_lock);
++ list_add(&map->link, &info->mappings);
++ mutex_unlock(&info->mm_lock);
++
++ vma->vm_ops = &xenfb_vm_ops;
++ vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
++ vma->vm_private_data = map;
++
++ return 0;
++}
++
++static struct fb_ops xenfb_fb_ops = {
++ .owner = THIS_MODULE,
++ .fb_setcolreg = xenfb_setcolreg,
++ .fb_fillrect = xenfb_fillrect,
++ .fb_copyarea = xenfb_copyarea,
++ .fb_imageblit = xenfb_imageblit,
++ .fb_mmap = xenfb_mmap,
++};
++
++static irqreturn_t xenfb_event_handler(int rq, void *dev_id,
++ struct pt_regs *regs)
++{
++ /*
++	 * No in events recognized; simply ignore them all.
++	 * If you need to recognize some, see xenkbd's input_handler()
++ * for how to do that.
++ */
++ struct xenfb_info *info = dev_id;
++ struct xenfb_page *page = info->page;
++
++ if (page->in_cons != page->in_prod) {
++ info->page->in_cons = info->page->in_prod;
++ notify_remote_via_irq(info->irq);
++ }
++ return IRQ_HANDLED;
++}
++
++static unsigned long vmalloc_to_mfn(void *address)
++{
++ return pfn_to_mfn(vmalloc_to_pfn(address));
++}
++
++static int __devinit xenfb_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ struct xenfb_info *info;
++ struct fb_info *fb_info;
++ int ret;
++
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (info == NULL) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
++ return -ENOMEM;
++ }
++ dev->dev.driver_data = info;
++ info->xbdev = dev;
++ info->irq = -1;
++ info->x1 = info->y1 = INT_MAX;
++ spin_lock_init(&info->dirty_lock);
++ mutex_init(&info->mm_lock);
++ init_waitqueue_head(&info->wq);
++ init_timer(&info->refresh);
++ info->refresh.function = xenfb_timer;
++ info->refresh.data = (unsigned long)info;
++ INIT_LIST_HEAD(&info->mappings);
++
++ info->fb = vmalloc(xenfb_mem_len);
++ if (info->fb == NULL)
++ goto error_nomem;
++ memset(info->fb, 0, xenfb_mem_len);
++
++ info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
++
++ info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
++ GFP_KERNEL);
++ if (info->pages == NULL)
++ goto error_nomem;
++
++ info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
++ if (!info->mfns)
++ goto error_nomem;
++
++ /* set up shared page */
++ info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
++ if (!info->page)
++ goto error_nomem;
++
++ xenfb_init_shared_page(info);
++
++ fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
++ /* see fishy hackery below */
++ if (fb_info == NULL)
++ goto error_nomem;
++
++ /* FIXME fishy hackery */
++ fb_info->pseudo_palette = fb_info->par;
++ fb_info->par = info;
++ /* /FIXME */
++ fb_info->screen_base = info->fb;
++
++ fb_info->fbops = &xenfb_fb_ops;
++ fb_info->var.xres_virtual = fb_info->var.xres = info->page->width;
++ fb_info->var.yres_virtual = fb_info->var.yres = info->page->height;
++ fb_info->var.bits_per_pixel = info->page->depth;
++
++ fb_info->var.red = (struct fb_bitfield){16, 8, 0};
++ fb_info->var.green = (struct fb_bitfield){8, 8, 0};
++ fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
++
++ fb_info->var.activate = FB_ACTIVATE_NOW;
++ fb_info->var.height = -1;
++ fb_info->var.width = -1;
++ fb_info->var.vmode = FB_VMODE_NONINTERLACED;
++
++ fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
++ fb_info->fix.line_length = info->page->line_length;
++ fb_info->fix.smem_start = 0;
++ fb_info->fix.smem_len = xenfb_mem_len;
++ strcpy(fb_info->fix.id, "xen");
++ fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
++ fb_info->fix.accel = FB_ACCEL_NONE;
++
++ fb_info->flags = FBINFO_FLAG_DEFAULT;
++
++ ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
++ if (ret < 0) {
++ framebuffer_release(fb_info);
++ xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
++ goto error;
++ }
++
++ ret = register_framebuffer(fb_info);
++ if (ret) {
++		fb_dealloc_cmap(&fb_info->cmap);
++ framebuffer_release(fb_info);
++ xenbus_dev_fatal(dev, ret, "register_framebuffer");
++ goto error;
++ }
++ info->fb_info = fb_info;
++
++ /* FIXME should this be delayed until backend XenbusStateConnected? */
++ info->kthread = kthread_run(xenfb_thread, info, "xenfb thread");
++ if (IS_ERR(info->kthread)) {
++ ret = PTR_ERR(info->kthread);
++ info->kthread = NULL;
++		xenbus_dev_fatal(dev, ret, "xenfb_thread");
++ goto error;
++ }
++
++ ret = xenfb_connect_backend(dev, info);
++ if (ret < 0)
++ goto error;
++
++ return 0;
++
++ error_nomem:
++ ret = -ENOMEM;
++ xenbus_dev_fatal(dev, ret, "allocating device memory");
++ error:
++ xenfb_remove(dev);
++ return ret;
++}
++
++static int xenfb_resume(struct xenbus_device *dev)
++{
++ struct xenfb_info *info = dev->dev.driver_data;
++
++ xenfb_disconnect_backend(info);
++ xenfb_init_shared_page(info);
++ return xenfb_connect_backend(dev, info);
++}
++
++static int xenfb_remove(struct xenbus_device *dev)
++{
++ struct xenfb_info *info = dev->dev.driver_data;
++
++ del_timer(&info->refresh);
++ if (info->kthread)
++ kthread_stop(info->kthread);
++ xenfb_disconnect_backend(info);
++ if (info->fb_info) {
++ unregister_framebuffer(info->fb_info);
++ fb_dealloc_cmap(&info->fb_info->cmap);
++ framebuffer_release(info->fb_info);
++ }
++ free_page((unsigned long)info->page);
++ vfree(info->mfns);
++ kfree(info->pages);
++ vfree(info->fb);
++ kfree(info);
++
++ return 0;
++}
++
++static void xenfb_init_shared_page(struct xenfb_info *info)
++{
++ int i;
++
++ for (i = 0; i < info->nr_pages; i++)
++ info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);
++
++ for (i = 0; i < info->nr_pages; i++)
++ info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
++
++ info->page->pd[0] = vmalloc_to_mfn(info->mfns);
++ info->page->pd[1] = 0;
++ info->page->width = XENFB_WIDTH;
++ info->page->height = XENFB_HEIGHT;
++ info->page->depth = XENFB_DEPTH;
++ info->page->line_length = (info->page->depth / 8) * info->page->width;
++ info->page->mem_length = xenfb_mem_len;
++ info->page->in_cons = info->page->in_prod = 0;
++ info->page->out_cons = info->page->out_prod = 0;
++}
++
++static int xenfb_connect_backend(struct xenbus_device *dev,
++ struct xenfb_info *info)
++{
++ int ret;
++ struct xenbus_transaction xbt;
++
++ ret = bind_listening_port_to_irqhandler(
++ dev->otherend_id, xenfb_event_handler, 0, "xenfb", info);
++ if (ret < 0) {
++ xenbus_dev_fatal(dev, ret,
++ "bind_listening_port_to_irqhandler");
++ return ret;
++ }
++ info->irq = ret;
++
++ again:
++ ret = xenbus_transaction_start(&xbt);
++ if (ret) {
++ xenbus_dev_fatal(dev, ret, "starting transaction");
++ return ret;
++ }
++ ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
++ virt_to_mfn(info->page));
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++ irq_to_evtchn_port(info->irq));
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
++ XEN_IO_PROTO_ABI_NATIVE);
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_transaction_end(xbt, 0);
++ if (ret) {
++ if (ret == -EAGAIN)
++ goto again;
++ xenbus_dev_fatal(dev, ret, "completing transaction");
++ return ret;
++ }
++
++ xenbus_switch_state(dev, XenbusStateInitialised);
++ return 0;
++
++ error_xenbus:
++ xenbus_transaction_end(xbt, 1);
++ xenbus_dev_fatal(dev, ret, "writing xenstore");
++ return ret;
++}
++
++static void xenfb_disconnect_backend(struct xenfb_info *info)
++{
++ if (info->irq >= 0)
++ unbind_from_irqhandler(info->irq, info);
++ info->irq = -1;
++}
++
++static void xenfb_backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct xenfb_info *info = dev->dev.driver_data;
++ int val;
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitialised:
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ break;
++
++ case XenbusStateInitWait:
++ InitWait:
++ xenbus_switch_state(dev, XenbusStateConnected);
++ break;
++
++ case XenbusStateConnected:
++ /*
++ * Work around xenbus race condition: If backend goes
++ * through InitWait to Connected fast enough, we can
++ * get Connected twice here.
++ */
++ if (dev->state != XenbusStateConnected)
++ goto InitWait; /* no InitWait seen yet, fudge it */
++
++ if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
++ "request-update", "%d", &val) < 0)
++ val = 0;
++ if (val)
++ info->update_wanted = 1;
++ break;
++
++ case XenbusStateClosing:
++ // FIXME is this safe in any dev->state?
++ xenbus_frontend_closed(dev);
++ break;
++ }
++}
++
++static struct xenbus_device_id xenfb_ids[] = {
++ { "vfb" },
++ { "" }
++};
++
++static struct xenbus_driver xenfb = {
++ .name = "vfb",
++ .owner = THIS_MODULE,
++ .ids = xenfb_ids,
++ .probe = xenfb_probe,
++ .remove = xenfb_remove,
++ .resume = xenfb_resume,
++ .otherend_changed = xenfb_backend_changed,
++};
++
++static int __init xenfb_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ /* Nothing to do if running in dom0. */
++ if (is_initial_xendomain())
++ return -ENODEV;
++
++ return xenbus_register_frontend(&xenfb);
++}
++
++static void __exit xenfb_cleanup(void)
++{
++ return xenbus_unregister_driver(&xenfb);
++}
++
++module_init(xenfb_init);
++module_exit(xenfb_cleanup);
++
++MODULE_LICENSE("GPL");
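The frontend above publishes "page-ref", "event-channel", "protocol" and "feature-update" in xenstore during xenfb_connect_backend(). Below is a hedged backend-side sketch of picking those values up with libxenstore; the frontend path shown is the conventional /local/domain/<domid>/device/vfb/0 layout, which is an assumption of this example, and xs_daemon_open()/xs_read() come from libxenstore, not from this patch.

/* Illustrative sketch only: read the keys written by xenfb_connect_backend(). */
#include <stdio.h>
#include <stdlib.h>
#include <xs.h>				/* libxenstore */

int dump_vfb_keys(int domid)
{
	struct xs_handle *xsh = xs_daemon_open();
	char path[64], *val;
	unsigned int len;

	if (!xsh)
		return -1;
	snprintf(path, sizeof(path), "/local/domain/%d/device/vfb/0/page-ref", domid);
	val = xs_read(xsh, XBT_NULL, path, &len);
	printf("page-ref = %s\n", val ? val : "<missing>");
	free(val);
	snprintf(path, sizeof(path), "/local/domain/%d/device/vfb/0/event-channel", domid);
	val = xs_read(xsh, XBT_NULL, path, &len);
	printf("event-channel = %s\n", val ? val : "<missing>");
	free(val);
	xs_daemon_close(xsh);
	return 0;
}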
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/fbfront/xenkbd.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,333 @@
++/*
++ * linux/drivers/input/keyboard/xenkbd.c -- Xen para-virtual input device
++ *
++ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
++ *
++ * Based on linux/drivers/input/mouse/sermouse.c
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++
++/*
++ * TODO:
++ *
++ * Switch to grant tables together with xenfb.c.
++ */
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/input.h>
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/interface/io/fbif.h>
++#include <xen/interface/io/kbdif.h>
++#include <xen/xenbus.h>
++
++struct xenkbd_info
++{
++ struct input_dev *kbd;
++ struct input_dev *ptr;
++ struct xenkbd_page *page;
++ int irq;
++ struct xenbus_device *xbdev;
++ char phys[32];
++};
++
++static int xenkbd_remove(struct xenbus_device *);
++static int xenkbd_connect_backend(struct xenbus_device *, struct xenkbd_info *);
++static void xenkbd_disconnect_backend(struct xenkbd_info *);
++
++/*
++ * Note: if you need to send out events, see xenfb_do_update() for how
++ * to do that.
++ */
++
++static irqreturn_t input_handler(int rq, void *dev_id, struct pt_regs *regs)
++{
++ struct xenkbd_info *info = dev_id;
++ struct xenkbd_page *page = info->page;
++ __u32 cons, prod;
++
++ prod = page->in_prod;
++	if (prod == page->in_cons)
++ return IRQ_HANDLED;
++ rmb(); /* ensure we see ring contents up to prod */
++ for (cons = page->in_cons; cons != prod; cons++) {
++ union xenkbd_in_event *event;
++ struct input_dev *dev;
++ event = &XENKBD_IN_RING_REF(page, cons);
++
++ dev = info->ptr;
++ switch (event->type) {
++ case XENKBD_TYPE_MOTION:
++ input_report_rel(dev, REL_X, event->motion.rel_x);
++ input_report_rel(dev, REL_Y, event->motion.rel_y);
++ break;
++ case XENKBD_TYPE_KEY:
++ dev = NULL;
++ if (test_bit(event->key.keycode, info->kbd->keybit))
++ dev = info->kbd;
++ if (test_bit(event->key.keycode, info->ptr->keybit))
++ dev = info->ptr;
++ if (dev)
++ input_report_key(dev, event->key.keycode,
++ event->key.pressed);
++ else
++ printk("xenkbd: unhandled keycode 0x%x\n",
++ event->key.keycode);
++ break;
++ case XENKBD_TYPE_POS:
++ input_report_abs(dev, ABS_X, event->pos.abs_x);
++ input_report_abs(dev, ABS_Y, event->pos.abs_y);
++ break;
++ }
++ if (dev)
++ input_sync(dev);
++ }
++ mb(); /* ensure we got ring contents */
++ page->in_cons = cons;
++ notify_remote_via_irq(info->irq);
++
++ return IRQ_HANDLED;
++}
++
++int __devinit xenkbd_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int ret, i;
++ struct xenkbd_info *info;
++ struct input_dev *kbd, *ptr;
++
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (!info) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
++ return -ENOMEM;
++ }
++ dev->dev.driver_data = info;
++ info->xbdev = dev;
++ snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);
++
++ info->page = (void *)__get_free_page(GFP_KERNEL);
++ if (!info->page)
++ goto error_nomem;
++ info->page->in_cons = info->page->in_prod = 0;
++ info->page->out_cons = info->page->out_prod = 0;
++
++ /* keyboard */
++ kbd = input_allocate_device();
++ if (!kbd)
++ goto error_nomem;
++ kbd->name = "Xen Virtual Keyboard";
++ kbd->phys = info->phys;
++ kbd->id.bustype = BUS_PCI;
++ kbd->id.vendor = 0x5853;
++ kbd->id.product = 0xffff;
++ kbd->evbit[0] = BIT(EV_KEY);
++ for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
++ set_bit(i, kbd->keybit);
++ for (i = KEY_OK; i < KEY_MAX; i++)
++ set_bit(i, kbd->keybit);
++
++ ret = input_register_device(kbd);
++ if (ret) {
++ input_free_device(kbd);
++ xenbus_dev_fatal(dev, ret, "input_register_device(kbd)");
++ goto error;
++ }
++ info->kbd = kbd;
++
++ /* pointing device */
++ ptr = input_allocate_device();
++ if (!ptr)
++ goto error_nomem;
++ ptr->name = "Xen Virtual Pointer";
++ ptr->phys = info->phys;
++ ptr->id.bustype = BUS_PCI;
++ ptr->id.vendor = 0x5853;
++ ptr->id.product = 0xfffe;
++ ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
++ for (i = BTN_LEFT; i <= BTN_TASK; i++)
++ set_bit(i, ptr->keybit);
++ ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y);
++ input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
++ input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
++
++ ret = input_register_device(ptr);
++ if (ret) {
++ input_free_device(ptr);
++ xenbus_dev_fatal(dev, ret, "input_register_device(ptr)");
++ goto error;
++ }
++ info->ptr = ptr;
++
++ ret = xenkbd_connect_backend(dev, info);
++ if (ret < 0)
++ goto error;
++
++ return 0;
++
++ error_nomem:
++ ret = -ENOMEM;
++ xenbus_dev_fatal(dev, ret, "allocating device memory");
++ error:
++ xenkbd_remove(dev);
++ return ret;
++}
++
++static int xenkbd_resume(struct xenbus_device *dev)
++{
++ struct xenkbd_info *info = dev->dev.driver_data;
++
++ xenkbd_disconnect_backend(info);
++ return xenkbd_connect_backend(dev, info);
++}
++
++static int xenkbd_remove(struct xenbus_device *dev)
++{
++ struct xenkbd_info *info = dev->dev.driver_data;
++
++ xenkbd_disconnect_backend(info);
++ input_unregister_device(info->kbd);
++ input_unregister_device(info->ptr);
++ free_page((unsigned long)info->page);
++ kfree(info);
++ return 0;
++}
++
++static int xenkbd_connect_backend(struct xenbus_device *dev,
++ struct xenkbd_info *info)
++{
++ int ret;
++ struct xenbus_transaction xbt;
++
++ ret = bind_listening_port_to_irqhandler(
++ dev->otherend_id, input_handler, 0, "xenkbd", info);
++ if (ret < 0) {
++ xenbus_dev_fatal(dev, ret,
++ "bind_listening_port_to_irqhandler");
++ return ret;
++ }
++ info->irq = ret;
++
++ again:
++ ret = xenbus_transaction_start(&xbt);
++ if (ret) {
++ xenbus_dev_fatal(dev, ret, "starting transaction");
++ return ret;
++ }
++ ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
++ virt_to_mfn(info->page));
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++ irq_to_evtchn_port(info->irq));
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_transaction_end(xbt, 0);
++ if (ret) {
++ if (ret == -EAGAIN)
++ goto again;
++ xenbus_dev_fatal(dev, ret, "completing transaction");
++ return ret;
++ }
++
++ xenbus_switch_state(dev, XenbusStateInitialised);
++ return 0;
++
++ error_xenbus:
++ xenbus_transaction_end(xbt, 1);
++ xenbus_dev_fatal(dev, ret, "writing xenstore");
++ return ret;
++}
++
++static void xenkbd_disconnect_backend(struct xenkbd_info *info)
++{
++ if (info->irq >= 0)
++ unbind_from_irqhandler(info->irq, info);
++ info->irq = -1;
++}
++
++static void xenkbd_backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct xenkbd_info *info = dev->dev.driver_data;
++ int ret, val;
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitialised:
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ break;
++
++ case XenbusStateInitWait:
++ InitWait:
++ ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
++ "feature-abs-pointer", "%d", &val);
++ if (ret < 0)
++ val = 0;
++ if (val) {
++ ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
++ "request-abs-pointer", "1");
++ if (ret)
++ ; /* FIXME */
++ }
++ xenbus_switch_state(dev, XenbusStateConnected);
++ break;
++
++ case XenbusStateConnected:
++ /*
++ * Work around xenbus race condition: If backend goes
++ * through InitWait to Connected fast enough, we can
++ * get Connected twice here.
++ */
++ if (dev->state != XenbusStateConnected)
++ goto InitWait; /* no InitWait seen yet, fudge it */
++ break;
++
++ case XenbusStateClosing:
++ xenbus_frontend_closed(dev);
++ break;
++ }
++}
++
++static struct xenbus_device_id xenkbd_ids[] = {
++ { "vkbd" },
++ { "" }
++};
++
++static struct xenbus_driver xenkbd = {
++ .name = "vkbd",
++ .owner = THIS_MODULE,
++ .ids = xenkbd_ids,
++ .probe = xenkbd_probe,
++ .remove = xenkbd_remove,
++ .resume = xenkbd_resume,
++ .otherend_changed = xenkbd_backend_changed,
++};
++
++static int __init xenkbd_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ /* Nothing to do if running in dom0. */
++ if (is_initial_xendomain())
++ return -ENODEV;
++
++ return xenbus_register_frontend(&xenkbd);
++}
++
++static void __exit xenkbd_cleanup(void)
++{
++ return xenbus_unregister_driver(&xenkbd);
++}
++
++module_init(xenkbd_init);
++module_exit(xenkbd_cleanup);
++
++MODULE_LICENSE("GPL");
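The keyboard and pointer registered by xenkbd_probe() surface as ordinary evdev devices, so their events can be consumed with the standard Linux input API. A minimal sketch follows; the event node path is system-dependent and only illustrative.

/* Illustrative sketch only: dump events from the "Xen Virtual Keyboard" node. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/input.h>

int dump_events(const char *node)		/* e.g. "/dev/input/event2" */
{
	struct input_event ev;
	int fd = open(node, O_RDONLY);

	if (fd < 0)
		return -1;
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("type=%u code=%u value=%d\n", ev.type, ev.code, ev.value);
	close(fd);
	return 0;
}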
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/gntdev/Makefile 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1 @@
++obj-y := gntdev.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/gntdev/gntdev.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,973 @@
++/******************************************************************************
++ * gntdev.c
++ *
++ * Device for accessing (in user-space) pages that have been granted by other
++ * domains.
++ *
++ * Copyright (c) 2006-2007, D G Murray.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <asm/atomic.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/device.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <xen/gnttab.h>
++#include <asm/hypervisor.h>
++#include <xen/balloon.h>
++#include <xen/evtchn.h>
++#include <xen/driver_util.h>
++
++#include <linux/types.h>
++#include <xen/public/gntdev.h>
++
++
++#define DRIVER_AUTHOR "Derek G. Murray <Derek.Murray@cl.cam.ac.uk>"
++#define DRIVER_DESC "User-space granted page access driver"
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++
++#define MAX_GRANTS 128
++
++/* A slot can be in one of three states:
++ *
++ * 0. GNTDEV_SLOT_INVALID:
++ * This slot is not associated with a grant reference, and is therefore free
++ * to be overwritten by a new grant reference.
++ *
++ * 1. GNTDEV_SLOT_NOT_YET_MAPPED:
++ * This slot is associated with a grant reference (via the
++ * IOCTL_GNTDEV_MAP_GRANT_REF ioctl), but it has not yet been mmap()-ed.
++ *
++ * 2. GNTDEV_SLOT_MAPPED:
++ * This slot is associated with a grant reference, and has been mmap()-ed.
++ */
++typedef enum gntdev_slot_state {
++ GNTDEV_SLOT_INVALID = 0,
++ GNTDEV_SLOT_NOT_YET_MAPPED,
++ GNTDEV_SLOT_MAPPED
++} gntdev_slot_state_t;
++
++#define GNTDEV_INVALID_HANDLE -1
++#define GNTDEV_FREE_LIST_INVALID -1
++/* Each opened instance of gntdev is associated with a list of grants,
++ * represented by an array of elements of the following type,
++ * gntdev_grant_info_t.
++ */
++typedef struct gntdev_grant_info {
++ gntdev_slot_state_t state;
++ union {
++ uint32_t free_list_index;
++ struct {
++ domid_t domid;
++ grant_ref_t ref;
++ grant_handle_t kernel_handle;
++ grant_handle_t user_handle;
++ uint64_t dev_bus_addr;
++ } valid;
++ } u;
++} gntdev_grant_info_t;
++
++/* Private data structure, which is stored in the file pointer for files
++ * associated with this device.
++ */
++typedef struct gntdev_file_private_data {
++
++ /* Array of grant information. */
++ gntdev_grant_info_t grants[MAX_GRANTS];
++
++ /* Read/write semaphore used to protect the grants array. */
++ struct rw_semaphore grants_sem;
++
++ /* An array of indices of free slots in the grants array.
++ * N.B. An entry in this list may temporarily have the value
++ * GNTDEV_FREE_LIST_INVALID if the corresponding slot has been removed
++ * from the list by the contiguous allocator, but the list has not yet
++ * been compressed. However, this is not visible across invocations of
++ * the device.
++ */
++ int32_t free_list[MAX_GRANTS];
++
++ /* The number of free slots in the grants array. */
++ uint32_t free_list_size;
++
++ /* Read/write semaphore used to protect the free list. */
++ struct rw_semaphore free_list_sem;
++
++ /* Index of the next slot after the most recent contiguous allocation,
++ * for use in a next-fit allocator.
++ */
++ uint32_t next_fit_index;
++
++ /* Used to map grants into the kernel, before mapping them into user
++ * space.
++ */
++ struct page **foreign_pages;
++
++} gntdev_file_private_data_t;
++
++/* Module lifecycle operations. */
++static int __init gntdev_init(void);
++static void __exit gntdev_exit(void);
++
++module_init(gntdev_init);
++module_exit(gntdev_exit);
++
++/* File operations. */
++static int gntdev_open(struct inode *inode, struct file *flip);
++static int gntdev_release(struct inode *inode, struct file *flip);
++static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma);
++static int gntdev_ioctl (struct inode *inode, struct file *flip,
++ unsigned int cmd, unsigned long arg);
++
++static struct file_operations gntdev_fops = {
++ .owner = THIS_MODULE,
++ .open = gntdev_open,
++ .release = gntdev_release,
++ .mmap = gntdev_mmap,
++ .ioctl = gntdev_ioctl
++};
++
++/* VM operations. */
++static void gntdev_vma_close(struct vm_area_struct *vma);
++static pte_t gntdev_clear_pte(struct vm_area_struct *vma, unsigned long addr,
++ pte_t *ptep, int is_fullmm);
++
++static struct vm_operations_struct gntdev_vmops = {
++ .close = gntdev_vma_close,
++ .zap_pte = gntdev_clear_pte
++};
++
++/* Global variables. */
++
++/* The driver major number, for use when unregistering the driver. */
++static int gntdev_major;
++
++#define GNTDEV_NAME "gntdev"
++
++/* Memory mapping functions
++ * ------------------------
++ *
++ * Every granted page is mapped into both kernel and user space, and the two
++ * following functions return the respective virtual addresses of these pages.
++ *
++ * When shadow paging is disabled, the granted page is mapped directly into
++ * user space; when it is enabled, it is mapped into the kernel and remapped
++ * into user space using vm_insert_page() (see gntdev_mmap(), below).
++ */
++
++/* Returns the virtual address (in user space) of the @page_index'th page
++ * in the given VM area.
++ */
++static inline unsigned long get_user_vaddr (struct vm_area_struct *vma,
++ int page_index)
++{
++ return (unsigned long) vma->vm_start + (page_index << PAGE_SHIFT);
++}
++
++/* Returns the virtual address (in kernel space) of the @slot_index'th page
++ * mapped by the gntdev instance that owns the given private data struct.
++ */
++static inline unsigned long get_kernel_vaddr (gntdev_file_private_data_t *priv,
++ int slot_index)
++{
++ unsigned long pfn;
++ void *kaddr;
++ pfn = page_to_pfn(priv->foreign_pages[slot_index]);
++ kaddr = pfn_to_kaddr(pfn);
++ return (unsigned long) kaddr;
++}
++
++/* Helper functions. */
++
++/* Adds information about a grant reference to the list of grants in the file's
++ * private data structure. Returns non-zero on failure. On success, sets the
++ * value of *offset to the offset that should be mmap()-ed in order to map the
++ * grant reference.
++ */
++static int add_grant_reference(struct file *flip,
++ struct ioctl_gntdev_grant_ref *op,
++ uint64_t *offset)
++{
++ gntdev_file_private_data_t *private_data
++ = (gntdev_file_private_data_t *) flip->private_data;
++
++ uint32_t slot_index;
++
++ if (unlikely(private_data->free_list_size == 0)) {
++ return -ENOMEM;
++ }
++
++ slot_index = private_data->free_list[--private_data->free_list_size];
++
++ /* Copy the grant information into file's private data. */
++ private_data->grants[slot_index].state = GNTDEV_SLOT_NOT_YET_MAPPED;
++ private_data->grants[slot_index].u.valid.domid = op->domid;
++ private_data->grants[slot_index].u.valid.ref = op->ref;
++
++ /* The offset is calculated as the index of the chosen entry in the
++ * file's private data's array of grant information. This is then
++ * shifted to give an offset into the virtual "file address space".
++ */
++ *offset = slot_index << PAGE_SHIFT;
++
++ return 0;
++}
++
++/* Adds the @count grant references to the contiguous range in the slot array
++ * beginning at @first_slot. It is assumed that @first_slot was returned by a
++ * previous invocation of find_contiguous_free_range(), during the same
++ * invocation of the driver.
++ */
++static int add_grant_references(struct file *flip,
++ int count,
++ struct ioctl_gntdev_grant_ref *ops,
++ uint32_t first_slot)
++{
++ gntdev_file_private_data_t *private_data
++ = (gntdev_file_private_data_t *) flip->private_data;
++ int i;
++
++ for (i = 0; i < count; ++i) {
++
++ /* First, mark the slot's entry in the free list as invalid. */
++ int free_list_index =
++ private_data->grants[first_slot+i].u.free_list_index;
++ private_data->free_list[free_list_index] =
++ GNTDEV_FREE_LIST_INVALID;
++
++ /* Now, update the slot. */
++ private_data->grants[first_slot+i].state =
++ GNTDEV_SLOT_NOT_YET_MAPPED;
++ private_data->grants[first_slot+i].u.valid.domid =
++ ops[i].domid;
++ private_data->grants[first_slot+i].u.valid.ref = ops[i].ref;
++ }
++
++ return 0;
++}
++
++/* Scans through the free list for @flip, removing entries that are marked as
++ * GNTDEV_FREE_LIST_INVALID. This will reduce the recorded size of the free list to
++ * the number of valid entries.
++ */
++static void compress_free_list(struct file *flip)
++{
++ gntdev_file_private_data_t *private_data
++ = (gntdev_file_private_data_t *) flip->private_data;
++ int i, j = 0, old_size;
++
++ old_size = private_data->free_list_size;
++ for (i = 0; i < old_size; ++i) {
++ if (private_data->free_list[i] != GNTDEV_FREE_LIST_INVALID) {
++ private_data->free_list[j] =
++ private_data->free_list[i];
++ ++j;
++ } else {
++ --private_data->free_list_size;
++ }
++ }
++}
++
++/* Searches the grant array in the private data of @flip for a range of
++ * @num_slots contiguous slots in the GNTDEV_SLOT_INVALID state.
++ *
++ * Returns the index of the first slot if a range is found, otherwise -ENOMEM.
++ */
++static int find_contiguous_free_range(struct file *flip,
++ uint32_t num_slots)
++{
++ gntdev_file_private_data_t *private_data
++ = (gntdev_file_private_data_t *) flip->private_data;
++
++ int i;
++ int start_index = private_data->next_fit_index;
++ int range_start = 0, range_length;
++
++ if (private_data->free_list_size < num_slots) {
++ return -ENOMEM;
++ }
++
++ /* First search from the start_index to the end of the array. */
++ range_length = 0;
++ for (i = start_index; i < MAX_GRANTS; ++i) {
++ if (private_data->grants[i].state == GNTDEV_SLOT_INVALID) {
++ if (range_length == 0) {
++ range_start = i;
++ }
++ ++range_length;
++ if (range_length == num_slots) {
++ return range_start;
++ }
++ }
++ }
++
++ /* Now search from the start of the array to the start_index. */
++ range_length = 0;
++ for (i = 0; i < start_index; ++i) {
++ if (private_data->grants[i].state == GNTDEV_SLOT_INVALID) {
++ if (range_length == 0) {
++ range_start = i;
++ }
++ ++range_length;
++ if (range_length == num_slots) {
++ return range_start;
++ }
++ }
++ }
++
++ return -ENOMEM;
++}
++
++/* Interface functions. */
++
++/* Initialises the driver. Called when the module is loaded. */
++static int __init gntdev_init(void)
++{
++ struct class *class;
++ struct class_device *device;
++
++ if (!is_running_on_xen()) {
++ printk(KERN_ERR "You must be running Xen to use gntdev\n");
++ return -ENODEV;
++ }
++
++ gntdev_major = register_chrdev(0, GNTDEV_NAME, &gntdev_fops);
++ if (gntdev_major < 0)
++ {
++ printk(KERN_ERR "Could not register gntdev device\n");
++ return -ENOMEM;
++ }
++
++ /* Note that if the sysfs code fails, we will still initialise the
++ * device, and output the major number so that the device can be
++ * created manually using mknod.
++ */
++ if ((class = get_xen_class()) == NULL) {
++ printk(KERN_ERR "Error setting up xen_class\n");
++ printk(KERN_ERR "gntdev created with major number = %d\n",
++ gntdev_major);
++ return 0;
++ }
++
++ device = class_device_create(class, NULL, MKDEV(gntdev_major, 0),
++ NULL, GNTDEV_NAME);
++ if (IS_ERR(device)) {
++ printk(KERN_ERR "Error creating gntdev device in xen_class\n");
++ printk(KERN_ERR "gntdev created with major number = %d\n",
++ gntdev_major);
++ return 0;
++ }
++
++ return 0;
++}
++
++/* Cleans up and unregisters the driver. Called when the driver is unloaded.
++ */
++static void __exit gntdev_exit(void)
++{
++ struct class *class;
++ if ((class = get_xen_class()) != NULL)
++ class_device_destroy(class, MKDEV(gntdev_major, 0));
++ unregister_chrdev(gntdev_major, GNTDEV_NAME);
++}
++
++/* Called when the device is opened. */
++static int gntdev_open(struct inode *inode, struct file *flip)
++{
++ gntdev_file_private_data_t *private_data;
++ int i;
++
++ try_module_get(THIS_MODULE);
++
++ /* Allocate space for the per-instance private data. */
++ private_data = kmalloc(sizeof(*private_data), GFP_KERNEL);
++ if (!private_data)
++ goto nomem_out;
++
++ /* Allocate space for the kernel-mapping of granted pages. */
++ private_data->foreign_pages =
++ alloc_empty_pages_and_pagevec(MAX_GRANTS);
++ if (!private_data->foreign_pages)
++ goto nomem_out2;
++
++ /* Initialise the free-list, which contains all slots at first.
++ */
++ for (i = 0; i < MAX_GRANTS; ++i) {
++ private_data->free_list[MAX_GRANTS - i - 1] = i;
++ private_data->grants[i].state = GNTDEV_SLOT_INVALID;
++ private_data->grants[i].u.free_list_index = MAX_GRANTS - i - 1;
++ }
++ private_data->free_list_size = MAX_GRANTS;
++ private_data->next_fit_index = 0;
++
++ init_rwsem(&private_data->grants_sem);
++ init_rwsem(&private_data->free_list_sem);
++
++ flip->private_data = private_data;
++
++ return 0;
++
++nomem_out2:
++ kfree(private_data);
++nomem_out:
++ return -ENOMEM;
++}
++
++/* Called when the device is closed.
++ */
++static int gntdev_release(struct inode *inode, struct file *flip)
++{
++ if (flip->private_data) {
++ gntdev_file_private_data_t *private_data =
++ (gntdev_file_private_data_t *) flip->private_data;
++ if (private_data->foreign_pages) {
++ free_empty_pages_and_pagevec
++ (private_data->foreign_pages, MAX_GRANTS);
++ }
++ kfree(private_data);
++ }
++ module_put(THIS_MODULE);
++ return 0;
++}
++
++/* Called when an attempt is made to mmap() the device. The private data from
++ * @flip contains the list of grant references that can be mapped. The vm_pgoff
++ * field of @vma contains the index into that list that refers to the grant
++ * reference that will be mapped. Only mappings that are a multiple of
++ * PAGE_SIZE are handled.
++ */
++static int gntdev_mmap (struct file *flip, struct vm_area_struct *vma)
++{
++ struct gnttab_map_grant_ref op;
++ unsigned long slot_index = vma->vm_pgoff;
++ unsigned long kernel_vaddr, user_vaddr;
++ uint32_t size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ uint64_t ptep;
++ int ret;
++ int flags;
++ int i;
++ struct page *page;
++ gntdev_file_private_data_t *private_data = flip->private_data;
++
++ if (unlikely(!private_data)) {
++ printk(KERN_ERR "File's private data is NULL.\n");
++ return -EINVAL;
++ }
++
++ if (unlikely((size <= 0) || (size + slot_index) > MAX_GRANTS)) {
++		printk(KERN_ERR "Invalid number of pages or offset "
++		       "(num_pages = %u, first_slot = %lu).\n",
++		       size, slot_index);
++ return -ENXIO;
++ }
++
++ if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
++ printk(KERN_ERR "Writable mappings must be shared.\n");
++ return -EINVAL;
++ }
++
++ /* Slots must be in the NOT_YET_MAPPED state. */
++ down_write(&private_data->grants_sem);
++ for (i = 0; i < size; ++i) {
++ if (private_data->grants[slot_index + i].state !=
++ GNTDEV_SLOT_NOT_YET_MAPPED) {
++ printk(KERN_ERR "Slot (index = %ld) is in the wrong "
++ "state (%d).\n", slot_index + i,
++ private_data->grants[slot_index + i].state);
++ up_write(&private_data->grants_sem);
++ return -EINVAL;
++ }
++ }
++
++ /* Install the hook for unmapping. */
++ vma->vm_ops = &gntdev_vmops;
++
++ /* The VM area contains pages from another VM. */
++ vma->vm_flags |= VM_FOREIGN;
++	vma->vm_private_data = kzalloc(size * sizeof(struct page *),
++ GFP_KERNEL);
++ if (vma->vm_private_data == NULL) {
++ printk(KERN_ERR "Couldn't allocate mapping structure for VM "
++ "area.\n");
++ return -ENOMEM;
++ }
++
++ /* This flag prevents Bad PTE errors when the memory is unmapped. */
++ vma->vm_flags |= VM_RESERVED;
++
++ /* This flag prevents this VM area being copied on a fork(). A better
++ * behaviour might be to explicitly carry out the appropriate mappings
++ * on fork(), but I don't know if there's a hook for this.
++ */
++ vma->vm_flags |= VM_DONTCOPY;
++
++#ifdef CONFIG_X86
++ /* This flag ensures that the page tables are not unpinned before the
++ * VM area is unmapped. Therefore Xen still recognises the PTE as
++ * belonging to an L1 pagetable, and the grant unmap operation will
++ * succeed, even if the process does not exit cleanly.
++ */
++ vma->vm_mm->context.has_foreign_mappings = 1;
++#endif
++
++ for (i = 0; i < size; ++i) {
++
++ flags = GNTMAP_host_map;
++ if (!(vma->vm_flags & VM_WRITE))
++ flags |= GNTMAP_readonly;
++
++ kernel_vaddr = get_kernel_vaddr(private_data, slot_index + i);
++ user_vaddr = get_user_vaddr(vma, i);
++ page = pfn_to_page(__pa(kernel_vaddr) >> PAGE_SHIFT);
++
++ gnttab_set_map_op(&op, kernel_vaddr, flags,
++ private_data->grants[slot_index+i]
++ .u.valid.ref,
++ private_data->grants[slot_index+i]
++ .u.valid.domid);
++
++ /* Carry out the mapping of the grant reference. */
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++ &op, 1);
++ BUG_ON(ret);
++ if (op.status) {
++ printk(KERN_ERR "Error mapping the grant reference "
++ "into the kernel (%d). domid = %d; ref = %d\n",
++ op.status,
++ private_data->grants[slot_index+i]
++ .u.valid.domid,
++ private_data->grants[slot_index+i]
++ .u.valid.ref);
++ goto undo_map_out;
++ }
++
++ /* Store a reference to the page that will be mapped into user
++ * space.
++ */
++ ((struct page **) vma->vm_private_data)[i] = page;
++
++ /* Mark mapped page as reserved. */
++ SetPageReserved(page);
++
++ /* Record the grant handle, for use in the unmap operation. */
++ private_data->grants[slot_index+i].u.valid.kernel_handle =
++ op.handle;
++ private_data->grants[slot_index+i].u.valid.dev_bus_addr =
++ op.dev_bus_addr;
++
++ private_data->grants[slot_index+i].state = GNTDEV_SLOT_MAPPED;
++ private_data->grants[slot_index+i].u.valid.user_handle =
++ GNTDEV_INVALID_HANDLE;
++
++ /* Now perform the mapping to user space. */
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++
++ /* NOT USING SHADOW PAGE TABLES. */
++ /* In this case, we map the grant(s) straight into user
++ * space.
++ */
++
++ /* Get the machine address of the PTE for the user
++ * page.
++ */
++ if ((ret = create_lookup_pte_addr(vma->vm_mm,
++ vma->vm_start
++ + (i << PAGE_SHIFT),
++ &ptep)))
++ {
++ printk(KERN_ERR "Error obtaining PTE pointer "
++ "(%d).\n", ret);
++ goto undo_map_out;
++ }
++
++ /* Configure the map operation. */
++
++ /* The reference is to be used by host CPUs. */
++ flags = GNTMAP_host_map;
++
++ /* Specifies a user space mapping. */
++ flags |= GNTMAP_application_map;
++
++ /* The map request contains the machine address of the
++ * PTE to update.
++ */
++ flags |= GNTMAP_contains_pte;
++
++ if (!(vma->vm_flags & VM_WRITE))
++ flags |= GNTMAP_readonly;
++
++ gnttab_set_map_op(&op, ptep, flags,
++ private_data->grants[slot_index+i]
++ .u.valid.ref,
++ private_data->grants[slot_index+i]
++ .u.valid.domid);
++
++ /* Carry out the mapping of the grant reference. */
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++ &op, 1);
++ BUG_ON(ret);
++ if (op.status) {
++ printk(KERN_ERR "Error mapping the grant "
++ "reference into user space (%d). domid "
++ "= %d; ref = %d\n", op.status,
++ private_data->grants[slot_index+i].u
++ .valid.domid,
++ private_data->grants[slot_index+i].u
++ .valid.ref);
++ goto undo_map_out;
++ }
++
++ /* Record the grant handle, for use in the unmap
++ * operation.
++ */
++ private_data->grants[slot_index+i].u.
++ valid.user_handle = op.handle;
++
++ /* Update p2m structure with the new mapping. */
++ set_phys_to_machine(__pa(kernel_vaddr) >> PAGE_SHIFT,
++ FOREIGN_FRAME(private_data->
++ grants[slot_index+i]
++ .u.valid.dev_bus_addr
++ >> PAGE_SHIFT));
++ } else {
++ /* USING SHADOW PAGE TABLES. */
++ /* In this case, we simply insert the page into the VM
++ * area. */
++ ret = vm_insert_page(vma, user_vaddr, page);
++ }
++
++ }
++
++ up_write(&private_data->grants_sem);
++ return 0;
++
++undo_map_out:
++ /* If we have a mapping failure, the unmapping will be taken care of
++ * by do_mmap_pgoff(), which will eventually call gntdev_clear_pte().
++ * All we need to do here is free the vma_private_data.
++ */
++ kfree(vma->vm_private_data);
++
++ /* THIS IS VERY UNPLEASANT: do_mmap_pgoff() will set the vma->vm_file
++ * to NULL on failure. However, we need this in gntdev_clear_pte() to
++ * unmap the grants. Therefore, we smuggle a reference to the file's
++ * private data in the VM area's private data pointer.
++ */
++ vma->vm_private_data = private_data;
++
++ up_write(&private_data->grants_sem);
++
++ return -ENOMEM;
++}
++
++static pte_t gntdev_clear_pte(struct vm_area_struct *vma, unsigned long addr,
++ pte_t *ptep, int is_fullmm)
++{
++ int slot_index, ret;
++ pte_t copy;
++ struct gnttab_unmap_grant_ref op;
++ gntdev_file_private_data_t *private_data;
++
++ /* THIS IS VERY UNPLEASANT: do_mmap_pgoff() will set the vma->vm_file
++ * to NULL on failure. However, we need this in gntdev_clear_pte() to
++ * unmap the grants. Therefore, we smuggle a reference to the file's
++ * private data in the VM area's private data pointer.
++ */
++ if (vma->vm_file) {
++ private_data = (gntdev_file_private_data_t *)
++ vma->vm_file->private_data;
++ } else if (vma->vm_private_data) {
++ private_data = (gntdev_file_private_data_t *)
++ vma->vm_private_data;
++ } else {
++ private_data = NULL; /* gcc warning */
++ BUG();
++ }
++
++ /* Copy the existing value of the PTE for returning. */
++ copy = *ptep;
++
++ /* Calculate the grant relating to this PTE. */
++ slot_index = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
++
++ /* Only unmap grants if the slot has been mapped. This could be being
++ * called from a failing mmap().
++ */
++ if (private_data->grants[slot_index].state == GNTDEV_SLOT_MAPPED) {
++
++ /* First, we clear the user space mapping, if it has been made.
++ */
++ if (private_data->grants[slot_index].u.valid.user_handle !=
++ GNTDEV_INVALID_HANDLE &&
++ !xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* NOT USING SHADOW PAGE TABLES. */
++ gnttab_set_unmap_op(&op, virt_to_machine(ptep),
++ GNTMAP_contains_pte,
++ private_data->grants[slot_index]
++ .u.valid.user_handle);
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, &op, 1);
++ BUG_ON(ret);
++ if (op.status)
++ printk("User unmap grant status = %d\n",
++ op.status);
++ } else {
++ /* USING SHADOW PAGE TABLES. */
++ pte_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
++ }
++
++ /* Finally, we unmap the grant from kernel space. */
++ gnttab_set_unmap_op(&op,
++ get_kernel_vaddr(private_data, slot_index),
++ GNTMAP_host_map,
++ private_data->grants[slot_index].u.valid
++ .kernel_handle);
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
++ &op, 1);
++ BUG_ON(ret);
++ if (op.status)
++ printk("Kernel unmap grant status = %d\n", op.status);
++
++
++ /* Return slot to the not-yet-mapped state, so that it may be
++ * mapped again, or removed by a subsequent ioctl.
++ */
++ private_data->grants[slot_index].state =
++ GNTDEV_SLOT_NOT_YET_MAPPED;
++
++ /* Invalidate the physical to machine mapping for this page. */
++ set_phys_to_machine(__pa(get_kernel_vaddr(private_data,
++ slot_index))
++ >> PAGE_SHIFT, INVALID_P2M_ENTRY);
++
++ } else {
++ pte_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
++ }
++
++ return copy;
++}
++
++/* "Destructor" for a VM area.
++ */
++static void gntdev_vma_close(struct vm_area_struct *vma) {
++ if (vma->vm_private_data) {
++ kfree(vma->vm_private_data);
++ }
++}
++
++/* Called when an ioctl is made on the device.
++ */
++static int gntdev_ioctl(struct inode *inode, struct file *flip,
++ unsigned int cmd, unsigned long arg)
++{
++ int rc = 0;
++ gntdev_file_private_data_t *private_data =
++ (gntdev_file_private_data_t *) flip->private_data;
++
++ switch (cmd) {
++ case IOCTL_GNTDEV_MAP_GRANT_REF:
++ {
++ struct ioctl_gntdev_map_grant_ref op;
++ down_write(&private_data->grants_sem);
++ down_write(&private_data->free_list_sem);
++
++ if ((rc = copy_from_user(&op, (void __user *) arg,
++ sizeof(op)))) {
++ rc = -EFAULT;
++ goto map_out;
++ }
++ if (unlikely(op.count <= 0)) {
++ rc = -EINVAL;
++ goto map_out;
++ }
++
++ if (op.count == 1) {
++ if ((rc = add_grant_reference(flip, &op.refs[0],
++ &op.index)) < 0) {
++ printk(KERN_ERR "Adding grant reference "
++ "failed (%d).\n", rc);
++ goto map_out;
++ }
++ } else {
++ struct ioctl_gntdev_grant_ref *refs, *u;
++ refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL);
++ if (!refs) {
++ rc = -ENOMEM;
++ goto map_out;
++ }
++ u = ((struct ioctl_gntdev_map_grant_ref *)arg)->refs;
++ if ((rc = copy_from_user(refs,
++ (void __user *)u,
++ sizeof(*refs) * op.count))) {
++ printk(KERN_ERR "Copying refs from user failed"
++ " (%d).\n", rc);
++				rc = -EFAULT;
++ goto map_out;
++ }
++ if ((rc = find_contiguous_free_range(flip, op.count))
++ < 0) {
++ printk(KERN_ERR "Finding contiguous range "
++ "failed (%d).\n", rc);
++ kfree(refs);
++ goto map_out;
++ }
++ op.index = rc << PAGE_SHIFT;
++ if ((rc = add_grant_references(flip, op.count,
++ refs, rc))) {
++ printk(KERN_ERR "Adding grant references "
++ "failed (%d).\n", rc);
++ kfree(refs);
++ goto map_out;
++ }
++ compress_free_list(flip);
++ kfree(refs);
++ }
++ if ((rc = copy_to_user((void __user *) arg,
++ &op,
++ sizeof(op)))) {
++ printk(KERN_ERR "Copying result back to user failed "
++ "(%d)\n", rc);
++ rc = -EFAULT;
++ goto map_out;
++ }
++ map_out:
++ up_write(&private_data->grants_sem);
++ up_write(&private_data->free_list_sem);
++ return rc;
++ }
++ case IOCTL_GNTDEV_UNMAP_GRANT_REF:
++ {
++ struct ioctl_gntdev_unmap_grant_ref op;
++ int i, start_index;
++
++ down_write(&private_data->grants_sem);
++ down_write(&private_data->free_list_sem);
++
++ if ((rc = copy_from_user(&op,
++ (void __user *) arg,
++ sizeof(op)))) {
++ rc = -EFAULT;
++ goto unmap_out;
++ }
++
++ start_index = op.index >> PAGE_SHIFT;
++
++ /* First, check that all pages are in the NOT_YET_MAPPED
++ * state.
++ */
++ for (i = 0; i < op.count; ++i) {
++ if (unlikely
++ (private_data->grants[start_index + i].state
++ != GNTDEV_SLOT_NOT_YET_MAPPED)) {
++ if (private_data->grants[start_index + i].state
++ == GNTDEV_SLOT_INVALID) {
++ printk(KERN_ERR
++ "Tried to remove an invalid "
++ "grant at offset 0x%x.",
++ (start_index + i)
++ << PAGE_SHIFT);
++ rc = -EINVAL;
++ } else {
++ printk(KERN_ERR
++ "Tried to remove a grant which "
++ "is currently mmap()-ed at "
++ "offset 0x%x.",
++ (start_index + i)
++ << PAGE_SHIFT);
++ rc = -EBUSY;
++ }
++ goto unmap_out;
++ }
++ }
++
++ /* Unmap pages and add them to the free list.
++ */
++ for (i = 0; i < op.count; ++i) {
++ private_data->grants[start_index+i].state =
++ GNTDEV_SLOT_INVALID;
++ private_data->grants[start_index+i].u.free_list_index =
++ private_data->free_list_size;
++ private_data->free_list[private_data->free_list_size] =
++ start_index + i;
++ ++private_data->free_list_size;
++ }
++ compress_free_list(flip);
++
++ unmap_out:
++ up_write(&private_data->grants_sem);
++ up_write(&private_data->free_list_sem);
++ return rc;
++ }
++ case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
++ {
++ struct ioctl_gntdev_get_offset_for_vaddr op;
++ struct vm_area_struct *vma;
++ unsigned long vaddr;
++
++ if ((rc = copy_from_user(&op,
++ (void __user *) arg,
++ sizeof(op)))) {
++ rc = -EFAULT;
++ goto get_offset_out;
++ }
++ vaddr = (unsigned long)op.vaddr;
++
++ down_read(&current->mm->mmap_sem);
++ vma = find_vma(current->mm, vaddr);
++ if (vma == NULL) {
++ rc = -EFAULT;
++ goto get_offset_unlock_out;
++ }
++ if ((!vma->vm_ops) || (vma->vm_ops != &gntdev_vmops)) {
++ printk(KERN_ERR "The vaddr specified does not belong "
++ "to a gntdev instance: %#lx\n", vaddr);
++ rc = -EFAULT;
++ goto get_offset_unlock_out;
++ }
++ if (vma->vm_start != vaddr) {
++ printk(KERN_ERR "The vaddr specified in an "
++ "IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR must be at "
++ "the start of the VM area. vma->vm_start = "
++ "%#lx; vaddr = %#lx\n",
++ vma->vm_start, vaddr);
++ rc = -EFAULT;
++ goto get_offset_unlock_out;
++ }
++ op.offset = vma->vm_pgoff << PAGE_SHIFT;
++ op.count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ up_read(&current->mm->mmap_sem);
++ if ((rc = copy_to_user((void __user *) arg,
++ &op,
++ sizeof(op)))) {
++ rc = -EFAULT;
++ goto get_offset_out;
++ }
++ goto get_offset_out;
++ get_offset_unlock_out:
++ up_read(&current->mm->mmap_sem);
++ get_offset_out:
++ return rc;
++ }
++ default:
++ return -ENOIOCTLCMD;
++ }
++
++ return 0;
++}
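The three ioctl handlers above imply a straightforward userspace sequence: map grant references, mmap() the device at the returned index, optionally translate a vaddr back into its gntdev offset, then unmap. The sketch below is illustrative only; the header path, device node name, domid/ref field names and values are assumptions about the gntdev public ABI and are not part of this patch.

/* Illustrative sketch only: map one grant from a remote domain through
 * gntdev and tear it down again.  Header path, device node, and the
 * domid/ref field names and values are assumed; error handling is minimal. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/public/gntdev.h>          /* assumed location of the ioctl ABI */

int main(void)
{
    int fd = open("/dev/xen/gntdev", O_RDWR);   /* device node name assumed */
    if (fd < 0) { perror("open"); return 1; }

    struct ioctl_gntdev_map_grant_ref map;
    memset(&map, 0, sizeof(map));
    map.count = 1;
    map.refs[0].domid = 1;                      /* example remote domain */
    map.refs[0].ref   = 8;                      /* example grant reference */
    if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map)) { perror("map"); return 1; }

    /* The handler above returns a byte offset (slot << PAGE_SHIFT) in
     * map.index; mmap() at that offset performs the actual grant mapping.
     * A 4 KiB page size is assumed here. */
    void *addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                      fd, map.index);
    if (addr == MAP_FAILED) { perror("mmap"); return 1; }

    /* Translate the vaddr back to its gntdev offset, as the
     * GET_OFFSET_FOR_VADDR handler above does. */
    struct ioctl_gntdev_get_offset_for_vaddr off;
    memset(&off, 0, sizeof(off));
    off.vaddr = (unsigned long)addr;
    if (ioctl(fd, IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR, &off) == 0)
        printf("offset=%llx count=%u\n",
               (unsigned long long)off.offset, (unsigned int)off.count);

    /* Unmapping is only permitted once the range is no longer mmap()-ed,
     * per the NOT_YET_MAPPED check in the UNMAP handler above. */
    munmap(addr, 4096);

    struct ioctl_gntdev_unmap_grant_ref unmap;
    memset(&unmap, 0, sizeof(unmap));
    unmap.index = map.index;
    unmap.count = 1;
    ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap);

    close(fd);
    return 0;
}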
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/netback/Makefile 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,5 @@
++obj-$(CONFIG_XEN_NETDEV_BACKEND) := netbk.o
++obj-$(CONFIG_XEN_NETDEV_LOOPBACK) += netloop.o
++
++netbk-y := netback.o xenbus.o interface.o
++netloop-y := loopback.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/netback/common.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,157 @@
++/******************************************************************************
++ * arch/xen/drivers/netif/backend/common.h
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __NETIF__BACKEND__COMMON_H__
++#define __NETIF__BACKEND__COMMON_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/ip.h>
++#include <linux/in.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/wait.h>
++#include <xen/evtchn.h>
++#include <xen/interface/io/netif.h>
++#include <asm/io.h>
++#include <asm/pgalloc.h>
++#include <xen/interface/grant_table.h>
++#include <xen/gnttab.h>
++#include <xen/driver_util.h>
++
++#define DPRINTK(_f, _a...) \
++ pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__ , __LINE__ , ## _a )
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "xen_net: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "xen_net: " fmt, ##args)
++
++typedef struct netif_st {
++ /* Unique identifier for this interface. */
++ domid_t domid;
++ unsigned int handle;
++
++ u8 fe_dev_addr[6];
++
++ /* Physical parameters of the comms window. */
++ grant_handle_t tx_shmem_handle;
++ grant_ref_t tx_shmem_ref;
++ grant_handle_t rx_shmem_handle;
++ grant_ref_t rx_shmem_ref;
++ unsigned int irq;
++
++ /* The shared rings and indexes. */
++ netif_tx_back_ring_t tx;
++ netif_rx_back_ring_t rx;
++ struct vm_struct *tx_comms_area;
++ struct vm_struct *rx_comms_area;
++
++ /* Set of features that can be turned on in dev->features. */
++ int features;
++
++ /* Internal feature information. */
++ int can_queue:1; /* can queue packets for receiver? */
++ int copying_receiver:1; /* copy packets to receiver? */
++
++ /* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
++ RING_IDX rx_req_cons_peek;
++
++ /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
++ unsigned long credit_bytes;
++ unsigned long credit_usec;
++ unsigned long remaining_credit;
++ struct timer_list credit_timeout;
++
++ /* Enforce draining of the transmit queue. */
++ struct timer_list tx_queue_timeout;
++
++ /* Miscellaneous private stuff. */
++ struct list_head list; /* scheduling list */
++ atomic_t refcnt;
++ struct net_device *dev;
++ struct net_device_stats stats;
++
++ unsigned int carrier;
++
++ wait_queue_head_t waiting_to_free;
++} netif_t;
++
++/*
++ * Implement our own carrier flag: the network stack's version causes delays
++ * when the carrier is re-enabled (in particular, dev_activate() may not
++ * immediately be called, which can cause packet loss; also the etherbridge
++ * can be rather lazy in activating its port).
++ */
++#define netback_carrier_on(netif) ((netif)->carrier = 1)
++#define netback_carrier_off(netif) ((netif)->carrier = 0)
++#define netback_carrier_ok(netif) ((netif)->carrier)
++
++#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
++#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
++
++void netif_disconnect(netif_t *netif);
++
++netif_t *netif_alloc(domid_t domid, unsigned int handle);
++int netif_map(netif_t *netif, unsigned long tx_ring_ref,
++ unsigned long rx_ring_ref, unsigned int evtchn);
++
++#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define netif_put(_b) \
++ do { \
++ if ( atomic_dec_and_test(&(_b)->refcnt) ) \
++ wake_up(&(_b)->waiting_to_free); \
++ } while (0)
++
++void netif_xenbus_init(void);
++
++#define netif_schedulable(netif) \
++ (netif_running((netif)->dev) && netback_carrier_ok(netif))
++
++void netif_schedule_work(netif_t *netif);
++void netif_deschedule_work(netif_t *netif);
++
++int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
++struct net_device_stats *netif_be_get_stats(struct net_device *dev);
++irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++
++static inline int netbk_can_queue(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ return netif->can_queue;
++}
++
++static inline int netbk_can_sg(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ return netif->features & NETIF_F_SG;
++}
++
++#endif /* __NETIF__BACKEND__COMMON_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/netback/interface.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,336 @@
++/******************************************************************************
++ * arch/xen/drivers/netif/backend/interface.c
++ *
++ * Network-device interface management.
++ *
++ * Copyright (c) 2004-2005, Keir Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "common.h"
++#include <linux/ethtool.h>
++#include <linux/rtnetlink.h>
++
++/*
++ * Module parameter 'queue_length':
++ *
++ * Enables queuing in the network stack when a client has run out of receive
++ * descriptors. Although this feature can improve receive bandwidth by avoiding
++ * packet loss, it can also result in packets sitting in the 'tx_queue' for
++ * unbounded time. This is bad if those packets hold onto foreign resources.
++ * For example, consider a packet that holds onto resources belonging to the
++ * guest for which it is queued (e.g., packet received on vif1.0, destined for
++ * vif1.1 which is not activated in the guest): in this situation the guest
++ * will never be destroyed, unless vif1.1 is taken down. To avoid this, we
++ * run a timer (tx_queue_timeout) to drain the queue when the interface is
++ * blocked.
++ */
++static unsigned long netbk_queue_length = 32;
++module_param_named(queue_length, netbk_queue_length, ulong, 0);
++
++static void __netif_up(netif_t *netif)
++{
++ enable_irq(netif->irq);
++ netif_schedule_work(netif);
++}
++
++static void __netif_down(netif_t *netif)
++{
++ disable_irq(netif->irq);
++ netif_deschedule_work(netif);
++}
++
++static int net_open(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ if (netback_carrier_ok(netif)) {
++ __netif_up(netif);
++ netif_start_queue(dev);
++ }
++ return 0;
++}
++
++static int net_close(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ if (netback_carrier_ok(netif))
++ __netif_down(netif);
++ netif_stop_queue(dev);
++ return 0;
++}
++
++static int netbk_change_mtu(struct net_device *dev, int mtu)
++{
++ int max = netbk_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
++
++ if (mtu > max)
++ return -EINVAL;
++ dev->mtu = mtu;
++ return 0;
++}
++
++static int netbk_set_sg(struct net_device *dev, u32 data)
++{
++ if (data) {
++ netif_t *netif = netdev_priv(dev);
++
++ if (!(netif->features & NETIF_F_SG))
++ return -ENOSYS;
++ }
++
++ return ethtool_op_set_sg(dev, data);
++}
++
++static int netbk_set_tso(struct net_device *dev, u32 data)
++{
++ if (data) {
++ netif_t *netif = netdev_priv(dev);
++
++ if (!(netif->features & NETIF_F_TSO))
++ return -ENOSYS;
++ }
++
++ return ethtool_op_set_tso(dev, data);
++}
++
++static struct ethtool_ops network_ethtool_ops =
++{
++ .get_tx_csum = ethtool_op_get_tx_csum,
++ .set_tx_csum = ethtool_op_set_tx_csum,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = netbk_set_sg,
++ .get_tso = ethtool_op_get_tso,
++ .set_tso = netbk_set_tso,
++ .get_link = ethtool_op_get_link,
++};
++
++netif_t *netif_alloc(domid_t domid, unsigned int handle)
++{
++ int err = 0;
++ struct net_device *dev;
++ netif_t *netif;
++ char name[IFNAMSIZ] = {};
++
++ snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
++ dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
++ if (dev == NULL) {
++ DPRINTK("Could not create netif: out of memory\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ netif = netdev_priv(dev);
++ memset(netif, 0, sizeof(*netif));
++ netif->domid = domid;
++ netif->handle = handle;
++ atomic_set(&netif->refcnt, 1);
++ init_waitqueue_head(&netif->waiting_to_free);
++ netif->dev = dev;
++
++ netback_carrier_off(netif);
++
++ netif->credit_bytes = netif->remaining_credit = ~0UL;
++ netif->credit_usec = 0UL;
++ init_timer(&netif->credit_timeout);
++ /* Initialize 'expires' now: it's used to track the credit window. */
++ netif->credit_timeout.expires = jiffies;
++
++ init_timer(&netif->tx_queue_timeout);
++
++ dev->hard_start_xmit = netif_be_start_xmit;
++ dev->get_stats = netif_be_get_stats;
++ dev->open = net_open;
++ dev->stop = net_close;
++ dev->change_mtu = netbk_change_mtu;
++ dev->features = NETIF_F_IP_CSUM;
++
++ SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
++
++ dev->tx_queue_len = netbk_queue_length;
++
++ /*
++ * Initialise a dummy MAC address. We choose the numerically
++ * largest non-broadcast address to prevent the address getting
++ * stolen by an Ethernet bridge for STP purposes.
++ * (FE:FF:FF:FF:FF:FF)
++ */
++ memset(dev->dev_addr, 0xFF, ETH_ALEN);
++ dev->dev_addr[0] &= ~0x01;
++
++ rtnl_lock();
++ err = register_netdevice(dev);
++ rtnl_unlock();
++ if (err) {
++ DPRINTK("Could not register new net device %s: err=%d\n",
++ dev->name, err);
++ free_netdev(dev);
++ return ERR_PTR(err);
++ }
++
++ DPRINTK("Successfully created netif\n");
++ return netif;
++}
++
++static int map_frontend_pages(
++ netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)netif->tx_comms_area->addr,
++ GNTMAP_host_map, tx_ring_ref, netif->domid);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++ DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
++ return op.status;
++ }
++
++ netif->tx_shmem_ref = tx_ring_ref;
++ netif->tx_shmem_handle = op.handle;
++
++ gnttab_set_map_op(&op, (unsigned long)netif->rx_comms_area->addr,
++ GNTMAP_host_map, rx_ring_ref, netif->domid);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++ DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
++ return op.status;
++ }
++
++ netif->rx_shmem_ref = rx_ring_ref;
++ netif->rx_shmem_handle = op.handle;
++
++ return 0;
++}
++
++static void unmap_frontend_pages(netif_t *netif)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)netif->tx_comms_area->addr,
++ GNTMAP_host_map, netif->tx_shmem_handle);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++
++ gnttab_set_unmap_op(&op, (unsigned long)netif->rx_comms_area->addr,
++ GNTMAP_host_map, netif->rx_shmem_handle);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++}
++
++int netif_map(netif_t *netif, unsigned long tx_ring_ref,
++ unsigned long rx_ring_ref, unsigned int evtchn)
++{
++ int err = -ENOMEM;
++ netif_tx_sring_t *txs;
++ netif_rx_sring_t *rxs;
++
++ /* Already connected through? */
++ if (netif->irq)
++ return 0;
++
++ netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
++ if (netif->tx_comms_area == NULL)
++ return -ENOMEM;
++ netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
++ if (netif->rx_comms_area == NULL)
++ goto err_rx;
++
++ err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
++ if (err)
++ goto err_map;
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ netif->domid, evtchn, netif_be_int, 0,
++ netif->dev->name, netif);
++ if (err < 0)
++ goto err_hypervisor;
++ netif->irq = err;
++ disable_irq(netif->irq);
++
++ txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
++ BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
++
++ rxs = (netif_rx_sring_t *)
++ ((char *)netif->rx_comms_area->addr);
++ BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
++
++ netif->rx_req_cons_peek = 0;
++
++ netif_get(netif);
++
++ rtnl_lock();
++ netback_carrier_on(netif);
++ if (netif_running(netif->dev))
++ __netif_up(netif);
++ rtnl_unlock();
++
++ return 0;
++err_hypervisor:
++ unmap_frontend_pages(netif);
++err_map:
++ free_vm_area(netif->rx_comms_area);
++err_rx:
++ free_vm_area(netif->tx_comms_area);
++ return err;
++}
++
++void netif_disconnect(netif_t *netif)
++{
++ if (netback_carrier_ok(netif)) {
++ rtnl_lock();
++ netback_carrier_off(netif);
++ netif_carrier_off(netif->dev); /* discard queued packets */
++ if (netif_running(netif->dev))
++ __netif_down(netif);
++ rtnl_unlock();
++ netif_put(netif);
++ }
++
++ atomic_dec(&netif->refcnt);
++ wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);
++
++ del_timer_sync(&netif->credit_timeout);
++ del_timer_sync(&netif->tx_queue_timeout);
++
++ if (netif->irq)
++ unbind_from_irqhandler(netif->irq, netif);
++
++ unregister_netdev(netif->dev);
++
++ if (netif->tx.sring) {
++ unmap_frontend_pages(netif);
++ free_vm_area(netif->tx_comms_area);
++ free_vm_area(netif->rx_comms_area);
++ }
++
++ free_netdev(netif->dev);
++}
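For orientation, a minimal sketch of the order in which the functions above are expected to be called. The real caller is the xenbus glue added elsewhere in this patchset (not shown in this hunk); the domid, handle, grant references and event channel below are placeholder values, and the snippet assumes the declarations from common.h above.

/* Sketch only: typical create/connect/teardown order for a backend vif. */
static int example_backend_bringup(void)
{
    netif_t *netif;
    int err;

    netif = netif_alloc(1 /* frontend domid */, 0 /* handle */);
    if (IS_ERR(netif))
        return PTR_ERR(netif);

    /* Ring grant references and event channel normally come from the
     * frontend via xenstore; fixed numbers are used here for illustration. */
    err = netif_map(netif, 8 /* tx ring gref */, 9 /* rx ring gref */,
                    5 /* evtchn */);
    if (err) {
        netif_disconnect(netif);   /* also unregisters and frees the netdev */
        return err;
    }
    return 0;
}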
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/netback/loopback.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,320 @@
++/******************************************************************************
++ * netback/loopback.c
++ *
++ * A two-interface loopback device to emulate a local netfront-netback
++ * connection. This ensures that local packet delivery looks identical
++ * to inter-domain delivery. Most importantly, packets delivered locally
++ * originating from other domains will get *copied* when they traverse this
++ * driver. This prevents unbounded delays in socket-buffer queues from
++ * causing the netback driver to "seize up".
++ *
++ * This driver creates a symmetric pair of loopback interfaces with names
++ * vif0.0 and veth0. The intention is that 'vif0.0' is bound to an Ethernet
++ * bridge, just like a proper netback interface, while a local IP interface
++ * is configured on 'veth0'.
++ *
++ * As with a real netback interface, vif0.0 is configured with a suitable
++ * dummy MAC address. No default is provided for veth0: a reasonable strategy
++ * is to transfer eth0's MAC address to veth0, and give eth0 a dummy address
++ * (to avoid confusing the Etherbridge).
++ *
++ * Copyright (c) 2005 K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/ethtool.h>
++#include <net/dst.h>
++#include <net/xfrm.h> /* secpath_reset() */
++#include <asm/hypervisor.h> /* is_initial_xendomain() */
++
++static int nloopbacks = -1;
++module_param(nloopbacks, int, 0);
++MODULE_PARM_DESC(nloopbacks, "Number of netback-loopback devices to create");
++
++struct net_private {
++ struct net_device *loopback_dev;
++ struct net_device_stats stats;
++};
++
++static int loopback_open(struct net_device *dev)
++{
++ struct net_private *np = netdev_priv(dev);
++ memset(&np->stats, 0, sizeof(np->stats));
++ netif_start_queue(dev);
++ return 0;
++}
++
++static int loopback_close(struct net_device *dev)
++{
++ netif_stop_queue(dev);
++ return 0;
++}
++
++#ifdef CONFIG_X86
++static int is_foreign(unsigned long pfn)
++{
++ /* NB. Play it safe for auto-translation mode. */
++ return (xen_feature(XENFEAT_auto_translated_physmap) ||
++ (phys_to_machine_mapping[pfn] & FOREIGN_FRAME_BIT));
++}
++#else
++/* How to detect a foreign mapping? Play it safe. */
++#define is_foreign(pfn) (1)
++#endif
++
++static int skb_remove_foreign_references(struct sk_buff *skb)
++{
++ struct page *page;
++ unsigned long pfn;
++ int i, off;
++ char *vaddr;
++
++ BUG_ON(skb_shinfo(skb)->frag_list);
++
++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ pfn = page_to_pfn(skb_shinfo(skb)->frags[i].page);
++ if (!is_foreign(pfn))
++ continue;
++
++ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(!page))
++ return 0;
++
++ vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
++ off = skb_shinfo(skb)->frags[i].page_offset;
++ memcpy(page_address(page) + off,
++ vaddr + off,
++ skb_shinfo(skb)->frags[i].size);
++ kunmap_skb_frag(vaddr);
++
++ put_page(skb_shinfo(skb)->frags[i].page);
++ skb_shinfo(skb)->frags[i].page = page;
++ }
++
++ return 1;
++}
++
++static int loopback_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct net_private *np = netdev_priv(dev);
++
++ if (!skb_remove_foreign_references(skb)) {
++ np->stats.tx_dropped++;
++ dev_kfree_skb(skb);
++ return 0;
++ }
++
++ dst_release(skb->dst);
++ skb->dst = NULL;
++
++ skb_orphan(skb);
++
++ np->stats.tx_bytes += skb->len;
++ np->stats.tx_packets++;
++
++ /* Switch to loopback context. */
++ dev = np->loopback_dev;
++ np = netdev_priv(dev);
++
++ np->stats.rx_bytes += skb->len;
++ np->stats.rx_packets++;
++
++ if (skb->ip_summed == CHECKSUM_HW) {
++ /* Defer checksum calculation. */
++ skb->proto_csum_blank = 1;
++ /* Must be a local packet: assert its integrity. */
++ skb->proto_data_valid = 1;
++ }
++
++ skb->ip_summed = skb->proto_data_valid ?
++ CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
++
++ skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */
++ skb->protocol = eth_type_trans(skb, dev);
++ skb->dev = dev;
++ dev->last_rx = jiffies;
++
++ /* Flush netfilter context: rx'ed skbuffs not expected to have any. */
++ nf_reset(skb);
++ secpath_reset(skb);
++
++ netif_rx(skb);
++
++ return 0;
++}
++
++static struct net_device_stats *loopback_get_stats(struct net_device *dev)
++{
++ struct net_private *np = netdev_priv(dev);
++ return &np->stats;
++}
++
++static struct ethtool_ops network_ethtool_ops =
++{
++ .get_tx_csum = ethtool_op_get_tx_csum,
++ .set_tx_csum = ethtool_op_set_tx_csum,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = ethtool_op_set_sg,
++ .get_tso = ethtool_op_get_tso,
++ .set_tso = ethtool_op_set_tso,
++ .get_link = ethtool_op_get_link,
++};
++
++/*
++ * Nothing to do here. Virtual interface is point-to-point and the
++ * physical interface is probably promiscuous anyway.
++ */
++static void loopback_set_multicast_list(struct net_device *dev)
++{
++}
++
++static void loopback_construct(struct net_device *dev, struct net_device *lo)
++{
++ struct net_private *np = netdev_priv(dev);
++
++ np->loopback_dev = lo;
++
++ dev->open = loopback_open;
++ dev->stop = loopback_close;
++ dev->hard_start_xmit = loopback_start_xmit;
++ dev->get_stats = loopback_get_stats;
++ dev->set_multicast_list = loopback_set_multicast_list;
++ dev->change_mtu = NULL; /* allow arbitrary mtu */
++
++ dev->tx_queue_len = 0;
++
++ dev->features = (NETIF_F_HIGHDMA |
++ NETIF_F_LLTX |
++ NETIF_F_TSO |
++ NETIF_F_SG |
++ NETIF_F_IP_CSUM);
++
++ SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
++
++ /*
++ * We do not set a jumbo MTU on the interface. Otherwise the network
++ * stack will try to send large packets that will get dropped by the
++ * Ethernet bridge (unless the physical Ethernet interface is
++ * configured to transfer jumbo packets). If a larger MTU is desired
++ * then the system administrator can specify it using the 'ifconfig'
++ * command.
++ */
++ /*dev->mtu = 16*1024;*/
++}
++
++static int __init make_loopback(int i)
++{
++ struct net_device *dev1, *dev2;
++ char dev_name[IFNAMSIZ];
++ int err = -ENOMEM;
++
++ sprintf(dev_name, "vif0.%d", i);
++ dev1 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
++ if (!dev1)
++ return err;
++
++ sprintf(dev_name, "veth%d", i);
++ dev2 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
++ if (!dev2)
++ goto fail_netdev2;
++
++ loopback_construct(dev1, dev2);
++ loopback_construct(dev2, dev1);
++
++ /*
++ * Initialise a dummy MAC address for the 'dummy backend' interface. We
++ * choose the numerically largest non-broadcast address to prevent the
++ * address getting stolen by an Ethernet bridge for STP purposes.
++ */
++ memset(dev1->dev_addr, 0xFF, ETH_ALEN);
++ dev1->dev_addr[0] &= ~0x01;
++
++ if ((err = register_netdev(dev1)) != 0)
++ goto fail;
++
++ if ((err = register_netdev(dev2)) != 0) {
++ unregister_netdev(dev1);
++ goto fail;
++ }
++
++ return 0;
++
++ fail:
++ free_netdev(dev2);
++ fail_netdev2:
++ free_netdev(dev1);
++ return err;
++}
++
++static void __exit clean_loopback(int i)
++{
++ struct net_device *dev1, *dev2;
++ char dev_name[IFNAMSIZ];
++
++ sprintf(dev_name, "vif0.%d", i);
++ dev1 = dev_get_by_name(dev_name);
++ sprintf(dev_name, "veth%d", i);
++ dev2 = dev_get_by_name(dev_name);
++ if (dev1 && dev2) {
++ unregister_netdev(dev2);
++ unregister_netdev(dev1);
++ free_netdev(dev2);
++ free_netdev(dev1);
++ }
++}
++
++static int __init loopback_init(void)
++{
++ int i, err = 0;
++
++ if (nloopbacks == -1)
++ nloopbacks = is_initial_xendomain() ? 4 : 0;
++
++ for (i = 0; i < nloopbacks; i++)
++ if ((err = make_loopback(i)) != 0)
++ break;
++
++ return err;
++}
++
++module_init(loopback_init);
++
++static void __exit loopback_exit(void)
++{
++ int i;
++
++ for (i = nloopbacks; i-- > 0; )
++ clean_loopback(i);
++}
++
++module_exit(loopback_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/netback/netback.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,1496 @@
++/******************************************************************************
++ * drivers/xen/netback/netback.c
++ *
++ * Back-end of the driver for virtual network devices. This portion of the
++ * driver exports a 'unified' network-device interface that can be accessed
++ * by any operating system that implements a compatible front end. A
++ * reference front-end implementation can be found in:
++ * drivers/xen/netfront/netfront.c
++ *
++ * Copyright (c) 2002-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "common.h"
++#include <xen/balloon.h>
++#include <xen/interface/memory.h>
++
++/*define NETBE_DEBUG_INTERRUPT*/
++
++/* extra field used in struct page */
++#define netif_page_index(pg) (*(long *)&(pg)->mapping)
++
++struct netbk_rx_meta {
++ skb_frag_t frag;
++ int id;
++ int copy:1;
++};
++
++static void netif_idx_release(u16 pending_idx);
++static void netif_page_release(struct page *page);
++static void make_tx_response(netif_t *netif,
++ netif_tx_request_t *txp,
++ s8 st);
++static netif_rx_response_t *make_rx_response(netif_t *netif,
++ u16 id,
++ s8 st,
++ u16 offset,
++ u16 size,
++ u16 flags);
++
++static void net_tx_action(unsigned long unused);
++static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
++
++static void net_rx_action(unsigned long unused);
++static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
++
++static struct timer_list net_timer;
++
++#define MAX_PENDING_REQS 256
++
++static struct sk_buff_head rx_queue;
++
++static struct page **mmap_pages;
++static inline unsigned long idx_to_kaddr(unsigned int idx)
++{
++ return (unsigned long)pfn_to_kaddr(page_to_pfn(mmap_pages[idx]));
++}
++
++#define PKT_PROT_LEN 64
++
++static struct pending_tx_info {
++ netif_tx_request_t req;
++ netif_t *netif;
++} pending_tx_info[MAX_PENDING_REQS];
++static u16 pending_ring[MAX_PENDING_REQS];
++typedef unsigned int PEND_RING_IDX;
++#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
++static PEND_RING_IDX pending_prod, pending_cons;
++#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
++
++/* Freed TX SKBs get batched on this ring before return to pending_ring. */
++static u16 dealloc_ring[MAX_PENDING_REQS];
++static PEND_RING_IDX dealloc_prod, dealloc_cons;
++
++static struct sk_buff_head tx_queue;
++
++static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
++static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
++static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
++
++static struct list_head net_schedule_list;
++static spinlock_t net_schedule_list_lock;
++
++#define MAX_MFN_ALLOC 64
++static unsigned long mfn_list[MAX_MFN_ALLOC];
++static unsigned int alloc_index = 0;
++
++static inline unsigned long alloc_mfn(void)
++{
++ BUG_ON(alloc_index == 0);
++ return mfn_list[--alloc_index];
++}
++
++static int check_mfn(int nr)
++{
++ struct xen_memory_reservation reservation = {
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++
++ if (likely(alloc_index >= nr))
++ return 0;
++
++ set_xen_guest_handle(reservation.extent_start, mfn_list + alloc_index);
++ reservation.nr_extents = MAX_MFN_ALLOC - alloc_index;
++ alloc_index += HYPERVISOR_memory_op(XENMEM_increase_reservation,
++ &reservation);
++
++ return alloc_index >= nr ? 0 : -ENOMEM;
++}
++
++static inline void maybe_schedule_tx_action(void)
++{
++ smp_mb();
++ if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
++ !list_empty(&net_schedule_list))
++ tasklet_schedule(&net_tx_tasklet);
++}
++
++static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
++{
++ struct skb_shared_info *ninfo;
++ struct sk_buff *nskb;
++ unsigned long offset;
++ int ret;
++ int len;
++ int headlen;
++
++ BUG_ON(skb_shinfo(skb)->frag_list != NULL);
++
++ nskb = alloc_skb(SKB_MAX_HEAD(0), GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(!nskb))
++ goto err;
++
++ skb_reserve(nskb, 16 + NET_IP_ALIGN);
++ headlen = nskb->end - nskb->data;
++ if (headlen > skb_headlen(skb))
++ headlen = skb_headlen(skb);
++ ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
++ BUG_ON(ret);
++
++ ninfo = skb_shinfo(nskb);
++ ninfo->gso_size = skb_shinfo(skb)->gso_size;
++ ninfo->gso_type = skb_shinfo(skb)->gso_type;
++
++ offset = headlen;
++ len = skb->len - headlen;
++
++ nskb->len = skb->len;
++ nskb->data_len = len;
++ nskb->truesize += len;
++
++ while (len) {
++ struct page *page;
++ int copy;
++ int zero;
++
++ if (unlikely(ninfo->nr_frags >= MAX_SKB_FRAGS)) {
++ dump_stack();
++ goto err_free;
++ }
++
++ copy = len >= PAGE_SIZE ? PAGE_SIZE : len;
++ zero = len >= PAGE_SIZE ? 0 : __GFP_ZERO;
++
++ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | zero);
++ if (unlikely(!page))
++ goto err_free;
++
++ ret = skb_copy_bits(skb, offset, page_address(page), copy);
++ BUG_ON(ret);
++
++ ninfo->frags[ninfo->nr_frags].page = page;
++ ninfo->frags[ninfo->nr_frags].page_offset = 0;
++ ninfo->frags[ninfo->nr_frags].size = copy;
++ ninfo->nr_frags++;
++
++ offset += copy;
++ len -= copy;
++ }
++
++ offset = nskb->data - skb->data;
++
++ nskb->h.raw = skb->h.raw + offset;
++ nskb->nh.raw = skb->nh.raw + offset;
++ nskb->mac.raw = skb->mac.raw + offset;
++
++ return nskb;
++
++ err_free:
++ kfree_skb(nskb);
++ err:
++ return NULL;
++}
++
++static inline int netbk_max_required_rx_slots(netif_t *netif)
++{
++ if (netif->features & (NETIF_F_SG|NETIF_F_TSO))
++ return MAX_SKB_FRAGS + 2; /* header + extra_info + frags */
++ return 1; /* all in one */
++}
++
++static inline int netbk_queue_full(netif_t *netif)
++{
++ RING_IDX peek = netif->rx_req_cons_peek;
++ RING_IDX needed = netbk_max_required_rx_slots(netif);
++
++ return ((netif->rx.sring->req_prod - peek) < needed) ||
++ ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) < needed);
++}
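As a standalone illustration of the slot arithmetic in netbk_max_required_rx_slots() and netbk_queue_full() above (the ring size and MAX_SKB_FRAGS values used below are typical for 4 KiB pages but are assumptions, not taken from this patch):

/* Compiles on its own; mirrors the two comparisons in netbk_queue_full(). */
#include <stdio.h>

int main(void)
{
    unsigned int ring_size = 256;   /* assumed NET_RX_RING_SIZE for 4 KiB pages */
    unsigned int needed = 18 + 2;   /* assumed MAX_SKB_FRAGS + 2 with SG/TSO on */

    /* Free-running ring counters: the frontend has posted requests up to
     * req_prod, the backend has produced responses up to rsp_prod_pvt and
     * has already reserved slots up to rx_req_cons_peek. */
    unsigned int req_prod = 1000, rsp_prod_pvt = 900, peek = 995;

    int full = ((req_prod - peek) < needed) ||
               ((rsp_prod_pvt + ring_size - peek) < needed);

    printf("unconsumed requests: %u, unused ring slots: %u, full: %d\n",
           req_prod - peek, rsp_prod_pvt + ring_size - peek, full);
    return 0;
}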
++
++static void tx_queue_callback(unsigned long data)
++{
++ netif_t *netif = (netif_t *)data;
++ if (netif_schedulable(netif))
++ netif_wake_queue(netif->dev);
++}
++
++int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++
++ BUG_ON(skb->dev != dev);
++
++ /* Drop the packet if the target domain has no receive buffers. */
++ if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
++ goto drop;
++
++ /*
++ * Copy the packet here if it's destined for a flipping interface
++ * but isn't flippable (e.g. extra references to data).
++ * XXX For now we also copy skbuffs whose head crosses a page
++ * boundary, because netbk_gop_skb can't handle them.
++ */
++ if (!netif->copying_receiver ||
++ ((skb_headlen(skb) + offset_in_page(skb->data)) >= PAGE_SIZE)) {
++ struct sk_buff *nskb = netbk_copy_skb(skb);
++ if ( unlikely(nskb == NULL) )
++ goto drop;
++ /* Copy only the header fields we use in this driver. */
++ nskb->dev = skb->dev;
++ nskb->ip_summed = skb->ip_summed;
++ nskb->proto_data_valid = skb->proto_data_valid;
++ dev_kfree_skb(skb);
++ skb = nskb;
++ }
++
++ netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
++ !!skb_shinfo(skb)->gso_size;
++ netif_get(netif);
++
++ if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
++ netif->rx.sring->req_event = netif->rx_req_cons_peek +
++ netbk_max_required_rx_slots(netif);
++ mb(); /* request notification /then/ check & stop the queue */
++ if (netbk_queue_full(netif)) {
++ netif_stop_queue(dev);
++ /*
++ * Schedule 500ms timeout to restart the queue, thus
++ * ensuring that an inactive queue will be drained.
++ * Packets will immediately be dropped until more
++ * receive buffers become available (see
++ * netbk_queue_full() check above).
++ */
++ netif->tx_queue_timeout.data = (unsigned long)netif;
++ netif->tx_queue_timeout.function = tx_queue_callback;
++ __mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
++ }
++ }
++
++ skb_queue_tail(&rx_queue, skb);
++ tasklet_schedule(&net_rx_tasklet);
++
++ return 0;
++
++ drop:
++ netif->stats.tx_dropped++;
++ dev_kfree_skb(skb);
++ return 0;
++}
++
++#if 0
++static void xen_network_done_notify(void)
++{
++ static struct net_device *eth0_dev = NULL;
++ if (unlikely(eth0_dev == NULL))
++ eth0_dev = __dev_get_by_name("eth0");
++ netif_rx_schedule(eth0_dev);
++}
++/*
++ * Add following to poll() function in NAPI driver (Tigon3 is example):
++ * if ( xen_network_done() )
++ * tg3_enable_ints(tp);
++ */
++int xen_network_done(void)
++{
++ return skb_queue_empty(&rx_queue);
++}
++#endif
++
++struct netrx_pending_operations {
++ unsigned trans_prod, trans_cons;
++ unsigned mmu_prod, mmu_cons;
++ unsigned mcl_prod, mcl_cons;
++ unsigned copy_prod, copy_cons;
++ unsigned meta_prod, meta_cons;
++ mmu_update_t *mmu;
++ gnttab_transfer_t *trans;
++ gnttab_copy_t *copy;
++ multicall_entry_t *mcl;
++ struct netbk_rx_meta *meta;
++};
++
++/* Set up the grant operations for this fragment. If it's a flipping
++ interface, we also set up the unmap request from here. */
++static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
++ int i, struct netrx_pending_operations *npo,
++ struct page *page, unsigned long size,
++ unsigned long offset)
++{
++ mmu_update_t *mmu;
++ gnttab_transfer_t *gop;
++ gnttab_copy_t *copy_gop;
++ multicall_entry_t *mcl;
++ netif_rx_request_t *req;
++ unsigned long old_mfn, new_mfn;
++
++ old_mfn = virt_to_mfn(page_address(page));
++
++ req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
++ if (netif->copying_receiver) {
++ /* The fragment needs to be copied rather than
++ flipped. */
++ meta->copy = 1;
++ copy_gop = npo->copy + npo->copy_prod++;
++ copy_gop->flags = GNTCOPY_dest_gref;
++ if (PageForeign(page)) {
++ struct pending_tx_info *src_pend =
++ &pending_tx_info[netif_page_index(page)];
++ copy_gop->source.domid = src_pend->netif->domid;
++ copy_gop->source.u.ref = src_pend->req.gref;
++ copy_gop->flags |= GNTCOPY_source_gref;
++ } else {
++ copy_gop->source.domid = DOMID_SELF;
++ copy_gop->source.u.gmfn = old_mfn;
++ }
++ copy_gop->source.offset = offset;
++ copy_gop->dest.domid = netif->domid;
++ copy_gop->dest.offset = 0;
++ copy_gop->dest.u.ref = req->gref;
++ copy_gop->len = size;
++ } else {
++ meta->copy = 0;
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ new_mfn = alloc_mfn();
++
++ /*
++ * Set the new P2M table entry before
++ * reassigning the old data page. Heed the
++ * comment in pgtable-2level.h:pte_page(). :-)
++ */
++ set_phys_to_machine(page_to_pfn(page), new_mfn);
++
++ mcl = npo->mcl + npo->mcl_prod++;
++ MULTI_update_va_mapping(mcl,
++ (unsigned long)page_address(page),
++ pfn_pte_ma(new_mfn, PAGE_KERNEL),
++ 0);
++
++ mmu = npo->mmu + npo->mmu_prod++;
++ mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
++ MMU_MACHPHYS_UPDATE;
++ mmu->val = page_to_pfn(page);
++ }
++
++ gop = npo->trans + npo->trans_prod++;
++ gop->mfn = old_mfn;
++ gop->domid = netif->domid;
++ gop->ref = req->gref;
++ }
++ return req->id;
++}
++
++static void netbk_gop_skb(struct sk_buff *skb,
++ struct netrx_pending_operations *npo)
++{
++ netif_t *netif = netdev_priv(skb->dev);
++ int nr_frags = skb_shinfo(skb)->nr_frags;
++ int i;
++ int extra;
++ struct netbk_rx_meta *head_meta, *meta;
++
++ head_meta = npo->meta + npo->meta_prod++;
++ head_meta->frag.page_offset = skb_shinfo(skb)->gso_type;
++ head_meta->frag.size = skb_shinfo(skb)->gso_size;
++ extra = !!head_meta->frag.size + 1;
++
++ for (i = 0; i < nr_frags; i++) {
++ meta = npo->meta + npo->meta_prod++;
++ meta->frag = skb_shinfo(skb)->frags[i];
++ meta->id = netbk_gop_frag(netif, meta, i + extra, npo,
++ meta->frag.page,
++ meta->frag.size,
++ meta->frag.page_offset);
++ }
++
++ /*
++ * This must occur at the end to ensure that we don't trash skb_shinfo
++ * until we're done. We know that the head doesn't cross a page
++ * boundary because such packets get copied in netif_be_start_xmit.
++ */
++ head_meta->id = netbk_gop_frag(netif, head_meta, 0, npo,
++ virt_to_page(skb->data),
++ skb_headlen(skb),
++ offset_in_page(skb->data));
++
++ netif->rx.req_cons += nr_frags + extra;
++}
++
++static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta)
++{
++ int i;
++
++ for (i = 0; i < nr_frags; i++)
++ put_page(meta[i].frag.page);
++}
++
++/* This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
++ used to set up the operations on the top of
++ netrx_pending_operations, which have since been done. Check that
++ they didn't give any errors and advance over them. */
++static int netbk_check_gop(int nr_frags, domid_t domid,
++ struct netrx_pending_operations *npo)
++{
++ multicall_entry_t *mcl;
++ gnttab_transfer_t *gop;
++ gnttab_copy_t *copy_op;
++ int status = NETIF_RSP_OKAY;
++ int i;
++
++ for (i = 0; i <= nr_frags; i++) {
++ if (npo->meta[npo->meta_cons + i].copy) {
++ copy_op = npo->copy + npo->copy_cons++;
++ if (copy_op->status != GNTST_okay) {
++ DPRINTK("Bad status %d from copy to DOM%d.\n",
++ copy_op->status, domid);
++ status = NETIF_RSP_ERROR;
++ }
++ } else {
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ mcl = npo->mcl + npo->mcl_cons++;
++ /* The update_va_mapping() must not fail. */
++ BUG_ON(mcl->result != 0);
++ }
++
++ gop = npo->trans + npo->trans_cons++;
++ /* Check the reassignment error code. */
++ if (gop->status != 0) {
++ DPRINTK("Bad status %d from grant transfer to DOM%u\n",
++ gop->status, domid);
++ /*
++ * Page no longer belongs to us unless
++ * GNTST_bad_page, but that should be
++ * a fatal error anyway.
++ */
++ BUG_ON(gop->status == GNTST_bad_page);
++ status = NETIF_RSP_ERROR;
++ }
++ }
++ }
++
++ return status;
++}
++
++static void netbk_add_frag_responses(netif_t *netif, int status,
++ struct netbk_rx_meta *meta, int nr_frags)
++{
++ int i;
++ unsigned long offset;
++
++ for (i = 0; i < nr_frags; i++) {
++ int id = meta[i].id;
++ int flags = (i == nr_frags - 1) ? 0 : NETRXF_more_data;
++
++ if (meta[i].copy)
++ offset = 0;
++ else
++ offset = meta[i].frag.page_offset;
++ make_rx_response(netif, id, status, offset,
++ meta[i].frag.size, flags);
++ }
++}
++
++static void net_rx_action(unsigned long unused)
++{
++ netif_t *netif = NULL;
++ s8 status;
++ u16 id, irq, flags;
++ netif_rx_response_t *resp;
++ multicall_entry_t *mcl;
++ struct sk_buff_head rxq;
++ struct sk_buff *skb;
++ int notify_nr = 0;
++ int ret;
++ int nr_frags;
++ int count;
++ unsigned long offset;
++
++ /*
++ * Putting hundreds of bytes on the stack is considered rude.
++ * Static works because a tasklet can only be on one CPU at any time.
++ */
++ static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+3];
++ static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
++ static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE];
++ static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE];
++ static unsigned char rx_notify[NR_IRQS];
++ static u16 notify_list[NET_RX_RING_SIZE];
++ static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
++
++ struct netrx_pending_operations npo = {
++ mmu: rx_mmu,
++ trans: grant_trans_op,
++ copy: grant_copy_op,
++ mcl: rx_mcl,
++ meta: meta};
++
++ skb_queue_head_init(&rxq);
++
++ count = 0;
++
++ while ((skb = skb_dequeue(&rx_queue)) != NULL) {
++ nr_frags = skb_shinfo(skb)->nr_frags;
++ *(int *)skb->cb = nr_frags;
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap) &&
++ !((netif_t *)netdev_priv(skb->dev))->copying_receiver &&
++ check_mfn(nr_frags + 1)) {
++ /* Memory squeeze? Back off for an arbitrary while. */
++ if ( net_ratelimit() )
++ WPRINTK("Memory squeeze in netback "
++ "driver.\n");
++ mod_timer(&net_timer, jiffies + HZ);
++ skb_queue_head(&rx_queue, skb);
++ break;
++ }
++
++ netbk_gop_skb(skb, &npo);
++
++ count += nr_frags + 1;
++
++ __skb_queue_tail(&rxq, skb);
++
++ /* Filled the batch queue? */
++ if (count + MAX_SKB_FRAGS >= NET_RX_RING_SIZE)
++ break;
++ }
++
++ if (npo.mcl_prod &&
++ !xen_feature(XENFEAT_auto_translated_physmap)) {
++ mcl = npo.mcl + npo.mcl_prod++;
++
++ BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping);
++ mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
++
++ mcl->op = __HYPERVISOR_mmu_update;
++ mcl->args[0] = (unsigned long)rx_mmu;
++ mcl->args[1] = npo.mmu_prod;
++ mcl->args[2] = 0;
++ mcl->args[3] = DOMID_SELF;
++ }
++
++ if (npo.trans_prod) {
++ mcl = npo.mcl + npo.mcl_prod++;
++ mcl->op = __HYPERVISOR_grant_table_op;
++ mcl->args[0] = GNTTABOP_transfer;
++ mcl->args[1] = (unsigned long)grant_trans_op;
++ mcl->args[2] = npo.trans_prod;
++ }
++
++ if (npo.copy_prod) {
++ mcl = npo.mcl + npo.mcl_prod++;
++ mcl->op = __HYPERVISOR_grant_table_op;
++ mcl->args[0] = GNTTABOP_copy;
++ mcl->args[1] = (unsigned long)grant_copy_op;
++ mcl->args[2] = npo.copy_prod;
++ }
++
++ /* Nothing to do? */
++ if (!npo.mcl_prod)
++ return;
++
++ BUG_ON(npo.copy_prod > NET_RX_RING_SIZE);
++ BUG_ON(npo.mmu_prod > NET_RX_RING_SIZE);
++ BUG_ON(npo.trans_prod > NET_RX_RING_SIZE);
++ BUG_ON(npo.mcl_prod > NET_RX_RING_SIZE+3);
++ BUG_ON(npo.meta_prod > NET_RX_RING_SIZE);
++
++ ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod);
++ BUG_ON(ret != 0);
++
++ while ((skb = __skb_dequeue(&rxq)) != NULL) {
++ nr_frags = *(int *)skb->cb;
++
++ netif = netdev_priv(skb->dev);
++ /* We can't rely on skb_release_data to release the
++ pages used by fragments for us, since it tries to
++ touch the pages in the fraglist. If we're in
++ flipping mode, that doesn't work. In copying mode,
++ we still have access to all of the pages, and so
++ it's safe to let release_data deal with it. */
++ /* (Freeing the fragments is safe since we copy
++ non-linear skbs destined for flipping interfaces) */
++ if (!netif->copying_receiver) {
++ atomic_set(&(skb_shinfo(skb)->dataref), 1);
++ skb_shinfo(skb)->frag_list = NULL;
++ skb_shinfo(skb)->nr_frags = 0;
++ netbk_free_pages(nr_frags, meta + npo.meta_cons + 1);
++ }
++
++ netif->stats.tx_bytes += skb->len;
++ netif->stats.tx_packets++;
++
++ status = netbk_check_gop(nr_frags, netif->domid, &npo);
++
++ id = meta[npo.meta_cons].id;
++ flags = nr_frags ? NETRXF_more_data : 0;
++
++ if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
++ flags |= NETRXF_csum_blank | NETRXF_data_validated;
++ else if (skb->proto_data_valid) /* remote but checksummed? */
++ flags |= NETRXF_data_validated;
++
++ if (meta[npo.meta_cons].copy)
++ offset = 0;
++ else
++ offset = offset_in_page(skb->data);
++ resp = make_rx_response(netif, id, status, offset,
++ skb_headlen(skb), flags);
++
++ if (meta[npo.meta_cons].frag.size) {
++ struct netif_extra_info *gso =
++ (struct netif_extra_info *)
++ RING_GET_RESPONSE(&netif->rx,
++ netif->rx.rsp_prod_pvt++);
++
++ resp->flags |= NETRXF_extra_info;
++
++ gso->u.gso.size = meta[npo.meta_cons].frag.size;
++ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
++ gso->u.gso.pad = 0;
++ gso->u.gso.features = 0;
++
++ gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
++ gso->flags = 0;
++ }
++
++ netbk_add_frag_responses(netif, status,
++ meta + npo.meta_cons + 1,
++ nr_frags);
++
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
++ irq = netif->irq;
++ if (ret && !rx_notify[irq]) {
++ rx_notify[irq] = 1;
++ notify_list[notify_nr++] = irq;
++ }
++
++ if (netif_queue_stopped(netif->dev) &&
++ netif_schedulable(netif) &&
++ !netbk_queue_full(netif))
++ netif_wake_queue(netif->dev);
++
++ netif_put(netif);
++ dev_kfree_skb(skb);
++ npo.meta_cons += nr_frags + 1;
++ }
++
++ while (notify_nr != 0) {
++ irq = notify_list[--notify_nr];
++ rx_notify[irq] = 0;
++ notify_remote_via_irq(irq);
++ }
++
++ /* More work to do? */
++ if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
++ tasklet_schedule(&net_rx_tasklet);
++#if 0
++ else
++ xen_network_done_notify();
++#endif
++}
++
++static void net_alarm(unsigned long unused)
++{
++ tasklet_schedule(&net_rx_tasklet);
++}
++
++struct net_device_stats *netif_be_get_stats(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ return &netif->stats;
++}
++
++static int __on_net_schedule_list(netif_t *netif)
++{
++ return netif->list.next != NULL;
++}
++
++static void remove_from_net_schedule_list(netif_t *netif)
++{
++ spin_lock_irq(&net_schedule_list_lock);
++ if (likely(__on_net_schedule_list(netif))) {
++ list_del(&netif->list);
++ netif->list.next = NULL;
++ netif_put(netif);
++ }
++ spin_unlock_irq(&net_schedule_list_lock);
++}
++
++static void add_to_net_schedule_list_tail(netif_t *netif)
++{
++ if (__on_net_schedule_list(netif))
++ return;
++
++ spin_lock_irq(&net_schedule_list_lock);
++ if (!__on_net_schedule_list(netif) &&
++ likely(netif_schedulable(netif))) {
++ list_add_tail(&netif->list, &net_schedule_list);
++ netif_get(netif);
++ }
++ spin_unlock_irq(&net_schedule_list_lock);
++}
++
++/*
++ * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
++ * If this driver is pipelining transmit requests then we can be very
++ * aggressive in avoiding new-packet notifications -- frontend only needs to
++ * send a notification if there are no outstanding unreceived responses.
++ * If we may be buffering transmit requests for any reason then we must be rather
++ * more conservative and treat this as the final check for pending work.
++ */
++void netif_schedule_work(netif_t *netif)
++{
++ int more_to_do;
++
++#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
++ more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
++#else
++ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
++#endif
++
++ if (more_to_do) {
++ add_to_net_schedule_list_tail(netif);
++ maybe_schedule_tx_action();
++ }
++}
++
++void netif_deschedule_work(netif_t *netif)
++{
++ remove_from_net_schedule_list(netif);
++}
++
++
++static void tx_add_credit(netif_t *netif)
++{
++ unsigned long max_burst, max_credit;
++
++ /*
++ * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
++ * Otherwise the interface can seize up due to insufficient credit.
++ */
++ max_burst = RING_GET_REQUEST(&netif->tx, netif->tx.req_cons)->size;
++ max_burst = min(max_burst, 131072UL);
++ max_burst = max(max_burst, netif->credit_bytes);
++
++ /* Take care that adding a new chunk of credit doesn't wrap to zero. */
++ max_credit = netif->remaining_credit + netif->credit_bytes;
++ if (max_credit < netif->remaining_credit)
++ max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
++
++ netif->remaining_credit = min(max_credit, max_burst);
++}
++
++static void tx_credit_callback(unsigned long data)
++{
++ netif_t *netif = (netif_t *)data;
++ tx_add_credit(netif);
++ netif_schedule_work(netif);
++}
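tx_add_credit() and tx_credit_callback() above, together with the credit check in net_tx_action() further down, implement a simple token-bucket shaper: roughly credit_bytes bytes may be sent every credit_usec microseconds, and a replenished burst is capped at max(credit_bytes, min(size of the request at the ring head, 128 kB)). The standalone sketch below works through the numbers with assumed example values (not defaults from this patch; the patch leaves credit_bytes at ~0UL, i.e. unlimited, unless configured otherwise):

/* Compiles on its own; shows the rate implied by credit_bytes/credit_usec. */
#include <stdio.h>

int main(void)
{
    /* Assumed example configuration: 1 250 000 bytes every 100 000 us. */
    unsigned long credit_bytes = 1250000;
    unsigned long credit_usec  = 100000;

    double bytes_per_sec = (double)credit_bytes * 1e6 / credit_usec;
    printf("shaped to ~%.1f Mbit/s\n", bytes_per_sec * 8 / 1e6);

    /* Burst sizing as in tx_add_credit(): at least one full-sized request
     * (capped at 128 kB), and never less than credit_bytes. */
    unsigned long req_size  = 65536;                /* example head request */
    unsigned long max_burst = req_size < 131072UL ? req_size : 131072UL;
    if (max_burst < credit_bytes)
        max_burst = credit_bytes;
    printf("burst cap: %lu bytes\n", max_burst);
    return 0;
}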
++
++inline static void net_tx_action_dealloc(void)
++{
++ gnttab_unmap_grant_ref_t *gop;
++ u16 pending_idx;
++ PEND_RING_IDX dc, dp;
++ netif_t *netif;
++ int ret;
++
++ dc = dealloc_cons;
++ dp = dealloc_prod;
++
++ /* Ensure we see all indexes enqueued by netif_idx_release(). */
++ smp_rmb();
++
++ /*
++ * Free up any grants we have finished using
++ */
++ gop = tx_unmap_ops;
++ while (dc != dp) {
++ pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
++ gnttab_set_unmap_op(gop, idx_to_kaddr(pending_idx),
++ GNTMAP_host_map,
++ grant_tx_handle[pending_idx]);
++ gop++;
++ }
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
++ BUG_ON(ret);
++
++ while (dealloc_cons != dp) {
++ pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
++
++ netif = pending_tx_info[pending_idx].netif;
++
++ make_tx_response(netif, &pending_tx_info[pending_idx].req,
++ NETIF_RSP_OKAY);
++
++ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++
++ netif_put(netif);
++ }
++}
++
++static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
++{
++ RING_IDX cons = netif->tx.req_cons;
++
++ do {
++ make_tx_response(netif, txp, NETIF_RSP_ERROR);
++ if (cons >= end)
++ break;
++ txp = RING_GET_REQUEST(&netif->tx, cons++);
++ } while (1);
++ netif->tx.req_cons = cons;
++ netif_schedule_work(netif);
++ netif_put(netif);
++}
++
++static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
++ netif_tx_request_t *txp, int work_to_do)
++{
++ RING_IDX cons = netif->tx.req_cons;
++ int frags = 0;
++
++ if (!(first->flags & NETTXF_more_data))
++ return 0;
++
++ do {
++ if (frags >= work_to_do) {
++ DPRINTK("Need more frags\n");
++ return -frags;
++ }
++
++ if (unlikely(frags >= MAX_SKB_FRAGS)) {
++ DPRINTK("Too many frags\n");
++ return -frags;
++ }
++
++ memcpy(txp, RING_GET_REQUEST(&netif->tx, cons + frags),
++ sizeof(*txp));
++ if (txp->size > first->size) {
++ DPRINTK("Frags galore\n");
++ return -frags;
++ }
++
++ first->size -= txp->size;
++ frags++;
++
++ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
++ DPRINTK("txp->offset: %x, size: %u\n",
++ txp->offset, txp->size);
++ return -frags;
++ }
++ } while ((txp++)->flags & NETTXF_more_data);
++
++ return frags;
++}
++
++static gnttab_map_grant_ref_t *netbk_get_requests(netif_t *netif,
++ struct sk_buff *skb,
++ netif_tx_request_t *txp,
++ gnttab_map_grant_ref_t *mop)
++{
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ skb_frag_t *frags = shinfo->frags;
++ unsigned long pending_idx = *((u16 *)skb->data);
++ int i, start;
++
++ /* Skip first skb fragment if it is on same page as header fragment. */
++ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
++
++ for (i = start; i < shinfo->nr_frags; i++, txp++) {
++ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
++
++ gnttab_set_map_op(mop++, idx_to_kaddr(pending_idx),
++ GNTMAP_host_map | GNTMAP_readonly,
++ txp->gref, netif->domid);
++
++ memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
++ netif_get(netif);
++ pending_tx_info[pending_idx].netif = netif;
++ frags[i].page = (void *)pending_idx;
++ }
++
++ return mop;
++}
++
++static int netbk_tx_check_mop(struct sk_buff *skb,
++ gnttab_map_grant_ref_t **mopp)
++{
++ gnttab_map_grant_ref_t *mop = *mopp;
++ int pending_idx = *((u16 *)skb->data);
++ netif_t *netif = pending_tx_info[pending_idx].netif;
++ netif_tx_request_t *txp;
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ int nr_frags = shinfo->nr_frags;
++ int i, err, start;
++
++ /* Check status of header. */
++ err = mop->status;
++ if (unlikely(err)) {
++ txp = &pending_tx_info[pending_idx].req;
++ make_tx_response(netif, txp, NETIF_RSP_ERROR);
++ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++ netif_put(netif);
++ } else {
++ set_phys_to_machine(
++ __pa(idx_to_kaddr(pending_idx)) >> PAGE_SHIFT,
++ FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
++ grant_tx_handle[pending_idx] = mop->handle;
++ }
++
++ /* Skip first skb fragment if it is on same page as header fragment. */
++ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
++
++ for (i = start; i < nr_frags; i++) {
++ int j, newerr;
++
++ pending_idx = (unsigned long)shinfo->frags[i].page;
++
++ /* Check error status: if okay then remember grant handle. */
++ newerr = (++mop)->status;
++ if (likely(!newerr)) {
++ set_phys_to_machine(
++ __pa(idx_to_kaddr(pending_idx))>>PAGE_SHIFT,
++ FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
++ grant_tx_handle[pending_idx] = mop->handle;
++ /* Had a previous error? Invalidate this fragment. */
++ if (unlikely(err))
++ netif_idx_release(pending_idx);
++ continue;
++ }
++
++ /* Error on this fragment: respond to client with an error. */
++ txp = &pending_tx_info[pending_idx].req;
++ make_tx_response(netif, txp, NETIF_RSP_ERROR);
++ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++ netif_put(netif);
++
++ /* Not the first error? Preceding frags already invalidated. */
++ if (err)
++ continue;
++
++ /* First error: invalidate header and preceding fragments. */
++ pending_idx = *((u16 *)skb->data);
++ netif_idx_release(pending_idx);
++ for (j = start; j < i; j++) {
++ pending_idx = (unsigned long)shinfo->frags[j].page;
++ netif_idx_release(pending_idx);
++ }
++
++ /* Remember the error: invalidate all subsequent fragments. */
++ err = newerr;
++ }
++
++ *mopp = mop + 1;
++ return err;
++}
++
++static void netbk_fill_frags(struct sk_buff *skb)
++{
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ int nr_frags = shinfo->nr_frags;
++ int i;
++
++ for (i = 0; i < nr_frags; i++) {
++ skb_frag_t *frag = shinfo->frags + i;
++ netif_tx_request_t *txp;
++ unsigned long pending_idx;
++
++ pending_idx = (unsigned long)frag->page;
++ txp = &pending_tx_info[pending_idx].req;
++ frag->page = virt_to_page(idx_to_kaddr(pending_idx));
++ frag->size = txp->size;
++ frag->page_offset = txp->offset;
++
++ skb->len += txp->size;
++ skb->data_len += txp->size;
++ skb->truesize += txp->size;
++ }
++}
++
++int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
++ int work_to_do)
++{
++ struct netif_extra_info extra;
++ RING_IDX cons = netif->tx.req_cons;
++
++ do {
++ if (unlikely(work_to_do-- <= 0)) {
++ DPRINTK("Missing extra info\n");
++ return -EBADR;
++ }
++
++ memcpy(&extra, RING_GET_REQUEST(&netif->tx, cons),
++ sizeof(extra));
++ if (unlikely(!extra.type ||
++ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
++ netif->tx.req_cons = ++cons;
++ DPRINTK("Invalid extra type: %d\n", extra.type);
++ return -EINVAL;
++ }
++
++ memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
++ netif->tx.req_cons = ++cons;
++ } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
++
++ return work_to_do;
++}
++
++static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
++{
++ if (!gso->u.gso.size) {
++ DPRINTK("GSO size must not be zero.\n");
++ return -EINVAL;
++ }
++
++ /* Currently only TCPv4 S.O. is supported. */
++ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
++ DPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
++ return -EINVAL;
++ }
++
++ skb_shinfo(skb)->gso_size = gso->u.gso.size;
++ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
++
++ /* Header must be checked, and gso_segs computed. */
++ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
++ skb_shinfo(skb)->gso_segs = 0;
++
++ return 0;
++}
++
++/* Called after netfront has transmitted */
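++/*
++ * Overview: reclaim any completed grant mappings, then pull transmit
++ * requests from each scheduled interface's ring (subject to its credit
++ * limit), batch the grant-map operations into tx_map_ops, issue a single
++ * GNTTABOP_map_grant_ref hypercall, and finally build the skbs and hand
++ * them to netif_rx().
++ */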
++static void net_tx_action(unsigned long unused)
++{
++ struct list_head *ent;
++ struct sk_buff *skb;
++ netif_t *netif;
++ netif_tx_request_t txreq;
++ netif_tx_request_t txfrags[MAX_SKB_FRAGS];
++ struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
++ u16 pending_idx;
++ RING_IDX i;
++ gnttab_map_grant_ref_t *mop;
++ unsigned int data_len;
++ int ret, work_to_do;
++
++ if (dealloc_cons != dealloc_prod)
++ net_tx_action_dealloc();
++
++ mop = tx_map_ops;
++ while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
++ !list_empty(&net_schedule_list)) {
++ /* Get a netif from the list with work to do. */
++ ent = net_schedule_list.next;
++ netif = list_entry(ent, netif_t, list);
++ netif_get(netif);
++ remove_from_net_schedule_list(netif);
++
++ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
++ if (!work_to_do) {
++ netif_put(netif);
++ continue;
++ }
++
++ i = netif->tx.req_cons;
++ rmb(); /* Ensure that we see the request before we copy it. */
++ memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
++
++ /* Credit-based scheduling. */
++ if (txreq.size > netif->remaining_credit) {
++ unsigned long now = jiffies;
++ unsigned long next_credit =
++ netif->credit_timeout.expires +
++ msecs_to_jiffies(netif->credit_usec / 1000);
++
++ /* Timer could already be pending in rare cases. */
++ if (timer_pending(&netif->credit_timeout)) {
++ netif_put(netif);
++ continue;
++ }
++
++ /* Passed the point where we can replenish credit? */
++ if (time_after_eq(now, next_credit)) {
++ netif->credit_timeout.expires = now;
++ tx_add_credit(netif);
++ }
++
++ /* Still too big to send right now? Set a callback. */
++ if (txreq.size > netif->remaining_credit) {
++ netif->credit_timeout.data =
++ (unsigned long)netif;
++ netif->credit_timeout.function =
++ tx_credit_callback;
++ __mod_timer(&netif->credit_timeout,
++ next_credit);
++ netif_put(netif);
++ continue;
++ }
++ }
++ netif->remaining_credit -= txreq.size;
++
++ work_to_do--;
++ netif->tx.req_cons = ++i;
++
++ memset(extras, 0, sizeof(extras));
++ if (txreq.flags & NETTXF_extra_info) {
++ work_to_do = netbk_get_extras(netif, extras,
++ work_to_do);
++ i = netif->tx.req_cons;
++ if (unlikely(work_to_do < 0)) {
++ netbk_tx_err(netif, &txreq, i);
++ continue;
++ }
++ }
++
++ ret = netbk_count_requests(netif, &txreq, txfrags, work_to_do);
++ if (unlikely(ret < 0)) {
++ netbk_tx_err(netif, &txreq, i - ret);
++ continue;
++ }
++ i += ret;
++
++ if (unlikely(txreq.size < ETH_HLEN)) {
++ DPRINTK("Bad packet size: %d\n", txreq.size);
++ netbk_tx_err(netif, &txreq, i);
++ continue;
++ }
++
++ /* The first request's payload must not cross a page boundary. */
++ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
++ DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
++ txreq.offset, txreq.size,
++ (txreq.offset &~PAGE_MASK) + txreq.size);
++ netbk_tx_err(netif, &txreq, i);
++ continue;
++ }
++
++ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
++
++ data_len = (txreq.size > PKT_PROT_LEN &&
++ ret < MAX_SKB_FRAGS) ?
++ PKT_PROT_LEN : txreq.size;
++
++ skb = alloc_skb(data_len + 16 + NET_IP_ALIGN,
++ GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(skb == NULL)) {
++ DPRINTK("Can't allocate a skb in start_xmit.\n");
++ netbk_tx_err(netif, &txreq, i);
++ break;
++ }
++
++ /* Packets passed to netif_rx() must have some headroom. */
++ skb_reserve(skb, 16 + NET_IP_ALIGN);
++
++ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
++ struct netif_extra_info *gso;
++ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
++
++ if (netbk_set_skb_gso(skb, gso)) {
++ kfree_skb(skb);
++ netbk_tx_err(netif, &txreq, i);
++ continue;
++ }
++ }
++
++ gnttab_set_map_op(mop, idx_to_kaddr(pending_idx),
++ GNTMAP_host_map | GNTMAP_readonly,
++ txreq.gref, netif->domid);
++ mop++;
++
++ memcpy(&pending_tx_info[pending_idx].req,
++ &txreq, sizeof(txreq));
++ pending_tx_info[pending_idx].netif = netif;
++ *((u16 *)skb->data) = pending_idx;
++
++ __skb_put(skb, data_len);
++
++ skb_shinfo(skb)->nr_frags = ret;
++ if (data_len < txreq.size) {
++ skb_shinfo(skb)->nr_frags++;
++ skb_shinfo(skb)->frags[0].page =
++ (void *)(unsigned long)pending_idx;
++ } else {
++ /* Discriminate from any valid pending_idx value. */
++ skb_shinfo(skb)->frags[0].page = (void *)~0UL;
++ }
++
++ __skb_queue_tail(&tx_queue, skb);
++
++ pending_cons++;
++
++ mop = netbk_get_requests(netif, skb, txfrags, mop);
++
++ netif->tx.req_cons = i;
++ netif_schedule_work(netif);
++
++ if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
++ break;
++ }
++
++ if (mop == tx_map_ops)
++ return;
++
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
++ BUG_ON(ret);
++
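++ /*
++ * Second pass: the grant mappings are now in place. Check each skb's
++ * map operations, copy the protocol header into the linear area, fix
++ * up the fragments and hand the packet up the stack.
++ */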
++ mop = tx_map_ops;
++ while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
++ netif_tx_request_t *txp;
++
++ pending_idx = *((u16 *)skb->data);
++ netif = pending_tx_info[pending_idx].netif;
++ txp = &pending_tx_info[pending_idx].req;
++
++ /* Check the remap error code. */
++ if (unlikely(netbk_tx_check_mop(skb, &mop))) {
++ DPRINTK("netback grant failed.\n");
++ skb_shinfo(skb)->nr_frags = 0;
++ kfree_skb(skb);
++ continue;
++ }
++
++ data_len = skb->len;
++ memcpy(skb->data,
++ (void *)(idx_to_kaddr(pending_idx)|txp->offset),
++ data_len);
++ if (data_len < txp->size) {
++ /* Append the packet payload as a fragment. */
++ txp->offset += data_len;
++ txp->size -= data_len;
++ } else {
++ /* Schedule a response immediately. */
++ netif_idx_release(pending_idx);
++ }
++
++ /*
++ * Old frontends do not assert data_validated but we
++ * can infer it from csum_blank so test both flags.
++ */
++ if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ skb->proto_data_valid = 1;
++ } else {
++ skb->ip_summed = CHECKSUM_NONE;
++ skb->proto_data_valid = 0;
++ }
++ skb->proto_csum_blank = !!(txp->flags & NETTXF_csum_blank);
++
++ netbk_fill_frags(skb);
++
++ skb->dev = netif->dev;
++ skb->protocol = eth_type_trans(skb, skb->dev);
++
++ netif->stats.rx_bytes += skb->len;
++ netif->stats.rx_packets++;
++
++ netif_rx(skb);
++ netif->dev->last_rx = jiffies;
++ }
++}
++
++static void netif_idx_release(u16 pending_idx)
++{
++ static DEFINE_SPINLOCK(_lock);
++ unsigned long flags;
++
++ spin_lock_irqsave(&_lock, flags);
++ dealloc_ring[MASK_PEND_IDX(dealloc_prod)] = pending_idx;
++ /* Sync with net_tx_action_dealloc: insert idx /then/ incr producer. */
++ smp_wmb();
++ dealloc_prod++;
++ spin_unlock_irqrestore(&_lock, flags);
++
++ tasklet_schedule(&net_tx_tasklet);
++}
++
++static void netif_page_release(struct page *page)
++{
++ /* Ready for next use. */
++ init_page_count(page);
++
++ netif_idx_release(netif_page_index(page));
++}
++
++irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++ netif_t *netif = dev_id;
++
++ add_to_net_schedule_list_tail(netif);
++ maybe_schedule_tx_action();
++
++ if (netif_schedulable(netif) && !netbk_queue_full(netif))
++ netif_wake_queue(netif->dev);
++
++ return IRQ_HANDLED;
++}
++
++static void make_tx_response(netif_t *netif,
++ netif_tx_request_t *txp,
++ s8 st)
++{
++ RING_IDX i = netif->tx.rsp_prod_pvt;
++ netif_tx_response_t *resp;
++ int notify;
++
++ resp = RING_GET_RESPONSE(&netif->tx, i);
++ resp->id = txp->id;
++ resp->status = st;
++
++ if (txp->flags & NETTXF_extra_info)
++ RING_GET_RESPONSE(&netif->tx, ++i)->status = NETIF_RSP_NULL;
++
++ netif->tx.rsp_prod_pvt = ++i;
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
++ if (notify)
++ notify_remote_via_irq(netif->irq);
++
++#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
++ if (i == netif->tx.req_cons) {
++ int more_to_do;
++ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
++ if (more_to_do)
++ add_to_net_schedule_list_tail(netif);
++ }
++#endif
++}
++
++static netif_rx_response_t *make_rx_response(netif_t *netif,
++ u16 id,
++ s8 st,
++ u16 offset,
++ u16 size,
++ u16 flags)
++{
++ RING_IDX i = netif->rx.rsp_prod_pvt;
++ netif_rx_response_t *resp;
++
++ resp = RING_GET_RESPONSE(&netif->rx, i);
++ resp->offset = offset;
++ resp->flags = flags;
++ resp->id = id;
++ resp->status = (s16)size;
++ if (st < 0)
++ resp->status = (s16)st;
++
++ netif->rx.rsp_prod_pvt = ++i;
++
++ return resp;
++}
++
++#ifdef NETBE_DEBUG_INTERRUPT
++static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
++{
++ struct list_head *ent;
++ netif_t *netif;
++ int i = 0;
++
++ printk(KERN_ALERT "netif_schedule_list:\n");
++ spin_lock_irq(&net_schedule_list_lock);
++
++ list_for_each (ent, &net_schedule_list) {
++ netif = list_entry(ent, netif_t, list);
++ printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
++ "rx_resp_prod=%08x\n",
++ i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
++ printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
++ netif->tx.req_cons, netif->tx.rsp_prod_pvt);
++ printk(KERN_ALERT " shared(rx_req_prod=%08x "
++ "rx_resp_prod=%08x\n",
++ netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
++ printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
++ netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
++ printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
++ netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
++ i++;
++ }
++
++ spin_unlock_irq(&net_schedule_list_lock);
++ printk(KERN_ALERT " ** End of netif_schedule_list **\n");
++
++ return IRQ_HANDLED;
++}
++#endif
++
++static int __init netback_init(void)
++{
++ int i;
++ struct page *page;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ /* We can increase reservation by this much in net_rx_action(). */
++ balloon_update_driver_allowance(NET_RX_RING_SIZE);
++
++ skb_queue_head_init(&rx_queue);
++ skb_queue_head_init(&tx_queue);
++
++ init_timer(&net_timer);
++ net_timer.data = 0;
++ net_timer.function = net_alarm;
++
++ mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
++ if (mmap_pages == NULL) {
++ printk("%s: out of memory\n", __FUNCTION__);
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < MAX_PENDING_REQS; i++) {
++ page = mmap_pages[i];
++ SetPageForeign(page, netif_page_release);
++ netif_page_index(page) = i;
++ }
++
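++ /*
++ * pending_ring[] is a ring of free pending-request indices: every slot
++ * starts out free, so the producer begins a full MAX_PENDING_REQS
++ * ahead of the consumer.
++ */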
++ pending_cons = 0;
++ pending_prod = MAX_PENDING_REQS;
++ for (i = 0; i < MAX_PENDING_REQS; i++)
++ pending_ring[i] = i;
++
++ spin_lock_init(&net_schedule_list_lock);
++ INIT_LIST_HEAD(&net_schedule_list);
++
++ netif_xenbus_init();
++
++#ifdef NETBE_DEBUG_INTERRUPT
++ (void)bind_virq_to_irqhandler(VIRQ_DEBUG,
++ 0,
++ netif_be_dbg,
++ SA_SHIRQ,
++ "net-be-dbg",
++ &netif_be_dbg);
++#endif
++
++ return 0;
++}
++
++module_init(netback_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/netback/xenbus.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,448 @@
++/* Xenbus code for netif backend
++ Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
++ Copyright (C) 2005 XenSource Ltd
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++*/
++
++#include <stdarg.h>
++#include <linux/module.h>
++#include <xen/xenbus.h>
++#include "common.h"
++
++#if 0
++#undef DPRINTK
++#define DPRINTK(fmt, args...) \
++ printk("netback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
++#endif
++
++struct backend_info {
++ struct xenbus_device *dev;
++ netif_t *netif;
++ enum xenbus_state frontend_state;
++};
++
++static int connect_rings(struct backend_info *);
++static void connect(struct backend_info *);
++static void backend_create_netif(struct backend_info *be);
++
++static int netback_remove(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++ if (be->netif) {
++ netif_disconnect(be->netif);
++ be->netif = NULL;
++ }
++ kfree(be);
++ dev->dev.driver_data = NULL;
++ return 0;
++}
++
++
++/**
++ * Entry point to this code when a new device is created. Allocate the basic
++ * structures and switch to InitWait.
++ */
++static int netback_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ const char *message;
++ struct xenbus_transaction xbt;
++ int err;
++ struct backend_info *be = kzalloc(sizeof(struct backend_info),
++ GFP_KERNEL);
++ if (!be) {
++ xenbus_dev_fatal(dev, -ENOMEM,
++ "allocating backend structure");
++ return -ENOMEM;
++ }
++
++ be->dev = dev;
++ dev->dev.driver_data = be;
++
++ do {
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ goto fail;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
++ if (err) {
++ message = "writing feature-sg";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
++ "%d", 1);
++ if (err) {
++ message = "writing feature-gso-tcpv4";
++ goto abort_transaction;
++ }
++
++ /* We support rx-copy path. */
++ err = xenbus_printf(xbt, dev->nodename,
++ "feature-rx-copy", "%d", 1);
++ if (err) {
++ message = "writing feature-rx-copy";
++ goto abort_transaction;
++ }
++
++ /*
++ * We don't support rx-flip path (except old guests who don't
++ * grok this feature flag).
++ */
++ err = xenbus_printf(xbt, dev->nodename,
++ "feature-rx-flip", "%d", 0);
++ if (err) {
++ message = "writing feature-rx-flip";
++ goto abort_transaction;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ } while (err == -EAGAIN);
++
++ if (err) {
++ xenbus_dev_fatal(dev, err, "completing transaction");
++ goto fail;
++ }
++
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err)
++ goto fail;
++
++ /* This kicks hotplug scripts, so do it immediately. */
++ backend_create_netif(be);
++
++ return 0;
++
++abort_transaction:
++ xenbus_transaction_end(xbt, 1);
++ xenbus_dev_fatal(dev, err, "%s", message);
++fail:
++ DPRINTK("failed");
++ netback_remove(dev);
++ return err;
++}
++
++
++/**
++ * Handle the creation of the hotplug script environment. We add the script
++ * and vif variables to the environment, for the benefit of the vif-* hotplug
++ * scripts.
++ */
++static int netback_uevent(struct xenbus_device *xdev, char **envp,
++ int num_envp, char *buffer, int buffer_size)
++{
++ struct backend_info *be = xdev->dev.driver_data;
++ netif_t *netif = be->netif;
++ int i = 0, length = 0;
++ char *val;
++
++ DPRINTK("netback_uevent");
++
++ val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
++ if (IS_ERR(val)) {
++ int err = PTR_ERR(val);
++ xenbus_dev_fatal(xdev, err, "reading script");
++ return err;
++ } else {
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
++ &length, "script=%s", val);
++ kfree(val);
++ }
++
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "vif=%s", netif->dev->name);
++
++ envp[i] = NULL;
++
++ return 0;
++}
++
++
++static void backend_create_netif(struct backend_info *be)
++{
++ int err;
++ long handle;
++ struct xenbus_device *dev = be->dev;
++
++ if (be->netif != NULL)
++ return;
++
++ err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
++ if (err != 1) {
++ xenbus_dev_fatal(dev, err, "reading handle");
++ return;
++ }
++
++ be->netif = netif_alloc(dev->otherend_id, handle);
++ if (IS_ERR(be->netif)) {
++ err = PTR_ERR(be->netif);
++ be->netif = NULL;
++ xenbus_dev_fatal(dev, err, "creating interface");
++ return;
++ }
++
++ kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
++}
++
++
++/**
++ * Callback received when the frontend's state changes.
++ */
++static void frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++ DPRINTK("%s", xenbus_strstate(frontend_state));
++
++ be->frontend_state = frontend_state;
++
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ if (dev->state == XenbusStateClosed) {
++ printk(KERN_INFO "%s: %s: prepare for reconnect\n",
++ __FUNCTION__, dev->nodename);
++ if (be->netif) {
++ netif_disconnect(be->netif);
++ be->netif = NULL;
++ }
++ xenbus_switch_state(dev, XenbusStateInitWait);
++ }
++ break;
++
++ case XenbusStateInitialised:
++ break;
++
++ case XenbusStateConnected:
++ backend_create_netif(be);
++ if (be->netif)
++ connect(be);
++ break;
++
++ case XenbusStateClosing:
++ xenbus_switch_state(dev, XenbusStateClosing);
++ break;
++
++ case XenbusStateClosed:
++ xenbus_switch_state(dev, XenbusStateClosed);
++ if (xenbus_dev_is_online(dev))
++ break;
++ /* fall through if not online */
++ case XenbusStateUnknown:
++ if (be->netif != NULL)
++ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
++ device_unregister(&dev->dev);
++ break;
++
++ default:
++ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++ frontend_state);
++ break;
++ }
++}
++
++
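++/*
++ * The optional xenstore "rate" node is parsed as "<bytes>,<usec>", i.e. a
++ * byte budget replenished every <usec> microseconds by the credit
++ * scheduler; if the node is absent or malformed, bandwidth is unlimited.
++ */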
++static void xen_net_read_rate(struct xenbus_device *dev,
++ unsigned long *bytes, unsigned long *usec)
++{
++ char *s, *e;
++ unsigned long b, u;
++ char *ratestr;
++
++ /* Default to unlimited bandwidth. */
++ *bytes = ~0UL;
++ *usec = 0;
++
++ ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
++ if (IS_ERR(ratestr))
++ return;
++
++ s = ratestr;
++ b = simple_strtoul(s, &e, 10);
++ if ((s == e) || (*e != ','))
++ goto fail;
++
++ s = e + 1;
++ u = simple_strtoul(s, &e, 10);
++ if ((s == e) || (*e != '\0'))
++ goto fail;
++
++ *bytes = b;
++ *usec = u;
++
++ kfree(ratestr);
++ return;
++
++ fail:
++ WPRINTK("Failed to parse network rate limit. Traffic unlimited.\n");
++ kfree(ratestr);
++}
++
++static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
++{
++ char *s, *e, *macstr;
++ int i;
++
++ macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
++ if (IS_ERR(macstr))
++ return PTR_ERR(macstr);
++
++ for (i = 0; i < ETH_ALEN; i++) {
++ mac[i] = simple_strtoul(s, &e, 16);
++ if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
++ kfree(macstr);
++ return -ENOENT;
++ }
++ s = e+1;
++ }
++
++ kfree(macstr);
++ return 0;
++}
++
++static void connect(struct backend_info *be)
++{
++ int err;
++ struct xenbus_device *dev = be->dev;
++
++ err = connect_rings(be);
++ if (err)
++ return;
++
++ err = xen_net_read_mac(dev, be->netif->fe_dev_addr);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
++ return;
++ }
++
++ xen_net_read_rate(dev, &be->netif->credit_bytes,
++ &be->netif->credit_usec);
++ be->netif->remaining_credit = be->netif->credit_bytes;
++
++ xenbus_switch_state(dev, XenbusStateConnected);
++
++ netif_wake_queue(be->netif->dev);
++}
++
++
++static int connect_rings(struct backend_info *be)
++{
++ struct xenbus_device *dev = be->dev;
++ unsigned long tx_ring_ref, rx_ring_ref;
++ unsigned int evtchn, rx_copy;
++ int err;
++ int val;
++
++ DPRINTK("");
++
++ err = xenbus_gather(XBT_NIL, dev->otherend,
++ "tx-ring-ref", "%lu", &tx_ring_ref,
++ "rx-ring-ref", "%lu", &rx_ring_ref,
++ "event-channel", "%u", &evtchn, NULL);
++ if (err) {
++ xenbus_dev_fatal(dev, err,
++ "reading %s/ring-ref and event-channel",
++ dev->otherend);
++ return err;
++ }
++
++ err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
++ &rx_copy);
++ if (err == -ENOENT) {
++ err = 0;
++ rx_copy = 0;
++ }
++ if (err < 0) {
++ xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
++ dev->otherend);
++ return err;
++ }
++ be->netif->copying_receiver = !!rx_copy;
++
++ if (be->netif->dev->tx_queue_len != 0) {
++ if (xenbus_scanf(XBT_NIL, dev->otherend,
++ "feature-rx-notify", "%d", &val) < 0)
++ val = 0;
++ if (val)
++ be->netif->can_queue = 1;
++ else
++ /* Must be non-zero for pfifo_fast to work. */
++ be->netif->dev->tx_queue_len = 1;
++ }
++
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", "%d", &val) < 0)
++ val = 0;
++ if (val) {
++ be->netif->features |= NETIF_F_SG;
++ be->netif->dev->features |= NETIF_F_SG;
++ }
++
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", "%d",
++ &val) < 0)
++ val = 0;
++ if (val) {
++ be->netif->features |= NETIF_F_TSO;
++ be->netif->dev->features |= NETIF_F_TSO;
++ }
++
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
++ "%d", &val) < 0)
++ val = 0;
++ if (val) {
++ be->netif->features &= ~NETIF_F_IP_CSUM;
++ be->netif->dev->features &= ~NETIF_F_IP_CSUM;
++ }
++
++ /* Map the shared frame, irq etc. */
++ err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
++ if (err) {
++ xenbus_dev_fatal(dev, err,
++ "mapping shared-frames %lu/%lu port %u",
++ tx_ring_ref, rx_ring_ref, evtchn);
++ return err;
++ }
++ return 0;
++}
++
++
++/* ** Driver Registration ** */
++
++
++static struct xenbus_device_id netback_ids[] = {
++ { "vif" },
++ { "" }
++};
++
++
++static struct xenbus_driver netback = {
++ .name = "vif",
++ .owner = THIS_MODULE,
++ .ids = netback_ids,
++ .probe = netback_probe,
++ .remove = netback_remove,
++ .uevent = netback_uevent,
++ .otherend_changed = frontend_changed,
++};
++
++
++void netif_xenbus_init(void)
++{
++ xenbus_register_backend(&netback);
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/netfront/Makefile 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,4 @@
++
++obj-$(CONFIG_XEN_NETDEV_FRONTEND) := xennet.o
++
++xennet-objs := netfront.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/netfront/netfront.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,2133 @@
++/******************************************************************************
++ * Virtual network driver for conversing with remote driver backends.
++ *
++ * Copyright (c) 2002-2005, K A Fraser
++ * Copyright (c) 2005, XenSource Ltd
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/init.h>
++#include <linux/bitops.h>
++#include <linux/ethtool.h>
++#include <linux/in.h>
++#include <linux/if_ether.h>
++#include <linux/io.h>
++#include <linux/moduleparam.h>
++#include <net/sock.h>
++#include <net/pkt_sched.h>
++#include <net/arp.h>
++#include <net/route.h>
++#include <asm/uaccess.h>
++#include <xen/evtchn.h>
++#include <xen/xenbus.h>
++#include <xen/interface/io/netif.h>
++#include <xen/interface/memory.h>
++#include <xen/balloon.h>
++#include <asm/page.h>
++#include <asm/maddr.h>
++#include <asm/uaccess.h>
++#include <xen/interface/grant_table.h>
++#include <xen/gnttab.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++struct netfront_cb {
++ struct page *page;
++ unsigned offset;
++};
++
++#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
++
++/*
++ * Mutually-exclusive module options to select receive data path:
++ * rx_copy : Packets are copied by network backend into local memory
++ * rx_flip : Page containing packet data is transferred to our ownership
++ * For fully-virtualised guests there is no option - copying must be used.
++ * For paravirtualised guests, flipping is the default.
++ */
++#ifdef CONFIG_XEN
++static int MODPARM_rx_copy = 0;
++module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
++MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
++static int MODPARM_rx_flip = 0;
++module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
++MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
++#else
++static const int MODPARM_rx_copy = 1;
++static const int MODPARM_rx_flip = 0;
++#endif
++
++#define RX_COPY_THRESHOLD 256
++
++/* If we don't have GSO, fake things up so that we never try to use it. */
++#if defined(NETIF_F_GSO)
++#define HAVE_GSO 1
++#define HAVE_TSO 1 /* TSO is a subset of GSO */
++static inline void dev_disable_gso_features(struct net_device *dev)
++{
++ /* Turn off all GSO bits except ROBUST. */
++ dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
++ dev->features |= NETIF_F_GSO_ROBUST;
++}
++#elif defined(NETIF_F_TSO)
++#define HAVE_TSO 1
++
++/* Some older kernels cannot cope with incorrect checksums,
++ * particularly in netfilter. I'm not sure there is 100% correlation
++ * with the presence of NETIF_F_TSO but it appears to be a good first
++ * approximation.
++ */
++#define HAVE_NO_CSUM_OFFLOAD 1
++
++#define gso_size tso_size
++#define gso_segs tso_segs
++static inline void dev_disable_gso_features(struct net_device *dev)
++{
++ /* Turn off all TSO bits. */
++ dev->features &= ~NETIF_F_TSO;
++}
++static inline int skb_is_gso(const struct sk_buff *skb)
++{
++ return skb_shinfo(skb)->tso_size;
++}
++static inline int skb_gso_ok(struct sk_buff *skb, int features)
++{
++ return (features & NETIF_F_TSO);
++}
++
++static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
++{
++ return skb_is_gso(skb) &&
++ (!skb_gso_ok(skb, dev->features) ||
++ unlikely(skb->ip_summed != CHECKSUM_HW));
++}
++#else
++#define netif_needs_gso(dev, skb) 0
++#define dev_disable_gso_features(dev) ((void)0)
++#endif
++
++#define GRANT_INVALID_REF 0
++
++#define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
++#define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE)
++
++struct netfront_info {
++ struct list_head list;
++ struct net_device *netdev;
++
++ struct net_device_stats stats;
++
++ struct netif_tx_front_ring tx;
++ struct netif_rx_front_ring rx;
++
++ spinlock_t tx_lock;
++ spinlock_t rx_lock;
++
++ unsigned int irq;
++ unsigned int copying_receiver;
++ unsigned int carrier;
++
++ /* Receive-ring batched refills. */
++#define RX_MIN_TARGET 8
++#define RX_DFL_MIN_TARGET 64
++#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
++ unsigned rx_min_target, rx_max_target, rx_target;
++ struct sk_buff_head rx_batch;
++
++ struct timer_list rx_refill_timer;
++
++ /*
++ * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs
++ * is an index into a chain of free entries.
++ */
++ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
++ struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
++
++#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
++ grant_ref_t gref_tx_head;
++ grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
++ grant_ref_t gref_rx_head;
++ grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
++
++ struct xenbus_device *xbdev;
++ int tx_ring_ref;
++ int rx_ring_ref;
++ u8 mac[ETH_ALEN];
++
++ unsigned long rx_pfn_array[NET_RX_RING_SIZE];
++ struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
++ struct mmu_update rx_mmu[NET_RX_RING_SIZE];
++};
++
++struct netfront_rx_info {
++ struct netif_rx_response rx;
++ struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
++};
++
++/*
++ * Implement our own carrier flag: the network stack's version causes delays
++ * when the carrier is re-enabled (in particular, dev_activate() may not
++ * immediately be called, which can cause packet loss).
++ */
++#define netfront_carrier_on(netif) ((netif)->carrier = 1)
++#define netfront_carrier_off(netif) ((netif)->carrier = 0)
++#define netfront_carrier_ok(netif) ((netif)->carrier)
++
++/*
++ * Access macros for acquiring/freeing slots in tx_skbs[].
++ */
++
++static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
++{
++ list[id] = list[0];
++ list[0] = (void *)(unsigned long)id;
++}
++
++static inline unsigned short get_id_from_freelist(struct sk_buff **list)
++{
++ unsigned int id = (unsigned int)(unsigned long)list[0];
++ list[0] = list[id];
++ return id;
++}
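++
++/*
++ * Freelist encoding: list[0] holds the index of the first free slot (cast
++ * to a pointer-sized integer) and each free slot stores the index of the
++ * next, so the two helpers above push and pop a simple LIFO of free ids.
++ */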
++
++static inline int xennet_rxidx(RING_IDX idx)
++{
++ return idx & (NET_RX_RING_SIZE - 1);
++}
++
++static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
++ RING_IDX ri)
++{
++ int i = xennet_rxidx(ri);
++ struct sk_buff *skb = np->rx_skbs[i];
++ np->rx_skbs[i] = NULL;
++ return skb;
++}
++
++static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
++ RING_IDX ri)
++{
++ int i = xennet_rxidx(ri);
++ grant_ref_t ref = np->grant_rx_ref[i];
++ np->grant_rx_ref[i] = GRANT_INVALID_REF;
++ return ref;
++}
++
++#define DPRINTK(fmt, args...) \
++ pr_debug("netfront (%s:%d) " fmt, \
++ __FUNCTION__, __LINE__, ##args)
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "netfront: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "netfront: " fmt, ##args)
++
++static int setup_device(struct xenbus_device *, struct netfront_info *);
++static struct net_device *create_netdev(struct xenbus_device *);
++
++static void end_access(int, void *);
++static void netif_disconnect_backend(struct netfront_info *);
++
++static int network_connect(struct net_device *);
++static void network_tx_buf_gc(struct net_device *);
++static void network_alloc_rx_buffers(struct net_device *);
++static int send_fake_arp(struct net_device *);
++
++static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);
++
++#ifdef CONFIG_SYSFS
++static int xennet_sysfs_addif(struct net_device *netdev);
++static void xennet_sysfs_delif(struct net_device *netdev);
++#else /* !CONFIG_SYSFS */
++#define xennet_sysfs_addif(dev) (0)
++#define xennet_sysfs_delif(dev) do { } while(0)
++#endif
++
++static inline int xennet_can_sg(struct net_device *dev)
++{
++ return dev->features & NETIF_F_SG;
++}
++
++/**
++ * Entry point to this code when a new device is created. Allocate the basic
++ * structures and the ring buffers for communication with the backend, and
++ * inform the backend of the appropriate details for those.
++ */
++static int __devinit netfront_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++ struct net_device *netdev;
++ struct netfront_info *info;
++
++ netdev = create_netdev(dev);
++ if (IS_ERR(netdev)) {
++ err = PTR_ERR(netdev);
++ xenbus_dev_fatal(dev, err, "creating netdev");
++ return err;
++ }
++
++ info = netdev_priv(netdev);
++ dev->dev.driver_data = info;
++
++ err = register_netdev(info->netdev);
++ if (err) {
++ printk(KERN_WARNING "%s: register_netdev err=%d\n",
++ __FUNCTION__, err);
++ goto fail;
++ }
++
++ err = xennet_sysfs_addif(info->netdev);
++ if (err) {
++ unregister_netdev(info->netdev);
++ printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
++ __FUNCTION__, err);
++ goto fail;
++ }
++
++ return 0;
++
++ fail:
++ free_netdev(netdev);
++ dev->dev.driver_data = NULL;
++ return err;
++}
++
++static int __devexit netfront_remove(struct xenbus_device *dev)
++{
++ struct netfront_info *info = dev->dev.driver_data;
++
++ DPRINTK("%s\n", dev->nodename);
++
++ netif_disconnect_backend(info);
++
++ del_timer_sync(&info->rx_refill_timer);
++
++ xennet_sysfs_delif(info->netdev);
++
++ unregister_netdev(info->netdev);
++
++ free_netdev(info->netdev);
++
++ return 0;
++}
++
++/**
++ * We are reconnecting to the backend, due to a suspend/resume, or a backend
++ * driver restart. We tear down our netif structure and recreate it, but
++ * leave the device-layer structures intact so that this is transparent to the
++ * rest of the kernel.
++ */
++static int netfront_resume(struct xenbus_device *dev)
++{
++ struct netfront_info *info = dev->dev.driver_data;
++
++ DPRINTK("%s\n", dev->nodename);
++
++ netif_disconnect_backend(info);
++ return 0;
++}
++
++static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
++{
++ char *s, *e, *macstr;
++ int i;
++
++ macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
++ if (IS_ERR(macstr))
++ return PTR_ERR(macstr);
++
++ for (i = 0; i < ETH_ALEN; i++) {
++ mac[i] = simple_strtoul(s, &e, 16);
++ if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
++ kfree(macstr);
++ return -ENOENT;
++ }
++ s = e+1;
++ }
++
++ kfree(macstr);
++ return 0;
++}
++
++/* Common code used when first setting up, and when resuming. */
++static int talk_to_backend(struct xenbus_device *dev,
++ struct netfront_info *info)
++{
++ const char *message;
++ struct xenbus_transaction xbt;
++ int err;
++
++ err = xen_net_read_mac(dev, info->mac);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
++ goto out;
++ }
++
++ /* Create shared ring, alloc event channel. */
++ err = setup_device(dev, info);
++ if (err)
++ goto out;
++
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ goto destroy_ring;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
++ info->tx_ring_ref);
++ if (err) {
++ message = "writing tx ring-ref";
++ goto abort_transaction;
++ }
++ err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
++ info->rx_ring_ref);
++ if (err) {
++ message = "writing rx ring-ref";
++ goto abort_transaction;
++ }
++ err = xenbus_printf(xbt, dev->nodename,
++ "event-channel", "%u",
++ irq_to_evtchn_port(info->irq));
++ if (err) {
++ message = "writing event-channel";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
++ info->copying_receiver);
++ if (err) {
++ message = "writing request-rx-copy";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
++ if (err) {
++ message = "writing feature-rx-notify";
++ goto abort_transaction;
++ }
++
++#ifdef HAVE_NO_CSUM_OFFLOAD
++ err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload", "%d", 1);
++ if (err) {
++ message = "writing feature-no-csum-offload";
++ goto abort_transaction;
++ }
++#endif
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
++ if (err) {
++ message = "writing feature-sg";
++ goto abort_transaction;
++ }
++
++#ifdef HAVE_TSO
++ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
++ if (err) {
++ message = "writing feature-gso-tcpv4";
++ goto abort_transaction;
++ }
++#endif
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err) {
++ if (err == -EAGAIN)
++ goto again;
++ xenbus_dev_fatal(dev, err, "completing transaction");
++ goto destroy_ring;
++ }
++
++ return 0;
++
++ abort_transaction:
++ xenbus_transaction_end(xbt, 1);
++ xenbus_dev_fatal(dev, err, "%s", message);
++ destroy_ring:
++ netif_disconnect_backend(info);
++ out:
++ return err;
++}
++
++static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
++{
++ struct netif_tx_sring *txs;
++ struct netif_rx_sring *rxs;
++ int err;
++ struct net_device *netdev = info->netdev;
++
++ info->tx_ring_ref = GRANT_INVALID_REF;
++ info->rx_ring_ref = GRANT_INVALID_REF;
++ info->rx.sring = NULL;
++ info->tx.sring = NULL;
++ info->irq = 0;
++
++ txs = (struct netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
++ if (!txs) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(dev, err, "allocating tx ring page");
++ goto fail;
++ }
++ SHARED_RING_INIT(txs);
++ FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
++
++ err = xenbus_grant_ring(dev, virt_to_mfn(txs));
++ if (err < 0) {
++ free_page((unsigned long)txs);
++ goto fail;
++ }
++ info->tx_ring_ref = err;
++
++ rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
++ if (!rxs) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(dev, err, "allocating rx ring page");
++ goto fail;
++ }
++ SHARED_RING_INIT(rxs);
++ FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
++
++ err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
++ if (err < 0) {
++ free_page((unsigned long)rxs);
++ goto fail;
++ }
++ info->rx_ring_ref = err;
++
++ memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
++
++ err = bind_listening_port_to_irqhandler(
++ dev->otherend_id, netif_int, SA_SAMPLE_RANDOM, netdev->name,
++ netdev);
++ if (err < 0)
++ goto fail;
++ info->irq = err;
++
++ return 0;
++
++ fail:
++ return err;
++}
++
++/**
++ * Callback received when the backend's state changes.
++ */
++static void backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct netfront_info *np = dev->dev.driver_data;
++ struct net_device *netdev = np->netdev;
++
++ DPRINTK("%s\n", xenbus_strstate(backend_state));
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitialised:
++ case XenbusStateConnected:
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ break;
++
++ case XenbusStateInitWait:
++ if (dev->state != XenbusStateInitialising)
++ break;
++ if (network_connect(netdev) != 0)
++ break;
++ xenbus_switch_state(dev, XenbusStateConnected);
++ (void)send_fake_arp(netdev);
++ break;
++
++ case XenbusStateClosing:
++ xenbus_frontend_closed(dev);
++ break;
++ }
++}
++
++/** Send a packet on a net device to encourage switches to learn the
++ * MAC. We send a fake (gratuitous) ARP reply.
++ *
++ * @param dev device
++ * @return 0 on success, error code otherwise
++ */
++static int send_fake_arp(struct net_device *dev)
++{
++ struct sk_buff *skb;
++ u32 src_ip, dst_ip;
++
++ dst_ip = INADDR_BROADCAST;
++ src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
++
++ /* No IP? Then nothing to do. */
++ if (src_ip == 0)
++ return 0;
++
++ skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
++ dst_ip, dev, src_ip,
++ /*dst_hw*/ NULL, /*src_hw*/ NULL,
++ /*target_hw*/ dev->dev_addr);
++ if (skb == NULL)
++ return -ENOMEM;
++
++ return dev_queue_xmit(skb);
++}
++
++static inline int netfront_tx_slot_available(struct netfront_info *np)
++{
++ return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
++ (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
++}
++
++static inline void network_maybe_wake_tx(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++
++ if (unlikely(netif_queue_stopped(dev)) &&
++ netfront_tx_slot_available(np) &&
++ likely(netif_running(dev)))
++ netif_wake_queue(dev);
++}
++
++static int network_open(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++
++ memset(&np->stats, 0, sizeof(np->stats));
++
++ spin_lock_bh(&np->rx_lock);
++ if (netfront_carrier_ok(np)) {
++ network_alloc_rx_buffers(dev);
++ np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
++ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
++ netif_rx_schedule(dev);
++ }
++ spin_unlock_bh(&np->rx_lock);
++
++ network_maybe_wake_tx(dev);
++
++ return 0;
++}
++
++static void network_tx_buf_gc(struct net_device *dev)
++{
++ RING_IDX cons, prod;
++ unsigned short id;
++ struct netfront_info *np = netdev_priv(dev);
++ struct sk_buff *skb;
++
++ BUG_ON(!netfront_carrier_ok(np));
++
++ do {
++ prod = np->tx.sring->rsp_prod;
++ rmb(); /* Ensure we see responses up to 'rp'. */
++
++ for (cons = np->tx.rsp_cons; cons != prod; cons++) {
++ struct netif_tx_response *txrsp;
++
++ txrsp = RING_GET_RESPONSE(&np->tx, cons);
++ if (txrsp->status == NETIF_RSP_NULL)
++ continue;
++
++ id = txrsp->id;
++ skb = np->tx_skbs[id];
++ if (unlikely(gnttab_query_foreign_access(
++ np->grant_tx_ref[id]) != 0)) {
++ printk(KERN_ALERT "network_tx_buf_gc: warning "
++ "-- grant still in use by backend "
++ "domain.\n");
++ BUG();
++ }
++ gnttab_end_foreign_access_ref(
++ np->grant_tx_ref[id], GNTMAP_readonly);
++ gnttab_release_grant_reference(
++ &np->gref_tx_head, np->grant_tx_ref[id]);
++ np->grant_tx_ref[id] = GRANT_INVALID_REF;
++ add_id_to_freelist(np->tx_skbs, id);
++ dev_kfree_skb_irq(skb);
++ }
++
++ np->tx.rsp_cons = prod;
++
++ /*
++ * Set a new event, then check for race with update of tx_cons.
++ * Note that it is essential to schedule a callback, no matter
++ * how few buffers are pending. Even if there is space in the
++ * transmit ring, higher layers may be blocked because too much
++ * data is outstanding: in such cases notification from Xen is
++ * likely to be the only kick that we'll get.
++ */
++ np->tx.sring->rsp_event =
++ prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
++ mb();
++ } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
++
++ network_maybe_wake_tx(dev);
++}
++
++static void rx_refill_timeout(unsigned long data)
++{
++ struct net_device *dev = (struct net_device *)data;
++ netif_rx_schedule(dev);
++}
++
++static void network_alloc_rx_buffers(struct net_device *dev)
++{
++ unsigned short id;
++ struct netfront_info *np = netdev_priv(dev);
++ struct sk_buff *skb;
++ struct page *page;
++ int i, batch_target, notify;
++ RING_IDX req_prod = np->rx.req_prod_pvt;
++ struct xen_memory_reservation reservation;
++ grant_ref_t ref;
++ unsigned long pfn;
++ void *vaddr;
++ int nr_flips;
++ netif_rx_request_t *req;
++
++ if (unlikely(!netfront_carrier_ok(np)))
++ return;
++
++ /*
++ * Allocate skbuffs greedily, even though we batch updates to the
++ * receive ring. This creates a less bursty demand on the memory
++ * allocator, so should reduce the chance of failed allocation requests
++ * both for ourselves and for other kernel subsystems.
++ */
++ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
++ for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
++ /*
++ * Allocate an skb and a page. Do not use __dev_alloc_skb as
++ * that will allocate page-sized buffers which is not
++ * necessary here.
++ * 16 bytes added as necessary headroom for netif_receive_skb.
++ */
++ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN,
++ GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(!skb))
++ goto no_skb;
++
++ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
++ if (!page) {
++ kfree_skb(skb);
++no_skb:
++ /* Any skbuffs queued for refill? Force them out. */
++ if (i != 0)
++ goto refill;
++ /* Could not allocate any skbuffs. Try again later. */
++ mod_timer(&np->rx_refill_timer,
++ jiffies + (HZ/10));
++ break;
++ }
++
++ skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */
++ skb_shinfo(skb)->frags[0].page = page;
++ skb_shinfo(skb)->nr_frags = 1;
++ __skb_queue_tail(&np->rx_batch, skb);
++ }
++
++ /* Is the batch large enough to be worthwhile? */
++ if (i < (np->rx_target/2)) {
++ if (req_prod > np->rx.sring->req_prod)
++ goto push;
++ return;
++ }
++
++ /* Adjust our fill target if we risked running out of buffers. */
++ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
++ ((np->rx_target *= 2) > np->rx_max_target))
++ np->rx_target = np->rx_max_target;
++
++ refill:
++ for (nr_flips = i = 0; ; i++) {
++ if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
++ break;
++
++ skb->dev = dev;
++
++ id = xennet_rxidx(req_prod + i);
++
++ BUG_ON(np->rx_skbs[id]);
++ np->rx_skbs[id] = skb;
++
++ ref = gnttab_claim_grant_reference(&np->gref_rx_head);
++ BUG_ON((signed short)ref < 0);
++ np->grant_rx_ref[id] = ref;
++
++ pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
++ vaddr = page_address(skb_shinfo(skb)->frags[0].page);
++
++ req = RING_GET_REQUEST(&np->rx, req_prod + i);
++ if (!np->copying_receiver) {
++ gnttab_grant_foreign_transfer_ref(ref,
++ np->xbdev->otherend_id,
++ pfn);
++ np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn);
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Remove this page before passing
++ * back to Xen. */
++ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++ MULTI_update_va_mapping(np->rx_mcl+i,
++ (unsigned long)vaddr,
++ __pte(0), 0);
++ }
++ nr_flips++;
++ } else {
++ gnttab_grant_foreign_access_ref(ref,
++ np->xbdev->otherend_id,
++ pfn_to_mfn(pfn),
++ 0);
++ }
++
++ req->id = id;
++ req->gref = ref;
++ }
++
++ if (nr_flips != 0) {
++ /* Tell the balloon driver what is going on. */
++ balloon_update_driver_allowance(i);
++
++ set_xen_guest_handle(reservation.extent_start,
++ np->rx_pfn_array);
++ reservation.nr_extents = nr_flips;
++ reservation.extent_order = 0;
++ reservation.address_bits = 0;
++ reservation.domid = DOMID_SELF;
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* After all PTEs have been zapped, flush the TLB. */
++ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
++ UVMF_TLB_FLUSH|UVMF_ALL;
++
++ /* Give away a batch of pages. */
++ np->rx_mcl[i].op = __HYPERVISOR_memory_op;
++ np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
++ np->rx_mcl[i].args[1] = (unsigned long)&reservation;
++
++ /* Zap PTEs and give away pages in one big
++ * multicall. */
++ (void)HYPERVISOR_multicall(np->rx_mcl, i+1);
++
++ /* Check return status of HYPERVISOR_memory_op(). */
++ if (unlikely(np->rx_mcl[i].result != i))
++ panic("Unable to reduce memory reservation\n");
++ } else {
++ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &reservation) != i)
++ panic("Unable to reduce memory reservation\n");
++ }
++ } else {
++ wmb();
++ }
++
++ /* Above is a suitable barrier to ensure backend will see requests. */
++ np->rx.req_prod_pvt = req_prod + i;
++ push:
++ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
++ if (notify)
++ notify_remote_via_irq(np->irq);
++}
++
++static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
++ struct netif_tx_request *tx)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ char *data = skb->data;
++ unsigned long mfn;
++ RING_IDX prod = np->tx.req_prod_pvt;
++ int frags = skb_shinfo(skb)->nr_frags;
++ unsigned int offset = offset_in_page(data);
++ unsigned int len = skb_headlen(skb);
++ unsigned int id;
++ grant_ref_t ref;
++ int i;
++
++ while (len > PAGE_SIZE - offset) {
++ tx->size = PAGE_SIZE - offset;
++ tx->flags |= NETTXF_more_data;
++ len -= tx->size;
++ data += tx->size;
++ offset = 0;
++
++ id = get_id_from_freelist(np->tx_skbs);
++ np->tx_skbs[id] = skb_get(skb);
++ tx = RING_GET_REQUEST(&np->tx, prod++);
++ tx->id = id;
++ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
++ BUG_ON((signed short)ref < 0);
++
++ mfn = virt_to_mfn(data);
++ gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
++ mfn, GNTMAP_readonly);
++
++ tx->gref = np->grant_tx_ref[id] = ref;
++ tx->offset = offset;
++ tx->size = len;
++ tx->flags = 0;
++ }
++
++ for (i = 0; i < frags; i++) {
++ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
++
++ tx->flags |= NETTXF_more_data;
++
++ id = get_id_from_freelist(np->tx_skbs);
++ np->tx_skbs[id] = skb_get(skb);
++ tx = RING_GET_REQUEST(&np->tx, prod++);
++ tx->id = id;
++ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
++ BUG_ON((signed short)ref < 0);
++
++ mfn = pfn_to_mfn(page_to_pfn(frag->page));
++ gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
++ mfn, GNTMAP_readonly);
++
++ tx->gref = np->grant_tx_ref[id] = ref;
++ tx->offset = frag->page_offset;
++ tx->size = frag->size;
++ tx->flags = 0;
++ }
++
++ np->tx.req_prod_pvt = prod;
++}
++
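++/*
++ * Transmit path: claim a free id and tx-ring slot, grant the head page to
++ * the backend read-only, optionally append a GSO extra-info request, let
++ * xennet_make_frags() chain any further pages and fragments with
++ * NETTXF_more_data, then notify the backend and garbage-collect completed
++ * responses.
++ */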
++static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ unsigned short id;
++ struct netfront_info *np = netdev_priv(dev);
++ struct netif_tx_request *tx;
++ struct netif_extra_info *extra;
++ char *data = skb->data;
++ RING_IDX i;
++ grant_ref_t ref;
++ unsigned long mfn;
++ int notify;
++ int frags = skb_shinfo(skb)->nr_frags;
++ unsigned int offset = offset_in_page(data);
++ unsigned int len = skb_headlen(skb);
++
++ frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
++ if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
++ printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
++ frags);
++ dump_stack();
++ goto drop;
++ }
++
++ spin_lock_irq(&np->tx_lock);
++
++ if (unlikely(!netfront_carrier_ok(np) ||
++ (frags > 1 && !xennet_can_sg(dev)) ||
++ netif_needs_gso(dev, skb))) {
++ spin_unlock_irq(&np->tx_lock);
++ goto drop;
++ }
++
++ i = np->tx.req_prod_pvt;
++
++ id = get_id_from_freelist(np->tx_skbs);
++ np->tx_skbs[id] = skb;
++
++ tx = RING_GET_REQUEST(&np->tx, i);
++
++ tx->id = id;
++ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
++ BUG_ON((signed short)ref < 0);
++ mfn = virt_to_mfn(data);
++ gnttab_grant_foreign_access_ref(
++ ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
++ tx->gref = np->grant_tx_ref[id] = ref;
++ tx->offset = offset;
++ tx->size = len;
++
++ tx->flags = 0;
++ extra = NULL;
++
++ if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
++ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
++#ifdef CONFIG_XEN
++ if (skb->proto_data_valid) /* remote but checksummed? */
++ tx->flags |= NETTXF_data_validated;
++#endif
++
++#ifdef HAVE_TSO
++ if (skb_shinfo(skb)->gso_size) {
++ struct netif_extra_info *gso = (struct netif_extra_info *)
++ RING_GET_REQUEST(&np->tx, ++i);
++
++ if (extra)
++ extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
++ else
++ tx->flags |= NETTXF_extra_info;
++
++ gso->u.gso.size = skb_shinfo(skb)->gso_size;
++ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
++ gso->u.gso.pad = 0;
++ gso->u.gso.features = 0;
++
++ gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
++ gso->flags = 0;
++ extra = gso;
++ }
++#endif
++
++ np->tx.req_prod_pvt = i + 1;
++
++ xennet_make_frags(skb, dev, tx);
++ tx->size = skb->len;
++
++ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
++ if (notify)
++ notify_remote_via_irq(np->irq);
++
++ network_tx_buf_gc(dev);
++
++ if (!netfront_tx_slot_available(np))
++ netif_stop_queue(dev);
++
++ spin_unlock_irq(&np->tx_lock);
++
++ np->stats.tx_bytes += skb->len;
++ np->stats.tx_packets++;
++
++ return 0;
++
++ drop:
++ np->stats.tx_dropped++;
++ dev_kfree_skb(skb);
++ return 0;
++}
++
++static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
++{
++ struct net_device *dev = dev_id;
++ struct netfront_info *np = netdev_priv(dev);
++ unsigned long flags;
++
++ spin_lock_irqsave(&np->tx_lock, flags);
++
++ if (likely(netfront_carrier_ok(np))) {
++ network_tx_buf_gc(dev);
++ /* Under tx_lock: protects access to rx shared-ring indexes. */
++ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
++ netif_rx_schedule(dev);
++ }
++
++ spin_unlock_irqrestore(&np->tx_lock, flags);
++
++ return IRQ_HANDLED;
++}
++
++static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
++ grant_ref_t ref)
++{
++ int new = xennet_rxidx(np->rx.req_prod_pvt);
++
++ BUG_ON(np->rx_skbs[new]);
++ np->rx_skbs[new] = skb;
++ np->grant_rx_ref[new] = ref;
++ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
++ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
++ np->rx.req_prod_pvt++;
++}
++
++int xennet_get_extras(struct netfront_info *np,
++ struct netif_extra_info *extras, RING_IDX rp)
++
++{
++ struct netif_extra_info *extra;
++ RING_IDX cons = np->rx.rsp_cons;
++ int err = 0;
++
++ do {
++ struct sk_buff *skb;
++ grant_ref_t ref;
++
++ if (unlikely(cons + 1 == rp)) {
++ if (net_ratelimit())
++ WPRINTK("Missing extra info\n");
++ err = -EBADR;
++ break;
++ }
++
++ extra = (struct netif_extra_info *)
++ RING_GET_RESPONSE(&np->rx, ++cons);
++
++ if (unlikely(!extra->type ||
++ extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
++ if (net_ratelimit())
++ WPRINTK("Invalid extra type: %d\n",
++ extra->type);
++ err = -EINVAL;
++ } else {
++ memcpy(&extras[extra->type - 1], extra,
++ sizeof(*extra));
++ }
++
++ skb = xennet_get_rx_skb(np, cons);
++ ref = xennet_get_rx_ref(np, cons);
++ xennet_move_rx_slot(np, skb, ref);
++ } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
++
++ np->rx.rsp_cons = cons;
++ return err;
++}
++
++static int xennet_get_responses(struct netfront_info *np,
++ struct netfront_rx_info *rinfo, RING_IDX rp,
++ struct sk_buff_head *list,
++ int *pages_flipped_p)
++{
++ int pages_flipped = *pages_flipped_p;
++ struct mmu_update *mmu;
++ struct multicall_entry *mcl;
++ struct netif_rx_response *rx = &rinfo->rx;
++ struct netif_extra_info *extras = rinfo->extras;
++ RING_IDX cons = np->rx.rsp_cons;
++ struct sk_buff *skb = xennet_get_rx_skb(np, cons);
++ grant_ref_t ref = xennet_get_rx_ref(np, cons);
++ int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
++ int frags = 1;
++ int err = 0;
++ unsigned long ret;
++
++ if (rx->flags & NETRXF_extra_info) {
++ err = xennet_get_extras(np, extras, rp);
++ cons = np->rx.rsp_cons;
++ }
++
++ for (;;) {
++ unsigned long mfn;
++
++ if (unlikely(rx->status < 0 ||
++ rx->offset + rx->status > PAGE_SIZE)) {
++ if (net_ratelimit())
++ WPRINTK("rx->offset: %x, size: %u\n",
++ rx->offset, rx->status);
++ xennet_move_rx_slot(np, skb, ref);
++ err = -EINVAL;
++ goto next;
++ }
++
++ /*
++ * This definitely indicates a bug, either in this driver or in
++ * the backend driver. In future this should flag the bad
++ * situation to the system controller to reboot the backend.
++ */
++ if (ref == GRANT_INVALID_REF) {
++ if (net_ratelimit())
++ WPRINTK("Bad rx response id %d.\n", rx->id);
++ err = -EINVAL;
++ goto next;
++ }
++
++ if (!np->copying_receiver) {
++ /* Memory pressure, insufficient buffer
++ * headroom, ... */
++ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
++ if (net_ratelimit())
++ WPRINTK("Unfulfilled rx req "
++ "(id=%d, st=%d).\n",
++ rx->id, rx->status);
++ xennet_move_rx_slot(np, skb, ref);
++ err = -ENOMEM;
++ goto next;
++ }
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Remap the page. */
++ struct page *page =
++ skb_shinfo(skb)->frags[0].page;
++ unsigned long pfn = page_to_pfn(page);
++ void *vaddr = page_address(page);
++
++ mcl = np->rx_mcl + pages_flipped;
++ mmu = np->rx_mmu + pages_flipped;
++
++ MULTI_update_va_mapping(mcl,
++ (unsigned long)vaddr,
++ pfn_pte_ma(mfn,
++ PAGE_KERNEL),
++ 0);
++ mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
++ | MMU_MACHPHYS_UPDATE;
++ mmu->val = pfn;
++
++ set_phys_to_machine(pfn, mfn);
++ }
++ pages_flipped++;
++ } else {
++ ret = gnttab_end_foreign_access_ref(ref, 0);
++ BUG_ON(!ret);
++ }
++
++ gnttab_release_grant_reference(&np->gref_rx_head, ref);
++
++ __skb_queue_tail(list, skb);
++
++next:
++ if (!(rx->flags & NETRXF_more_data))
++ break;
++
++ if (cons + frags == rp) {
++ if (net_ratelimit())
++ WPRINTK("Need more frags\n");
++ err = -ENOENT;
++ break;
++ }
++
++ rx = RING_GET_RESPONSE(&np->rx, cons + frags);
++ skb = xennet_get_rx_skb(np, cons + frags);
++ ref = xennet_get_rx_ref(np, cons + frags);
++ frags++;
++ }
++
++ if (unlikely(frags > max)) {
++ if (net_ratelimit())
++ WPRINTK("Too many frags\n");
++ err = -E2BIG;
++ }
++
++ if (unlikely(err))
++ np->rx.rsp_cons = cons + frags;
++
++ *pages_flipped_p = pages_flipped;
++
++ return err;
++}
++
++static RING_IDX xennet_fill_frags(struct netfront_info *np,
++ struct sk_buff *skb,
++ struct sk_buff_head *list)
++{
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ int nr_frags = shinfo->nr_frags;
++ RING_IDX cons = np->rx.rsp_cons;
++ skb_frag_t *frag = shinfo->frags + nr_frags;
++ struct sk_buff *nskb;
++
++ while ((nskb = __skb_dequeue(list))) {
++ struct netif_rx_response *rx =
++ RING_GET_RESPONSE(&np->rx, ++cons);
++
++ frag->page = skb_shinfo(nskb)->frags[0].page;
++ frag->page_offset = rx->offset;
++ frag->size = rx->status;
++
++ skb->data_len += rx->status;
++
++ skb_shinfo(nskb)->nr_frags = 0;
++ kfree_skb(nskb);
++
++ frag++;
++ nr_frags++;
++ }
++
++ shinfo->nr_frags = nr_frags;
++ return cons;
++}
++
++static int xennet_set_skb_gso(struct sk_buff *skb,
++ struct netif_extra_info *gso)
++{
++ if (!gso->u.gso.size) {
++ if (net_ratelimit())
++ WPRINTK("GSO size must not be zero.\n");
++ return -EINVAL;
++ }
++
++ /* Currently only TCPv4 S.O. is supported. */
++ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
++ if (net_ratelimit())
++ WPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
++ return -EINVAL;
++ }
++
++#ifdef HAVE_TSO
++ skb_shinfo(skb)->gso_size = gso->u.gso.size;
++#ifdef HAVE_GSO
++ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
++
++ /* Header must be checked, and gso_segs computed. */
++ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
++#endif
++ skb_shinfo(skb)->gso_segs = 0;
++
++ return 0;
++#else
++ if (net_ratelimit())
++ WPRINTK("GSO unsupported by this kernel.\n");
++ return -EINVAL;
++#endif
++}
++
++static int netif_poll(struct net_device *dev, int *pbudget)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ struct sk_buff *skb;
++ struct netfront_rx_info rinfo;
++ struct netif_rx_response *rx = &rinfo.rx;
++ struct netif_extra_info *extras = rinfo.extras;
++ RING_IDX i, rp;
++ struct multicall_entry *mcl;
++ int work_done, budget, more_to_do = 1;
++ struct sk_buff_head rxq;
++ struct sk_buff_head errq;
++ struct sk_buff_head tmpq;
++ unsigned long flags;
++ unsigned int len;
++ int pages_flipped = 0;
++ int err;
++
++ spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */
++
++ if (unlikely(!netfront_carrier_ok(np))) {
++ spin_unlock(&np->rx_lock);
++ return 0;
++ }
++
++ skb_queue_head_init(&rxq);
++ skb_queue_head_init(&errq);
++ skb_queue_head_init(&tmpq);
++
++ if ((budget = *pbudget) > dev->quota)
++ budget = dev->quota;
++ rp = np->rx.sring->rsp_prod;
++ rmb(); /* Ensure we see queued responses up to 'rp'. */
++
++ i = np->rx.rsp_cons;
++ work_done = 0;
++ while ((i != rp) && (work_done < budget)) {
++ memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
++ memset(extras, 0, sizeof(rinfo.extras));
++
++ err = xennet_get_responses(np, &rinfo, rp, &tmpq,
++ &pages_flipped);
++
++ if (unlikely(err)) {
++err:
++ while ((skb = __skb_dequeue(&tmpq)))
++ __skb_queue_tail(&errq, skb);
++ np->stats.rx_errors++;
++ i = np->rx.rsp_cons;
++ continue;
++ }
++
++ skb = __skb_dequeue(&tmpq);
++
++ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
++ struct netif_extra_info *gso;
++ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
++
++ if (unlikely(xennet_set_skb_gso(skb, gso))) {
++ __skb_queue_head(&tmpq, skb);
++ np->rx.rsp_cons += skb_queue_len(&tmpq);
++ goto err;
++ }
++ }
++
++ NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
++ NETFRONT_SKB_CB(skb)->offset = rx->offset;
++
++ len = rx->status;
++ if (len > RX_COPY_THRESHOLD)
++ len = RX_COPY_THRESHOLD;
++ skb_put(skb, len);
++
++ if (rx->status > len) {
++ skb_shinfo(skb)->frags[0].page_offset =
++ rx->offset + len;
++ skb_shinfo(skb)->frags[0].size = rx->status - len;
++ skb->data_len = rx->status - len;
++ } else {
++ skb_shinfo(skb)->frags[0].page = NULL;
++ skb_shinfo(skb)->nr_frags = 0;
++ }
++
++ i = xennet_fill_frags(np, skb, &tmpq);
++
++		/*
++		 * Truesize approximates the size of true data plus
++		 * any supervisor overheads. Adding hypervisor overheads
++		 * has been shown to significantly reduce achievable
++		 * bandwidth with the default receive buffer size. It is
++		 * therefore not wise to account for it here.
++		 *
++		 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to
++		 * RX_COPY_THRESHOLD + the supervisor overheads. Here, we
++		 * add the size of the data pulled in xennet_fill_frags().
++		 *
++		 * We also adjust for any unused space in the main data
++		 * area by subtracting (RX_COPY_THRESHOLD - len). This is
++		 * especially important with drivers which split incoming
++		 * packets into header and data, using only 66 bytes of
++		 * the main data area (see the e1000 driver, for example).
++		 * On such systems, without this last adjustment, our
++		 * achievable receive throughput using the standard receive
++		 * buffer size was cut by 25%(!!!).
++		 */
++ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
++ skb->len += skb->data_len;
++
++ /*
++ * Old backends do not assert data_validated but we
++ * can infer it from csum_blank so test both flags.
++ */
++ if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank))
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ else
++ skb->ip_summed = CHECKSUM_NONE;
++#ifdef CONFIG_XEN
++ skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE);
++ skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank);
++#endif
++ np->stats.rx_packets++;
++ np->stats.rx_bytes += skb->len;
++
++ __skb_queue_tail(&rxq, skb);
++
++ np->rx.rsp_cons = ++i;
++ work_done++;
++ }
++
++ if (pages_flipped) {
++ /* Some pages are no longer absent... */
++ balloon_update_driver_allowance(-pages_flipped);
++
++ /* Do all the remapping work and M2P updates. */
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ mcl = np->rx_mcl + pages_flipped;
++ mcl->op = __HYPERVISOR_mmu_update;
++ mcl->args[0] = (unsigned long)np->rx_mmu;
++ mcl->args[1] = pages_flipped;
++ mcl->args[2] = 0;
++ mcl->args[3] = DOMID_SELF;
++ (void)HYPERVISOR_multicall(np->rx_mcl,
++ pages_flipped + 1);
++ }
++ }
++
++ while ((skb = __skb_dequeue(&errq)))
++ kfree_skb(skb);
++
++ while ((skb = __skb_dequeue(&rxq)) != NULL) {
++ struct page *page = NETFRONT_SKB_CB(skb)->page;
++ void *vaddr = page_address(page);
++ unsigned offset = NETFRONT_SKB_CB(skb)->offset;
++
++ memcpy(skb->data, vaddr + offset, skb_headlen(skb));
++
++ if (page != skb_shinfo(skb)->frags[0].page)
++ __free_page(page);
++
++ /* Ethernet work: Delayed to here as it peeks the header. */
++ skb->protocol = eth_type_trans(skb, dev);
++
++ /* Pass it up. */
++ netif_receive_skb(skb);
++ dev->last_rx = jiffies;
++ }
++
++ /* If we get a callback with very few responses, reduce fill target. */
++ /* NB. Note exponential increase, linear decrease. */
++ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
++ ((3*np->rx_target) / 4)) &&
++ (--np->rx_target < np->rx_min_target))
++ np->rx_target = np->rx_min_target;
++
++ network_alloc_rx_buffers(dev);
++
++ *pbudget -= work_done;
++ dev->quota -= work_done;
++
++ if (work_done < budget) {
++ local_irq_save(flags);
++
++ RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
++ if (!more_to_do)
++ __netif_rx_complete(dev);
++
++ local_irq_restore(flags);
++ }
++
++ spin_unlock(&np->rx_lock);
++
++ return more_to_do;
++}
++
++static void netif_release_tx_bufs(struct netfront_info *np)
++{
++ struct sk_buff *skb;
++ int i;
++
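++	/* Entries below PAGE_OFFSET are free-list indices rather than skb
++	 * pointers (see the recovery-procedure comment in network_connect()),
++	 * so only real skbs are unmapped and freed here. */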
++ for (i = 1; i <= NET_TX_RING_SIZE; i++) {
++ if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET)
++ continue;
++
++ skb = np->tx_skbs[i];
++ gnttab_end_foreign_access_ref(
++ np->grant_tx_ref[i], GNTMAP_readonly);
++ gnttab_release_grant_reference(
++ &np->gref_tx_head, np->grant_tx_ref[i]);
++ np->grant_tx_ref[i] = GRANT_INVALID_REF;
++ add_id_to_freelist(np->tx_skbs, i);
++ dev_kfree_skb_irq(skb);
++ }
++}
++
++static void netif_release_rx_bufs(struct netfront_info *np)
++{
++ struct mmu_update *mmu = np->rx_mmu;
++ struct multicall_entry *mcl = np->rx_mcl;
++ struct sk_buff_head free_list;
++ struct sk_buff *skb;
++ unsigned long mfn;
++ int xfer = 0, noxfer = 0, unused = 0;
++ int id, ref, rc;
++
++ if (np->copying_receiver) {
++ WPRINTK("%s: fix me for copying receiver.\n", __FUNCTION__);
++ return;
++ }
++
++ skb_queue_head_init(&free_list);
++
++ spin_lock_bh(&np->rx_lock);
++
++ for (id = 0; id < NET_RX_RING_SIZE; id++) {
++ if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) {
++ unused++;
++ continue;
++ }
++
++ skb = np->rx_skbs[id];
++ mfn = gnttab_end_foreign_transfer_ref(ref);
++ gnttab_release_grant_reference(&np->gref_rx_head, ref);
++ np->grant_rx_ref[id] = GRANT_INVALID_REF;
++ add_id_to_freelist(np->rx_skbs, id);
++
++ if (0 == mfn) {
++ struct page *page = skb_shinfo(skb)->frags[0].page;
++ balloon_release_driver_page(page);
++ skb_shinfo(skb)->nr_frags = 0;
++ dev_kfree_skb(skb);
++ noxfer++;
++ continue;
++ }
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Remap the page. */
++ struct page *page = skb_shinfo(skb)->frags[0].page;
++ unsigned long pfn = page_to_pfn(page);
++ void *vaddr = page_address(page);
++
++ MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
++ pfn_pte_ma(mfn, PAGE_KERNEL),
++ 0);
++ mcl++;
++ mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
++ | MMU_MACHPHYS_UPDATE;
++ mmu->val = pfn;
++ mmu++;
++
++ set_phys_to_machine(pfn, mfn);
++ }
++ __skb_queue_tail(&free_list, skb);
++ xfer++;
++ }
++
++ IPRINTK("%s: %d xfer, %d noxfer, %d unused\n",
++ __FUNCTION__, xfer, noxfer, unused);
++
++ if (xfer) {
++ /* Some pages are no longer absent... */
++ balloon_update_driver_allowance(-xfer);
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Do all the remapping work and M2P updates. */
++ mcl->op = __HYPERVISOR_mmu_update;
++ mcl->args[0] = (unsigned long)np->rx_mmu;
++ mcl->args[1] = mmu - np->rx_mmu;
++ mcl->args[2] = 0;
++ mcl->args[3] = DOMID_SELF;
++ mcl++;
++ rc = HYPERVISOR_multicall_check(
++ np->rx_mcl, mcl - np->rx_mcl, NULL);
++ BUG_ON(rc);
++ }
++ }
++
++ while ((skb = __skb_dequeue(&free_list)) != NULL)
++ dev_kfree_skb(skb);
++
++ spin_unlock_bh(&np->rx_lock);
++}
++
++static int network_close(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ netif_stop_queue(np->netdev);
++ return 0;
++}
++
++
++static struct net_device_stats *network_get_stats(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ return &np->stats;
++}
++
++static int xennet_change_mtu(struct net_device *dev, int mtu)
++{
++ int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
++
++ if (mtu > max)
++ return -EINVAL;
++ dev->mtu = mtu;
++ return 0;
++}
++
++static int xennet_set_sg(struct net_device *dev, u32 data)
++{
++ if (data) {
++ struct netfront_info *np = netdev_priv(dev);
++ int val;
++
++ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
++ "%d", &val) < 0)
++ val = 0;
++ if (!val)
++ return -ENOSYS;
++ } else if (dev->mtu > ETH_DATA_LEN)
++ dev->mtu = ETH_DATA_LEN;
++
++ return ethtool_op_set_sg(dev, data);
++}
++
++static int xennet_set_tso(struct net_device *dev, u32 data)
++{
++#ifdef HAVE_TSO
++ if (data) {
++ struct netfront_info *np = netdev_priv(dev);
++ int val;
++
++ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
++ "feature-gso-tcpv4", "%d", &val) < 0)
++ val = 0;
++ if (!val)
++ return -ENOSYS;
++ }
++
++ return ethtool_op_set_tso(dev, data);
++#else
++ return -ENOSYS;
++#endif
++}
++
++static void xennet_set_features(struct net_device *dev)
++{
++ dev_disable_gso_features(dev);
++ xennet_set_sg(dev, 0);
++
++ /* We need checksum offload to enable scatter/gather and TSO. */
++ if (!(dev->features & NETIF_F_IP_CSUM))
++ return;
++
++ if (xennet_set_sg(dev, 1))
++ return;
++
++ /* Before 2.6.9 TSO seems to be unreliable so do not enable it
++ * on older kernels.
++ */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
++ xennet_set_tso(dev, 1);
++#endif
++
++}
++
++static int network_connect(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ int i, requeue_idx, err;
++ struct sk_buff *skb;
++ grant_ref_t ref;
++ netif_rx_request_t *req;
++ unsigned int feature_rx_copy, feature_rx_flip;
++
++ err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
++ "feature-rx-copy", "%u", &feature_rx_copy);
++ if (err != 1)
++ feature_rx_copy = 0;
++ err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
++ "feature-rx-flip", "%u", &feature_rx_flip);
++ if (err != 1)
++ feature_rx_flip = 1;
++
++ /*
++ * Copy packets on receive path if:
++ * (a) This was requested by user, and the backend supports it; or
++ * (b) Flipping was requested, but this is unsupported by the backend.
++ */
++ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
++ (MODPARM_rx_flip && !feature_rx_flip));
++
++ err = talk_to_backend(np->xbdev, np);
++ if (err)
++ return err;
++
++ xennet_set_features(dev);
++
++ IPRINTK("device %s has %sing receive path.\n",
++ dev->name, np->copying_receiver ? "copy" : "flipp");
++
++ spin_lock_bh(&np->rx_lock);
++ spin_lock_irq(&np->tx_lock);
++
++ /*
++ * Recovery procedure:
++ * NB. Freelist index entries are always going to be less than
++	 * PAGE_OFFSET, whereas pointers to skbs will always be greater
++	 * than or equal to PAGE_OFFSET: we use this property to distinguish
++ * them.
++ */
++
++ /* Step 1: Discard all pending TX packet fragments. */
++ netif_release_tx_bufs(np);
++
++ /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
++ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
++ if (!np->rx_skbs[i])
++ continue;
++
++ skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
++ ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
++ req = RING_GET_REQUEST(&np->rx, requeue_idx);
++
++ if (!np->copying_receiver) {
++ gnttab_grant_foreign_transfer_ref(
++ ref, np->xbdev->otherend_id,
++ page_to_pfn(skb_shinfo(skb)->frags->page));
++ } else {
++ gnttab_grant_foreign_access_ref(
++ ref, np->xbdev->otherend_id,
++ pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
++ frags->page)),
++ 0);
++ }
++ req->gref = ref;
++ req->id = requeue_idx;
++
++ requeue_idx++;
++ }
++
++ np->rx.req_prod_pvt = requeue_idx;
++
++ /*
++ * Step 3: All public and private state should now be sane. Get
++ * ready to start sending and receiving packets and give the driver
++ * domain a kick because we've probably just requeued some
++ * packets.
++ */
++ netfront_carrier_on(np);
++ notify_remote_via_irq(np->irq);
++ network_tx_buf_gc(dev);
++ network_alloc_rx_buffers(dev);
++
++ spin_unlock_irq(&np->tx_lock);
++ spin_unlock_bh(&np->rx_lock);
++
++ return 0;
++}
++
++static void netif_uninit(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ netif_release_tx_bufs(np);
++ netif_release_rx_bufs(np);
++ gnttab_free_grant_references(np->gref_tx_head);
++ gnttab_free_grant_references(np->gref_rx_head);
++}
++
++static struct ethtool_ops network_ethtool_ops =
++{
++ .get_tx_csum = ethtool_op_get_tx_csum,
++ .set_tx_csum = ethtool_op_set_tx_csum,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = xennet_set_sg,
++ .get_tso = ethtool_op_get_tso,
++ .set_tso = xennet_set_tso,
++ .get_link = ethtool_op_get_link,
++};
++
++#ifdef CONFIG_SYSFS
++static ssize_t show_rxbuf_min(struct class_device *cd, char *buf)
++{
++ struct net_device *netdev = container_of(cd, struct net_device,
++ class_dev);
++ struct netfront_info *info = netdev_priv(netdev);
++
++ return sprintf(buf, "%u\n", info->rx_min_target);
++}
++
++static ssize_t store_rxbuf_min(struct class_device *cd,
++ const char *buf, size_t len)
++{
++ struct net_device *netdev = container_of(cd, struct net_device,
++ class_dev);
++ struct netfront_info *np = netdev_priv(netdev);
++ char *endp;
++ unsigned long target;
++
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++
++ target = simple_strtoul(buf, &endp, 0);
++ if (endp == buf)
++ return -EBADMSG;
++
++ if (target < RX_MIN_TARGET)
++ target = RX_MIN_TARGET;
++ if (target > RX_MAX_TARGET)
++ target = RX_MAX_TARGET;
++
++ spin_lock_bh(&np->rx_lock);
++ if (target > np->rx_max_target)
++ np->rx_max_target = target;
++ np->rx_min_target = target;
++ if (target > np->rx_target)
++ np->rx_target = target;
++
++ network_alloc_rx_buffers(netdev);
++
++ spin_unlock_bh(&np->rx_lock);
++ return len;
++}
++
++static ssize_t show_rxbuf_max(struct class_device *cd, char *buf)
++{
++ struct net_device *netdev = container_of(cd, struct net_device,
++ class_dev);
++ struct netfront_info *info = netdev_priv(netdev);
++
++ return sprintf(buf, "%u\n", info->rx_max_target);
++}
++
++static ssize_t store_rxbuf_max(struct class_device *cd,
++ const char *buf, size_t len)
++{
++ struct net_device *netdev = container_of(cd, struct net_device,
++ class_dev);
++ struct netfront_info *np = netdev_priv(netdev);
++ char *endp;
++ unsigned long target;
++
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++
++ target = simple_strtoul(buf, &endp, 0);
++ if (endp == buf)
++ return -EBADMSG;
++
++ if (target < RX_MIN_TARGET)
++ target = RX_MIN_TARGET;
++ if (target > RX_MAX_TARGET)
++ target = RX_MAX_TARGET;
++
++ spin_lock_bh(&np->rx_lock);
++ if (target < np->rx_min_target)
++ np->rx_min_target = target;
++ np->rx_max_target = target;
++ if (target < np->rx_target)
++ np->rx_target = target;
++
++ network_alloc_rx_buffers(netdev);
++
++ spin_unlock_bh(&np->rx_lock);
++ return len;
++}
++
++static ssize_t show_rxbuf_cur(struct class_device *cd, char *buf)
++{
++ struct net_device *netdev = container_of(cd, struct net_device,
++ class_dev);
++ struct netfront_info *info = netdev_priv(netdev);
++
++ return sprintf(buf, "%u\n", info->rx_target);
++}
++
++static const struct class_device_attribute xennet_attrs[] = {
++ __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
++ __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
++ __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
++};
++
++static int xennet_sysfs_addif(struct net_device *netdev)
++{
++ int i;
++ int error = 0;
++
++ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
++ error = class_device_create_file(&netdev->class_dev,
++ &xennet_attrs[i]);
++ if (error)
++ goto fail;
++ }
++ return 0;
++
++ fail:
++ while (--i >= 0)
++ class_device_remove_file(&netdev->class_dev,
++ &xennet_attrs[i]);
++ return error;
++}
++
++static void xennet_sysfs_delif(struct net_device *netdev)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
++ class_device_remove_file(&netdev->class_dev,
++ &xennet_attrs[i]);
++ }
++}
++
++#endif /* CONFIG_SYSFS */
++
++
++/*
++ * Nothing to do here. Virtual interface is point-to-point and the
++ * physical interface is probably promiscuous anyway.
++ */
++static void network_set_multicast_list(struct net_device *dev)
++{
++}
++
++static struct net_device * __devinit create_netdev(struct xenbus_device *dev)
++{
++ int i, err = 0;
++ struct net_device *netdev = NULL;
++ struct netfront_info *np = NULL;
++
++ netdev = alloc_etherdev(sizeof(struct netfront_info));
++ if (!netdev) {
++ printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
++ __FUNCTION__);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ np = netdev_priv(netdev);
++ np->xbdev = dev;
++
++ spin_lock_init(&np->tx_lock);
++ spin_lock_init(&np->rx_lock);
++
++ skb_queue_head_init(&np->rx_batch);
++ np->rx_target = RX_DFL_MIN_TARGET;
++ np->rx_min_target = RX_DFL_MIN_TARGET;
++ np->rx_max_target = RX_MAX_TARGET;
++
++ init_timer(&np->rx_refill_timer);
++ np->rx_refill_timer.data = (unsigned long)netdev;
++ np->rx_refill_timer.function = rx_refill_timeout;
++
++ /* Initialise {tx,rx}_skbs as a free chain containing every entry. */
++ for (i = 0; i <= NET_TX_RING_SIZE; i++) {
++ np->tx_skbs[i] = (void *)((unsigned long) i+1);
++ np->grant_tx_ref[i] = GRANT_INVALID_REF;
++ }
++
++ for (i = 0; i < NET_RX_RING_SIZE; i++) {
++ np->rx_skbs[i] = NULL;
++ np->grant_rx_ref[i] = GRANT_INVALID_REF;
++ }
++
++ /* A grant for every tx ring slot */
++ if (gnttab_alloc_grant_references(TX_MAX_TARGET,
++ &np->gref_tx_head) < 0) {
++ printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
++ err = -ENOMEM;
++ goto exit;
++ }
++ /* A grant for every rx ring slot */
++ if (gnttab_alloc_grant_references(RX_MAX_TARGET,
++ &np->gref_rx_head) < 0) {
++ printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
++ err = -ENOMEM;
++ goto exit_free_tx;
++ }
++
++ netdev->open = network_open;
++ netdev->hard_start_xmit = network_start_xmit;
++ netdev->stop = network_close;
++ netdev->get_stats = network_get_stats;
++ netdev->poll = netif_poll;
++ netdev->set_multicast_list = network_set_multicast_list;
++ netdev->uninit = netif_uninit;
++ netdev->change_mtu = xennet_change_mtu;
++ netdev->weight = 64;
++ netdev->features = NETIF_F_IP_CSUM;
++
++ SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
++ SET_MODULE_OWNER(netdev);
++ SET_NETDEV_DEV(netdev, &dev->dev);
++
++ np->netdev = netdev;
++
++ netfront_carrier_off(np);
++
++ return netdev;
++
++ exit_free_tx:
++ gnttab_free_grant_references(np->gref_tx_head);
++ exit:
++ free_netdev(netdev);
++ return ERR_PTR(err);
++}
++
++/*
++ * We use this notifier to send out a fake ARP reply to reset switches and
++ * router ARP caches when an IP interface is brought up on a VIF.
++ */
++static int
++inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
++{
++ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
++ struct net_device *dev = ifa->ifa_dev->dev;
++
++ /* UP event and is it one of our devices? */
++ if (event == NETDEV_UP && dev->open == network_open)
++ (void)send_fake_arp(dev);
++
++ return NOTIFY_DONE;
++}
++
++
++static void netif_disconnect_backend(struct netfront_info *info)
++{
++ /* Stop old i/f to prevent errors whilst we rebuild the state. */
++ spin_lock_bh(&info->rx_lock);
++ spin_lock_irq(&info->tx_lock);
++ netfront_carrier_off(info);
++ spin_unlock_irq(&info->tx_lock);
++ spin_unlock_bh(&info->rx_lock);
++
++ if (info->irq)
++ unbind_from_irqhandler(info->irq, info->netdev);
++ info->irq = 0;
++
++ end_access(info->tx_ring_ref, info->tx.sring);
++ end_access(info->rx_ring_ref, info->rx.sring);
++ info->tx_ring_ref = GRANT_INVALID_REF;
++ info->rx_ring_ref = GRANT_INVALID_REF;
++ info->tx.sring = NULL;
++ info->rx.sring = NULL;
++}
++
++
++static void end_access(int ref, void *page)
++{
++ if (ref != GRANT_INVALID_REF)
++ gnttab_end_foreign_access(ref, 0, (unsigned long)page);
++}
++
++
++/* ** Driver registration ** */
++
++
++static struct xenbus_device_id netfront_ids[] = {
++ { "vif" },
++ { "" }
++};
++
++
++static struct xenbus_driver netfront = {
++ .name = "vif",
++ .owner = THIS_MODULE,
++ .ids = netfront_ids,
++ .probe = netfront_probe,
++ .remove = __devexit_p(netfront_remove),
++ .resume = netfront_resume,
++ .otherend_changed = backend_changed,
++};
++
++
++static struct notifier_block notifier_inetdev = {
++ .notifier_call = inetdev_notify,
++ .next = NULL,
++ .priority = 0
++};
++
++static int __init netif_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++#ifdef CONFIG_XEN
++ if (MODPARM_rx_flip && MODPARM_rx_copy) {
++ WPRINTK("Cannot specify both rx_copy and rx_flip.\n");
++ return -EINVAL;
++ }
++
++ if (!MODPARM_rx_flip && !MODPARM_rx_copy)
++ MODPARM_rx_flip = 1; /* Default is to flip. */
++#endif
++
++ if (is_initial_xendomain())
++ return 0;
++
++ IPRINTK("Initialising virtual ethernet driver.\n");
++
++ (void)register_inetaddr_notifier(&notifier_inetdev);
++
++ return xenbus_register_frontend(&netfront);
++}
++module_init(netif_init);
++
++
++static void __exit netif_exit(void)
++{
++ if (is_initial_xendomain())
++ return;
++
++ unregister_inetaddr_notifier(&notifier_inetdev);
++
++ return xenbus_unregister_driver(&netfront);
++}
++module_exit(netif_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/Makefile 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,15 @@
++obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback.o
++
++pciback-y := pci_stub.o pciback_ops.o xenbus.o
++pciback-y += conf_space.o conf_space_header.o \
++ conf_space_capability.o \
++ conf_space_capability_vpd.o \
++ conf_space_capability_pm.o \
++ conf_space_quirks.o
++pciback-$(CONFIG_XEN_PCIDEV_BACKEND_VPCI) += vpci.o
++pciback-$(CONFIG_XEN_PCIDEV_BACKEND_SLOT) += slot.o
++pciback-$(CONFIG_XEN_PCIDEV_BACKEND_PASS) += passthrough.o
++
++ifeq ($(CONFIG_XEN_PCIDEV_BE_DEBUG),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/conf_space.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,426 @@
++/*
++ * PCI Backend - Functions for creating a virtual configuration space for
++ * exported PCI Devices.
++ * It's dangerous to allow PCI Driver Domains to change their
++ * device's resources (memory, i/o ports, interrupts). We need to
++ * restrict changes to certain PCI Configuration registers:
++ * BARs, INTERRUPT_PIN, most registers in the header...
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include "pciback.h"
++#include "conf_space.h"
++#include "conf_space_quirks.h"
++
++#define DEFINE_PCI_CONFIG(op,size,type) \
++int pciback_##op##_config_##size \
++(struct pci_dev *dev, int offset, type value, void *data) \
++{ \
++ return pci_##op##_config_##size (dev, offset, value); \
++}
++
++DEFINE_PCI_CONFIG(read, byte, u8 *)
++DEFINE_PCI_CONFIG(read, word, u16 *)
++DEFINE_PCI_CONFIG(read, dword, u32 *)
++
++DEFINE_PCI_CONFIG(write, byte, u8)
++DEFINE_PCI_CONFIG(write, word, u16)
++DEFINE_PCI_CONFIG(write, dword, u32)
++
++static int conf_space_read(struct pci_dev *dev,
++ struct config_field_entry *entry, int offset,
++ u32 * value)
++{
++ int ret = 0;
++ struct config_field *field = entry->field;
++
++ *value = 0;
++
++ switch (field->size) {
++ case 1:
++ if (field->u.b.read)
++ ret = field->u.b.read(dev, offset, (u8 *) value,
++ entry->data);
++ break;
++ case 2:
++ if (field->u.w.read)
++ ret = field->u.w.read(dev, offset, (u16 *) value,
++ entry->data);
++ break;
++ case 4:
++ if (field->u.dw.read)
++ ret = field->u.dw.read(dev, offset, value, entry->data);
++ break;
++ }
++ return ret;
++}
++
++static int conf_space_write(struct pci_dev *dev,
++ struct config_field_entry *entry, int offset,
++ u32 value)
++{
++ int ret = 0;
++ struct config_field *field = entry->field;
++
++ switch (field->size) {
++ case 1:
++ if (field->u.b.write)
++ ret = field->u.b.write(dev, offset, (u8) value,
++ entry->data);
++ break;
++ case 2:
++ if (field->u.w.write)
++ ret = field->u.w.write(dev, offset, (u16) value,
++ entry->data);
++ break;
++ case 4:
++ if (field->u.dw.write)
++ ret = field->u.dw.write(dev, offset, value,
++ entry->data);
++ break;
++ }
++ return ret;
++}
++
++static inline u32 get_mask(int size)
++{
++ if (size == 1)
++ return 0xff;
++ else if (size == 2)
++ return 0xffff;
++ else
++ return 0xffffffff;
++}
++
++static inline int valid_request(int offset, int size)
++{
++ /* Validate request (no un-aligned requests) */
++ if ((size == 1 || size == 2 || size == 4) && (offset % size) == 0)
++ return 1;
++ return 0;
++}
++
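++/*
++ * Merge new_val into val at a byte offset within the request.  A positive
++ * offset shifts new_val (and its mask) left by offset*8 bits, e.g. a field
++ * that begins 2 bytes into a 4-byte request is shifted left by 16 bits;
++ * a negative offset shifts right for fields that begin before the request.
++ */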
++static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask,
++ int offset)
++{
++ if (offset >= 0) {
++ new_val_mask <<= (offset * 8);
++ new_val <<= (offset * 8);
++ } else {
++ new_val_mask >>= (offset * -8);
++ new_val >>= (offset * -8);
++ }
++ val = (val & ~new_val_mask) | (new_val & new_val_mask);
++
++ return val;
++}
++
++static int pcibios_err_to_errno(int err)
++{
++ switch (err) {
++ case PCIBIOS_SUCCESSFUL:
++ return XEN_PCI_ERR_success;
++ case PCIBIOS_DEVICE_NOT_FOUND:
++ return XEN_PCI_ERR_dev_not_found;
++ case PCIBIOS_BAD_REGISTER_NUMBER:
++ return XEN_PCI_ERR_invalid_offset;
++ case PCIBIOS_FUNC_NOT_SUPPORTED:
++ return XEN_PCI_ERR_not_implemented;
++ case PCIBIOS_SET_FAILED:
++ return XEN_PCI_ERR_access_denied;
++ }
++ return err;
++}
++
++int pciback_config_read(struct pci_dev *dev, int offset, int size,
++ u32 * ret_val)
++{
++ int err = 0;
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry;
++ struct config_field *field;
++ int req_start, req_end, field_start, field_end;
++ /* if read fails for any reason, return 0 (as if device didn't respond) */
++ u32 value = 0, tmp_val;
++
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x\n",
++ pci_name(dev), size, offset);
++
++ if (!valid_request(offset, size)) {
++ err = XEN_PCI_ERR_invalid_offset;
++ goto out;
++ }
++
++ /* Get the real value first, then modify as appropriate */
++ switch (size) {
++ case 1:
++ err = pci_read_config_byte(dev, offset, (u8 *) & value);
++ break;
++ case 2:
++ err = pci_read_config_word(dev, offset, (u16 *) & value);
++ break;
++ case 4:
++ err = pci_read_config_dword(dev, offset, &value);
++ break;
++ }
++
++ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++ field = cfg_entry->field;
++
++ req_start = offset;
++ req_end = offset + size;
++ field_start = OFFSET(cfg_entry);
++ field_end = OFFSET(cfg_entry) + field->size;
++
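++		/* If the request overlaps this virtual field, fetch the
++		 * field's (possibly emulated) value and merge it into the
++		 * result at the appropriate byte offset. */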
++ if ((req_start >= field_start && req_start < field_end)
++ || (req_end > field_start && req_end <= field_end)) {
++ err = conf_space_read(dev, cfg_entry, field_start,
++ &tmp_val);
++ if (err)
++ goto out;
++
++ value = merge_value(value, tmp_val,
++ get_mask(field->size),
++ field_start - req_start);
++ }
++ }
++
++ out:
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x = %x\n",
++ pci_name(dev), size, offset, value);
++
++ *ret_val = value;
++ return pcibios_err_to_errno(err);
++}
++
++int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
++{
++ int err = 0, handled = 0;
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry;
++ struct config_field *field;
++ u32 tmp_val;
++ int req_start, req_end, field_start, field_end;
++
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG
++ "pciback: %s: write request %d bytes at 0x%x = %x\n",
++ pci_name(dev), size, offset, value);
++
++ if (!valid_request(offset, size))
++ return XEN_PCI_ERR_invalid_offset;
++
++ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++ field = cfg_entry->field;
++
++ req_start = offset;
++ req_end = offset + size;
++ field_start = OFFSET(cfg_entry);
++ field_end = OFFSET(cfg_entry) + field->size;
++
++ if ((req_start >= field_start && req_start < field_end)
++ || (req_end > field_start && req_end <= field_end)) {
++ tmp_val = 0;
++
++ err = pciback_config_read(dev, field_start,
++ field->size, &tmp_val);
++ if (err)
++ break;
++
++ tmp_val = merge_value(tmp_val, value, get_mask(size),
++ req_start - field_start);
++
++ err = conf_space_write(dev, cfg_entry, field_start,
++ tmp_val);
++
++ /* handled is set true here, but not every byte
++ * may have been written! Properly detecting if
++ * every byte is handled is unnecessary as the
++ * flag is used to detect devices that need
++ * special helpers to work correctly.
++ */
++ handled = 1;
++ }
++ }
++
++ if (!handled && !err) {
++		/* By default, anything not specifically handled above is
++ * read-only. The permissive flag changes this behavior so
++ * that anything not specifically handled above is writable.
++ * This means that some fields may still be read-only because
++ * they have entries in the config_field list that intercept
++ * the write and do nothing. */
++ if (dev_data->permissive) {
++ switch (size) {
++ case 1:
++ err = pci_write_config_byte(dev, offset,
++ (u8) value);
++ break;
++ case 2:
++ err = pci_write_config_word(dev, offset,
++ (u16) value);
++ break;
++ case 4:
++ err = pci_write_config_dword(dev, offset,
++ (u32) value);
++ break;
++ }
++ } else if (!dev_data->warned_on_write) {
++ dev_data->warned_on_write = 1;
++ dev_warn(&dev->dev, "Driver tried to write to a "
++ "read-only configuration space field at offset "
++ "0x%x, size %d. This may be harmless, but if "
++ "you have problems with your device:\n"
++ "1) see permissive attribute in sysfs\n"
++ "2) report problems to the xen-devel "
++ "mailing list along with details of your "
++ "device obtained from lspci.\n", offset, size);
++ }
++ }
++
++ return pcibios_err_to_errno(err);
++}
++
++void pciback_config_free_dyn_fields(struct pci_dev *dev)
++{
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry, *t;
++ struct config_field *field;
++
++ dev_dbg(&dev->dev,
++		"freeing dynamically allocated virtual configuration space fields\n");
++
++ list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
++ field = cfg_entry->field;
++
++ if (field->clean) {
++ field->clean(field);
++
++ if (cfg_entry->data)
++ kfree(cfg_entry->data);
++
++ list_del(&cfg_entry->list);
++ kfree(cfg_entry);
++ }
++
++ }
++}
++
++void pciback_config_reset_dev(struct pci_dev *dev)
++{
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry;
++ struct config_field *field;
++
++ dev_dbg(&dev->dev, "resetting virtual configuration space\n");
++
++ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++ field = cfg_entry->field;
++
++ if (field->reset)
++ field->reset(dev, OFFSET(cfg_entry), cfg_entry->data);
++ }
++}
++
++void pciback_config_free_dev(struct pci_dev *dev)
++{
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry, *t;
++ struct config_field *field;
++
++	dev_dbg(&dev->dev, "freeing virtual configuration space fields\n");
++
++ list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
++ list_del(&cfg_entry->list);
++
++ field = cfg_entry->field;
++
++ if (field->release)
++ field->release(dev, OFFSET(cfg_entry), cfg_entry->data);
++
++ kfree(cfg_entry);
++ }
++}
++
++int pciback_config_add_field_offset(struct pci_dev *dev,
++ struct config_field *field,
++ unsigned int base_offset)
++{
++ int err = 0;
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry;
++ void *tmp;
++
++ cfg_entry = kmalloc(sizeof(*cfg_entry), GFP_KERNEL);
++ if (!cfg_entry) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ cfg_entry->data = NULL;
++ cfg_entry->field = field;
++ cfg_entry->base_offset = base_offset;
++
++ /* silently ignore duplicate fields */
++ err = pciback_field_is_dup(dev,OFFSET(cfg_entry));
++ if (err)
++ goto out;
++
++ if (field->init) {
++ tmp = field->init(dev, OFFSET(cfg_entry));
++
++ if (IS_ERR(tmp)) {
++ err = PTR_ERR(tmp);
++ goto out;
++ }
++
++ cfg_entry->data = tmp;
++ }
++
++ dev_dbg(&dev->dev, "added config field at offset 0x%02x\n",
++ OFFSET(cfg_entry));
++ list_add_tail(&cfg_entry->list, &dev_data->config_fields);
++
++ out:
++ if (err)
++ kfree(cfg_entry);
++
++ return err;
++}
++
++/* This sets up the device's virtual configuration space to keep track of
++ * certain registers (like the base address registers (BARs)) so that we
++ * can keep the client from manipulating them directly.
++ */
++int pciback_config_init_dev(struct pci_dev *dev)
++{
++ int err = 0;
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++
++ dev_dbg(&dev->dev, "initializing virtual configuration space\n");
++
++ INIT_LIST_HEAD(&dev_data->config_fields);
++
++ err = pciback_config_header_add_fields(dev);
++ if (err)
++ goto out;
++
++ err = pciback_config_capability_add_fields(dev);
++ if (err)
++ goto out;
++
++ err = pciback_config_quirks_init(dev);
++
++ out:
++ return err;
++}
++
++int pciback_config_init(void)
++{
++ return pciback_config_capability_init();
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/conf_space.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,126 @@
++/*
++ * PCI Backend - Common data structures for overriding the configuration space
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#ifndef __XEN_PCIBACK_CONF_SPACE_H__
++#define __XEN_PCIBACK_CONF_SPACE_H__
++
++#include <linux/list.h>
++#include <linux/err.h>
++
++/* conf_field_init can return an errno in a ptr with ERR_PTR() */
++typedef void *(*conf_field_init) (struct pci_dev * dev, int offset);
++typedef void (*conf_field_reset) (struct pci_dev * dev, int offset, void *data);
++typedef void (*conf_field_free) (struct pci_dev * dev, int offset, void *data);
++
++typedef int (*conf_dword_write) (struct pci_dev * dev, int offset, u32 value,
++ void *data);
++typedef int (*conf_word_write) (struct pci_dev * dev, int offset, u16 value,
++ void *data);
++typedef int (*conf_byte_write) (struct pci_dev * dev, int offset, u8 value,
++ void *data);
++typedef int (*conf_dword_read) (struct pci_dev * dev, int offset, u32 * value,
++ void *data);
++typedef int (*conf_word_read) (struct pci_dev * dev, int offset, u16 * value,
++ void *data);
++typedef int (*conf_byte_read) (struct pci_dev * dev, int offset, u8 * value,
++ void *data);
++
++/* These are the fields within the configuration space which we
++ * are interested in intercepting reads/writes to and changing their
++ * values.
++ */
++struct config_field {
++ unsigned int offset;
++ unsigned int size;
++ unsigned int mask;
++ conf_field_init init;
++ conf_field_reset reset;
++ conf_field_free release;
++ void (*clean) (struct config_field * field);
++ union {
++ struct {
++ conf_dword_write write;
++ conf_dword_read read;
++ } dw;
++ struct {
++ conf_word_write write;
++ conf_word_read read;
++ } w;
++ struct {
++ conf_byte_write write;
++ conf_byte_read read;
++ } b;
++ } u;
++ struct list_head list;
++};
++
++struct config_field_entry {
++ struct list_head list;
++ struct config_field *field;
++ unsigned int base_offset;
++ void *data;
++};
++
++#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
++
++/* Add fields to a device - the add_fields helpers expect a pointer to
++ * the first entry in an array (whose end is marked by size == 0)
++ */
++int pciback_config_add_field_offset(struct pci_dev *dev,
++ struct config_field *field,
++ unsigned int offset);
++
++static inline int pciback_config_add_field(struct pci_dev *dev,
++ struct config_field *field)
++{
++ return pciback_config_add_field_offset(dev, field, 0);
++}
++
++static inline int pciback_config_add_fields(struct pci_dev *dev,
++ struct config_field *field)
++{
++ int i, err = 0;
++ for (i = 0; field[i].size != 0; i++) {
++ err = pciback_config_add_field(dev, &field[i]);
++ if (err)
++ break;
++ }
++ return err;
++}
++
++static inline int pciback_config_add_fields_offset(struct pci_dev *dev,
++ struct config_field *field,
++ unsigned int offset)
++{
++ int i, err = 0;
++ for (i = 0; field[i].size != 0; i++) {
++ err = pciback_config_add_field_offset(dev, &field[i], offset);
++ if (err)
++ break;
++ }
++ return err;
++}
++
++/* Read/Write the real configuration space */
++int pciback_read_config_byte(struct pci_dev *dev, int offset, u8 * value,
++ void *data);
++int pciback_read_config_word(struct pci_dev *dev, int offset, u16 * value,
++ void *data);
++int pciback_read_config_dword(struct pci_dev *dev, int offset, u32 * value,
++ void *data);
++int pciback_write_config_byte(struct pci_dev *dev, int offset, u8 value,
++ void *data);
++int pciback_write_config_word(struct pci_dev *dev, int offset, u16 value,
++ void *data);
++int pciback_write_config_dword(struct pci_dev *dev, int offset, u32 value,
++ void *data);
++
++int pciback_config_capability_init(void);
++
++int pciback_config_header_add_fields(struct pci_dev *dev);
++int pciback_config_capability_add_fields(struct pci_dev *dev);
++
++#endif /* __XEN_PCIBACK_CONF_SPACE_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/conf_space_capability.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,71 @@
++/*
++ * PCI Backend - Handles the virtual fields found on the capability lists
++ * in the configuration space.
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include "pciback.h"
++#include "conf_space.h"
++#include "conf_space_capability.h"
++
++static LIST_HEAD(capabilities);
++
++static struct config_field caplist_header[] = {
++ {
++ .offset = PCI_CAP_LIST_ID,
++ .size = 2, /* encompass PCI_CAP_LIST_ID & PCI_CAP_LIST_NEXT */
++ .u.w.read = pciback_read_config_word,
++ .u.w.write = NULL,
++ },
++ {
++ .size = 0,
++ },
++};
++
++static inline void register_capability(struct pciback_config_capability *cap)
++{
++ list_add_tail(&cap->cap_list, &capabilities);
++}
++
++int pciback_config_capability_add_fields(struct pci_dev *dev)
++{
++ int err = 0;
++ struct pciback_config_capability *cap;
++ int cap_offset;
++
++ list_for_each_entry(cap, &capabilities, cap_list) {
++ cap_offset = pci_find_capability(dev, cap->capability);
++ if (cap_offset) {
++ dev_dbg(&dev->dev, "Found capability 0x%x at 0x%x\n",
++ cap->capability, cap_offset);
++
++ err = pciback_config_add_fields_offset(dev,
++ caplist_header,
++ cap_offset);
++ if (err)
++ goto out;
++ err = pciback_config_add_fields_offset(dev,
++ cap->fields,
++ cap_offset);
++ if (err)
++ goto out;
++ }
++ }
++
++ out:
++ return err;
++}
++
++extern struct pciback_config_capability pciback_config_capability_vpd;
++extern struct pciback_config_capability pciback_config_capability_pm;
++
++int pciback_config_capability_init(void)
++{
++ register_capability(&pciback_config_capability_vpd);
++ register_capability(&pciback_config_capability_pm);
++
++ return 0;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/conf_space_capability.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,23 @@
++/*
++ * PCI Backend - Data structures for special overlays for structures on
++ * the capability list.
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#ifndef __PCIBACK_CONFIG_CAPABILITY_H__
++#define __PCIBACK_CONFIG_CAPABILITY_H__
++
++#include <linux/pci.h>
++#include <linux/list.h>
++
++struct pciback_config_capability {
++ struct list_head cap_list;
++
++ int capability;
++
++ /* If the device has the capability found above, add these fields */
++ struct config_field *fields;
++};
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/conf_space_capability_pm.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,128 @@
++/*
++ * PCI Backend - Configuration space overlay for power management
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/pci.h>
++#include "conf_space.h"
++#include "conf_space_capability.h"
++
++static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value,
++ void *data)
++{
++ int err;
++ u16 real_value;
++
++ err = pci_read_config_word(dev, offset, &real_value);
++ if (err)
++ goto out;
++
++ *value = real_value & ~PCI_PM_CAP_PME_MASK;
++
++ out:
++ return err;
++}
++
++/* PM_OK_BITS specifies the bits that the driver domain is allowed to change.
++ * Can't allow driver domain to enable PMEs - they're shared */
++#define PM_OK_BITS (PCI_PM_CTRL_PME_STATUS|PCI_PM_CTRL_DATA_SEL_MASK)
++
++static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
++ void *data)
++{
++ int err;
++ u16 old_value;
++ pci_power_t new_state, old_state;
++
++ err = pci_read_config_word(dev, offset, &old_value);
++ if (err)
++ goto out;
++
++ old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
++ new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
++
++ new_value &= PM_OK_BITS;
++ if ((old_value & PM_OK_BITS) != new_value) {
++ new_value = (old_value & ~PM_OK_BITS) | new_value;
++ err = pci_write_config_word(dev, offset, new_value);
++ if (err)
++ goto out;
++ }
++
++ /* Let pci core handle the power management change */
++ dev_dbg(&dev->dev, "set power state to %x\n", new_state);
++ err = pci_set_power_state(dev, new_state);
++ if (err) {
++ err = PCIBIOS_SET_FAILED;
++ goto out;
++ }
++
++ /*
++ * Device may lose PCI config info on D3->D0 transition. This
++ * is a problem for some guests which will not reset BARs. Even
++	 * those that try will be foiled by our BAR-write handler,
++ * which will discard the write! Since Linux won't re-init
++ * the config space automatically in all cases, we do it here.
++ * Future: Should we re-initialise all first 64 bytes of config space?
++	 * Future: Should we re-initialise the first 64 bytes of config space?
++ if (new_state == PCI_D0 &&
++ (old_state == PCI_D3hot || old_state == PCI_D3cold) &&
++ !(old_value & PCI_PM_CTRL_NO_SOFT_RESET))
++ pci_restore_bars(dev);
++
++ out:
++ return err;
++}
++
++/* Ensure PMEs are disabled */
++static void *pm_ctrl_init(struct pci_dev *dev, int offset)
++{
++ int err;
++ u16 value;
++
++ err = pci_read_config_word(dev, offset, &value);
++ if (err)
++ goto out;
++
++ if (value & PCI_PM_CTRL_PME_ENABLE) {
++ value &= ~PCI_PM_CTRL_PME_ENABLE;
++ err = pci_write_config_word(dev, offset, value);
++ }
++
++ out:
++ return ERR_PTR(err);
++}
++
++static struct config_field caplist_pm[] = {
++ {
++ .offset = PCI_PM_PMC,
++ .size = 2,
++ .u.w.read = pm_caps_read,
++ },
++ {
++ .offset = PCI_PM_CTRL,
++ .size = 2,
++ .init = pm_ctrl_init,
++ .u.w.read = pciback_read_config_word,
++ .u.w.write = pm_ctrl_write,
++ },
++ {
++ .offset = PCI_PM_PPB_EXTENSIONS,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ },
++ {
++ .offset = PCI_PM_DATA_REGISTER,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ },
++ {
++ .size = 0,
++ },
++};
++
++struct pciback_config_capability pciback_config_capability_pm = {
++ .capability = PCI_CAP_ID_PM,
++ .fields = caplist_pm,
++};
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/conf_space_capability_vpd.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,42 @@
++/*
++ * PCI Backend - Configuration space overlay for Vital Product Data
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/pci.h>
++#include "conf_space.h"
++#include "conf_space_capability.h"
++
++static int vpd_address_write(struct pci_dev *dev, int offset, u16 value,
++ void *data)
++{
++ /* Disallow writes to the vital product data */
++ if (value & PCI_VPD_ADDR_F)
++ return PCIBIOS_SET_FAILED;
++ else
++ return pci_write_config_word(dev, offset, value);
++}
++
++static struct config_field caplist_vpd[] = {
++ {
++ .offset = PCI_VPD_ADDR,
++ .size = 2,
++ .u.w.read = pciback_read_config_word,
++ .u.w.write = vpd_address_write,
++ },
++ {
++ .offset = PCI_VPD_DATA,
++ .size = 4,
++ .u.dw.read = pciback_read_config_dword,
++ .u.dw.write = NULL,
++ },
++ {
++ .size = 0,
++ },
++};
++
++struct pciback_config_capability pciback_config_capability_vpd = {
++ .capability = PCI_CAP_ID_VPD,
++ .fields = caplist_vpd,
++};
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/conf_space_header.c 2007-08-27 14:02:01.000000000 -0400
+@@ -0,0 +1,309 @@
++/*
++ * PCI Backend - Handles the virtual fields in the configuration space headers.
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include "pciback.h"
++#include "conf_space.h"
++
++struct pci_bar_info {
++ u32 val;
++ u32 len_val;
++ int which;
++};
++
++#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
++#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
++
++static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
++{
++ int err;
++
++ if (!dev->is_enabled && is_enable_cmd(value)) {
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG "pciback: %s: enable\n",
++ pci_name(dev));
++ err = pci_enable_device(dev);
++ if (err)
++ return err;
++ } else if (dev->is_enabled && !is_enable_cmd(value)) {
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG "pciback: %s: disable\n",
++ pci_name(dev));
++ pci_disable_device(dev);
++ }
++
++ if (!dev->is_busmaster && is_master_cmd(value)) {
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG "pciback: %s: set bus master\n",
++ pci_name(dev));
++ pci_set_master(dev);
++ }
++
++ if (value & PCI_COMMAND_INVALIDATE) {
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG
++ "pciback: %s: enable memory-write-invalidate\n",
++ pci_name(dev));
++ err = pci_set_mwi(dev);
++ if (err) {
++ printk(KERN_WARNING
++ "pciback: %s: cannot enable memory-write-invalidate (%d)\n",
++ pci_name(dev), err);
++ value &= ~PCI_COMMAND_INVALIDATE;
++ }
++ }
++
++ return pci_write_config_word(dev, offset, value);
++}
++
++static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
++{
++ struct pci_bar_info *bar = data;
++
++ if (unlikely(!bar)) {
++ printk(KERN_WARNING "pciback: driver data not found for %s\n",
++ pci_name(dev));
++ return XEN_PCI_ERR_op_failed;
++ }
++
++ /* A write to obtain the length must happen as a 32-bit write.
++ * This does not (yet) support writing individual bytes
++ */
++ if (value == ~PCI_ROM_ADDRESS_ENABLE)
++ bar->which = 1;
++ else
++ bar->which = 0;
++
++ /* Do we need to support enabling/disabling the rom address here? */
++
++ return 0;
++}
++
++/* For the BARs, only allow writes which write ~0 or
++ * the correct resource information
++ * (Needed for when the driver probes the resource usage)
++ */
++static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
++{
++ struct pci_bar_info *bar = data;
++
++ if (unlikely(!bar)) {
++ printk(KERN_WARNING "pciback: driver data not found for %s\n",
++ pci_name(dev));
++ return XEN_PCI_ERR_op_failed;
++ }
++
++ /* A write to obtain the length must happen as a 32-bit write.
++ * This does not (yet) support writing individual bytes
++ */
++ if (value == ~0)
++ bar->which = 1;
++ else
++ bar->which = 0;
++
++ return 0;
++}
++
++static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data)
++{
++ struct pci_bar_info *bar = data;
++
++ if (unlikely(!bar)) {
++ printk(KERN_WARNING "pciback: driver data not found for %s\n",
++ pci_name(dev));
++ return XEN_PCI_ERR_op_failed;
++ }
++
++ *value = bar->which ? bar->len_val : bar->val;
++
++ return 0;
++}
++
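++/*
++ * Standard BAR sizing dance: save the current BAR value, write the
++ * all-ones length mask, read back the size mask the device reports,
++ * then restore the original value.  bar_read() later returns either
++ * the saved value or the size mask, depending on bar->which.
++ */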
++static inline void read_dev_bar(struct pci_dev *dev,
++ struct pci_bar_info *bar_info, int offset,
++ u32 len_mask)
++{
++ pci_read_config_dword(dev, offset, &bar_info->val);
++ pci_write_config_dword(dev, offset, len_mask);
++ pci_read_config_dword(dev, offset, &bar_info->len_val);
++ pci_write_config_dword(dev, offset, bar_info->val);
++}
++
++static void *bar_init(struct pci_dev *dev, int offset)
++{
++ struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
++
++ if (!bar)
++ return ERR_PTR(-ENOMEM);
++
++ read_dev_bar(dev, bar, offset, ~0);
++ bar->which = 0;
++
++ return bar;
++}
++
++static void *rom_init(struct pci_dev *dev, int offset)
++{
++ struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
++
++ if (!bar)
++ return ERR_PTR(-ENOMEM);
++
++ read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
++ bar->which = 0;
++
++ return bar;
++}
++
++static void bar_reset(struct pci_dev *dev, int offset, void *data)
++{
++ struct pci_bar_info *bar = data;
++
++ bar->which = 0;
++}
++
++static void bar_release(struct pci_dev *dev, int offset, void *data)
++{
++ kfree(data);
++}
++
++static int interrupt_read(struct pci_dev *dev, int offset, u8 * value,
++ void *data)
++{
++ *value = (u8) dev->irq;
++
++ return 0;
++}
++
++static int bist_write(struct pci_dev *dev, int offset, u8 value, void *data)
++{
++ u8 cur_value;
++ int err;
++
++ err = pci_read_config_byte(dev, offset, &cur_value);
++ if (err)
++ goto out;
++
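++	/* Only allow BIST writes that either leave the non-start bits
++	 * unchanged or simply kick off a self-test. */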
++ if ((cur_value & ~PCI_BIST_START) == (value & ~PCI_BIST_START)
++ || value == PCI_BIST_START)
++ err = pci_write_config_byte(dev, offset, value);
++
++ out:
++ return err;
++}
++
++static struct config_field header_common[] = {
++ {
++ .offset = PCI_COMMAND,
++ .size = 2,
++ .u.w.read = pciback_read_config_word,
++ .u.w.write = command_write,
++ },
++ {
++ .offset = PCI_INTERRUPT_LINE,
++ .size = 1,
++ .u.b.read = interrupt_read,
++ },
++ {
++ .offset = PCI_INTERRUPT_PIN,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ },
++ {
++ /* Any side effects of letting driver domain control cache line? */
++ .offset = PCI_CACHE_LINE_SIZE,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ .u.b.write = pciback_write_config_byte,
++ },
++ {
++ .offset = PCI_LATENCY_TIMER,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ },
++ {
++ .offset = PCI_BIST,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ .u.b.write = bist_write,
++ },
++ {
++ .size = 0,
++ },
++};
++
++#define CFG_FIELD_BAR(reg_offset) \
++ { \
++ .offset = reg_offset, \
++ .size = 4, \
++ .init = bar_init, \
++ .reset = bar_reset, \
++ .release = bar_release, \
++ .u.dw.read = bar_read, \
++ .u.dw.write = bar_write, \
++ }
++
++#define CFG_FIELD_ROM(reg_offset) \
++ { \
++ .offset = reg_offset, \
++ .size = 4, \
++ .init = rom_init, \
++ .reset = bar_reset, \
++ .release = bar_release, \
++ .u.dw.read = bar_read, \
++ .u.dw.write = rom_write, \
++ }
++
++static struct config_field header_0[] = {
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_2),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_3),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_4),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_5),
++ CFG_FIELD_ROM(PCI_ROM_ADDRESS),
++ {
++ .size = 0,
++ },
++};
++
++static struct config_field header_1[] = {
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
++ CFG_FIELD_ROM(PCI_ROM_ADDRESS1),
++ {
++ .size = 0,
++ },
++};
++
++int pciback_config_header_add_fields(struct pci_dev *dev)
++{
++ int err;
++
++ err = pciback_config_add_fields(dev, header_common);
++ if (err)
++ goto out;
++
++ switch (dev->hdr_type) {
++ case PCI_HEADER_TYPE_NORMAL:
++ err = pciback_config_add_fields(dev, header_0);
++ break;
++
++ case PCI_HEADER_TYPE_BRIDGE:
++ err = pciback_config_add_fields(dev, header_1);
++ break;
++
++ default:
++ err = -EINVAL;
++ printk(KERN_ERR "pciback: %s: Unsupported header type %d!\n",
++ pci_name(dev), dev->hdr_type);
++ break;
++ }
++
++ out:
++ return err;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/conf_space_quirks.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,126 @@
++/*
++ * PCI Backend - Handle special overlays for broken devices.
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ * Author: Chris Bookholt <hap10@epoch.ncsc.mil>
++ */
++
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include "pciback.h"
++#include "conf_space.h"
++#include "conf_space_quirks.h"
++
++LIST_HEAD(pciback_quirks);
++
++struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev)
++{
++ struct pciback_config_quirk *tmp_quirk;
++
++ list_for_each_entry(tmp_quirk, &pciback_quirks, quirks_list)
++ if (pci_match_id(&tmp_quirk->devid, dev))
++ goto out;
++ tmp_quirk = NULL;
++ printk(KERN_DEBUG
++ "quirk didn't match any device pciback knows about\n");
++ out:
++ return tmp_quirk;
++}
++
++static inline void register_quirk(struct pciback_config_quirk *quirk)
++{
++ list_add_tail(&quirk->quirks_list, &pciback_quirks);
++}
++
++int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg)
++{
++ int ret = 0;
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry;
++
++ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++ if ( OFFSET(cfg_entry) == reg) {
++ ret = 1;
++ break;
++ }
++ }
++ return ret;
++}
++
++int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
++ *field)
++{
++ int err = 0;
++
++ switch (field->size) {
++ case 1:
++ field->u.b.read = pciback_read_config_byte;
++ field->u.b.write = pciback_write_config_byte;
++ break;
++ case 2:
++ field->u.w.read = pciback_read_config_word;
++ field->u.w.write = pciback_write_config_word;
++ break;
++ case 4:
++ field->u.dw.read = pciback_read_config_dword;
++ field->u.dw.write = pciback_write_config_dword;
++ break;
++ default:
++ err = -EINVAL;
++ goto out;
++ }
++
++ pciback_config_add_field(dev, field);
++
++ out:
++ return err;
++}
++
++int pciback_config_quirks_init(struct pci_dev *dev)
++{
++ struct pciback_config_quirk *quirk;
++ int ret = 0;
++
++ quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC);
++ if (!quirk) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ quirk->devid.vendor = dev->vendor;
++ quirk->devid.device = dev->device;
++ quirk->devid.subvendor = dev->subsystem_vendor;
++ quirk->devid.subdevice = dev->subsystem_device;
++ quirk->devid.class = 0;
++ quirk->devid.class_mask = 0;
++ quirk->devid.driver_data = 0UL;
++
++ quirk->pdev = dev;
++
++ register_quirk(quirk);
++ out:
++ return ret;
++}
++
++void pciback_config_field_free(struct config_field *field)
++{
++ kfree(field);
++}
++
++int pciback_config_quirk_release(struct pci_dev *dev)
++{
++ struct pciback_config_quirk *quirk;
++ int ret = 0;
++
++ quirk = pciback_find_quirk(dev);
++ if (!quirk) {
++ ret = -ENXIO;
++ goto out;
++ }
++
++ list_del(&quirk->quirks_list);
++ kfree(quirk);
++
++ out:
++ return ret;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/conf_space_quirks.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,35 @@
++/*
++ * PCI Backend - Data structures for special overlays for broken devices.
++ *
++ * Ryan Wilson <hap9@epoch.ncsc.mil>
++ * Chris Bookholt <hap10@epoch.ncsc.mil>
++ */
++
++#ifndef __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
++#define __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
++
++#include <linux/pci.h>
++#include <linux/list.h>
++
++struct pciback_config_quirk {
++ struct list_head quirks_list;
++ struct pci_device_id devid;
++ struct pci_dev *pdev;
++};
++
++struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev);
++
++int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
++ *field);
++
++int pciback_config_quirks_remove_field(struct pci_dev *dev, int reg);
++
++int pciback_config_quirks_init(struct pci_dev *dev);
++
++void pciback_config_field_free(struct config_field *field);
++
++int pciback_config_quirk_release(struct pci_dev *dev);
++
++int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg);
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/passthrough.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,157 @@
++/*
++ * PCI Backend - Provides restricted access to the real PCI bus topology
++ * to the frontend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/list.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pciback.h"
++
++struct passthrough_dev_data {
++ /* Access to dev_list must be protected by lock */
++ struct list_head dev_list;
++ spinlock_t lock;
++};
++
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn)
++{
++ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++ struct pci_dev_entry *dev_entry;
++ struct pci_dev *dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dev_data->lock, flags);
++
++ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
++ if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
++ && bus == (unsigned int)dev_entry->dev->bus->number
++ && devfn == dev_entry->dev->devfn) {
++ dev = dev_entry->dev;
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++
++ return dev;
++}
++
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++{
++ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++ struct pci_dev_entry *dev_entry;
++ unsigned long flags;
++
++ dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
++ if (!dev_entry)
++ return -ENOMEM;
++ dev_entry->dev = dev;
++
++ spin_lock_irqsave(&dev_data->lock, flags);
++ list_add_tail(&dev_entry->list, &dev_data->dev_list);
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++
++ return 0;
++}
++
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++{
++ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++ struct pci_dev_entry *dev_entry, *t;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dev_data->lock, flags);
++
++ list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
++ if (dev_entry->dev == dev) {
++ list_del(&dev_entry->list);
++ found_dev = dev_entry->dev;
++ kfree(dev_entry);
++ }
++ }
++
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++
++ if (found_dev)
++ pcistub_put_pci_dev(found_dev);
++}
++
++int pciback_init_devices(struct pciback_device *pdev)
++{
++ struct passthrough_dev_data *dev_data;
++
++ dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
++ if (!dev_data)
++ return -ENOMEM;
++
++ spin_lock_init(&dev_data->lock);
++
++ INIT_LIST_HEAD(&dev_data->dev_list);
++
++ pdev->pci_dev_data = dev_data;
++
++ return 0;
++}
++
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++ publish_pci_root_cb publish_root_cb)
++{
++ int err = 0;
++ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++ struct pci_dev_entry *dev_entry, *e;
++ struct pci_dev *dev;
++ int found;
++ unsigned int domain, bus;
++
++ spin_lock(&dev_data->lock);
++
++ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
++ /* Only publish this device as a root if none of its
++ * parent bridges are exported
++ */
++ found = 0;
++ dev = dev_entry->dev->bus->self;
++ for (; !found && dev != NULL; dev = dev->bus->self) {
++ list_for_each_entry(e, &dev_data->dev_list, list) {
++ if (dev == e->dev) {
++ found = 1;
++ break;
++ }
++ }
++ }
++
++ domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
++ bus = (unsigned int)dev_entry->dev->bus->number;
++
++ if (!found) {
++ err = publish_root_cb(pdev, domain, bus);
++ if (err)
++ break;
++ }
++ }
++
++ spin_unlock(&dev_data->lock);
++
++ return err;
++}
++
++void pciback_release_devices(struct pciback_device *pdev)
++{
++ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++ struct pci_dev_entry *dev_entry, *t;
++
++ list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
++ list_del(&dev_entry->list);
++ pcistub_put_pci_dev(dev_entry->dev);
++ kfree(dev_entry);
++ }
++
++ kfree(dev_data);
++ pdev->pci_dev_data = NULL;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/pci_stub.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,929 @@
++/*
++ * PCI Stub Driver - Grabs devices in backend to be exported later
++ *
++ * Ryan Wilson <hap9@epoch.ncsc.mil>
++ * Chris Bookholt <hap10@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/kref.h>
++#include <asm/atomic.h>
++#include "pciback.h"
++#include "conf_space.h"
++#include "conf_space_quirks.h"
++
++static char *pci_devs_to_hide = NULL;
++module_param_named(hide, pci_devs_to_hide, charp, 0444);
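++/* Format (parsed in pcistub_init): a list of parenthesised hex
++ * "(domain:bus:slot.func)" or "(bus:slot.func)" entries,
++ * e.g. hide=(0000:01:00.0)(02:03.1)
++ */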
++
++struct pcistub_device_id {
++ struct list_head slot_list;
++ int domain;
++ unsigned char bus;
++ unsigned int devfn;
++};
++static LIST_HEAD(pcistub_device_ids);
++static DEFINE_SPINLOCK(device_ids_lock);
++
++struct pcistub_device {
++ struct kref kref;
++ struct list_head dev_list;
++ spinlock_t lock;
++
++ struct pci_dev *dev;
++ struct pciback_device *pdev; /* non-NULL if struct pci_dev is in use */
++};
++
++/* Access to pcistub_devices & seized_devices lists and the initialize_devices
++ * flag must be locked with pcistub_devices_lock
++ */
++static DEFINE_SPINLOCK(pcistub_devices_lock);
++static LIST_HEAD(pcistub_devices);
++
++/* wait for device_initcall before initializing our devices
++ * (see pcistub_init_devices_late)
++ */
++static int initialize_devices = 0;
++static LIST_HEAD(seized_devices);
++
++static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
++{
++ struct pcistub_device *psdev;
++
++ dev_dbg(&dev->dev, "pcistub_device_alloc\n");
++
++ psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
++ if (!psdev)
++ return NULL;
++
++ psdev->dev = pci_dev_get(dev);
++ if (!psdev->dev) {
++ kfree(psdev);
++ return NULL;
++ }
++
++ kref_init(&psdev->kref);
++ spin_lock_init(&psdev->lock);
++
++ return psdev;
++}
++
++/* Don't call this directly as it's called by pcistub_device_put */
++static void pcistub_device_release(struct kref *kref)
++{
++ struct pcistub_device *psdev;
++
++ psdev = container_of(kref, struct pcistub_device, kref);
++
++ dev_dbg(&psdev->dev->dev, "pcistub_device_release\n");
++
++ /* Clean up the device */
++ pciback_reset_device(psdev->dev);
++ pciback_config_free_dyn_fields(psdev->dev);
++ pciback_config_free_dev(psdev->dev);
++ kfree(pci_get_drvdata(psdev->dev));
++ pci_set_drvdata(psdev->dev, NULL);
++
++ pci_dev_put(psdev->dev);
++
++ kfree(psdev);
++}
++
++static inline void pcistub_device_get(struct pcistub_device *psdev)
++{
++ kref_get(&psdev->kref);
++}
++
++static inline void pcistub_device_put(struct pcistub_device *psdev)
++{
++ kref_put(&psdev->kref, pcistub_device_release);
++}
++
++static struct pcistub_device *pcistub_device_find(int domain, int bus,
++ int slot, int func)
++{
++ struct pcistub_device *psdev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (psdev->dev != NULL
++ && domain == pci_domain_nr(psdev->dev->bus)
++ && bus == psdev->dev->bus->number
++ && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
++ pcistub_device_get(psdev);
++ goto out;
++ }
++ }
++
++ /* didn't find it */
++ psdev = NULL;
++
++ out:
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++ return psdev;
++}
++
++static struct pci_dev *pcistub_device_get_pci_dev(struct pciback_device *pdev,
++ struct pcistub_device *psdev)
++{
++ struct pci_dev *pci_dev = NULL;
++ unsigned long flags;
++
++ pcistub_device_get(psdev);
++
++ spin_lock_irqsave(&psdev->lock, flags);
++ if (!psdev->pdev) {
++ psdev->pdev = pdev;
++ pci_dev = psdev->dev;
++ }
++ spin_unlock_irqrestore(&psdev->lock, flags);
++
++ if (!pci_dev)
++ pcistub_device_put(psdev);
++
++ return pci_dev;
++}
++
++struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
++ int domain, int bus,
++ int slot, int func)
++{
++ struct pcistub_device *psdev;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (psdev->dev != NULL
++ && domain == pci_domain_nr(psdev->dev->bus)
++ && bus == psdev->dev->bus->number
++ && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
++ found_dev = pcistub_device_get_pci_dev(pdev, psdev);
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++ return found_dev;
++}
++
++struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
++ struct pci_dev *dev)
++{
++ struct pcistub_device *psdev;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (psdev->dev == dev) {
++ found_dev = pcistub_device_get_pci_dev(pdev, psdev);
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++ return found_dev;
++}
++
++void pcistub_put_pci_dev(struct pci_dev *dev)
++{
++ struct pcistub_device *psdev, *found_psdev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (psdev->dev == dev) {
++ found_psdev = psdev;
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ /* Cleanup our device
++ * (so it's ready for the next domain)
++ */
++ pciback_reset_device(found_psdev->dev);
++ pciback_config_free_dyn_fields(found_psdev->dev);
++ pciback_config_reset_dev(found_psdev->dev);
++
++ spin_lock_irqsave(&found_psdev->lock, flags);
++ found_psdev->pdev = NULL;
++ spin_unlock_irqrestore(&found_psdev->lock, flags);
++
++ pcistub_device_put(found_psdev);
++}
++
++static int __devinit pcistub_match_one(struct pci_dev *dev,
++ struct pcistub_device_id *pdev_id)
++{
++ /* Match the specified device by domain, bus, slot, func and also if
++ * any of the device's parent bridges match.
++ */
++ for (; dev != NULL; dev = dev->bus->self) {
++ if (pci_domain_nr(dev->bus) == pdev_id->domain
++ && dev->bus->number == pdev_id->bus
++ && dev->devfn == pdev_id->devfn)
++ return 1;
++
++ /* Sometimes topmost bridge links to itself. */
++ if (dev == dev->bus->self)
++ break;
++ }
++
++ return 0;
++}
++
++static int __devinit pcistub_match(struct pci_dev *dev)
++{
++ struct pcistub_device_id *pdev_id;
++ unsigned long flags;
++ int found = 0;
++
++ spin_lock_irqsave(&device_ids_lock, flags);
++ list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
++ if (pcistub_match_one(dev, pdev_id)) {
++ found = 1;
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&device_ids_lock, flags);
++
++ return found;
++}
++
++static int __devinit pcistub_init_device(struct pci_dev *dev)
++{
++ struct pciback_dev_data *dev_data;
++ int err = 0;
++
++ dev_dbg(&dev->dev, "initializing...\n");
++
++ /* The PCI backend is not intended to be a module (or to work with
++ * removable PCI devices) yet. If it were, pciback_config_free()
++ * would need to be called somewhere to free the memory allocated
++ * here, followed by kfree(pci_get_drvdata(psdev->dev)).
++ */
++ dev_data = kzalloc(sizeof(*dev_data), GFP_ATOMIC);
++ if (!dev_data) {
++ err = -ENOMEM;
++ goto out;
++ }
++ pci_set_drvdata(dev, dev_data);
++
++ dev_dbg(&dev->dev, "initializing config\n");
++ err = pciback_config_init_dev(dev);
++ if (err)
++ goto out;
++
++ /* HACK: Force device (& ACPI) to determine what IRQ it's on - we
++ * must do this here because pcibios_enable_device may specify
++ * the pci device's true irq (and possibly its other resources)
++ * if they differ from what's in the configuration space.
++ * This makes the assumption that the device's resources won't
++ * change after this point (otherwise this code may break!)
++ */
++ dev_dbg(&dev->dev, "enabling device\n");
++ err = pci_enable_device(dev);
++ if (err)
++ goto config_release;
++
++ /* Now disable the device (this also ensures some private device
++ * data is setup before we export)
++ */
++ dev_dbg(&dev->dev, "reset device\n");
++ pciback_reset_device(dev);
++
++ return 0;
++
++ config_release:
++ pciback_config_free_dev(dev);
++
++ out:
++ pci_set_drvdata(dev, NULL);
++ kfree(dev_data);
++ return err;
++}
++
++/*
++ * Because some initialization still happens on
++ * devices during fs_initcall, we need to defer
++ * full initialization of our devices until
++ * device_initcall.
++ */
++static int __init pcistub_init_devices_late(void)
++{
++ struct pcistub_device *psdev;
++ unsigned long flags;
++ int err = 0;
++
++ pr_debug("pciback: pcistub_init_devices_late\n");
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ while (!list_empty(&seized_devices)) {
++ psdev = container_of(seized_devices.next,
++ struct pcistub_device, dev_list);
++ list_del(&psdev->dev_list);
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ err = pcistub_init_device(psdev->dev);
++ if (err) {
++ dev_err(&psdev->dev->dev,
++ "error %d initializing device\n", err);
++ kfree(psdev);
++ psdev = NULL;
++ }
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ if (psdev)
++ list_add_tail(&psdev->dev_list, &pcistub_devices);
++ }
++
++ initialize_devices = 1;
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ return 0;
++}
++
++static int __devinit pcistub_seize(struct pci_dev *dev)
++{
++ struct pcistub_device *psdev;
++ unsigned long flags;
++ int err = 0;
++
++ psdev = pcistub_device_alloc(dev);
++ if (!psdev)
++ return -ENOMEM;
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ if (initialize_devices) {
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ /* don't want irqs disabled when calling pcistub_init_device */
++ err = pcistub_init_device(psdev->dev);
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ if (!err)
++ list_add(&psdev->dev_list, &pcistub_devices);
++ } else {
++ dev_dbg(&dev->dev, "deferring initialization\n");
++ list_add(&psdev->dev_list, &seized_devices);
++ }
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ if (err)
++ pcistub_device_put(psdev);
++
++ return err;
++}
++
++static int __devinit pcistub_probe(struct pci_dev *dev,
++ const struct pci_device_id *id)
++{
++ int err = 0;
++
++ dev_dbg(&dev->dev, "probing...\n");
++
++ if (pcistub_match(dev)) {
++
++ if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
++ && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
++ dev_err(&dev->dev, "can't export pci devices that "
++ "don't have a normal (0) or bridge (1) "
++ "header type!\n");
++ err = -ENODEV;
++ goto out;
++ }
++
++ dev_info(&dev->dev, "seizing device\n");
++ err = pcistub_seize(dev);
++ } else
++ /* Didn't find the device */
++ err = -ENODEV;
++
++ out:
++ return err;
++}
++
++static void pcistub_remove(struct pci_dev *dev)
++{
++ struct pcistub_device *psdev, *found_psdev = NULL;
++ unsigned long flags;
++
++ dev_dbg(&dev->dev, "removing\n");
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ pciback_config_quirk_release(dev);
++
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (psdev->dev == dev) {
++ found_psdev = psdev;
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ if (found_psdev) {
++ dev_dbg(&dev->dev, "found device to remove - in use? %p\n",
++ found_psdev->pdev);
++
++ if (found_psdev->pdev) {
++ printk(KERN_WARNING "pciback: ****** removing device "
++ "%s while still in-use! ******\n",
++ pci_name(found_psdev->dev));
++ printk(KERN_WARNING "pciback: ****** driver domain may "
++ "still access this device's i/o resources!\n");
++ printk(KERN_WARNING "pciback: ****** shut down driver "
++ "domain before binding device\n");
++ printk(KERN_WARNING "pciback: ****** to other drivers "
++ "or domains\n");
++
++ pciback_release_pci_dev(found_psdev->pdev,
++ found_psdev->dev);
++ }
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++ list_del(&found_psdev->dev_list);
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ /* the final put for releasing from the list */
++ pcistub_device_put(found_psdev);
++ }
++}
++
++static struct pci_device_id pcistub_ids[] = {
++ {
++ .vendor = PCI_ANY_ID,
++ .device = PCI_ANY_ID,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ },
++ {0,},
++};
++
++/*
++ * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
++ * for a normal device. I don't want it to be loaded automatically.
++ */
++
++static struct pci_driver pciback_pci_driver = {
++ .name = "pciback",
++ .id_table = pcistub_ids,
++ .probe = pcistub_probe,
++ .remove = pcistub_remove,
++};
++
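++/* Parse a hex "domain:bus:slot.func" string; if the domain is omitted
++ * ("bus:slot.func"), it defaults to 0.
++ */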
++static inline int str_to_slot(const char *buf, int *domain, int *bus,
++ int *slot, int *func)
++{
++ int err;
++
++ err = sscanf(buf, " %x:%x:%x.%x", domain, bus, slot, func);
++ if (err == 4)
++ return 0;
++ else if (err < 0)
++ return -EINVAL;
++
++ /* try again without domain */
++ *domain = 0;
++ err = sscanf(buf, " %x:%x.%x", bus, slot, func);
++ if (err == 3)
++ return 0;
++
++ return -EINVAL;
++}
++
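++/* Parse a quirk of the form "domain:bus:slot.func-reg:size:mask"
++ * (all fields in hex).
++ */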
++static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
++ *slot, int *func, int *reg, int *size, int *mask)
++{
++ int err;
++
++ err =
++ sscanf(buf, " %04x:%02x:%02x.%1x-%08x:%1x:%08x", domain, bus, slot,
++ func, reg, size, mask);
++ if (err == 7)
++ return 0;
++ return -EINVAL;
++}
++
++static int pcistub_device_id_add(int domain, int bus, int slot, int func)
++{
++ struct pcistub_device_id *pci_dev_id;
++ unsigned long flags;
++
++ pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
++ if (!pci_dev_id)
++ return -ENOMEM;
++
++ pci_dev_id->domain = domain;
++ pci_dev_id->bus = bus;
++ pci_dev_id->devfn = PCI_DEVFN(slot, func);
++
++ pr_debug("pciback: wants to seize %04x:%02x:%02x.%01x\n",
++ domain, bus, slot, func);
++
++ spin_lock_irqsave(&device_ids_lock, flags);
++ list_add_tail(&pci_dev_id->slot_list, &pcistub_device_ids);
++ spin_unlock_irqrestore(&device_ids_lock, flags);
++
++ return 0;
++}
++
++static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
++{
++ struct pcistub_device_id *pci_dev_id, *t;
++ int devfn = PCI_DEVFN(slot, func);
++ int err = -ENOENT;
++ unsigned long flags;
++
++ spin_lock_irqsave(&device_ids_lock, flags);
++ list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids, slot_list) {
++
++ if (pci_dev_id->domain == domain
++ && pci_dev_id->bus == bus && pci_dev_id->devfn == devfn) {
++ /* Don't break here; it's possible the same
++ * slot could be in the list more than once
++ */
++ list_del(&pci_dev_id->slot_list);
++ kfree(pci_dev_id);
++
++ err = 0;
++
++ pr_debug("pciback: removed %04x:%02x:%02x.%01x from "
++ "seize list\n", domain, bus, slot, func);
++ }
++ }
++ spin_unlock_irqrestore(&device_ids_lock, flags);
++
++ return err;
++}
++
++static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg,
++ int size, int mask)
++{
++ int err = 0;
++ struct pcistub_device *psdev;
++ struct pci_dev *dev;
++ struct config_field *field;
++
++ psdev = pcistub_device_find(domain, bus, slot, func);
++ if (!psdev || !psdev->dev) {
++ err = -ENODEV;
++ goto out;
++ }
++ dev = psdev->dev;
++
++ field = kzalloc(sizeof(*field), GFP_ATOMIC);
++ if (!field) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ field->offset = reg;
++ field->size = size;
++ field->mask = mask;
++ field->init = NULL;
++ field->reset = NULL;
++ field->release = NULL;
++ field->clean = pciback_config_field_free;
++
++ err = pciback_config_quirks_add_field(dev, field);
++ if (err)
++ kfree(field);
++ out:
++ return err;
++}
++
++static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
++ size_t count)
++{
++ int domain, bus, slot, func;
++ int err;
++
++ err = str_to_slot(buf, &domain, &bus, &slot, &func);
++ if (err)
++ goto out;
++
++ err = pcistub_device_id_add(domain, bus, slot, func);
++
++ out:
++ if (!err)
++ err = count;
++ return err;
++}
++
++DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
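++/* This and the driver attributes below (remove_slot, slots, quirks,
++ * permissive) are exposed through sysfs, typically under
++ * /sys/bus/pci/drivers/pciback/.
++ */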
++
++static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
++ size_t count)
++{
++ int domain, bus, slot, func;
++ int err;
++
++ err = str_to_slot(buf, &domain, &bus, &slot, &func);
++ if (err)
++ goto out;
++
++ err = pcistub_device_id_remove(domain, bus, slot, func);
++
++ out:
++ if (!err)
++ err = count;
++ return err;
++}
++
++DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
++
++static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
++{
++ struct pcistub_device_id *pci_dev_id;
++ size_t count = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&device_ids_lock, flags);
++ list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
++ if (count >= PAGE_SIZE)
++ break;
++
++ count += scnprintf(buf + count, PAGE_SIZE - count,
++ "%04x:%02x:%02x.%01x\n",
++ pci_dev_id->domain, pci_dev_id->bus,
++ PCI_SLOT(pci_dev_id->devfn),
++ PCI_FUNC(pci_dev_id->devfn));
++ }
++ spin_unlock_irqrestore(&device_ids_lock, flags);
++
++ return count;
++}
++
++DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
++
++static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
++ size_t count)
++{
++ int domain, bus, slot, func, reg, size, mask;
++ int err;
++
++ err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
++ &mask);
++ if (err)
++ goto out;
++
++ err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);
++
++ out:
++ if (!err)
++ err = count;
++ return err;
++}
++
++static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
++{
++ int count = 0;
++ unsigned long flags;
++ extern struct list_head pciback_quirks;
++ struct pciback_config_quirk *quirk;
++ struct pciback_dev_data *dev_data;
++ struct config_field *field;
++ struct config_field_entry *cfg_entry;
++
++ spin_lock_irqsave(&device_ids_lock, flags);
++ list_for_each_entry(quirk, &pciback_quirks, quirks_list) {
++ if (count >= PAGE_SIZE)
++ goto out;
++
++ count += scnprintf(buf + count, PAGE_SIZE - count,
++ "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
++ quirk->pdev->bus->number,
++ PCI_SLOT(quirk->pdev->devfn),
++ PCI_FUNC(quirk->pdev->devfn),
++ quirk->devid.vendor, quirk->devid.device,
++ quirk->devid.subvendor,
++ quirk->devid.subdevice);
++
++ dev_data = pci_get_drvdata(quirk->pdev);
++
++ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++ field = cfg_entry->field;
++ if (count >= PAGE_SIZE)
++ goto out;
++
++ count += scnprintf(buf + count, PAGE_SIZE - count,
++ "\t\t%08x:%01x:%08x\n",
++ cfg_entry->base_offset + field->offset,
++ field->size, field->mask);
++ }
++ }
++
++ out:
++ spin_unlock_irqrestore(&device_ids_lock, flags);
++
++ return count;
++}
++
++DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, pcistub_quirk_add);
++
++static ssize_t permissive_add(struct device_driver *drv, const char *buf,
++ size_t count)
++{
++ int domain, bus, slot, func;
++ int err;
++ struct pcistub_device *psdev;
++ struct pciback_dev_data *dev_data;
++ err = str_to_slot(buf, &domain, &bus, &slot, &func);
++ if (err)
++ goto out;
++ psdev = pcistub_device_find(domain, bus, slot, func);
++ if (!psdev) {
++ err = -ENODEV;
++ goto out;
++ }
++ if (!psdev->dev) {
++ err = -ENODEV;
++ goto release;
++ }
++ dev_data = pci_get_drvdata(psdev->dev);
++ /* the driver data for a device should never be null at this point */
++ if (!dev_data) {
++ err = -ENXIO;
++ goto release;
++ }
++ if (!dev_data->permissive) {
++ dev_data->permissive = 1;
++ /* Let user know that what they're doing could be unsafe */
++ dev_warn(&psdev->dev->dev,
++ "enabling permissive mode configuration space accesses!\n");
++ dev_warn(&psdev->dev->dev,
++ "permissive mode is potentially unsafe!\n");
++ }
++ release:
++ pcistub_device_put(psdev);
++ out:
++ if (!err)
++ err = count;
++ return err;
++}
++
++static ssize_t permissive_show(struct device_driver *drv, char *buf)
++{
++ struct pcistub_device *psdev;
++ struct pciback_dev_data *dev_data;
++ size_t count = 0;
++ unsigned long flags;
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (count >= PAGE_SIZE)
++ break;
++ if (!psdev->dev)
++ continue;
++ dev_data = pci_get_drvdata(psdev->dev);
++ if (!dev_data || !dev_data->permissive)
++ continue;
++ count +=
++ scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
++ pci_name(psdev->dev));
++ }
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++ return count;
++}
++
++DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add);
++
++static void pcistub_exit(void)
++{
++ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_new_slot);
++ driver_remove_file(&pciback_pci_driver.driver,
++ &driver_attr_remove_slot);
++ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_slots);
++ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_quirks);
++ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_permissive);
++
++ pci_unregister_driver(&pciback_pci_driver);
++}
++
++static int __init pcistub_init(void)
++{
++ int pos = 0;
++ int err = 0;
++ int domain, bus, slot, func;
++ int parsed;
++
++ if (pci_devs_to_hide && *pci_devs_to_hide) {
++ do {
++ parsed = 0;
++
++ err = sscanf(pci_devs_to_hide + pos,
++ " (%x:%x:%x.%x) %n",
++ &domain, &bus, &slot, &func, &parsed);
++ if (err != 4) {
++ domain = 0;
++ err = sscanf(pci_devs_to_hide + pos,
++ " (%x:%x.%x) %n",
++ &bus, &slot, &func, &parsed);
++ if (err != 3)
++ goto parse_error;
++ }
++
++ err = pcistub_device_id_add(domain, bus, slot, func);
++ if (err)
++ goto out;
++
++ /* if parsed<=0, we've reached the end of the string */
++ pos += parsed;
++ } while (parsed > 0 && pci_devs_to_hide[pos]);
++ }
++
++ /* If we're the first PCI Device Driver to register, we're the
++ * first one to get offered PCI devices as they become
++ * available (and thus we can be the first to grab them)
++ */
++ err = pci_register_driver(&pciback_pci_driver);
++ if (err < 0)
++ goto out;
++
++ err = driver_create_file(&pciback_pci_driver.driver,
++ &driver_attr_new_slot);
++ if (!err)
++ err = driver_create_file(&pciback_pci_driver.driver,
++ &driver_attr_remove_slot);
++ if (!err)
++ err = driver_create_file(&pciback_pci_driver.driver,
++ &driver_attr_slots);
++ if (!err)
++ err = driver_create_file(&pciback_pci_driver.driver,
++ &driver_attr_quirks);
++ if (!err)
++ err = driver_create_file(&pciback_pci_driver.driver,
++ &driver_attr_permissive);
++
++ if (err)
++ pcistub_exit();
++
++ out:
++ return err;
++
++ parse_error:
++ printk(KERN_ERR "pciback: Error parsing pci_devs_to_hide at \"%s\"\n",
++ pci_devs_to_hide + pos);
++ return -EINVAL;
++}
++
++#ifndef MODULE
++/*
++ * fs_initcall happens before device_initcall
++ * so pciback *should* get called first (because we
++ * want to claim any device before other drivers
++ * get a chance, by being the first pci device
++ * driver to register)
++ */
++fs_initcall(pcistub_init);
++#endif
++
++static int __init pciback_init(void)
++{
++ int err;
++
++ err = pciback_config_init();
++ if (err)
++ return err;
++
++#ifdef MODULE
++ err = pcistub_init();
++ if (err < 0)
++ return err;
++#endif
++
++ pcistub_init_devices_late();
++ err = pciback_xenbus_register();
++ if (err)
++ pcistub_exit();
++
++ return err;
++}
++
++static void __exit pciback_cleanup(void)
++{
++ pciback_xenbus_unregister();
++ pcistub_exit();
++}
++
++module_init(pciback_init);
++module_exit(pciback_cleanup);
++
++MODULE_LICENSE("Dual BSD/GPL");
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/pciback.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,93 @@
++/*
++ * PCI Backend Common Data Structures & Function Declarations
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#ifndef __XEN_PCIBACK_H__
++#define __XEN_PCIBACK_H__
++
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++#include <xen/xenbus.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/workqueue.h>
++#include <asm/atomic.h>
++#include <xen/interface/io/pciif.h>
++
++struct pci_dev_entry {
++ struct list_head list;
++ struct pci_dev *dev;
++};
++
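++/* Flag in pciback_device->flags: set while an op from the frontend is
++ * being processed (see pciback_do_op and test_and_schedule_op).
++ */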
++#define _PDEVF_op_active (0)
++#define PDEVF_op_active (1<<(_PDEVF_op_active))
++
++struct pciback_device {
++ void *pci_dev_data;
++ spinlock_t dev_lock;
++
++ struct xenbus_device *xdev;
++
++ struct xenbus_watch be_watch;
++ u8 be_watching;
++
++ int evtchn_irq;
++
++ struct vm_struct *sh_area;
++ struct xen_pci_sharedinfo *sh_info;
++
++ unsigned long flags;
++
++ struct work_struct op_work;
++};
++
++struct pciback_dev_data {
++ struct list_head config_fields;
++ int permissive;
++ int warned_on_write;
++};
++
++/* Get/Put PCI Devices that are hidden from the PCI Backend Domain */
++struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
++ int domain, int bus,
++ int slot, int func);
++struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
++ struct pci_dev *dev);
++void pcistub_put_pci_dev(struct pci_dev *dev);
++
++/* Ensure a device is turned off or reset */
++void pciback_reset_device(struct pci_dev *pdev);
++
++/* Access a virtual configuration space for a PCI device */
++int pciback_config_init(void);
++int pciback_config_init_dev(struct pci_dev *dev);
++void pciback_config_free_dyn_fields(struct pci_dev *dev);
++void pciback_config_reset_dev(struct pci_dev *dev);
++void pciback_config_free_dev(struct pci_dev *dev);
++int pciback_config_read(struct pci_dev *dev, int offset, int size,
++ u32 * ret_val);
++int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value);
++
++/* Handle requests for specific devices from the frontend */
++typedef int (*publish_pci_root_cb) (struct pciback_device * pdev,
++ unsigned int domain, unsigned int bus);
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev);
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev);
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn);
++int pciback_init_devices(struct pciback_device *pdev);
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++ publish_pci_root_cb cb);
++void pciback_release_devices(struct pciback_device *pdev);
++
++/* Handles events from front-end */
++irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs);
++void pciback_do_op(void *data);
++
++int pciback_xenbus_register(void);
++void pciback_xenbus_unregister(void);
++
++extern int verbose_request;
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/pciback_ops.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,95 @@
++/*
++ * PCI Backend Operations - respond to PCI requests from Frontend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <asm/bitops.h>
++#include <xen/evtchn.h>
++#include "pciback.h"
++
++int verbose_request = 0;
++module_param(verbose_request, int, 0644);
++
++/* Ensure a device is "turned off" and ready to be exported.
++ * (Also see pciback_config_reset to ensure virtual configuration space is
++ * ready to be re-exported)
++ */
++void pciback_reset_device(struct pci_dev *dev)
++{
++ u16 cmd;
++
++ /* Disable devices (but not bridges) */
++ if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
++ pci_disable_device(dev);
++
++ pci_write_config_word(dev, PCI_COMMAND, 0);
++
++ dev->is_enabled = 0;
++ dev->is_busmaster = 0;
++ } else {
++ pci_read_config_word(dev, PCI_COMMAND, &cmd);
++ if (cmd & (PCI_COMMAND_INVALIDATE)) {
++ cmd &= ~(PCI_COMMAND_INVALIDATE);
++ pci_write_config_word(dev, PCI_COMMAND, cmd);
++
++ dev->is_busmaster = 0;
++ }
++ }
++}
++
++static inline void test_and_schedule_op(struct pciback_device *pdev)
++{
++ /* Check that frontend is requesting an operation and that we are not
++ * already processing a request */
++ if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
++ && !test_and_set_bit(_PDEVF_op_active, &pdev->flags))
++ schedule_work(&pdev->op_work);
++}
++
++/* Performing the configuration space reads/writes must not be done in atomic
++ * context because some of the pci_* functions can sleep (mostly due to ACPI
++ * use of semaphores). This function is intended to be called from a work
++ * queue in process context taking a struct pciback_device as a parameter */
++void pciback_do_op(void *data)
++{
++ struct pciback_device *pdev = data;
++ struct pci_dev *dev;
++ struct xen_pci_op *op = &pdev->sh_info->op;
++
++ dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
++
++ if (dev == NULL)
++ op->err = XEN_PCI_ERR_dev_not_found;
++ else if (op->cmd == XEN_PCI_OP_conf_read)
++ op->err = pciback_config_read(dev, op->offset, op->size,
++ &op->value);
++ else if (op->cmd == XEN_PCI_OP_conf_write)
++ op->err = pciback_config_write(dev, op->offset, op->size,
++ op->value);
++ else
++ op->err = XEN_PCI_ERR_not_implemented;
++
++ /* Tell the driver domain that we're done. */
++ wmb();
++ clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
++ notify_remote_via_irq(pdev->evtchn_irq);
++
++ /* Mark that we're done. */
++ smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
++ clear_bit(_PDEVF_op_active, &pdev->flags);
++ smp_mb__after_clear_bit(); /* /before/ final check for work */
++
++ /* Check to see if the driver domain tried to start another request in
++ * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. */
++ test_and_schedule_op(pdev);
++}
++
++irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs)
++{
++ struct pciback_device *pdev = dev_id;
++
++ test_and_schedule_op(pdev);
++
++ return IRQ_HANDLED;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/slot.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,151 @@
++/*
++ * PCI Backend - Provides a Virtual PCI bus (with real devices)
++ * to the frontend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil> (vpci.c)
++ * Author: Tristan Gingold <tristan.gingold@bull.net>, from vpci.c
++ */
++
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pciback.h"
++
++/* There are at most 32 slots in a pci bus. */
++#define PCI_SLOT_MAX 32
++
++#define PCI_BUS_NBR 2
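++/* The slot-based backend exposes PCI_BUS_NBR virtual buses, each with
++ * PCI_SLOT_MAX slots.
++ */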
++
++struct slot_dev_data {
++ /* Access to dev_list must be protected by lock */
++ struct pci_dev *slots[PCI_BUS_NBR][PCI_SLOT_MAX];
++ spinlock_t lock;
++};
++
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn)
++{
++ struct pci_dev *dev = NULL;
++ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++ unsigned long flags;
++
++ if (domain != 0 || PCI_FUNC(devfn) != 0)
++ return NULL;
++
++ if (PCI_SLOT(devfn) >= PCI_SLOT_MAX || bus >= PCI_BUS_NBR)
++ return NULL;
++
++ spin_lock_irqsave(&slot_dev->lock, flags);
++ dev = slot_dev->slots[bus][PCI_SLOT(devfn)];
++ spin_unlock_irqrestore(&slot_dev->lock, flags);
++
++ return dev;
++}
++
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++{
++ int err = 0, slot, bus;
++ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++ unsigned long flags;
++
++ if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
++ err = -EFAULT;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Can't export bridges on the virtual PCI bus");
++ goto out;
++ }
++
++ spin_lock_irqsave(&slot_dev->lock, flags);
++
++ /* Assign to a new slot on the virtual PCI bus */
++ for (bus = 0; bus < PCI_BUS_NBR; bus++)
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ if (slot_dev->slots[bus][slot] == NULL) {
++ printk(KERN_INFO
++ "pciback: slot: %s: assign to virtual slot %d, bus %d\n",
++ pci_name(dev), slot, bus);
++ slot_dev->slots[bus][slot] = dev;
++ goto unlock;
++ }
++ }
++
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "No more space on root virtual PCI bus");
++
++ unlock:
++ spin_unlock_irqrestore(&slot_dev->lock, flags);
++ out:
++ return err;
++}
++
++ /* Clean up our device
++{
++ int slot, bus;
++ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&slot_dev->lock, flags);
++
++ for (bus = 0; bus < PCI_BUS_NBR; bus++)
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ if (slot_dev->slots[bus][slot] == dev) {
++ slot_dev->slots[bus][slot] = NULL;
++ found_dev = dev;
++ goto out;
++ }
++ }
++
++ out:
++ spin_unlock_irqrestore(&slot_dev->lock, flags);
++
++ if (found_dev)
++ pcistub_put_pci_dev(found_dev);
++}
++
++int pciback_init_devices(struct pciback_device *pdev)
++{
++ int slot, bus;
++ struct slot_dev_data *slot_dev;
++
++ slot_dev = kmalloc(sizeof(*slot_dev), GFP_KERNEL);
++ if (!slot_dev)
++ return -ENOMEM;
++
++ spin_lock_init(&slot_dev->lock);
++
++ for (bus = 0; bus < PCI_BUS_NBR; bus++)
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++)
++ slot_dev->slots[bus][slot] = NULL;
++
++ pdev->pci_dev_data = slot_dev;
++
++ return 0;
++}
++
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++ publish_pci_root_cb publish_cb)
++{
++ /* The Virtual PCI bus has only one root */
++ return publish_cb(pdev, 0, 0);
++}
++
++void pciback_release_devices(struct pciback_device *pdev)
++{
++ int slot, bus;
++ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++ struct pci_dev *dev;
++
++ for (bus = 0; bus < PCI_BUS_NBR; bus++)
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ dev = slot_dev->slots[bus][slot];
++ if (dev != NULL)
++ pcistub_put_pci_dev(dev);
++ }
++
++ kfree(slot_dev);
++ pdev->pci_dev_data = NULL;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/vpci.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,204 @@
++/*
++ * PCI Backend - Provides a Virtual PCI bus (with real devices)
++ * to the frontend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pciback.h"
++
++#define PCI_SLOT_MAX 32
++
++struct vpci_dev_data {
++ /* Access to dev_list must be protected by lock */
++ struct list_head dev_list[PCI_SLOT_MAX];
++ spinlock_t lock;
++};
++
++static inline struct list_head *list_first(struct list_head *head)
++{
++ return head->next;
++}
++
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn)
++{
++ struct pci_dev_entry *entry;
++ struct pci_dev *dev = NULL;
++ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++ unsigned long flags;
++
++ if (domain != 0 || bus != 0)
++ return NULL;
++
++ if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
++ spin_lock_irqsave(&vpci_dev->lock, flags);
++
++ list_for_each_entry(entry,
++ &vpci_dev->dev_list[PCI_SLOT(devfn)],
++ list) {
++ if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
++ dev = entry->dev;
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&vpci_dev->lock, flags);
++ }
++ return dev;
++}
++
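++/* Two devices share a virtual slot iff they are functions of the same
++ * physical domain, bus and slot.
++ */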
++static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
++{
++ if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
++ && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
++ return 1;
++
++ return 0;
++}
++
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++{
++ int err = 0, slot;
++ struct pci_dev_entry *t, *dev_entry;
++ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++ unsigned long flags;
++
++ if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
++ err = -EFAULT;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Can't export bridges on the virtual PCI bus");
++ goto out;
++ }
++
++ dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
++ if (!dev_entry) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error adding entry to virtual PCI bus");
++ goto out;
++ }
++
++ dev_entry->dev = dev;
++
++ spin_lock_irqsave(&vpci_dev->lock, flags);
++
++ /* Keep multi-function devices together on the virtual PCI bus */
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ if (!list_empty(&vpci_dev->dev_list[slot])) {
++ t = list_entry(list_first(&vpci_dev->dev_list[slot]),
++ struct pci_dev_entry, list);
++
++ if (match_slot(dev, t->dev)) {
++ pr_info("pciback: vpci: %s: "
++ "assign to virtual slot %d func %d\n",
++ pci_name(dev), slot,
++ PCI_FUNC(dev->devfn));
++ list_add_tail(&dev_entry->list,
++ &vpci_dev->dev_list[slot]);
++ goto unlock;
++ }
++ }
++ }
++
++ /* Assign to a new slot on the virtual PCI bus */
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ if (list_empty(&vpci_dev->dev_list[slot])) {
++ printk(KERN_INFO
++ "pciback: vpci: %s: assign to virtual slot %d\n",
++ pci_name(dev), slot);
++ list_add_tail(&dev_entry->list,
++ &vpci_dev->dev_list[slot]);
++ goto unlock;
++ }
++ }
++
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "No more space on root virtual PCI bus");
++
++ unlock:
++ spin_unlock_irqrestore(&vpci_dev->lock, flags);
++ out:
++ return err;
++}
++
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++{
++ int slot;
++ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&vpci_dev->lock, flags);
++
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ struct pci_dev_entry *e, *tmp;
++ list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
++ list) {
++ if (e->dev == dev) {
++ list_del(&e->list);
++ found_dev = e->dev;
++ kfree(e);
++ goto out;
++ }
++ }
++ }
++
++ out:
++ spin_unlock_irqrestore(&vpci_dev->lock, flags);
++
++ if (found_dev)
++ pcistub_put_pci_dev(found_dev);
++}
++
++int pciback_init_devices(struct pciback_device *pdev)
++{
++ int slot;
++ struct vpci_dev_data *vpci_dev;
++
++ vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
++ if (!vpci_dev)
++ return -ENOMEM;
++
++ spin_lock_init(&vpci_dev->lock);
++
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
++ }
++
++ pdev->pci_dev_data = vpci_dev;
++
++ return 0;
++}
++
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++ publish_pci_root_cb publish_cb)
++{
++ /* The Virtual PCI bus has only one root */
++ return publish_cb(pdev, 0, 0);
++}
++
++void pciback_release_devices(struct pciback_device *pdev)
++{
++ int slot;
++ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ struct pci_dev_entry *e, *tmp;
++ list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
++ list) {
++ list_del(&e->list);
++ pcistub_put_pci_dev(e->dev);
++ kfree(e);
++ }
++ }
++
++ kfree(vpci_dev);
++ pdev->pci_dev_data = NULL;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pciback/xenbus.c 2007-08-27 14:02:01.000000000 -0400
+@@ -0,0 +1,454 @@
++/*
++ * PCI Backend Xenbus Setup - handles setup with frontend and xend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/vmalloc.h>
++#include <xen/xenbus.h>
++#include <xen/evtchn.h>
++#include "pciback.h"
++
++#define INVALID_EVTCHN_IRQ (-1)
++
++static struct pciback_device *alloc_pdev(struct xenbus_device *xdev)
++{
++ struct pciback_device *pdev;
++
++ pdev = kzalloc(sizeof(struct pciback_device), GFP_KERNEL);
++ if (pdev == NULL)
++ goto out;
++ dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
++
++ pdev->xdev = xdev;
++ xdev->dev.driver_data = pdev;
++
++ spin_lock_init(&pdev->dev_lock);
++
++ pdev->sh_area = NULL;
++ pdev->sh_info = NULL;
++ pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
++ pdev->be_watching = 0;
++
++ INIT_WORK(&pdev->op_work, pciback_do_op, pdev);
++
++ if (pciback_init_devices(pdev)) {
++ kfree(pdev);
++ pdev = NULL;
++ }
++ out:
++ return pdev;
++}
++
++static void free_pdev(struct pciback_device *pdev)
++{
++ if (pdev->be_watching)
++ unregister_xenbus_watch(&pdev->be_watch);
++
++ /* Ensure the guest can't trigger our handler before removing devices */
++ if (pdev->evtchn_irq != INVALID_EVTCHN_IRQ)
++ unbind_from_irqhandler(pdev->evtchn_irq, pdev);
++
++ /* If the driver domain started an op, make sure we complete it or
++ * delete it before releasing the shared memory */
++ cancel_delayed_work(&pdev->op_work);
++ flush_scheduled_work();
++
++ if (pdev->sh_info)
++ xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_area);
++
++ pciback_release_devices(pdev);
++
++ pdev->xdev->dev.driver_data = NULL;
++ pdev->xdev = NULL;
++
++ kfree(pdev);
++}
++
++static int pciback_do_attach(struct pciback_device *pdev, int gnt_ref,
++ int remote_evtchn)
++{
++ int err = 0;
++ struct vm_struct *area;
++
++ dev_dbg(&pdev->xdev->dev,
++ "Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
++ gnt_ref, remote_evtchn);
++
++ area = xenbus_map_ring_valloc(pdev->xdev, gnt_ref);
++ if (IS_ERR(area)) {
++ err = PTR_ERR(area);
++ goto out;
++ }
++ pdev->sh_area = area;
++ pdev->sh_info = area->addr;
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ pdev->xdev->otherend_id, remote_evtchn, pciback_handle_event,
++ SA_SAMPLE_RANDOM, "pciback", pdev);
++ if (err < 0) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error binding event channel to IRQ");
++ goto out;
++ }
++ pdev->evtchn_irq = err;
++ err = 0;
++
++ dev_dbg(&pdev->xdev->dev, "Attached!\n");
++ out:
++ return err;
++}
++
++static int pciback_attach(struct pciback_device *pdev)
++{
++ int err = 0;
++ int gnt_ref, remote_evtchn;
++ char *magic = NULL;
++
++ spin_lock(&pdev->dev_lock);
++
++ /* Make sure we only do this setup once */
++ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++ XenbusStateInitialised)
++ goto out;
++
++ /* Wait for frontend to state that it has published the configuration */
++ if (xenbus_read_driver_state(pdev->xdev->otherend) !=
++ XenbusStateInitialised)
++ goto out;
++
++ dev_dbg(&pdev->xdev->dev, "Reading frontend config\n");
++
++ err = xenbus_gather(XBT_NIL, pdev->xdev->otherend,
++ "pci-op-ref", "%u", &gnt_ref,
++ "event-channel", "%u", &remote_evtchn,
++ "magic", NULL, &magic, NULL);
++ if (err) {
++ /* If configuration didn't get read correctly, wait longer */
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading configuration from frontend");
++ goto out;
++ }
++
++ if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {
++ xenbus_dev_fatal(pdev->xdev, -EFAULT,
++ "version mismatch (%s/%s) with pcifront - "
++ "halting pciback",
++ magic, XEN_PCI_MAGIC);
++ goto out;
++ }
++
++ err = pciback_do_attach(pdev, gnt_ref, remote_evtchn);
++ if (err)
++ goto out;
++
++ dev_dbg(&pdev->xdev->dev, "Connecting...\n");
++
++ err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
++ if (err)
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error switching to connected state!");
++
++ dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
++ out:
++ spin_unlock(&pdev->dev_lock);
++
++ if (magic)
++ kfree(magic);
++
++ return err;
++}
++
++static void pciback_frontend_changed(struct xenbus_device *xdev,
++ enum xenbus_state fe_state)
++{
++ struct pciback_device *pdev = xdev->dev.driver_data;
++
++ dev_dbg(&xdev->dev, "fe state changed %d\n", fe_state);
++
++ switch (fe_state) {
++ case XenbusStateInitialised:
++ pciback_attach(pdev);
++ break;
++
++ case XenbusStateClosing:
++ xenbus_switch_state(xdev, XenbusStateClosing);
++ break;
++
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ dev_dbg(&xdev->dev, "frontend is gone! unregister device\n");
++ device_unregister(&xdev->dev);
++ break;
++
++ default:
++ break;
++ }
++}
++
++static int pciback_publish_pci_root(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus)
++{
++ unsigned int d, b;
++ int i, root_num, len, err;
++ char str[64];
++
++ dev_dbg(&pdev->xdev->dev, "Publishing pci roots\n");
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++ "root_num", "%d", &root_num);
++ if (err == 0 || err == -ENOENT)
++ root_num = 0;
++ else if (err < 0)
++ goto out;
++
++ /* Verify that we haven't already published this pci root */
++ for (i = 0; i < root_num; i++) {
++ len = snprintf(str, sizeof(str), "root-%d", i);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++ str, "%x:%x", &d, &b);
++ if (err < 0)
++ goto out;
++ if (err != 2) {
++ err = -EINVAL;
++ goto out;
++ }
++
++ if (d == domain && b == bus) {
++ err = 0;
++ goto out;
++ }
++ }
++
++ len = snprintf(str, sizeof(str), "root-%d", root_num);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ dev_dbg(&pdev->xdev->dev, "writing root %d at %04x:%02x\n",
++ root_num, domain, bus);
++
++ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
++ "%04x:%02x", domain, bus);
++ if (err)
++ goto out;
++
++ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
++ "root_num", "%d", (root_num + 1));
++
++ out:
++ return err;
++}
++
++static int pciback_export_device(struct pciback_device *pdev,
++ int domain, int bus, int slot, int func)
++{
++ struct pci_dev *dev;
++ int err = 0;
++
++ dev_dbg(&pdev->xdev->dev, "exporting dom %x bus %x slot %x func %x\n",
++ domain, bus, slot, func);
++
++ dev = pcistub_get_pci_dev_by_slot(pdev, domain, bus, slot, func);
++ if (!dev) {
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Couldn't locate PCI device "
++ "(%04x:%02x:%02x.%01x)! "
++ "perhaps already in-use?",
++ domain, bus, slot, func);
++ goto out;
++ }
++
++ err = pciback_add_pci_dev(pdev, dev);
++ if (err)
++ goto out;
++
++ /* TODO: It'd be nice to export a bridge and have all of its children
++ * get exported with it. This may be best done in xend (which will
++ * have to calculate resource usage anyway) but we probably want to
++ * put something in here to ensure that, if a bridge is given to a
++ * driver domain, all devices under that bridge are not given
++ * to other driver domains (since whoever controls the bridge can disable
++ * it and stop the other devices from working).
++ */
++ out:
++ return err;
++}
++
++static int pciback_setup_backend(struct pciback_device *pdev)
++{
++ /* Get configuration from xend (if available now) */
++ int domain, bus, slot, func;
++ int err = 0;
++ int i, num_devs;
++ char dev_str[64];
++
++ spin_lock(&pdev->dev_lock);
++
++ /* It's possible we could get the call to setup twice, so make sure
++ * we're not already connected.
++ */
++ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++ XenbusStateInitWait)
++ goto out;
++
++ dev_dbg(&pdev->xdev->dev, "getting be setup\n");
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
++ &num_devs);
++ if (err != 1) {
++ if (err >= 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading number of devices");
++ goto out;
++ }
++
++ for (i = 0; i < num_devs; i++) {
++ int l = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
++ if (unlikely(l >= (sizeof(dev_str) - 1))) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "String overflow while reading "
++ "configuration");
++ goto out;
++ }
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, dev_str,
++ "%x:%x:%x.%x", &domain, &bus, &slot, &func);
++ if (err < 0) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading device configuration");
++ goto out;
++ }
++ if (err != 4) {
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error parsing pci device "
++ "configuration");
++ goto out;
++ }
++
++ err = pciback_export_device(pdev, domain, bus, slot, func);
++ if (err)
++ goto out;
++ }
++
++ err = pciback_publish_pci_roots(pdev, pciback_publish_pci_root);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error while publishing PCI root buses "
++ "for frontend");
++ goto out;
++ }
++
++ err = xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
++ if (err)
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error switching to initialised state!");
++
++ out:
++ spin_unlock(&pdev->dev_lock);
++
++ if (!err)
++ /* see if pcifront is already configured (if not, we'll wait) */
++ pciback_attach(pdev);
++
++ return err;
++}
++
++static void pciback_be_watch(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ struct pciback_device *pdev =
++ container_of(watch, struct pciback_device, be_watch);
++
++ switch (xenbus_read_driver_state(pdev->xdev->nodename)) {
++ case XenbusStateInitWait:
++ pciback_setup_backend(pdev);
++ break;
++
++ default:
++ break;
++ }
++}
++
++static int pciback_xenbus_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err = 0;
++ struct pciback_device *pdev = alloc_pdev(dev);
++
++ if (pdev == NULL) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(dev, err,
++ "Error allocating pciback_device struct");
++ goto out;
++ }
++
++ /* wait for xend to configure us */
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err)
++ goto out;
++
++ /* watch the backend node for backend configuration information */
++ err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch,
++ pciback_be_watch);
++ if (err)
++ goto out;
++ pdev->be_watching = 1;
++
++ /* We need to force a call to our callback here in case
++ * xend already configured us!
++ */
++ pciback_be_watch(&pdev->be_watch, NULL, 0);
++
++ out:
++ return err;
++}
++
++static int pciback_xenbus_remove(struct xenbus_device *dev)
++{
++ struct pciback_device *pdev = dev->dev.driver_data;
++
++ if (pdev != NULL)
++ free_pdev(pdev);
++
++ return 0;
++}
++
++static struct xenbus_device_id xenpci_ids[] = {
++ {"pci"},
++ {{0}},
++};
++
++static struct xenbus_driver xenbus_pciback_driver = {
++ .name = "pciback",
++ .owner = THIS_MODULE,
++ .ids = xenpci_ids,
++ .probe = pciback_xenbus_probe,
++ .remove = pciback_xenbus_remove,
++ .otherend_changed = pciback_frontend_changed,
++};
++
++int __init pciback_xenbus_register(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ return xenbus_register_backend(&xenbus_pciback_driver);
++}
++
++void __exit pciback_xenbus_unregister(void)
++{
++ xenbus_unregister_driver(&xenbus_pciback_driver);
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pcifront/Makefile 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,7 @@
++obj-y += pcifront.o
++
++pcifront-y := pci_op.o xenbus.o pci.o
++
++ifeq ($(CONFIG_XEN_PCIDEV_FE_DEBUG),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pcifront/pci.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,46 @@
++/*
++ * PCI Frontend Operations - ensure only one PCI frontend runs at a time
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pcifront.h"
++
++DEFINE_SPINLOCK(pcifront_dev_lock);
++static struct pcifront_device *pcifront_dev = NULL;
++
++int pcifront_connect(struct pcifront_device *pdev)
++{
++ int err = 0;
++
++ spin_lock(&pcifront_dev_lock);
++
++ if (!pcifront_dev) {
++ dev_info(&pdev->xdev->dev, "Installing PCI frontend\n");
++ pcifront_dev = pdev;
++ }
++ else {
++ dev_err(&pdev->xdev->dev, "PCI frontend already installed!\n");
++ err = -EEXIST;
++ }
++
++ spin_unlock(&pcifront_dev_lock);
++
++ return err;
++}
++
++void pcifront_disconnect(struct pcifront_device *pdev)
++{
++ spin_lock(&pcifront_dev_lock);
++
++ if (pdev == pcifront_dev) {
++ dev_info(&pdev->xdev->dev,
++ "Disconnecting PCI Frontend Buses\n");
++ pcifront_dev = NULL;
++ }
++
++ spin_unlock(&pcifront_dev_lock);
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pcifront/pci_op.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,268 @@
++/*
++ * PCI Frontend Operations - Communicates with the backend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include <linux/time.h>
++#include <xen/evtchn.h>
++#include "pcifront.h"
++
++static int verbose_request = 0;
++module_param(verbose_request, int, 0644);
++
++static int errno_to_pcibios_err(int errno)
++{
++ switch (errno) {
++ case XEN_PCI_ERR_success:
++ return PCIBIOS_SUCCESSFUL;
++
++ case XEN_PCI_ERR_dev_not_found:
++ return PCIBIOS_DEVICE_NOT_FOUND;
++
++ case XEN_PCI_ERR_invalid_offset:
++ case XEN_PCI_ERR_op_failed:
++ return PCIBIOS_BAD_REGISTER_NUMBER;
++
++ case XEN_PCI_ERR_not_implemented:
++ return PCIBIOS_FUNC_NOT_SUPPORTED;
++
++ case XEN_PCI_ERR_access_denied:
++ return PCIBIOS_SET_FAILED;
++ }
++ return errno;
++}
++
++static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
++{
++ int err = 0;
++ struct xen_pci_op *active_op = &pdev->sh_info->op;
++ unsigned long irq_flags;
++ evtchn_port_t port = pdev->evtchn;
++ s64 ns, ns_timeout;
++ struct timeval tv;
++
++ spin_lock_irqsave(&pdev->sh_info_lock, irq_flags);
++
++ memcpy(active_op, op, sizeof(struct xen_pci_op));
++
++ /* Go */
++ wmb();
++ set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
++ notify_remote_via_evtchn(port);
++
++ /*
++ * We set a poll timeout of 3 seconds but give up on return after
++ * 2 seconds. It is better to time out too late rather than too early
++ * (in the latter case we end up continually re-executing poll() with a
++ * timeout in the past). 1s difference gives plenty of slack for error.
++ */
++ do_gettimeofday(&tv);
++ ns_timeout = timeval_to_ns(&tv) + 2 * (s64)NSEC_PER_SEC;
++
++ clear_evtchn(port);
++
++ while (test_bit(_XEN_PCIF_active,
++ (unsigned long *)&pdev->sh_info->flags)) {
++ if (HYPERVISOR_poll(&port, 1, jiffies + 3*HZ))
++ BUG();
++ clear_evtchn(port);
++ do_gettimeofday(&tv);
++ ns = timeval_to_ns(&tv);
++ if (ns > ns_timeout) {
++ dev_err(&pdev->xdev->dev,
++ "pciback not responding!!!\n");
++ clear_bit(_XEN_PCIF_active,
++ (unsigned long *)&pdev->sh_info->flags);
++ err = XEN_PCI_ERR_dev_not_found;
++ goto out;
++ }
++ }
++
++ memcpy(op, active_op, sizeof(struct xen_pci_op));
++
++ err = op->err;
++ out:
++ spin_unlock_irqrestore(&pdev->sh_info_lock, irq_flags);
++ return err;
++}
++
++/* Access to this function is spinlocked in drivers/pci/access.c */
++static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 * val)
++{
++ int err = 0;
++ struct xen_pci_op op = {
++ .cmd = XEN_PCI_OP_conf_read,
++ .domain = pci_domain_nr(bus),
++ .bus = bus->number,
++ .devfn = devfn,
++ .offset = where,
++ .size = size,
++ };
++ struct pcifront_sd *sd = bus->sysdata;
++ struct pcifront_device *pdev = pcifront_get_pdev(sd);
++
++ if (verbose_request)
++ dev_info(&pdev->xdev->dev,
++ "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
++ pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
++ PCI_FUNC(devfn), where, size);
++
++ err = do_pci_op(pdev, &op);
++
++ if (likely(!err)) {
++ if (verbose_request)
++ dev_info(&pdev->xdev->dev, "read got back value %x\n",
++ op.value);
++
++ *val = op.value;
++ } else if (err == -ENODEV) {
++ /* No device here, pretend that it just returned 0 */
++ err = 0;
++ *val = 0;
++ }
++
++ return errno_to_pcibios_err(err);
++}
++
++/* Access to this function is spinlocked in drivers/pci/access.c */
++static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 val)
++{
++ struct xen_pci_op op = {
++ .cmd = XEN_PCI_OP_conf_write,
++ .domain = pci_domain_nr(bus),
++ .bus = bus->number,
++ .devfn = devfn,
++ .offset = where,
++ .size = size,
++ .value = val,
++ };
++ struct pcifront_sd *sd = bus->sysdata;
++ struct pcifront_device *pdev = pcifront_get_pdev(sd);
++
++ if (verbose_request)
++ dev_info(&pdev->xdev->dev,
++ "write dev=%04x:%02x:%02x.%01x - "
++ "offset %x size %d val %x\n",
++ pci_domain_nr(bus), bus->number,
++ PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
++
++ return errno_to_pcibios_err(do_pci_op(pdev, &op));
++}
++
++struct pci_ops pcifront_bus_ops = {
++ .read = pcifront_bus_read,
++ .write = pcifront_bus_write,
++};
++
++/* Claim resources for the PCI frontend as-is, backend won't allow changes */
++static void pcifront_claim_resource(struct pci_dev *dev, void *data)
++{
++ struct pcifront_device *pdev = data;
++ int i;
++ struct resource *r;
++
++ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
++ r = &dev->resource[i];
++
++ if (!r->parent && r->start && r->flags) {
++ dev_dbg(&pdev->xdev->dev, "claiming resource %s/%d\n",
++ pci_name(dev), i);
++ pci_claim_resource(dev, i);
++ }
++ }
++}
++
++int pcifront_scan_root(struct pcifront_device *pdev,
++ unsigned int domain, unsigned int bus)
++{
++ struct pci_bus *b;
++ struct pcifront_sd *sd = NULL;
++ struct pci_bus_entry *bus_entry = NULL;
++ int err = 0;
++
++#ifndef CONFIG_PCI_DOMAINS
++ if (domain != 0) {
++ dev_err(&pdev->xdev->dev,
++ "PCI Root in non-zero PCI Domain! domain=%d\n", domain);
++ dev_err(&pdev->xdev->dev,
++ "Please compile with CONFIG_PCI_DOMAINS\n");
++ err = -EINVAL;
++ goto err_out;
++ }
++#endif
++
++ dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
++ domain, bus);
++
++ bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
++ sd = kmalloc(sizeof(*sd), GFP_KERNEL);
++ if (!bus_entry || !sd) {
++ err = -ENOMEM;
++ goto err_out;
++ }
++ pcifront_init_sd(sd, domain, pdev);
++
++ b = pci_scan_bus_parented(&pdev->xdev->dev, bus,
++ &pcifront_bus_ops, sd);
++ if (!b) {
++ dev_err(&pdev->xdev->dev,
++ "Error creating PCI Frontend Bus!\n");
++ err = -ENOMEM;
++ goto err_out;
++ }
++ bus_entry->bus = b;
++
++ list_add(&bus_entry->list, &pdev->root_buses);
++
++ /* Claim resources before going "live" with our devices */
++ pci_walk_bus(b, pcifront_claim_resource, pdev);
++
++ pci_bus_add_devices(b);
++
++ return 0;
++
++ err_out:
++ kfree(bus_entry);
++ kfree(sd);
++
++ return err;
++}
++
++static void free_root_bus_devs(struct pci_bus *bus)
++{
++ struct pci_dev *dev;
++
++ while (!list_empty(&bus->devices)) {
++ dev = container_of(bus->devices.next, struct pci_dev,
++ bus_list);
++ dev_dbg(&dev->dev, "removing device\n");
++ pci_remove_bus_device(dev);
++ }
++}
++
++void pcifront_free_roots(struct pcifront_device *pdev)
++{
++ struct pci_bus_entry *bus_entry, *t;
++
++ dev_dbg(&pdev->xdev->dev, "cleaning up root buses\n");
++
++ list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) {
++ list_del(&bus_entry->list);
++
++ free_root_bus_devs(bus_entry->bus);
++
++ kfree(bus_entry->bus->sysdata);
++
++ device_unregister(bus_entry->bus->bridge);
++ pci_remove_bus(bus_entry->bus);
++
++ kfree(bus_entry);
++ }
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pcifront/pcifront.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,40 @@
++/*
++ * PCI Frontend - Common data structures & function declarations
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#ifndef __XEN_PCIFRONT_H__
++#define __XEN_PCIFRONT_H__
++
++#include <linux/spinlock.h>
++#include <linux/pci.h>
++#include <xen/xenbus.h>
++#include <xen/interface/io/pciif.h>
++#include <xen/pcifront.h>
++
++struct pci_bus_entry {
++ struct list_head list;
++ struct pci_bus *bus;
++};
++
++struct pcifront_device {
++ struct xenbus_device *xdev;
++ struct list_head root_buses;
++ spinlock_t dev_lock;
++
++ int evtchn;
++ int gnt_ref;
++
++ /* Lock this when doing any operations in sh_info */
++ spinlock_t sh_info_lock;
++ struct xen_pci_sharedinfo *sh_info;
++};
++
++int pcifront_connect(struct pcifront_device *pdev);
++void pcifront_disconnect(struct pcifront_device *pdev);
++
++int pcifront_scan_root(struct pcifront_device *pdev,
++ unsigned int domain, unsigned int bus);
++void pcifront_free_roots(struct pcifront_device *pdev);
++
++#endif /* __XEN_PCIFRONT_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/pcifront/xenbus.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,295 @@
++/*
++ * PCI Frontend Xenbus Setup - handles setup with backend (imports page/evtchn)
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/mm.h>
++#include <xen/xenbus.h>
++#include <xen/gnttab.h>
++#include "pcifront.h"
++
++#define INVALID_GRANT_REF (0)
++#define INVALID_EVTCHN (-1)
++
++static struct pcifront_device *alloc_pdev(struct xenbus_device *xdev)
++{
++ struct pcifront_device *pdev;
++
++ pdev = kmalloc(sizeof(struct pcifront_device), GFP_KERNEL);
++ if (pdev == NULL)
++ goto out;
++
++ pdev->sh_info =
++ (struct xen_pci_sharedinfo *)__get_free_page(GFP_KERNEL);
++ if (pdev->sh_info == NULL) {
++ kfree(pdev);
++ pdev = NULL;
++ goto out;
++ }
++ pdev->sh_info->flags = 0;
++
++ xdev->dev.driver_data = pdev;
++ pdev->xdev = xdev;
++
++ INIT_LIST_HEAD(&pdev->root_buses);
++
++ spin_lock_init(&pdev->dev_lock);
++ spin_lock_init(&pdev->sh_info_lock);
++
++ pdev->evtchn = INVALID_EVTCHN;
++ pdev->gnt_ref = INVALID_GRANT_REF;
++
++ dev_dbg(&xdev->dev, "Allocated pdev @ 0x%p pdev->sh_info @ 0x%p\n",
++ pdev, pdev->sh_info);
++ out:
++ return pdev;
++}
++
++static void free_pdev(struct pcifront_device *pdev)
++{
++ dev_dbg(&pdev->xdev->dev, "freeing pdev @ 0x%p\n", pdev);
++
++ pcifront_free_roots(pdev);
++
++ if (pdev->evtchn != INVALID_EVTCHN)
++ xenbus_free_evtchn(pdev->xdev, pdev->evtchn);
++
++ if (pdev->gnt_ref != INVALID_GRANT_REF)
++ gnttab_end_foreign_access(pdev->gnt_ref, 0,
++ (unsigned long)pdev->sh_info);
++
++ pdev->xdev->dev.driver_data = NULL;
++
++ kfree(pdev);
++}
++
++static int pcifront_publish_info(struct pcifront_device *pdev)
++{
++ int err = 0;
++ struct xenbus_transaction trans;
++
++ err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info));
++ if (err < 0)
++ goto out;
++
++ pdev->gnt_ref = err;
++
++ err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn);
++ if (err)
++ goto out;
++
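++	/* xenbus transactions can fail with -EAGAIN; they are retried from here. */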
++ do_publish:
++ err = xenbus_transaction_start(&trans);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error writing configuration for backend "
++ "(start transaction)");
++ goto out;
++ }
++
++ err = xenbus_printf(trans, pdev->xdev->nodename,
++ "pci-op-ref", "%u", pdev->gnt_ref);
++ if (!err)
++ err = xenbus_printf(trans, pdev->xdev->nodename,
++ "event-channel", "%u", pdev->evtchn);
++ if (!err)
++ err = xenbus_printf(trans, pdev->xdev->nodename,
++ "magic", XEN_PCI_MAGIC);
++
++ if (err) {
++ xenbus_transaction_end(trans, 1);
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error writing configuration for backend");
++ goto out;
++ } else {
++ err = xenbus_transaction_end(trans, 0);
++ if (err == -EAGAIN)
++ goto do_publish;
++ else if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error completing transaction "
++ "for backend");
++ goto out;
++ }
++ }
++
++ xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
++
++ dev_dbg(&pdev->xdev->dev, "publishing successful!\n");
++
++ out:
++ return err;
++}
++
++static int pcifront_try_connect(struct pcifront_device *pdev)
++{
++ int err = -EFAULT;
++ int i, num_roots, len;
++ char str[64];
++ unsigned int domain, bus;
++
++ spin_lock(&pdev->dev_lock);
++
++ /* Only connect once */
++ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++ XenbusStateInitialised)
++ goto out;
++
++ err = pcifront_connect(pdev);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error connecting PCI Frontend");
++ goto out;
++ }
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
++ "root_num", "%d", &num_roots);
++ if (err == -ENOENT) {
++ xenbus_dev_error(pdev->xdev, err,
++ "No PCI Roots found, trying 0000:00");
++ err = pcifront_scan_root(pdev, 0, 0);
++ num_roots = 0;
++ } else if (err != 1) {
++ if (err == 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading number of PCI roots");
++ goto out;
++ }
++
++ for (i = 0; i < num_roots; i++) {
++ len = snprintf(str, sizeof(str), "root-%d", i);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
++ "%x:%x", &domain, &bus);
++ if (err != 2) {
++ if (err >= 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading PCI root %d", i);
++ goto out;
++ }
++
++ err = pcifront_scan_root(pdev, domain, bus);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error scanning PCI root %04x:%02x",
++ domain, bus);
++ goto out;
++ }
++ }
++
++ err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
++ if (err)
++ goto out;
++
++ out:
++ spin_unlock(&pdev->dev_lock);
++ return err;
++}
++
++static int pcifront_try_disconnect(struct pcifront_device *pdev)
++{
++ int err = 0;
++ enum xenbus_state prev_state;
++
++ spin_lock(&pdev->dev_lock);
++
++ prev_state = xenbus_read_driver_state(pdev->xdev->nodename);
++
++ if (prev_state < XenbusStateClosing)
++ err = xenbus_switch_state(pdev->xdev, XenbusStateClosing);
++
++ if (!err && prev_state == XenbusStateConnected)
++ pcifront_disconnect(pdev);
++
++ spin_unlock(&pdev->dev_lock);
++
++ return err;
++}
++
++static void pcifront_backend_changed(struct xenbus_device *xdev,
++ enum xenbus_state be_state)
++{
++ struct pcifront_device *pdev = xdev->dev.driver_data;
++
++ switch (be_state) {
++ case XenbusStateClosing:
++ dev_warn(&xdev->dev, "backend going away!\n");
++ pcifront_try_disconnect(pdev);
++ break;
++
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ dev_warn(&xdev->dev, "backend went away!\n");
++ pcifront_try_disconnect(pdev);
++
++ device_unregister(&pdev->xdev->dev);
++ break;
++
++ case XenbusStateConnected:
++ pcifront_try_connect(pdev);
++ break;
++
++ default:
++ break;
++ }
++}
++
++static int pcifront_xenbus_probe(struct xenbus_device *xdev,
++ const struct xenbus_device_id *id)
++{
++ int err = 0;
++ struct pcifront_device *pdev = alloc_pdev(xdev);
++
++ if (pdev == NULL) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(xdev, err,
++ "Error allocating pcifront_device struct");
++ goto out;
++ }
++
++ err = pcifront_publish_info(pdev);
++
++ out:
++ return err;
++}
++
++static int pcifront_xenbus_remove(struct xenbus_device *xdev)
++{
++ if (xdev->dev.driver_data)
++ free_pdev(xdev->dev.driver_data);
++
++ return 0;
++}
++
++static struct xenbus_device_id xenpci_ids[] = {
++ {"pci"},
++ {{0}},
++};
++
++static struct xenbus_driver xenbus_pcifront_driver = {
++ .name = "pcifront",
++ .owner = THIS_MODULE,
++ .ids = xenpci_ids,
++ .probe = pcifront_xenbus_probe,
++ .remove = pcifront_xenbus_remove,
++ .otherend_changed = pcifront_backend_changed,
++};
++
++static int __init pcifront_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ return xenbus_register_frontend(&xenbus_pcifront_driver);
++}
++
++/* Initialize after the Xen PCI Frontend Stub is initialized */
++subsys_initcall(pcifront_init);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/privcmd/Makefile 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,2 @@
++
++obj-$(CONFIG_XEN_PRIVCMD) := privcmd.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/privcmd/privcmd.c 2007-08-27 14:02:05.000000000 -0400
+@@ -0,0 +1,284 @@
++/******************************************************************************
++ * privcmd.c
++ *
++ * Interface to privileged domain-0 commands.
++ *
++ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
++ */
++
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/swap.h>
++#include <linux/smp_lock.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/kthread.h>
++#include <asm/hypervisor.h>
++
++#include <asm/pgalloc.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++#include <asm/tlb.h>
++#include <asm/hypervisor.h>
++#include <xen/public/privcmd.h>
++#include <xen/interface/xen.h>
++#include <xen/xen_proc.h>
++
++static struct proc_dir_entry *privcmd_intf;
++static struct proc_dir_entry *capabilities_intf;
++
++#ifndef HAVE_ARCH_PRIVCMD_MMAP
++static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
++#endif
++
++static int privcmd_ioctl(struct inode *inode, struct file *file,
++ unsigned int cmd, unsigned long data)
++{
++ int ret = -ENOSYS;
++ void __user *udata = (void __user *) data;
++
++ switch (cmd) {
++ case IOCTL_PRIVCMD_HYPERCALL: {
++ privcmd_hypercall_t hypercall;
++
++ if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
++ return -EFAULT;
++
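++		/*
++		 * Dispatch through the hypercall page: each hypercall stub is
++		 * 32 bytes, so the op number is shifted left by 5 and used as
++		 * an offset into the page after the bounds check against
++		 * PAGE_SIZE >> 5.
++		 */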
++#if defined(__i386__)
++ if (hypercall.op >= (PAGE_SIZE >> 5))
++ break;
++ __asm__ __volatile__ (
++ "pushl %%ebx; pushl %%ecx; pushl %%edx; "
++ "pushl %%esi; pushl %%edi; "
++ "movl 8(%%eax),%%ebx ;"
++ "movl 16(%%eax),%%ecx ;"
++ "movl 24(%%eax),%%edx ;"
++ "movl 32(%%eax),%%esi ;"
++ "movl 40(%%eax),%%edi ;"
++ "movl (%%eax),%%eax ;"
++ "shll $5,%%eax ;"
++ "addl $hypercall_page,%%eax ;"
++ "call *%%eax ;"
++ "popl %%edi; popl %%esi; popl %%edx; "
++ "popl %%ecx; popl %%ebx"
++ : "=a" (ret) : "0" (&hypercall) : "memory" );
++#elif defined (__x86_64__)
++ if (hypercall.op < (PAGE_SIZE >> 5)) {
++ long ign1, ign2, ign3;
++ __asm__ __volatile__ (
++ "movq %8,%%r10; movq %9,%%r8;"
++ "shll $5,%%eax ;"
++ "addq $hypercall_page,%%rax ;"
++ "call *%%rax"
++ : "=a" (ret), "=D" (ign1),
++ "=S" (ign2), "=d" (ign3)
++ : "0" ((unsigned int)hypercall.op),
++ "1" (hypercall.arg[0]),
++ "2" (hypercall.arg[1]),
++ "3" (hypercall.arg[2]),
++ "g" (hypercall.arg[3]),
++ "g" (hypercall.arg[4])
++ : "r8", "r10", "memory" );
++ }
++#elif defined (__ia64__)
++ ret = privcmd_hypercall(&hypercall);
++#endif
++ }
++ break;
++
++ case IOCTL_PRIVCMD_MMAP: {
++ privcmd_mmap_t mmapcmd;
++ privcmd_mmap_entry_t msg;
++ privcmd_mmap_entry_t __user *p;
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++ unsigned long va;
++ int i, rc;
++
++ if (!is_initial_xendomain())
++ return -EPERM;
++
++ if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
++ return -EFAULT;
++
++ p = mmapcmd.entry;
++ if (copy_from_user(&msg, p, sizeof(msg)))
++ return -EFAULT;
++
++ down_read(&mm->mmap_sem);
++
++ vma = find_vma(mm, msg.va);
++ rc = -EINVAL;
++ if (!vma || (msg.va != vma->vm_start) ||
++ !privcmd_enforce_singleshot_mapping(vma))
++ goto mmap_out;
++
++ va = vma->vm_start;
++
++ for (i = 0; i < mmapcmd.num; i++) {
++ rc = -EFAULT;
++ if (copy_from_user(&msg, p, sizeof(msg)))
++ goto mmap_out;
++
++ /* Do not allow range to wrap the address space. */
++ rc = -EINVAL;
++ if ((msg.npages > (LONG_MAX >> PAGE_SHIFT)) ||
++ ((unsigned long)(msg.npages << PAGE_SHIFT) >= -va))
++ goto mmap_out;
++
++ /* Range chunks must be contiguous in va space. */
++ if ((msg.va != va) ||
++ ((msg.va+(msg.npages<<PAGE_SHIFT)) > vma->vm_end))
++ goto mmap_out;
++
++ if ((rc = direct_remap_pfn_range(
++ vma,
++ msg.va & PAGE_MASK,
++ msg.mfn,
++ msg.npages << PAGE_SHIFT,
++ vma->vm_page_prot,
++ mmapcmd.dom)) < 0)
++ goto mmap_out;
++
++ p++;
++ va += msg.npages << PAGE_SHIFT;
++ }
++
++ rc = 0;
++
++ mmap_out:
++ up_read(&mm->mmap_sem);
++ ret = rc;
++ }
++ break;
++
++ case IOCTL_PRIVCMD_MMAPBATCH: {
++ privcmd_mmapbatch_t m;
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++ xen_pfn_t __user *p;
++ unsigned long addr, mfn, nr_pages;
++ int i;
++
++ if (!is_initial_xendomain())
++ return -EPERM;
++
++ if (copy_from_user(&m, udata, sizeof(m)))
++ return -EFAULT;
++
++ nr_pages = m.num;
++ if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
++ return -EINVAL;
++
++ down_read(&mm->mmap_sem);
++
++ vma = find_vma(mm, m.addr);
++ if (!vma ||
++ (m.addr != vma->vm_start) ||
++ ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
++ !privcmd_enforce_singleshot_mapping(vma)) {
++ up_read(&mm->mmap_sem);
++ return -EINVAL;
++ }
++
++ p = m.arr;
++ addr = m.addr;
++ for (i = 0; i < nr_pages; i++, addr += PAGE_SIZE, p++) {
++ if (get_user(mfn, p)) {
++ up_read(&mm->mmap_sem);
++ return -EFAULT;
++ }
++
++ ret = direct_remap_pfn_range(vma, addr & PAGE_MASK,
++ mfn, PAGE_SIZE,
++ vma->vm_page_prot, m.dom);
++ if (ret < 0)
++ put_user(0xF0000000 | mfn, p);
++ }
++
++ up_read(&mm->mmap_sem);
++ ret = 0;
++ }
++ break;
++
++ default:
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++#ifndef HAVE_ARCH_PRIVCMD_MMAP
++static struct page *privcmd_nopage(struct vm_area_struct *vma,
++ unsigned long address,
++ int *type)
++{
++ return NOPAGE_SIGBUS;
++}
++
++static struct vm_operations_struct privcmd_vm_ops = {
++ .nopage = privcmd_nopage
++};
++
++static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
++{
++ /* Unsupported for auto-translate guests. */
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return -ENOSYS;
++
++ /* DONTCOPY is essential for Xen as copy_page_range is broken. */
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
++ vma->vm_ops = &privcmd_vm_ops;
++ vma->vm_private_data = NULL;
++
++ return 0;
++}
++
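++/*
++ * Atomically marks the VMA as mapped; only the first caller sees NULL and
++ * succeeds, so a privcmd VMA can be populated at most once.
++ */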
++static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
++{
++ return (xchg(&vma->vm_private_data, (void *)1) == NULL);
++}
++#endif
++
++static const struct file_operations privcmd_file_ops = {
++ .ioctl = privcmd_ioctl,
++ .mmap = privcmd_mmap,
++};
++
++static int capabilities_read(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int len = 0;
++ *page = 0;
++
++ if (is_initial_xendomain())
++ len = sprintf( page, "control_d\n" );
++
++ *eof = 1;
++ return len;
++}
++
++static int __init privcmd_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ privcmd_intf = create_xen_proc_entry("privcmd", 0400);
++ if (privcmd_intf != NULL)
++ privcmd_intf->proc_fops = &privcmd_file_ops;
++
++ capabilities_intf = create_xen_proc_entry("capabilities", 0400 );
++ if (capabilities_intf != NULL)
++ capabilities_intf->read_proc = capabilities_read;
++
++ return 0;
++}
++
++__initcall(privcmd_init);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/tpmback/Makefile 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,4 @@
++
++obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmbk.o
++
++tpmbk-y += tpmback.o interface.o xenbus.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/tpmback/common.h 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,85 @@
++/******************************************************************************
++ * drivers/xen/tpmback/common.h
++ */
++
++#ifndef __TPM__BACKEND__COMMON_H__
++#define __TPM__BACKEND__COMMON_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <xen/evtchn.h>
++#include <xen/driver_util.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/tpmif.h>
++#include <asm/io.h>
++#include <asm/pgalloc.h>
++
++#define DPRINTK(_f, _a...) \
++ pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__ , __LINE__ , ## _a )
++
++struct backend_info;
++
++typedef struct tpmif_st {
++ struct list_head tpmif_list;
++ /* Unique identifier for this interface. */
++ domid_t domid;
++ unsigned int handle;
++
++ /* Physical parameters of the comms window. */
++ unsigned int irq;
++
++ /* The shared rings and indexes. */
++ tpmif_tx_interface_t *tx;
++ struct vm_struct *tx_area;
++
++ /* Miscellaneous private stuff. */
++ enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
++ int active;
++
++ struct tpmif_st *hash_next;
++ struct list_head list; /* scheduling list */
++ atomic_t refcnt;
++
++ struct backend_info *bi;
++
++ grant_handle_t shmem_handle;
++ grant_ref_t shmem_ref;
++ struct page **mmap_pages;
++
++ char devname[20];
++} tpmif_t;
++
++void tpmif_disconnect_complete(tpmif_t * tpmif);
++tpmif_t *tpmif_find(domid_t domid, struct backend_info *bi);
++void tpmif_interface_init(void);
++void tpmif_interface_exit(void);
++void tpmif_schedule_work(tpmif_t * tpmif);
++void tpmif_deschedule_work(tpmif_t * tpmif);
++void tpmif_xenbus_init(void);
++void tpmif_xenbus_exit(void);
++int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn);
++irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++
++long int tpmback_get_instance(struct backend_info *bi);
++
++int vtpm_release_packets(tpmif_t * tpmif, int send_msgs);
++
++
++#define tpmif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define tpmif_put(_b) \
++ do { \
++ if (atomic_dec_and_test(&(_b)->refcnt)) \
++ tpmif_disconnect_complete(_b); \
++ } while (0)
++
++extern int num_frontends;
++
++static inline unsigned long idx_to_kaddr(tpmif_t *t, unsigned int idx)
++{
++ return (unsigned long)pfn_to_kaddr(page_to_pfn(t->mmap_pages[idx]));
++}
++
++#endif /* __TPMIF__BACKEND__COMMON_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/tpmback/interface.c 2007-08-27 14:02:01.000000000 -0400
+@@ -0,0 +1,167 @@
++ /*****************************************************************************
++ * drivers/xen/tpmback/interface.c
++ *
++ * Virtual TPM interface management.
++ *
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb@us.ibm.com
++ *
++ * This code has been derived from drivers/xen/netback/interface.c
++ * Copyright (c) 2004, Keir Fraser
++ */
++
++#include "common.h"
++#include <xen/balloon.h>
++#include <xen/gnttab.h>
++
++static kmem_cache_t *tpmif_cachep;
++int num_frontends = 0;
++
++LIST_HEAD(tpmif_list);
++
++static tpmif_t *alloc_tpmif(domid_t domid, struct backend_info *bi)
++{
++ tpmif_t *tpmif;
++
++ tpmif = kmem_cache_alloc(tpmif_cachep, GFP_KERNEL);
++ if (tpmif == NULL)
++ goto out_of_memory;
++
++ memset(tpmif, 0, sizeof (*tpmif));
++ tpmif->domid = domid;
++ tpmif->status = DISCONNECTED;
++ tpmif->bi = bi;
++ snprintf(tpmif->devname, sizeof(tpmif->devname), "tpmif%d", domid);
++ atomic_set(&tpmif->refcnt, 1);
++
++ tpmif->mmap_pages = alloc_empty_pages_and_pagevec(TPMIF_TX_RING_SIZE);
++ if (tpmif->mmap_pages == NULL)
++ goto out_of_memory;
++
++ list_add(&tpmif->tpmif_list, &tpmif_list);
++ num_frontends++;
++
++ return tpmif;
++
++ out_of_memory:
++ if (tpmif != NULL)
++ kmem_cache_free(tpmif_cachep, tpmif);
++ printk("%s: out of memory\n", __FUNCTION__);
++ return ERR_PTR(-ENOMEM);
++}
++
++static void free_tpmif(tpmif_t * tpmif)
++{
++ num_frontends--;
++ list_del(&tpmif->tpmif_list);
++ free_empty_pages_and_pagevec(tpmif->mmap_pages, TPMIF_TX_RING_SIZE);
++ kmem_cache_free(tpmif_cachep, tpmif);
++}
++
++tpmif_t *tpmif_find(domid_t domid, struct backend_info *bi)
++{
++ tpmif_t *tpmif;
++
++ list_for_each_entry(tpmif, &tpmif_list, tpmif_list) {
++ if (tpmif->bi == bi) {
++ if (tpmif->domid == domid) {
++ tpmif_get(tpmif);
++ return tpmif;
++ } else {
++ return ERR_PTR(-EEXIST);
++ }
++ }
++ }
++
++ return alloc_tpmif(domid, bi);
++}
++
++static int map_frontend_page(tpmif_t *tpmif, unsigned long shared_page)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)tpmif->tx_area->addr,
++ GNTMAP_host_map, shared_page, tpmif->domid);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++ DPRINTK(" Grant table operation failure !\n");
++ return op.status;
++ }
++
++ tpmif->shmem_ref = shared_page;
++ tpmif->shmem_handle = op.handle;
++
++ return 0;
++}
++
++static void unmap_frontend_page(tpmif_t *tpmif)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)tpmif->tx_area->addr,
++ GNTMAP_host_map, tpmif->shmem_handle);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++}
++
++int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn)
++{
++ int err;
++
++ if (tpmif->irq)
++ return 0;
++
++ if ((tpmif->tx_area = alloc_vm_area(PAGE_SIZE)) == NULL)
++ return -ENOMEM;
++
++ err = map_frontend_page(tpmif, shared_page);
++ if (err) {
++ free_vm_area(tpmif->tx_area);
++ return err;
++ }
++
++ tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr;
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ tpmif->domid, evtchn, tpmif_be_int, 0, tpmif->devname, tpmif);
++ if (err < 0) {
++ unmap_frontend_page(tpmif);
++ free_vm_area(tpmif->tx_area);
++ return err;
++ }
++ tpmif->irq = err;
++
++ tpmif->shmem_ref = shared_page;
++ tpmif->active = 1;
++
++ return 0;
++}
++
++void tpmif_disconnect_complete(tpmif_t *tpmif)
++{
++ if (tpmif->irq)
++ unbind_from_irqhandler(tpmif->irq, tpmif);
++
++ if (tpmif->tx) {
++ unmap_frontend_page(tpmif);
++ free_vm_area(tpmif->tx_area);
++ }
++
++ free_tpmif(tpmif);
++}
++
++void __init tpmif_interface_init(void)
++{
++ tpmif_cachep = kmem_cache_create("tpmif_cache", sizeof (tpmif_t),
++ 0, 0, NULL, NULL);
++}
++
++void __exit tpmif_interface_exit(void)
++{
++ kmem_cache_destroy(tpmif_cachep);
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/tpmback/tpmback.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,944 @@
++/******************************************************************************
++ * drivers/xen/tpmback/tpmback.c
++ *
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb@us.ibm.com
++ * Grant table support: Mahadevan Gomathisankaran
++ *
++ * This code has been derived from drivers/xen/netback/netback.c
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ */
++
++#include "common.h"
++#include <xen/evtchn.h>
++
++#include <linux/types.h>
++#include <linux/list.h>
++#include <linux/miscdevice.h>
++#include <linux/poll.h>
++#include <asm/uaccess.h>
++#include <xen/xenbus.h>
++#include <xen/interface/grant_table.h>
++#include <xen/gnttab.h>
++
++/* local data structures */
++struct data_exchange {
++ struct list_head pending_pak;
++ struct list_head current_pak;
++ unsigned int copied_so_far;
++ u8 has_opener:1;
++ u8 aborted:1;
++ rwlock_t pak_lock; // protects all of the previous fields
++ wait_queue_head_t wait_queue;
++};
++
++struct vtpm_resp_hdr {
++ uint32_t instance_no;
++ uint16_t tag_no;
++ uint32_t len_no;
++ uint32_t ordinal_no;
++} __attribute__ ((packed));
++
++struct packet {
++ struct list_head next;
++ unsigned int data_len;
++ u8 *data_buffer;
++ tpmif_t *tpmif;
++ u32 tpm_instance;
++ u8 req_tag;
++ u32 last_read;
++ u8 flags;
++ struct timer_list processing_timer;
++};
++
++enum {
++ PACKET_FLAG_DISCARD_RESPONSE = 1,
++};
++
++/* local variables */
++static struct data_exchange dataex;
++
++/* local function prototypes */
++static int _packet_write(struct packet *pak,
++ const char *data, size_t size, int userbuffer);
++static void processing_timeout(unsigned long ptr);
++static int packet_read_shmem(struct packet *pak,
++ tpmif_t * tpmif,
++ u32 offset,
++ char *buffer, int isuserbuffer, u32 left);
++static int vtpm_queue_packet(struct packet *pak);
++
++/***************************************************************
++ Buffer copying for user and kernel space buffers.
++***************************************************************/
++static inline int copy_from_buffer(void *to,
++ const void *from, unsigned long size,
++ int isuserbuffer)
++{
++ if (isuserbuffer) {
++ if (copy_from_user(to, (void __user *)from, size))
++ return -EFAULT;
++ } else {
++ memcpy(to, from, size);
++ }
++ return 0;
++}
++
++static inline int copy_to_buffer(void *to,
++ const void *from, unsigned long size,
++ int isuserbuffer)
++{
++ if (isuserbuffer) {
++ if (copy_to_user((void __user *)to, from, size))
++ return -EFAULT;
++ } else {
++ memcpy(to, from, size);
++ }
++ return 0;
++}
++
++
++static void dataex_init(struct data_exchange *dataex)
++{
++ INIT_LIST_HEAD(&dataex->pending_pak);
++ INIT_LIST_HEAD(&dataex->current_pak);
++ dataex->has_opener = 0;
++ rwlock_init(&dataex->pak_lock);
++ init_waitqueue_head(&dataex->wait_queue);
++}
++
++/***************************************************************
++ Packet-related functions
++***************************************************************/
++
++static struct packet *packet_find_instance(struct list_head *head,
++ u32 tpm_instance)
++{
++ struct packet *pak;
++ struct list_head *p;
++
++ /*
++ * traverse the list of packets and return the first
++ * one with the given instance number
++ */
++ list_for_each(p, head) {
++ pak = list_entry(p, struct packet, next);
++
++ if (pak->tpm_instance == tpm_instance) {
++ return pak;
++ }
++ }
++ return NULL;
++}
++
++static struct packet *packet_find_packet(struct list_head *head, void *packet)
++{
++ struct packet *pak;
++ struct list_head *p;
++
++ /*
++	 * traverse the list of packets and return the one
++	 * matching the given packet pointer
++ */
++ list_for_each(p, head) {
++ pak = list_entry(p, struct packet, next);
++
++ if (pak == packet) {
++ return pak;
++ }
++ }
++ return NULL;
++}
++
++static struct packet *packet_alloc(tpmif_t * tpmif,
++ u32 size, u8 req_tag, u8 flags)
++{
++ struct packet *pak = NULL;
++ pak = kzalloc(sizeof (struct packet), GFP_ATOMIC);
++ if (NULL != pak) {
++ if (tpmif) {
++ pak->tpmif = tpmif;
++ pak->tpm_instance = tpmback_get_instance(tpmif->bi);
++ tpmif_get(tpmif);
++ }
++ pak->data_len = size;
++ pak->req_tag = req_tag;
++ pak->last_read = 0;
++ pak->flags = flags;
++
++ /*
++ * cannot do tpmif_get(tpmif); bad things happen
++ * on the last tpmif_put()
++ */
++ init_timer(&pak->processing_timer);
++ pak->processing_timer.function = processing_timeout;
++ pak->processing_timer.data = (unsigned long)pak;
++ }
++ return pak;
++}
++
++static inline void packet_reset(struct packet *pak)
++{
++ pak->last_read = 0;
++}
++
++static void packet_free(struct packet *pak)
++{
++ if (timer_pending(&pak->processing_timer)) {
++ BUG();
++ }
++
++ if (pak->tpmif)
++ tpmif_put(pak->tpmif);
++ kfree(pak->data_buffer);
++ /*
++ * cannot do tpmif_put(pak->tpmif); bad things happen
++ * on the last tpmif_put()
++ */
++ kfree(pak);
++}
++
++
++/*
++ * Write data to the shared memory and send it to the FE.
++ */
++static int packet_write(struct packet *pak,
++ const char *data, size_t size, int isuserbuffer)
++{
++ int rc = 0;
++
++ if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) {
++		/* Don't send a response to this packet. Just acknowledge it. */
++ rc = size;
++ } else {
++ rc = _packet_write(pak, data, size, isuserbuffer);
++ }
++
++ return rc;
++}
++
++int _packet_write(struct packet *pak,
++ const char *data, size_t size, int isuserbuffer)
++{
++ /*
++ * Write into the shared memory pages directly
++ * and send it to the front end.
++ */
++ tpmif_t *tpmif = pak->tpmif;
++ grant_handle_t handle;
++ int rc = 0;
++ unsigned int i = 0;
++ unsigned int offset = 0;
++
++ if (tpmif == NULL) {
++ return -EFAULT;
++ }
++
++ if (tpmif->status == DISCONNECTED) {
++ return size;
++ }
++
++ while (offset < size && i < TPMIF_TX_RING_SIZE) {
++ unsigned int tocopy;
++ struct gnttab_map_grant_ref map_op;
++ struct gnttab_unmap_grant_ref unmap_op;
++ tpmif_tx_request_t *tx;
++
++ tx = &tpmif->tx->ring[i].req;
++
++ if (0 == tx->addr) {
++ DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);
++ return 0;
++ }
++
++ gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
++ GNTMAP_host_map, tx->ref, tpmif->domid);
++
++ if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++ &map_op, 1))) {
++ BUG();
++ }
++
++ handle = map_op.handle;
++
++ if (map_op.status) {
++ DPRINTK(" Grant table operation failure !\n");
++ return 0;
++ }
++
++ tocopy = min_t(size_t, size - offset, PAGE_SIZE);
++
++ if (copy_from_buffer((void *)(idx_to_kaddr(tpmif, i) |
++ (tx->addr & ~PAGE_MASK)),
++ &data[offset], tocopy, isuserbuffer)) {
++ tpmif_put(tpmif);
++ return -EFAULT;
++ }
++ tx->size = tocopy;
++
++ gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
++ GNTMAP_host_map, handle);
++
++ if (unlikely
++ (HYPERVISOR_grant_table_op
++ (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
++ BUG();
++ }
++
++ offset += tocopy;
++ i++;
++ }
++
++ rc = offset;
++ DPRINTK("Notifying frontend via irq %d\n", tpmif->irq);
++ notify_remote_via_irq(tpmif->irq);
++
++ return rc;
++}
++
++/*
++ * Read data from the shared memory and copy it directly into the
++ * provided buffer. Advance the read_last indicator which tells
++ * how many bytes have already been read.
++ */
++static int packet_read(struct packet *pak, size_t numbytes,
++ char *buffer, size_t buffersize, int isuserbuffer)
++{
++ tpmif_t *tpmif = pak->tpmif;
++
++ /*
++ * Read 'numbytes' of data from the buffer. The first 4
++ * bytes are the instance number in network byte order,
++ * after that come the data from the shared memory buffer.
++ */
++ u32 to_copy;
++ u32 offset = 0;
++ u32 room_left = buffersize;
++
++ if (pak->last_read < 4) {
++ /*
++ * copy the instance number into the buffer
++ */
++ u32 instance_no = htonl(pak->tpm_instance);
++ u32 last_read = pak->last_read;
++
++ to_copy = min_t(size_t, 4 - last_read, numbytes);
++
++ if (copy_to_buffer(&buffer[0],
++ &(((u8 *) & instance_no)[last_read]),
++ to_copy, isuserbuffer)) {
++ return -EFAULT;
++ }
++
++ pak->last_read += to_copy;
++ offset += to_copy;
++ room_left -= to_copy;
++ }
++
++ /*
++ * If the packet has a data buffer appended, read from it...
++ */
++
++ if (room_left > 0) {
++ if (pak->data_buffer) {
++ u32 to_copy = min_t(u32, pak->data_len - offset, room_left);
++ u32 last_read = pak->last_read - 4;
++
++ if (copy_to_buffer(&buffer[offset],
++ &pak->data_buffer[last_read],
++ to_copy, isuserbuffer)) {
++ return -EFAULT;
++ }
++ pak->last_read += to_copy;
++ offset += to_copy;
++ } else {
++ offset = packet_read_shmem(pak,
++ tpmif,
++ offset,
++ buffer,
++ isuserbuffer, room_left);
++ }
++ }
++ return offset;
++}
++
++static int packet_read_shmem(struct packet *pak,
++ tpmif_t * tpmif,
++ u32 offset, char *buffer, int isuserbuffer,
++ u32 room_left)
++{
++ u32 last_read = pak->last_read - 4;
++ u32 i = (last_read / PAGE_SIZE);
++ u32 pg_offset = last_read & (PAGE_SIZE - 1);
++ u32 to_copy;
++ grant_handle_t handle;
++
++ tpmif_tx_request_t *tx;
++
++ tx = &tpmif->tx->ring[0].req;
++ /*
++ * Start copying data at the page with index 'index'
++ * and within that page at offset 'offset'.
++ * Copy a maximum of 'room_left' bytes.
++ */
++ to_copy = min_t(u32, PAGE_SIZE - pg_offset, room_left);
++ while (to_copy > 0) {
++ void *src;
++ struct gnttab_map_grant_ref map_op;
++ struct gnttab_unmap_grant_ref unmap_op;
++
++ tx = &tpmif->tx->ring[i].req;
++
++ gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
++ GNTMAP_host_map, tx->ref, tpmif->domid);
++
++ if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++ &map_op, 1))) {
++ BUG();
++ }
++
++ if (map_op.status) {
++ DPRINTK(" Grant table operation failure !\n");
++ return -EFAULT;
++ }
++
++ handle = map_op.handle;
++
++ if (to_copy > tx->size) {
++ /*
++ * User requests more than what's available
++ */
++ to_copy = min_t(u32, tx->size, to_copy);
++ }
++
++ DPRINTK("Copying from mapped memory at %08lx\n",
++ (unsigned long)(idx_to_kaddr(tpmif, i) |
++ (tx->addr & ~PAGE_MASK)));
++
++ src = (void *)(idx_to_kaddr(tpmif, i) |
++ ((tx->addr & ~PAGE_MASK) + pg_offset));
++ if (copy_to_buffer(&buffer[offset],
++ src, to_copy, isuserbuffer)) {
++ return -EFAULT;
++ }
++
++ DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
++ tpmif->domid, buffer[offset], buffer[offset + 1],
++ buffer[offset + 2], buffer[offset + 3]);
++
++ gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
++ GNTMAP_host_map, handle);
++
++ if (unlikely
++ (HYPERVISOR_grant_table_op
++ (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
++ BUG();
++ }
++
++ offset += to_copy;
++ pg_offset = 0;
++ last_read += to_copy;
++ room_left -= to_copy;
++
++ to_copy = min_t(u32, PAGE_SIZE, room_left);
++ i++;
++ } /* while (to_copy > 0) */
++ /*
++ * Adjust the last_read pointer
++ */
++ pak->last_read = last_read + 4;
++ return offset;
++}
++
++/* ============================================================
++ * The file layer for reading data from this device
++ * ============================================================
++ */
++static int vtpm_op_open(struct inode *inode, struct file *f)
++{
++ int rc = 0;
++ unsigned long flags;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ if (dataex.has_opener == 0) {
++ dataex.has_opener = 1;
++ } else {
++ rc = -EPERM;
++ }
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ return rc;
++}
++
++static ssize_t vtpm_op_read(struct file *file,
++ char __user * data, size_t size, loff_t * offset)
++{
++ int ret_size = -ENODATA;
++ struct packet *pak = NULL;
++ unsigned long flags;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ if (dataex.aborted) {
++ dataex.aborted = 0;
++ dataex.copied_so_far = 0;
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ return -EIO;
++ }
++
++ if (list_empty(&dataex.pending_pak)) {
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ wait_event_interruptible(dataex.wait_queue,
++ !list_empty(&dataex.pending_pak));
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ dataex.copied_so_far = 0;
++ }
++
++ if (!list_empty(&dataex.pending_pak)) {
++ unsigned int left;
++
++ pak = list_entry(dataex.pending_pak.next, struct packet, next);
++ left = pak->data_len - dataex.copied_so_far;
++ list_del(&pak->next);
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++
++ DPRINTK("size given by app: %d, available: %d\n", size, left);
++
++ ret_size = min_t(size_t, size, left);
++
++ ret_size = packet_read(pak, ret_size, data, size, 1);
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++
++ if (ret_size < 0) {
++ del_singleshot_timer_sync(&pak->processing_timer);
++ packet_free(pak);
++ dataex.copied_so_far = 0;
++ } else {
++ DPRINTK("Copied %d bytes to user buffer\n", ret_size);
++
++ dataex.copied_so_far += ret_size;
++ if (dataex.copied_so_far >= pak->data_len + 4) {
++ DPRINTK("All data from this packet given to app.\n");
++ /* All data given to app */
++
++ del_singleshot_timer_sync(&pak->
++ processing_timer);
++ list_add_tail(&pak->next, &dataex.current_pak);
++ /*
++				 * The more frontends that are handled at the same time,
++ * the more time we give the TPM to process the request.
++ */
++ mod_timer(&pak->processing_timer,
++ jiffies + (num_frontends * 60 * HZ));
++ dataex.copied_so_far = 0;
++ } else {
++ list_add(&pak->next, &dataex.pending_pak);
++ }
++ }
++ }
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++
++ DPRINTK("Returning result from read to app: %d\n", ret_size);
++
++ return ret_size;
++}
++
++/*
++ * Write operation - only works after a previous read operation!
++ */
++static ssize_t vtpm_op_write(struct file *file,
++ const char __user * data, size_t size,
++ loff_t * offset)
++{
++ struct packet *pak;
++ int rc = 0;
++ unsigned int off = 4;
++ unsigned long flags;
++ struct vtpm_resp_hdr vrh;
++
++ /*
++ * Minimum required packet size is:
++ * 4 bytes for instance number
++ * 2 bytes for tag
++ * 4 bytes for paramSize
++ * 4 bytes for the ordinal
++ * sum: 14 bytes
++ */
++ if (size < sizeof (vrh))
++ return -EFAULT;
++
++ if (copy_from_user(&vrh, data, sizeof (vrh)))
++ return -EFAULT;
++
++ /* malformed packet? */
++ if ((off + ntohl(vrh.len_no)) != size)
++ return -EFAULT;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ pak = packet_find_instance(&dataex.current_pak,
++ ntohl(vrh.instance_no));
++
++ if (pak == NULL) {
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ DPRINTK(KERN_ALERT "No associated packet! (inst=%d)\n",
++ ntohl(vrh.instance_no));
++ return -EFAULT;
++ }
++
++ del_singleshot_timer_sync(&pak->processing_timer);
++ list_del(&pak->next);
++
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++
++ /*
++ * The first 'offset' bytes must be the instance number - skip them.
++ */
++ size -= off;
++
++ rc = packet_write(pak, &data[off], size, 1);
++
++ if (rc > 0) {
++ /* I neglected the first 4 bytes */
++ rc += off;
++ }
++ packet_free(pak);
++ return rc;
++}
++
++static int vtpm_op_release(struct inode *inode, struct file *file)
++{
++ unsigned long flags;
++
++ vtpm_release_packets(NULL, 1);
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ dataex.has_opener = 0;
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ return 0;
++}
++
++static unsigned int vtpm_op_poll(struct file *file,
++ struct poll_table_struct *pts)
++{
++ unsigned int flags = POLLOUT | POLLWRNORM;
++
++ poll_wait(file, &dataex.wait_queue, pts);
++ if (!list_empty(&dataex.pending_pak)) {
++ flags |= POLLIN | POLLRDNORM;
++ }
++ return flags;
++}
++
++static const struct file_operations vtpm_ops = {
++ .owner = THIS_MODULE,
++ .llseek = no_llseek,
++ .open = vtpm_op_open,
++ .read = vtpm_op_read,
++ .write = vtpm_op_write,
++ .release = vtpm_op_release,
++ .poll = vtpm_op_poll,
++};
++
++static struct miscdevice vtpms_miscdevice = {
++ .minor = 225,
++ .name = "vtpm",
++ .fops = &vtpm_ops,
++};
++
++/***************************************************************
++ Utility functions
++***************************************************************/
++
++static int tpm_send_fail_message(struct packet *pak, u8 req_tag)
++{
++ int rc;
++ static const unsigned char tpm_error_message_fail[] = {
++ 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x0a,
++ 0x00, 0x00, 0x00, 0x09 /* TPM_FAIL */
++ };
++ unsigned char buffer[sizeof (tpm_error_message_fail)];
++
++ memcpy(buffer, tpm_error_message_fail,
++ sizeof (tpm_error_message_fail));
++ /*
++	 * Insert the right response tag depending on the given tag.
++ * All response tags are '+3' to the request tag.
++ */
++ buffer[1] = req_tag + 3;
++
++ /*
++ * Write the data to shared memory and notify the front-end
++ */
++ rc = packet_write(pak, buffer, sizeof (buffer), 0);
++
++ return rc;
++}
++
++static int _vtpm_release_packets(struct list_head *head,
++ tpmif_t * tpmif, int send_msgs)
++{
++ int aborted = 0;
++ int c = 0;
++ struct packet *pak;
++ struct list_head *pos, *tmp;
++
++ list_for_each_safe(pos, tmp, head) {
++ pak = list_entry(pos, struct packet, next);
++ c += 1;
++
++ if (tpmif == NULL || pak->tpmif == tpmif) {
++ int can_send = 0;
++
++ del_singleshot_timer_sync(&pak->processing_timer);
++ list_del(&pak->next);
++
++ if (pak->tpmif && pak->tpmif->status == CONNECTED) {
++ can_send = 1;
++ }
++
++ if (send_msgs && can_send) {
++ tpm_send_fail_message(pak, pak->req_tag);
++ }
++ packet_free(pak);
++ if (c == 1)
++ aborted = 1;
++ }
++ }
++ return aborted;
++}
++
++int vtpm_release_packets(tpmif_t * tpmif, int send_msgs)
++{
++ unsigned long flags;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++
++ dataex.aborted = _vtpm_release_packets(&dataex.pending_pak,
++ tpmif,
++ send_msgs);
++ _vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);
++
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ return 0;
++}
++
++static int vtpm_queue_packet(struct packet *pak)
++{
++ int rc = 0;
++
++ if (dataex.has_opener) {
++ unsigned long flags;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ list_add_tail(&pak->next, &dataex.pending_pak);
++ /* give the TPM some time to pick up the request */
++ mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++
++ wake_up_interruptible(&dataex.wait_queue);
++ } else {
++ rc = -EFAULT;
++ }
++ return rc;
++}
++
++static int vtpm_receive(tpmif_t * tpmif, u32 size)
++{
++ int rc = 0;
++ unsigned char buffer[10];
++ __be32 *native_size;
++ struct packet *pak = packet_alloc(tpmif, size, 0, 0);
++
++ if (!pak)
++ return -ENOMEM;
++ /*
++ * Read 10 bytes from the received buffer to test its
++ * content for validity.
++ */
++ if (sizeof (buffer) != packet_read(pak,
++ sizeof (buffer), buffer,
++ sizeof (buffer), 0)) {
++ goto failexit;
++ }
++ /*
++ * Reset the packet read pointer so we can read all its
++ * contents again.
++ */
++ packet_reset(pak);
++
++ native_size = (__force __be32 *) (&buffer[4 + 2]);
++ /*
++ * Verify that the size of the packet is correct
++ * as indicated and that there's actually someone reading packets.
++ * The minimum size of the packet is '10' for tag, size indicator
++ * and ordinal.
++ */
++ if (size < 10 ||
++ be32_to_cpu(*native_size) != size ||
++ 0 == dataex.has_opener || tpmif->status != CONNECTED) {
++ rc = -EINVAL;
++ goto failexit;
++ } else {
++ rc = vtpm_queue_packet(pak);
++ if (rc < 0)
++ goto failexit;
++ }
++ return 0;
++
++ failexit:
++ if (pak) {
++ tpm_send_fail_message(pak, buffer[4 + 1]);
++ packet_free(pak);
++ }
++ return rc;
++}
++
++/*
++ * Timeout function that gets invoked when a packet has not been processed
++ * during the timeout period.
++ * The packet must be on a list when this function is invoked. This
++ * also means that once it is taken off a list, the timer must be
++ * destroyed as well.
++ */
++static void processing_timeout(unsigned long ptr)
++{
++ struct packet *pak = (struct packet *)ptr;
++ unsigned long flags;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ /*
++	/*
++	 * Check whether the packet is still on one of the lists.
++	 */
++ if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
++ pak == packet_find_packet(&dataex.current_pak, pak)) {
++ if ((pak->flags & PACKET_FLAG_DISCARD_RESPONSE) == 0) {
++ tpm_send_fail_message(pak, pak->req_tag);
++ }
++ /* discard future responses */
++ pak->flags |= PACKET_FLAG_DISCARD_RESPONSE;
++ }
++
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++}
++
++static void tpm_tx_action(unsigned long unused);
++static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);
++
++static struct list_head tpm_schedule_list;
++static spinlock_t tpm_schedule_list_lock;
++
++static inline void maybe_schedule_tx_action(void)
++{
++ smp_mb();
++ tasklet_schedule(&tpm_tx_tasklet);
++}
++
++static inline int __on_tpm_schedule_list(tpmif_t * tpmif)
++{
++ return tpmif->list.next != NULL;
++}
++
++static void remove_from_tpm_schedule_list(tpmif_t * tpmif)
++{
++ spin_lock_irq(&tpm_schedule_list_lock);
++ if (likely(__on_tpm_schedule_list(tpmif))) {
++ list_del(&tpmif->list);
++ tpmif->list.next = NULL;
++ tpmif_put(tpmif);
++ }
++ spin_unlock_irq(&tpm_schedule_list_lock);
++}
++
++static void add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
++{
++ if (__on_tpm_schedule_list(tpmif))
++ return;
++
++ spin_lock_irq(&tpm_schedule_list_lock);
++ if (!__on_tpm_schedule_list(tpmif) && tpmif->active) {
++ list_add_tail(&tpmif->list, &tpm_schedule_list);
++ tpmif_get(tpmif);
++ }
++ spin_unlock_irq(&tpm_schedule_list_lock);
++}
++
++void tpmif_schedule_work(tpmif_t * tpmif)
++{
++ add_to_tpm_schedule_list_tail(tpmif);
++ maybe_schedule_tx_action();
++}
++
++void tpmif_deschedule_work(tpmif_t * tpmif)
++{
++ remove_from_tpm_schedule_list(tpmif);
++}
++
++static void tpm_tx_action(unsigned long unused)
++{
++ struct list_head *ent;
++ tpmif_t *tpmif;
++ tpmif_tx_request_t *tx;
++
++ DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);
++
++ while (!list_empty(&tpm_schedule_list)) {
++ /* Get a tpmif from the list with work to do. */
++ ent = tpm_schedule_list.next;
++ tpmif = list_entry(ent, tpmif_t, list);
++ tpmif_get(tpmif);
++ remove_from_tpm_schedule_list(tpmif);
++
++ tx = &tpmif->tx->ring[0].req;
++
++ /* pass it up */
++ vtpm_receive(tpmif, tx->size);
++
++ tpmif_put(tpmif);
++ }
++}
++
++irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++ tpmif_t *tpmif = (tpmif_t *) dev_id;
++
++ add_to_tpm_schedule_list_tail(tpmif);
++ maybe_schedule_tx_action();
++ return IRQ_HANDLED;
++}
++
++static int __init tpmback_init(void)
++{
++ int rc;
++
++ if ((rc = misc_register(&vtpms_miscdevice)) != 0) {
++ printk(KERN_ALERT
++ "Could not register misc device for TPM BE.\n");
++ return rc;
++ }
++
++ dataex_init(&dataex);
++
++ spin_lock_init(&tpm_schedule_list_lock);
++ INIT_LIST_HEAD(&tpm_schedule_list);
++
++ tpmif_interface_init();
++ tpmif_xenbus_init();
++
++ printk(KERN_ALERT "Successfully initialized TPM backend driver.\n");
++
++ return 0;
++}
++
++module_init(tpmback_init);
++
++void __exit tpmback_exit(void)
++{
++ vtpm_release_packets(NULL, 0);
++ tpmif_xenbus_exit();
++ tpmif_interface_exit();
++ misc_deregister(&vtpms_miscdevice);
++}
++
++MODULE_LICENSE("Dual BSD/GPL");
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/tpmback/xenbus.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,289 @@
++/* Xenbus code for tpmif backend
++ Copyright (C) 2005 IBM Corporation
++ Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++*/
++#include <stdarg.h>
++#include <linux/module.h>
++#include <xen/xenbus.h>
++#include "common.h"
++
++struct backend_info
++{
++ struct xenbus_device *dev;
++
++ /* our communications channel */
++ tpmif_t *tpmif;
++
++ long int frontend_id;
++ long int instance; // instance of TPM
++	u8 is_instance_set; // whether instance number has been set
++
++ /* watch front end for changes */
++ struct xenbus_watch backend_watch;
++};
++
++static void maybe_connect(struct backend_info *be);
++static void connect(struct backend_info *be);
++static int connect_ring(struct backend_info *be);
++static void backend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len);
++static void frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state);
++
++long int tpmback_get_instance(struct backend_info *bi)
++{
++ long int res = -1;
++ if (bi && bi->is_instance_set)
++ res = bi->instance;
++ return res;
++}
++
++static int tpmback_remove(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++ if (!be) return 0;
++
++ if (be->backend_watch.node) {
++ unregister_xenbus_watch(&be->backend_watch);
++ kfree(be->backend_watch.node);
++ be->backend_watch.node = NULL;
++ }
++ if (be->tpmif) {
++ be->tpmif->bi = NULL;
++ vtpm_release_packets(be->tpmif, 0);
++ tpmif_put(be->tpmif);
++ be->tpmif = NULL;
++ }
++ kfree(be);
++ dev->dev.driver_data = NULL;
++ return 0;
++}
++
++static int tpmback_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++ struct backend_info *be = kzalloc(sizeof(struct backend_info),
++ GFP_KERNEL);
++
++ if (!be) {
++ xenbus_dev_fatal(dev, -ENOMEM,
++ "allocating backend structure");
++ return -ENOMEM;
++ }
++
++ be->is_instance_set = 0;
++ be->dev = dev;
++ dev->dev.driver_data = be;
++
++ err = xenbus_watch_path2(dev, dev->nodename,
++ "instance", &be->backend_watch,
++ backend_changed);
++ if (err) {
++ goto fail;
++ }
++
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err) {
++ goto fail;
++ }
++ return 0;
++fail:
++ tpmback_remove(dev);
++ return err;
++}
++
++
++static void backend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ int err;
++ long instance;
++ struct backend_info *be
++ = container_of(watch, struct backend_info, backend_watch);
++ struct xenbus_device *dev = be->dev;
++
++ err = xenbus_scanf(XBT_NIL, dev->nodename,
++ "instance","%li", &instance);
++ if (XENBUS_EXIST_ERR(err)) {
++ return;
++ }
++
++ if (err != 1) {
++ xenbus_dev_fatal(dev, err, "reading instance");
++ return;
++ }
++
++ if (be->is_instance_set == 0) {
++ be->instance = instance;
++ be->is_instance_set = 1;
++ }
++}
++
++
++static void frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state)
++{
++ struct backend_info *be = dev->dev.driver_data;
++ int err;
++
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitialised:
++ break;
++
++ case XenbusStateConnected:
++ err = connect_ring(be);
++ if (err) {
++ return;
++ }
++ maybe_connect(be);
++ break;
++
++ case XenbusStateClosing:
++ be->instance = -1;
++ xenbus_switch_state(dev, XenbusStateClosing);
++ break;
++
++ case XenbusStateUnknown: /* keep it here */
++ case XenbusStateClosed:
++ xenbus_switch_state(dev, XenbusStateClosed);
++ device_unregister(&be->dev->dev);
++ tpmback_remove(dev);
++ break;
++
++ default:
++ xenbus_dev_fatal(dev, -EINVAL,
++ "saw state %d at frontend",
++ frontend_state);
++ break;
++ }
++}
++
++
++
++static void maybe_connect(struct backend_info *be)
++{
++ if (be->tpmif == NULL || be->tpmif->status == CONNECTED)
++ return;
++
++ connect(be);
++}
++
++
++static void connect(struct backend_info *be)
++{
++ struct xenbus_transaction xbt;
++ int err;
++ struct xenbus_device *dev = be->dev;
++ unsigned long ready = 1;
++
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(be->dev, err, "starting transaction");
++ return;
++ }
++
++ err = xenbus_printf(xbt, be->dev->nodename,
++ "ready", "%lu", ready);
++ if (err) {
++ xenbus_dev_fatal(be->dev, err, "writing 'ready'");
++ goto abort;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN)
++ goto again;
++ if (err)
++ xenbus_dev_fatal(be->dev, err, "end of transaction");
++
++ err = xenbus_switch_state(dev, XenbusStateConnected);
++ if (!err)
++ be->tpmif->status = CONNECTED;
++ return;
++abort:
++ xenbus_transaction_end(xbt, 1);
++}
++
++
++static int connect_ring(struct backend_info *be)
++{
++ struct xenbus_device *dev = be->dev;
++ unsigned long ring_ref;
++ unsigned int evtchn;
++ int err;
++
++ err = xenbus_gather(XBT_NIL, dev->otherend,
++ "ring-ref", "%lu", &ring_ref,
++ "event-channel", "%u", &evtchn, NULL);
++ if (err) {
++ xenbus_dev_error(dev, err,
++ "reading %s/ring-ref and event-channel",
++ dev->otherend);
++ return err;
++ }
++
++ if (!be->tpmif) {
++ be->tpmif = tpmif_find(dev->otherend_id, be);
++ if (IS_ERR(be->tpmif)) {
++ err = PTR_ERR(be->tpmif);
++ be->tpmif = NULL;
++ xenbus_dev_fatal(dev,err,"creating vtpm interface");
++ return err;
++ }
++ }
++
++ if (be->tpmif != NULL) {
++ err = tpmif_map(be->tpmif, ring_ref, evtchn);
++ if (err) {
++ xenbus_dev_error(dev, err,
++ "mapping shared-frame %lu port %u",
++ ring_ref, evtchn);
++ return err;
++ }
++ }
++ return 0;
++}
++
++
++static struct xenbus_device_id tpmback_ids[] = {
++ { "vtpm" },
++ { "" }
++};
++
++
++static struct xenbus_driver tpmback = {
++ .name = "vtpm",
++ .owner = THIS_MODULE,
++ .ids = tpmback_ids,
++ .probe = tpmback_probe,
++ .remove = tpmback_remove,
++ .otherend_changed = frontend_changed,
++};
++
++
++void tpmif_xenbus_init(void)
++{
++ xenbus_register_backend(&tpmback);
++}
++
++void tpmif_xenbus_exit(void)
++{
++ xenbus_unregister_driver(&tpmback);
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/util.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,70 @@
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <asm/uaccess.h>
++#include <xen/driver_util.h>
++
++struct class *get_xen_class(void)
++{
++ static struct class *xen_class;
++
++ if (xen_class)
++ return xen_class;
++
++ xen_class = class_create(THIS_MODULE, "xen");
++ if (IS_ERR(xen_class)) {
++ printk("Failed to create xen sysfs class.\n");
++ xen_class = NULL;
++ }
++
++ return xen_class;
++}
++EXPORT_SYMBOL_GPL(get_xen_class);
++
++/* Todo: merge ia64 ('auto-translate physmap') versions of these functions. */
++#ifndef __ia64__
++
++static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++{
++ /* apply_to_page_range() does all the hard work. */
++ return 0;
++}
++
++struct vm_struct *alloc_vm_area(unsigned long size)
++{
++ struct vm_struct *area;
++
++ area = get_vm_area(size, VM_IOREMAP);
++ if (area == NULL)
++ return NULL;
++
++ /*
++ * This ensures that page tables are constructed for this region
++ * of kernel virtual address space and mapped into init_mm.
++ */
++ if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
++ area->size, f, NULL)) {
++ free_vm_area(area);
++ return NULL;
++ }
++
++ /* Map page directories into every address space. */
++#ifdef CONFIG_X86
++ vmalloc_sync_all();
++#endif
++
++ return area;
++}
++EXPORT_SYMBOL_GPL(alloc_vm_area);
++
++void free_vm_area(struct vm_struct *area)
++{
++ struct vm_struct *ret;
++ ret = remove_vm_area(area->addr);
++ BUG_ON(ret != area);
++ kfree(area);
++}
++EXPORT_SYMBOL_GPL(free_vm_area);
++
++#endif /* !__ia64__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/xenbus/Makefile 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,9 @@
++obj-y += xenbus_client.o xenbus_comms.o xenbus_xs.o xenbus_probe.o
++obj-$(CONFIG_XEN_BACKEND) += xenbus_be.o
++
++xenbus_be-objs =
++xenbus_be-objs += xenbus_backend_client.o
++
++xenbus-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
++obj-y += $(xenbus-y) $(xenbus-m)
++obj-$(CONFIG_XEN_XENBUS_DEV) += xenbus_dev.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/xenbus/xenbus_backend_client.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,147 @@
++/******************************************************************************
++ * Backend-client-facing interface for the Xenbus driver. In other words, the
++ * interface between the Xenbus and the device-specific code in the backend
++ * driver.
++ *
++ * Copyright (C) 2005-2006 XenSource Ltd
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/err.h>
++#include <xen/gnttab.h>
++#include <xen/xenbus.h>
++#include <xen/driver_util.h>
++
++/* Based on Rusty Russell's skeleton driver's map_page */
++struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref)
++{
++ struct gnttab_map_grant_ref op;
++ struct vm_struct *area;
++
++ area = alloc_vm_area(PAGE_SIZE);
++ if (!area)
++ return ERR_PTR(-ENOMEM);
++
++ gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map,
++ gnt_ref, dev->otherend_id);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status != GNTST_okay) {
++ free_vm_area(area);
++ xenbus_dev_fatal(dev, op.status,
++ "mapping in shared page %d from domain %d",
++ gnt_ref, dev->otherend_id);
++ BUG_ON(!IS_ERR(ERR_PTR(op.status)));
++ return ERR_PTR(op.status);
++ }
++
++ /* Stuff the handle in an unused field */
++ area->phys_addr = (unsigned long)op.handle;
++
++ return area;
++}
++EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
++
++
++int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
++ grant_handle_t *handle, void *vaddr)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
++ gnt_ref, dev->otherend_id);
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status != GNTST_okay) {
++ xenbus_dev_fatal(dev, op.status,
++ "mapping in shared page %d from domain %d",
++ gnt_ref, dev->otherend_id);
++ } else
++ *handle = op.handle;
++
++ return op.status;
++}
++EXPORT_SYMBOL_GPL(xenbus_map_ring);
++
++
++/* Based on Rusty Russell's skeleton driver's unmap_page */
++int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)area->addr, GNTMAP_host_map,
++ (grant_handle_t)area->phys_addr);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status == GNTST_okay)
++ free_vm_area(area);
++ else
++ xenbus_dev_error(dev, op.status,
++ "unmapping page at handle %d error %d",
++ (int16_t)area->phys_addr, op.status);
++
++ return op.status;
++}
++EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
++
++
++int xenbus_unmap_ring(struct xenbus_device *dev,
++ grant_handle_t handle, void *vaddr)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
++ handle);
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status != GNTST_okay)
++ xenbus_dev_error(dev, op.status,
++ "unmapping page at handle %d error %d",
++ handle, op.status);
++
++ return op.status;
++}
++EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
++
++int xenbus_dev_is_online(struct xenbus_device *dev)
++{
++ int rc, val;
++
++ rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
++ if (rc != 1)
++ val = 0; /* no online node present */
++
++ return val;
++}
++EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
++
++MODULE_LICENSE("Dual BSD/GPL");
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/xenbus/xenbus_client.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,283 @@
++/******************************************************************************
++ * Client-facing interface for the Xenbus driver. In other words, the
++ * interface between the Xenbus and the device-specific code, be it the
++ * frontend or the backend of that driver.
++ *
++ * Copyright (C) 2005 XenSource Ltd
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <xen/evtchn.h>
++#include <xen/gnttab.h>
++#include <xen/xenbus.h>
++#include <xen/driver_util.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#define DPRINTK(fmt, args...) \
++ pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
++
++const char *xenbus_strstate(enum xenbus_state state)
++{
++ static const char *const name[] = {
++ [ XenbusStateUnknown ] = "Unknown",
++ [ XenbusStateInitialising ] = "Initialising",
++ [ XenbusStateInitWait ] = "InitWait",
++ [ XenbusStateInitialised ] = "Initialised",
++ [ XenbusStateConnected ] = "Connected",
++ [ XenbusStateClosing ] = "Closing",
++ [ XenbusStateClosed ] = "Closed",
++ };
++ return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
++}
++EXPORT_SYMBOL_GPL(xenbus_strstate);
++
++int xenbus_watch_path(struct xenbus_device *dev, const char *path,
++ struct xenbus_watch *watch,
++ void (*callback)(struct xenbus_watch *,
++ const char **, unsigned int))
++{
++ int err;
++
++ watch->node = path;
++ watch->callback = callback;
++
++ err = register_xenbus_watch(watch);
++
++ if (err) {
++ watch->node = NULL;
++ watch->callback = NULL;
++ xenbus_dev_fatal(dev, err, "adding watch on %s", path);
++ }
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(xenbus_watch_path);
++
++
++int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
++ const char *path2, struct xenbus_watch *watch,
++ void (*callback)(struct xenbus_watch *,
++ const char **, unsigned int))
++{
++ int err;
++ char *state = kasprintf(GFP_KERNEL, "%s/%s", path, path2);
++ if (!state) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
++ return -ENOMEM;
++ }
++ err = xenbus_watch_path(dev, state, watch, callback);
++
++ if (err)
++ kfree(state);
++ return err;
++}
++EXPORT_SYMBOL_GPL(xenbus_watch_path2);
++
++
++int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
++{
++ /* We check whether the state is currently set to the given value, and
++ if not, then the state is set. We don't want to unconditionally
++ write the given state, because we don't want to fire watches
++ unnecessarily. Furthermore, if the node has gone, we don't write
++ to it, as the device will be tearing down, and we don't want to
++ resurrect that directory.
++
++ Note that, because of this cached value of our state, this function
++	   will not work inside a Xenstore transaction (something it attempted
++	   in the past) because dev->state would not get reset if
++ the transaction was aborted.
++
++ */
++
++ int current_state;
++ int err;
++
++ if (state == dev->state)
++ return 0;
++
++ err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d",
++ &current_state);
++ if (err != 1)
++ return 0;
++
++ err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state);
++ if (err) {
++ if (state != XenbusStateClosing) /* Avoid looping */
++ xenbus_dev_fatal(dev, err, "writing new state");
++ return err;
++ }
++
++ dev->state = state;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(xenbus_switch_state);
++
++int xenbus_frontend_closed(struct xenbus_device *dev)
++{
++ xenbus_switch_state(dev, XenbusStateClosed);
++ complete(&dev->down);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
++
++/**
++ * Return the path to the error node for the given device, or NULL on failure.
++ * If the value returned is non-NULL, then it is the caller's to kfree.
++ */
++static char *error_path(struct xenbus_device *dev)
++{
++ return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
++}
++
++
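++/*
++ * Common helper behind xenbus_dev_error() and xenbus_dev_fatal(): log the
++ * formatted message with dev_err() and also record it, prefixed with the
++ * error number, under the device's error node in xenstore.
++ */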
++void _dev_error(struct xenbus_device *dev, int err, const char *fmt,
++ va_list ap)
++{
++ int ret;
++ unsigned int len;
++ char *printf_buffer = NULL, *path_buffer = NULL;
++
++#define PRINTF_BUFFER_SIZE 4096
++ printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
++ if (printf_buffer == NULL)
++ goto fail;
++
++ len = sprintf(printf_buffer, "%i ", -err);
++ ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
++
++ BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
++
++ dev_err(&dev->dev, "%s\n", printf_buffer);
++
++ path_buffer = error_path(dev);
++
++ if (path_buffer == NULL) {
++ printk("xenbus: failed to write error node for %s (%s)\n",
++ dev->nodename, printf_buffer);
++ goto fail;
++ }
++
++ if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
++ printk("xenbus: failed to write error node for %s (%s)\n",
++ dev->nodename, printf_buffer);
++ goto fail;
++ }
++
++fail:
++ if (printf_buffer)
++ kfree(printf_buffer);
++ if (path_buffer)
++ kfree(path_buffer);
++}
++
++
++void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
++ ...)
++{
++ va_list ap;
++
++ va_start(ap, fmt);
++ _dev_error(dev, err, fmt, ap);
++ va_end(ap);
++}
++EXPORT_SYMBOL_GPL(xenbus_dev_error);
++
++
++void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
++ ...)
++{
++ va_list ap;
++
++ va_start(ap, fmt);
++ _dev_error(dev, err, fmt, ap);
++ va_end(ap);
++
++ xenbus_switch_state(dev, XenbusStateClosing);
++}
++EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
++
++
++int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
++{
++ int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
++ if (err < 0)
++ xenbus_dev_fatal(dev, err, "granting access to ring page");
++ return err;
++}
++EXPORT_SYMBOL_GPL(xenbus_grant_ring);
++
++
++int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
++{
++ struct evtchn_alloc_unbound alloc_unbound;
++ int err;
++
++ alloc_unbound.dom = DOMID_SELF;
++ alloc_unbound.remote_dom = dev->otherend_id;
++
++ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
++ &alloc_unbound);
++ if (err)
++ xenbus_dev_fatal(dev, err, "allocating event channel");
++ else
++ *port = alloc_unbound.port;
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
++
++
++int xenbus_free_evtchn(struct xenbus_device *dev, int port)
++{
++ struct evtchn_close close;
++ int err;
++
++ close.port = port;
++
++ err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
++ if (err)
++ xenbus_dev_error(dev, err, "freeing event channel %d", port);
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
++
++
++enum xenbus_state xenbus_read_driver_state(const char *path)
++{
++ enum xenbus_state result;
++ int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
++ if (err)
++ result = XenbusStateUnknown;
++
++ return result;
++}
++EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/xenbus/xenbus_comms.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,232 @@
++/******************************************************************************
++ * xenbus_comms.c
++ *
++ * Low-level code to talk to the Xen Store: ring buffer and event channel.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/wait.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/err.h>
++#include <linux/ptrace.h>
++#include <xen/evtchn.h>
++#include <xen/xenbus.h>
++
++#include <asm/hypervisor.h>
++
++#include "xenbus_comms.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++static int xenbus_irq;
++
++extern void xenbus_probe(void *);
++extern int xenstored_ready;
++static DECLARE_WORK(probe_work, xenbus_probe, NULL);
++
++static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
++
++static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs)
++{
++ if (unlikely(xenstored_ready == 0)) {
++ xenstored_ready = 1;
++ schedule_work(&probe_work);
++ }
++
++ wake_up(&xb_waitq);
++ return IRQ_HANDLED;
++}
++
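++/*
++ * Sanity-check the ring indexes: the producer may be at most one full
++ * ring ahead of the consumer.
++ */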
++static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
++{
++ return ((prod - cons) <= XENSTORE_RING_SIZE);
++}
++
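++/*
++ * Return the largest contiguous chunk that can be written to (or, for
++ * get_input_chunk, read from) the ring without wrapping; its length is
++ * reported via *len.
++ */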
++static void *get_output_chunk(XENSTORE_RING_IDX cons,
++ XENSTORE_RING_IDX prod,
++ char *buf, uint32_t *len)
++{
++ *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
++ if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
++ *len = XENSTORE_RING_SIZE - (prod - cons);
++ return buf + MASK_XENSTORE_IDX(prod);
++}
++
++static const void *get_input_chunk(XENSTORE_RING_IDX cons,
++ XENSTORE_RING_IDX prod,
++ const char *buf, uint32_t *len)
++{
++ *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
++ if ((prod - cons) < *len)
++ *len = prod - cons;
++ return buf + MASK_XENSTORE_IDX(cons);
++}
++
++int xb_write(const void *data, unsigned len)
++{
++ struct xenstore_domain_interface *intf = xen_store_interface;
++ XENSTORE_RING_IDX cons, prod;
++ int rc;
++
++ while (len != 0) {
++ void *dst;
++ unsigned int avail;
++
++ rc = wait_event_interruptible(
++ xb_waitq,
++ (intf->req_prod - intf->req_cons) !=
++ XENSTORE_RING_SIZE);
++ if (rc < 0)
++ return rc;
++
++ /* Read indexes, then verify. */
++ cons = intf->req_cons;
++ prod = intf->req_prod;
++ if (!check_indexes(cons, prod)) {
++ intf->req_cons = intf->req_prod = 0;
++ return -EIO;
++ }
++
++ dst = get_output_chunk(cons, prod, intf->req, &avail);
++ if (avail == 0)
++ continue;
++ if (avail > len)
++ avail = len;
++
++ /* Must write data /after/ reading the consumer index. */
++ mb();
++
++ memcpy(dst, data, avail);
++ data += avail;
++ len -= avail;
++
++ /* Other side must not see new producer until data is there. */
++ wmb();
++ intf->req_prod += avail;
++
++ /* Implies mb(): other side will see the updated producer. */
++ notify_remote_via_evtchn(xen_store_evtchn);
++ }
++
++ return 0;
++}
++
++int xb_data_to_read(void)
++{
++ struct xenstore_domain_interface *intf = xen_store_interface;
++ return (intf->rsp_cons != intf->rsp_prod);
++}
++
++int xb_wait_for_data_to_read(void)
++{
++ return wait_event_interruptible(xb_waitq, xb_data_to_read());
++}
++
++int xb_read(void *data, unsigned len)
++{
++ struct xenstore_domain_interface *intf = xen_store_interface;
++ XENSTORE_RING_IDX cons, prod;
++ int rc;
++
++ while (len != 0) {
++ unsigned int avail;
++ const char *src;
++
++ rc = xb_wait_for_data_to_read();
++ if (rc < 0)
++ return rc;
++
++ /* Read indexes, then verify. */
++ cons = intf->rsp_cons;
++ prod = intf->rsp_prod;
++ if (!check_indexes(cons, prod)) {
++ intf->rsp_cons = intf->rsp_prod = 0;
++ return -EIO;
++ }
++
++ src = get_input_chunk(cons, prod, intf->rsp, &avail);
++ if (avail == 0)
++ continue;
++ if (avail > len)
++ avail = len;
++
++ /* Must read data /after/ reading the producer index. */
++ rmb();
++
++ memcpy(data, src, avail);
++ data += avail;
++ len -= avail;
++
++ /* Other side must not see free space until we've copied out */
++ mb();
++ intf->rsp_cons += avail;
++
++ pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
++
++ /* Implies mb(): other side will see the updated consumer. */
++ notify_remote_via_evtchn(xen_store_evtchn);
++ }
++
++ return 0;
++}
++
++/* Set up interrupt handler off store event channel. */
++int xb_init_comms(void)
++{
++ struct xenstore_domain_interface *intf = xen_store_interface;
++ int err;
++
++ if (intf->req_prod != intf->req_cons)
++ printk(KERN_ERR "XENBUS request ring is not quiescent "
++ "(%08x:%08x)!\n", intf->req_cons, intf->req_prod);
++
++ if (intf->rsp_prod != intf->rsp_cons) {
++ printk(KERN_WARNING "XENBUS response ring is not quiescent "
++ "(%08x:%08x): fixing up\n",
++ intf->rsp_cons, intf->rsp_prod);
++ intf->rsp_cons = intf->rsp_prod;
++ }
++
++ if (xenbus_irq)
++ unbind_from_irqhandler(xenbus_irq, &xb_waitq);
++
++ err = bind_caller_port_to_irqhandler(
++ xen_store_evtchn, wake_waiting,
++ 0, "xenbus", &xb_waitq);
++ if (err <= 0) {
++ printk(KERN_ERR "XENBUS request irq failed %i\n", err);
++ return err;
++ }
++
++ xenbus_irq = err;
++
++ return 0;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/xenbus/xenbus_comms.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,46 @@
++/*
++ * Private include for xenbus communications.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef _XENBUS_COMMS_H
++#define _XENBUS_COMMS_H
++
++int xs_init(void);
++int xb_init_comms(void);
++
++/* Low level routines. */
++int xb_write(const void *data, unsigned len);
++int xb_read(void *data, unsigned len);
++int xb_data_to_read(void);
++int xb_wait_for_data_to_read(void);
++int xs_input_avail(void);
++extern struct xenstore_domain_interface *xen_store_interface;
++extern int xen_store_evtchn;
++
++#endif /* _XENBUS_COMMS_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/xenbus/xenbus_dev.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,404 @@
++/*
++ * xenbus_dev.c
++ *
++ * Driver giving user-space access to the kernel's xenbus connection
++ * to xenstore.
++ *
++ * Copyright (c) 2005, Christian Limpach
++ * Copyright (c) 2005, Rusty Russell, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/uio.h>
++#include <linux/notifier.h>
++#include <linux/wait.h>
++#include <linux/fs.h>
++#include <linux/poll.h>
++#include <linux/mutex.h>
++
++#include "xenbus_comms.h"
++
++#include <asm/uaccess.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/xen_proc.h>
++#include <asm/hypervisor.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++struct xenbus_dev_transaction {
++ struct list_head list;
++ struct xenbus_transaction handle;
++};
++
++struct read_buffer {
++ struct list_head list;
++ unsigned int cons;
++ unsigned int len;
++ char msg[];
++};
++
++struct xenbus_dev_data {
++	/* In-progress transactions. */
++ struct list_head transactions;
++
++ /* Active watches. */
++ struct list_head watches;
++
++ /* Partial request. */
++ unsigned int len;
++ union {
++ struct xsd_sockmsg msg;
++ char buffer[PAGE_SIZE];
++ } u;
++
++ /* Response queue. */
++ struct list_head read_buffers;
++ wait_queue_head_t read_waitq;
++
++ struct mutex reply_mutex;
++};
++
++static struct proc_dir_entry *xenbus_dev_intf;
++
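++/*
++ * Copy queued reply data out to userspace, blocking until at least one
++ * reply buffer is available.
++ */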
++static ssize_t xenbus_dev_read(struct file *filp,
++ char __user *ubuf,
++ size_t len, loff_t *ppos)
++{
++ struct xenbus_dev_data *u = filp->private_data;
++ struct read_buffer *rb;
++ int i, ret;
++
++ mutex_lock(&u->reply_mutex);
++ while (list_empty(&u->read_buffers)) {
++ mutex_unlock(&u->reply_mutex);
++ ret = wait_event_interruptible(u->read_waitq,
++ !list_empty(&u->read_buffers));
++ if (ret)
++ return ret;
++ mutex_lock(&u->reply_mutex);
++ }
++
++ rb = list_entry(u->read_buffers.next, struct read_buffer, list);
++ for (i = 0; i < len;) {
++ put_user(rb->msg[rb->cons], ubuf + i);
++ i++;
++ rb->cons++;
++ if (rb->cons == rb->len) {
++ list_del(&rb->list);
++ kfree(rb);
++ if (list_empty(&u->read_buffers))
++ break;
++ rb = list_entry(u->read_buffers.next,
++ struct read_buffer, list);
++ }
++ }
++ mutex_unlock(&u->reply_mutex);
++
++ return i;
++}
++
++static void queue_reply(struct xenbus_dev_data *u,
++ char *data, unsigned int len)
++{
++ struct read_buffer *rb;
++
++ if (len == 0)
++ return;
++
++ rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
++ BUG_ON(rb == NULL);
++
++ rb->cons = 0;
++ rb->len = len;
++
++ memcpy(rb->msg, data, len);
++
++ list_add_tail(&rb->list, &u->read_buffers);
++
++ wake_up(&u->read_waitq);
++}
++
++struct watch_adapter
++{
++ struct list_head list;
++ struct xenbus_watch watch;
++ struct xenbus_dev_data *dev_data;
++ char *token;
++};
++
++static void free_watch_adapter (struct watch_adapter *watch)
++{
++ kfree(watch->watch.node);
++ kfree(watch->token);
++ kfree(watch);
++}
++
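++/*
++ * Relay a fired watch to the userspace client as an XS_WATCH_EVENT
++ * message: an xsd_sockmsg header followed by the watched path and the
++ * token the client registered.
++ */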
++static void watch_fired(struct xenbus_watch *watch,
++ const char **vec,
++ unsigned int len)
++{
++ struct watch_adapter *adap =
++ container_of(watch, struct watch_adapter, watch);
++ struct xsd_sockmsg hdr;
++ const char *path, *token;
++ int path_len, tok_len, body_len;
++
++ path = vec[XS_WATCH_PATH];
++ token = adap->token;
++
++ path_len = strlen(path) + 1;
++ tok_len = strlen(token) + 1;
++ body_len = path_len + tok_len;
++
++ hdr.type = XS_WATCH_EVENT;
++ hdr.len = body_len;
++
++ mutex_lock(&adap->dev_data->reply_mutex);
++ queue_reply(adap->dev_data, (char *)&hdr, sizeof(hdr));
++ queue_reply(adap->dev_data, (char *)path, path_len);
++ queue_reply(adap->dev_data, (char *)token, tok_len);
++ mutex_unlock(&adap->dev_data->reply_mutex);
++}
++
++static LIST_HEAD(watch_list);
++
++static ssize_t xenbus_dev_write(struct file *filp,
++ const char __user *ubuf,
++ size_t len, loff_t *ppos)
++{
++ struct xenbus_dev_data *u = filp->private_data;
++ struct xenbus_dev_transaction *trans = NULL;
++ uint32_t msg_type;
++ void *reply;
++ char *path, *token;
++ struct watch_adapter *watch, *tmp_watch;
++ int err, rc = len;
++
++ if ((len + u->len) > sizeof(u->u.buffer)) {
++ rc = -EINVAL;
++ goto out;
++ }
++
++ if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) {
++ rc = -EFAULT;
++ goto out;
++ }
++
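++	/*
++	 * Buffer the data until a complete request (xsd_sockmsg header plus
++	 * body) has arrived; until then just report the bytes as consumed.
++	 */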
++ u->len += len;
++ if ((u->len < sizeof(u->u.msg)) ||
++ (u->len < (sizeof(u->u.msg) + u->u.msg.len)))
++ return rc;
++
++ msg_type = u->u.msg.type;
++
++ switch (msg_type) {
++ case XS_TRANSACTION_START:
++ case XS_TRANSACTION_END:
++ case XS_DIRECTORY:
++ case XS_READ:
++ case XS_GET_PERMS:
++ case XS_RELEASE:
++ case XS_GET_DOMAIN_PATH:
++ case XS_WRITE:
++ case XS_MKDIR:
++ case XS_RM:
++ case XS_SET_PERMS:
++ if (msg_type == XS_TRANSACTION_START) {
++ trans = kmalloc(sizeof(*trans), GFP_KERNEL);
++ if (!trans) {
++ rc = -ENOMEM;
++ goto out;
++ }
++ }
++
++ reply = xenbus_dev_request_and_reply(&u->u.msg);
++ if (IS_ERR(reply)) {
++ kfree(trans);
++ rc = PTR_ERR(reply);
++ goto out;
++ }
++
++ if (msg_type == XS_TRANSACTION_START) {
++ trans->handle.id = simple_strtoul(reply, NULL, 0);
++ list_add(&trans->list, &u->transactions);
++ } else if (msg_type == XS_TRANSACTION_END) {
++ list_for_each_entry(trans, &u->transactions, list)
++ if (trans->handle.id == u->u.msg.tx_id)
++ break;
++ BUG_ON(&trans->list == &u->transactions);
++ list_del(&trans->list);
++ kfree(trans);
++ }
++ mutex_lock(&u->reply_mutex);
++ queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg));
++ queue_reply(u, (char *)reply, u->u.msg.len);
++ mutex_unlock(&u->reply_mutex);
++ kfree(reply);
++ break;
++
++ case XS_WATCH:
++ case XS_UNWATCH: {
++ static const char *XS_RESP = "OK";
++ struct xsd_sockmsg hdr;
++
++ path = u->u.buffer + sizeof(u->u.msg);
++ token = memchr(path, 0, u->u.msg.len);
++ if (token == NULL) {
++ rc = -EILSEQ;
++ goto out;
++ }
++ token++;
++
++ if (msg_type == XS_WATCH) {
++			watch = kzalloc(sizeof(*watch), GFP_KERNEL);
++			if (!watch) {
++				rc = -ENOMEM;
++				goto out;
++			}
++			watch->watch.node = kstrdup(path, GFP_KERNEL);
++			watch->token = kstrdup(token, GFP_KERNEL);
++			if (!watch->watch.node || !watch->token) {
++				free_watch_adapter(watch);
++				rc = -ENOMEM;
++				goto out;
++			}
++			watch->watch.callback = watch_fired;
++			watch->dev_data = u;
++
++ err = register_xenbus_watch(&watch->watch);
++ if (err) {
++ free_watch_adapter(watch);
++ rc = err;
++ goto out;
++ }
++
++ list_add(&watch->list, &u->watches);
++ } else {
++ list_for_each_entry_safe(watch, tmp_watch,
++ &u->watches, list) {
++ if (!strcmp(watch->token, token) &&
++ !strcmp(watch->watch.node, path))
++ {
++ unregister_xenbus_watch(&watch->watch);
++ list_del(&watch->list);
++ free_watch_adapter(watch);
++ break;
++ }
++ }
++ }
++
++ hdr.type = msg_type;
++ hdr.len = strlen(XS_RESP) + 1;
++ mutex_lock(&u->reply_mutex);
++ queue_reply(u, (char *)&hdr, sizeof(hdr));
++ queue_reply(u, (char *)XS_RESP, hdr.len);
++ mutex_unlock(&u->reply_mutex);
++ break;
++ }
++
++ default:
++ rc = -EINVAL;
++ break;
++ }
++
++ out:
++ u->len = 0;
++ return rc;
++}
++
++static int xenbus_dev_open(struct inode *inode, struct file *filp)
++{
++ struct xenbus_dev_data *u;
++
++ if (xen_store_evtchn == 0)
++ return -ENOENT;
++
++ nonseekable_open(inode, filp);
++
++ u = kzalloc(sizeof(*u), GFP_KERNEL);
++ if (u == NULL)
++ return -ENOMEM;
++
++ INIT_LIST_HEAD(&u->transactions);
++ INIT_LIST_HEAD(&u->watches);
++ INIT_LIST_HEAD(&u->read_buffers);
++ init_waitqueue_head(&u->read_waitq);
++
++ mutex_init(&u->reply_mutex);
++
++ filp->private_data = u;
++
++ return 0;
++}
++
++static int xenbus_dev_release(struct inode *inode, struct file *filp)
++{
++ struct xenbus_dev_data *u = filp->private_data;
++ struct xenbus_dev_transaction *trans, *tmp;
++ struct watch_adapter *watch, *tmp_watch;
++
++ list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
++ xenbus_transaction_end(trans->handle, 1);
++ list_del(&trans->list);
++ kfree(trans);
++ }
++
++ list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
++ unregister_xenbus_watch(&watch->watch);
++ list_del(&watch->list);
++ free_watch_adapter(watch);
++ }
++
++ kfree(u);
++
++ return 0;
++}
++
++static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait)
++{
++ struct xenbus_dev_data *u = file->private_data;
++
++ poll_wait(file, &u->read_waitq, wait);
++ if (!list_empty(&u->read_buffers))
++ return POLLIN | POLLRDNORM;
++ return 0;
++}
++
++static const struct file_operations xenbus_dev_file_ops = {
++ .read = xenbus_dev_read,
++ .write = xenbus_dev_write,
++ .open = xenbus_dev_open,
++ .release = xenbus_dev_release,
++ .poll = xenbus_dev_poll,
++};
++
++int xenbus_dev_init(void)
++{
++ xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400);
++ if (xenbus_dev_intf)
++ xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops;
++
++ return 0;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/xenbus/xenbus_probe.c 2007-08-27 14:02:08.000000000 -0400
+@@ -0,0 +1,1086 @@
++/******************************************************************************
++ * Talks to Xen Store to figure out what devices we have.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
++ * Copyright (C) 2005, 2006 XenSource Ltd
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#define DPRINTK(fmt, args...) \
++ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
++ __FUNCTION__, __LINE__, ##args)
++
++#include <linux/kernel.h>
++#include <linux/err.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <linux/fcntl.h>
++#include <linux/mm.h>
++#include <linux/notifier.h>
++#include <linux/kthread.h>
++#include <linux/mutex.h>
++
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/maddr.h>
++#include <asm/pgtable.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/xen_proc.h>
++#include <xen/evtchn.h>
++#include <xen/features.h>
++#include <xen/hvm.h>
++
++#include "xenbus_comms.h"
++#include "xenbus_probe.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++int xen_store_evtchn;
++struct xenstore_domain_interface *xen_store_interface;
++static unsigned long xen_store_mfn;
++
++extern struct mutex xenwatch_mutex;
++
++static ATOMIC_NOTIFIER_HEAD(xenstore_chain);
++
++static void wait_for_devices(struct xenbus_driver *xendrv);
++
++static int xenbus_probe_frontend(const char *type, const char *name);
++
++static void xenbus_dev_shutdown(struct device *_dev);
++
++/* If something in array of ids matches this device, return it. */
++static const struct xenbus_device_id *
++match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
++{
++ for (; *arr->devicetype != '\0'; arr++) {
++ if (!strcmp(arr->devicetype, dev->devicetype))
++ return arr;
++ }
++ return NULL;
++}
++
++int xenbus_match(struct device *_dev, struct device_driver *_drv)
++{
++ struct xenbus_driver *drv = to_xenbus_driver(_drv);
++
++ if (!drv->ids)
++ return 0;
++
++ return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
++}
++
++/* device/<type>/<id> => <type>-<id> */
++static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
++{
++ nodename = strchr(nodename, '/');
++ if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) {
++ printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
++ return -EINVAL;
++ }
++
++ strlcpy(bus_id, nodename + 1, BUS_ID_SIZE);
++ if (!strchr(bus_id, '/')) {
++ printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
++ return -EINVAL;
++ }
++ *strchr(bus_id, '/') = '-';
++ return 0;
++}
++
++
++static void free_otherend_details(struct xenbus_device *dev)
++{
++ kfree(dev->otherend);
++ dev->otherend = NULL;
++}
++
++
++static void free_otherend_watch(struct xenbus_device *dev)
++{
++ if (dev->otherend_watch.node) {
++ unregister_xenbus_watch(&dev->otherend_watch);
++ kfree(dev->otherend_watch.node);
++ dev->otherend_watch.node = NULL;
++ }
++}
++
++
++int read_otherend_details(struct xenbus_device *xendev,
++ char *id_node, char *path_node)
++{
++ int err = xenbus_gather(XBT_NIL, xendev->nodename,
++ id_node, "%i", &xendev->otherend_id,
++ path_node, NULL, &xendev->otherend,
++ NULL);
++ if (err) {
++ xenbus_dev_fatal(xendev, err,
++ "reading other end details from %s",
++ xendev->nodename);
++ return err;
++ }
++ if (strlen(xendev->otherend) == 0 ||
++ !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
++ xenbus_dev_fatal(xendev, -ENOENT,
++ "unable to read other end from %s. "
++ "missing or inaccessible.",
++ xendev->nodename);
++ free_otherend_details(xendev);
++ return -ENOENT;
++ }
++
++ return 0;
++}
++
++
++static int read_backend_details(struct xenbus_device *xendev)
++{
++ return read_otherend_details(xendev, "backend-id", "backend");
++}
++
++
++/* Bus type for frontend drivers. */
++static struct xen_bus_type xenbus_frontend = {
++ .root = "device",
++ .levels = 2, /* device/type/<id> */
++ .get_bus_id = frontend_bus_id,
++ .probe = xenbus_probe_frontend,
++ .bus = {
++ .name = "xen",
++ .match = xenbus_match,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++ .probe = xenbus_dev_probe,
++ .remove = xenbus_dev_remove,
++ .shutdown = xenbus_dev_shutdown,
++#endif
++ },
++ .dev = {
++ .bus_id = "xen",
++ },
++};
++
++static void otherend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ struct xenbus_device *dev =
++ container_of(watch, struct xenbus_device, otherend_watch);
++ struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
++ enum xenbus_state state;
++
++ /* Protect us against watches firing on old details when the otherend
++ details change, say immediately after a resume. */
++ if (!dev->otherend ||
++ strncmp(dev->otherend, vec[XS_WATCH_PATH],
++ strlen(dev->otherend))) {
++ DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]);
++ return;
++ }
++
++ state = xenbus_read_driver_state(dev->otherend);
++
++ DPRINTK("state is %d (%s), %s, %s", state, xenbus_strstate(state),
++ dev->otherend_watch.node, vec[XS_WATCH_PATH]);
++
++ /*
++ * Ignore xenbus transitions during shutdown. This prevents us doing
++ * work that can fail e.g., when the rootfs is gone.
++ */
++ if (system_state > SYSTEM_RUNNING) {
++		struct xen_bus_type *bus =
++			container_of(dev->dev.bus, struct xen_bus_type, bus);
++ /* If we're frontend, drive the state machine to Closed. */
++ /* This should cause the backend to release our resources. */
++ if ((bus == &xenbus_frontend) && (state == XenbusStateClosing))
++ xenbus_frontend_closed(dev);
++ return;
++ }
++
++ if (drv->otherend_changed)
++ drv->otherend_changed(dev, state);
++}
++
++
++static int talk_to_otherend(struct xenbus_device *dev)
++{
++ struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
++
++ free_otherend_watch(dev);
++ free_otherend_details(dev);
++
++ return drv->read_otherend_details(dev);
++}
++
++
++static int watch_otherend(struct xenbus_device *dev)
++{
++ return xenbus_watch_path2(dev, dev->otherend, "state",
++ &dev->otherend_watch, otherend_changed);
++}
++
++
++int xenbus_dev_probe(struct device *_dev)
++{
++ struct xenbus_device *dev = to_xenbus_device(_dev);
++ struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
++ const struct xenbus_device_id *id;
++ int err;
++
++ DPRINTK("%s", dev->nodename);
++
++ if (!drv->probe) {
++ err = -ENODEV;
++ goto fail;
++ }
++
++ id = match_device(drv->ids, dev);
++ if (!id) {
++ err = -ENODEV;
++ goto fail;
++ }
++
++ err = talk_to_otherend(dev);
++ if (err) {
++ printk(KERN_WARNING
++ "xenbus_probe: talk_to_otherend on %s failed.\n",
++ dev->nodename);
++ return err;
++ }
++
++ err = drv->probe(dev, id);
++ if (err)
++ goto fail;
++
++ err = watch_otherend(dev);
++ if (err) {
++ printk(KERN_WARNING
++ "xenbus_probe: watch_otherend on %s failed.\n",
++ dev->nodename);
++ return err;
++ }
++
++ return 0;
++fail:
++ xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
++ xenbus_switch_state(dev, XenbusStateClosed);
++ return -ENODEV;
++}
++
++int xenbus_dev_remove(struct device *_dev)
++{
++ struct xenbus_device *dev = to_xenbus_device(_dev);
++ struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
++
++ DPRINTK("%s", dev->nodename);
++
++ free_otherend_watch(dev);
++ free_otherend_details(dev);
++
++ if (drv->remove)
++ drv->remove(dev);
++
++ xenbus_switch_state(dev, XenbusStateClosed);
++ return 0;
++}
++
++static void xenbus_dev_shutdown(struct device *_dev)
++{
++ struct xenbus_device *dev = to_xenbus_device(_dev);
++ unsigned long timeout = 5*HZ;
++
++ DPRINTK("%s", dev->nodename);
++
++ get_device(&dev->dev);
++ if (dev->state != XenbusStateConnected) {
++ printk("%s: %s: %s != Connected, skipping\n", __FUNCTION__,
++ dev->nodename, xenbus_strstate(dev->state));
++ goto out;
++ }
++ xenbus_switch_state(dev, XenbusStateClosing);
++ timeout = wait_for_completion_timeout(&dev->down, timeout);
++ if (!timeout)
++ printk("%s: %s timeout closing device\n", __FUNCTION__, dev->nodename);
++ out:
++ put_device(&dev->dev);
++}
++
++int xenbus_register_driver_common(struct xenbus_driver *drv,
++ struct xen_bus_type *bus)
++{
++ int ret;
++
++ if (bus->error)
++ return bus->error;
++
++ drv->driver.name = drv->name;
++ drv->driver.bus = &bus->bus;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
++ drv->driver.owner = drv->owner;
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++ drv->driver.probe = xenbus_dev_probe;
++ drv->driver.remove = xenbus_dev_remove;
++ drv->driver.shutdown = xenbus_dev_shutdown;
++#endif
++
++ mutex_lock(&xenwatch_mutex);
++ ret = driver_register(&drv->driver);
++ mutex_unlock(&xenwatch_mutex);
++ return ret;
++}
++
++int xenbus_register_frontend(struct xenbus_driver *drv)
++{
++ int ret;
++
++ drv->read_otherend_details = read_backend_details;
++
++ ret = xenbus_register_driver_common(drv, &xenbus_frontend);
++ if (ret)
++ return ret;
++
++ /* If this driver is loaded as a module wait for devices to attach. */
++ wait_for_devices(drv);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(xenbus_register_frontend);
++
++void xenbus_unregister_driver(struct xenbus_driver *drv)
++{
++ driver_unregister(&drv->driver);
++}
++EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
++
++struct xb_find_info
++{
++ struct xenbus_device *dev;
++ const char *nodename;
++};
++
++static int cmp_dev(struct device *dev, void *data)
++{
++ struct xenbus_device *xendev = to_xenbus_device(dev);
++ struct xb_find_info *info = data;
++
++ if (!strcmp(xendev->nodename, info->nodename)) {
++ info->dev = xendev;
++ get_device(dev);
++ return 1;
++ }
++ return 0;
++}
++
++struct xenbus_device *xenbus_device_find(const char *nodename,
++ struct bus_type *bus)
++{
++ struct xb_find_info info = { .dev = NULL, .nodename = nodename };
++
++ bus_for_each_dev(bus, NULL, &info, cmp_dev);
++ return info.dev;
++}
++
++static int cleanup_dev(struct device *dev, void *data)
++{
++ struct xenbus_device *xendev = to_xenbus_device(dev);
++ struct xb_find_info *info = data;
++ int len = strlen(info->nodename);
++
++ DPRINTK("%s", info->nodename);
++
++ /* Match the info->nodename path, or any subdirectory of that path. */
++ if (strncmp(xendev->nodename, info->nodename, len))
++ return 0;
++
++ /* If the node name is longer, ensure it really is a subdirectory. */
++ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
++ return 0;
++
++ info->dev = xendev;
++ get_device(dev);
++ return 1;
++}
++
++static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
++{
++ struct xb_find_info info = { .nodename = path };
++
++ do {
++ info.dev = NULL;
++ bus_for_each_dev(bus, NULL, &info, cleanup_dev);
++ if (info.dev) {
++ device_unregister(&info.dev->dev);
++ put_device(&info.dev->dev);
++ }
++ } while (info.dev);
++}
++
++static void xenbus_dev_release(struct device *dev)
++{
++ if (dev)
++ kfree(to_xenbus_device(dev));
++}
++
++static ssize_t xendev_show_nodename(struct device *dev,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
++ struct device_attribute *attr,
++#endif
++ char *buf)
++{
++ return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
++}
++DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
++
++static ssize_t xendev_show_devtype(struct device *dev,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
++ struct device_attribute *attr,
++#endif
++ char *buf)
++{
++ return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
++}
++DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
++
++
++int xenbus_probe_node(struct xen_bus_type *bus,
++ const char *type,
++ const char *nodename)
++{
++ int err;
++ struct xenbus_device *xendev;
++ size_t stringlen;
++ char *tmpstring;
++
++ enum xenbus_state state = xenbus_read_driver_state(nodename);
++
++ if (bus->error)
++ return bus->error;
++
++ if (state != XenbusStateInitialising) {
++ /* Device is not new, so ignore it. This can happen if a
++ device is going away after switching to Closed. */
++ return 0;
++ }
++
++ stringlen = strlen(nodename) + 1 + strlen(type) + 1;
++ xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
++ if (!xendev)
++ return -ENOMEM;
++
++ xendev->state = XenbusStateInitialising;
++
++ /* Copy the strings into the extra space. */
++
++ tmpstring = (char *)(xendev + 1);
++ strcpy(tmpstring, nodename);
++ xendev->nodename = tmpstring;
++
++ tmpstring += strlen(tmpstring) + 1;
++ strcpy(tmpstring, type);
++ xendev->devicetype = tmpstring;
++ init_completion(&xendev->down);
++
++ xendev->dev.parent = &bus->dev;
++ xendev->dev.bus = &bus->bus;
++ xendev->dev.release = xenbus_dev_release;
++
++ err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename);
++ if (err)
++ goto fail;
++
++ /* Register with generic device framework. */
++ err = device_register(&xendev->dev);
++ if (err)
++ goto fail;
++
++ err = device_create_file(&xendev->dev, &dev_attr_nodename);
++ if (err)
++ goto unregister;
++ err = device_create_file(&xendev->dev, &dev_attr_devtype);
++ if (err)
++ goto unregister;
++
++ return 0;
++unregister:
++ device_remove_file(&xendev->dev, &dev_attr_nodename);
++ device_remove_file(&xendev->dev, &dev_attr_devtype);
++ device_unregister(&xendev->dev);
++fail:
++ kfree(xendev);
++ return err;
++}
++
++/* device/<typename>/<name> */
++static int xenbus_probe_frontend(const char *type, const char *name)
++{
++ char *nodename;
++ int err;
++
++ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name);
++ if (!nodename)
++ return -ENOMEM;
++
++ DPRINTK("%s", nodename);
++
++ err = xenbus_probe_node(&xenbus_frontend, type, nodename);
++ kfree(nodename);
++ return err;
++}
++
++static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
++{
++ int err = 0;
++ char **dir;
++ unsigned int dir_n = 0;
++ int i;
++
++ dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n);
++ if (IS_ERR(dir))
++ return PTR_ERR(dir);
++
++ for (i = 0; i < dir_n; i++) {
++ err = bus->probe(type, dir[i]);
++ if (err)
++ break;
++ }
++ kfree(dir);
++ return err;
++}
++
++int xenbus_probe_devices(struct xen_bus_type *bus)
++{
++ int err = 0;
++ char **dir;
++ unsigned int i, dir_n;
++
++ if (bus->error)
++ return bus->error;
++
++ dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
++ if (IS_ERR(dir))
++ return PTR_ERR(dir);
++
++ for (i = 0; i < dir_n; i++) {
++ err = xenbus_probe_device_type(bus, dir[i]);
++ if (err)
++ break;
++ }
++ kfree(dir);
++ return err;
++}
++
++static unsigned int char_count(const char *str, char c)
++{
++ unsigned int i, ret = 0;
++
++ for (i = 0; str[i]; i++)
++ if (str[i] == c)
++ ret++;
++ return ret;
++}
++
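++/*
++ * Return the offset of the (len+1)th occurrence of 'c' in 'str', the
++ * string length if it contains exactly 'len' occurrences, or -ERANGE if
++ * it contains fewer.
++ */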
++static int strsep_len(const char *str, char c, unsigned int len)
++{
++ unsigned int i;
++
++ for (i = 0; str[i]; i++)
++ if (str[i] == c) {
++ if (len == 0)
++ return i;
++ len--;
++ }
++ return (len == 0) ? i : -ERANGE;
++}
++
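++/*
++ * A node below the bus root has appeared or vanished.  If it vanished,
++ * unregister any devices registered below it; if it appeared and no
++ * device exists for that path yet, probe a new device of the
++ * corresponding type.
++ */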
++void dev_changed(const char *node, struct xen_bus_type *bus)
++{
++ int exists, rootlen;
++ struct xenbus_device *dev;
++ char type[BUS_ID_SIZE];
++ const char *p, *root;
++
++ if (bus->error || char_count(node, '/') < 2)
++ return;
++
++ exists = xenbus_exists(XBT_NIL, node, "");
++ if (!exists) {
++ xenbus_cleanup_devices(node, &bus->bus);
++ return;
++ }
++
++ /* backend/<type>/... or device/<type>/... */
++ p = strchr(node, '/') + 1;
++ snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
++ type[BUS_ID_SIZE-1] = '\0';
++
++ rootlen = strsep_len(node, '/', bus->levels);
++ if (rootlen < 0)
++ return;
++ root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
++ if (!root)
++ return;
++
++ dev = xenbus_device_find(root, &bus->bus);
++ if (!dev)
++ xenbus_probe_node(bus, type, root);
++ else
++ put_device(&dev->dev);
++
++ kfree(root);
++}
++
++static void frontend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ DPRINTK("");
++
++ dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
++}
++
++/* We watch for devices appearing and vanishing. */
++static struct xenbus_watch fe_watch = {
++ .node = "device",
++ .callback = frontend_changed,
++};
++
++static int suspend_dev(struct device *dev, void *data)
++{
++ int err = 0;
++ struct xenbus_driver *drv;
++ struct xenbus_device *xdev;
++
++ DPRINTK("");
++
++ if (dev->driver == NULL)
++ return 0;
++ drv = to_xenbus_driver(dev->driver);
++ xdev = container_of(dev, struct xenbus_device, dev);
++ if (drv->suspend)
++ err = drv->suspend(xdev);
++ if (err)
++ printk(KERN_WARNING
++ "xenbus: suspend %s failed: %i\n", dev->bus_id, err);
++ return 0;
++}
++
++static int suspend_cancel_dev(struct device *dev, void *data)
++{
++ int err = 0;
++ struct xenbus_driver *drv;
++ struct xenbus_device *xdev;
++
++ DPRINTK("");
++
++ if (dev->driver == NULL)
++ return 0;
++ drv = to_xenbus_driver(dev->driver);
++ xdev = container_of(dev, struct xenbus_device, dev);
++ if (drv->suspend_cancel)
++ err = drv->suspend_cancel(xdev);
++ if (err)
++ printk(KERN_WARNING
++ "xenbus: suspend_cancel %s failed: %i\n",
++ dev->bus_id, err);
++ return 0;
++}
++
++static int resume_dev(struct device *dev, void *data)
++{
++ int err;
++ struct xenbus_driver *drv;
++ struct xenbus_device *xdev;
++
++ DPRINTK("");
++
++ if (dev->driver == NULL)
++ return 0;
++
++ drv = to_xenbus_driver(dev->driver);
++ xdev = container_of(dev, struct xenbus_device, dev);
++
++ err = talk_to_otherend(xdev);
++ if (err) {
++ printk(KERN_WARNING
++ "xenbus: resume (talk_to_otherend) %s failed: %i\n",
++ dev->bus_id, err);
++ return err;
++ }
++
++ xdev->state = XenbusStateInitialising;
++
++ if (drv->resume) {
++ err = drv->resume(xdev);
++ if (err) {
++ printk(KERN_WARNING
++ "xenbus: resume %s failed: %i\n",
++ dev->bus_id, err);
++ return err;
++ }
++ }
++
++ err = watch_otherend(xdev);
++ if (err) {
++ printk(KERN_WARNING
++ "xenbus_probe: resume (watch_otherend) %s failed: "
++ "%d.\n", dev->bus_id, err);
++ return err;
++ }
++
++ return 0;
++}
++
++void xenbus_suspend(void)
++{
++ DPRINTK("");
++
++ if (!xenbus_frontend.error)
++ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
++ xenbus_backend_suspend(suspend_dev);
++ xs_suspend();
++}
++EXPORT_SYMBOL_GPL(xenbus_suspend);
++
++void xenbus_resume(void)
++{
++ xb_init_comms();
++ xs_resume();
++ if (!xenbus_frontend.error)
++ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
++ xenbus_backend_resume(resume_dev);
++}
++EXPORT_SYMBOL_GPL(xenbus_resume);
++
++void xenbus_suspend_cancel(void)
++{
++ xs_suspend_cancel();
++ if (!xenbus_frontend.error)
++ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
++ xenbus_backend_resume(suspend_cancel_dev);
++}
++EXPORT_SYMBOL_GPL(xenbus_suspend_cancel);
++
++/* A flag to determine if xenstored is 'ready' (i.e. has started) */
++int xenstored_ready = 0;
++
++
++int register_xenstore_notifier(struct notifier_block *nb)
++{
++ int ret = 0;
++
++ if (xenstored_ready > 0)
++ ret = nb->notifier_call(nb, 0, NULL);
++ else
++ atomic_notifier_chain_register(&xenstore_chain, nb);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(register_xenstore_notifier);
++
++void unregister_xenstore_notifier(struct notifier_block *nb)
++{
++ atomic_notifier_chain_unregister(&xenstore_chain, nb);
++}
++EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
++
++
++void xenbus_probe(void *unused)
++{
++ BUG_ON((xenstored_ready <= 0));
++
++ /* Enumerate devices in xenstore and watch for changes. */
++ xenbus_probe_devices(&xenbus_frontend);
++ register_xenbus_watch(&fe_watch);
++ xenbus_backend_probe_and_watch();
++
++ /* Notify others that xenstore is up */
++ atomic_notifier_call_chain(&xenstore_chain, 0, NULL);
++}
++
++
++#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
++static struct file_operations xsd_kva_fops;
++static struct proc_dir_entry *xsd_kva_intf;
++static struct proc_dir_entry *xsd_port_intf;
++
++static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ size_t size = vma->vm_end - vma->vm_start;
++
++ if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
++ return -EINVAL;
++
++ if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn),
++ size, vma->vm_page_prot))
++ return -EAGAIN;
++
++ return 0;
++}
++
++static int xsd_kva_read(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int len;
++
++ len = sprintf(page, "0x%p", xen_store_interface);
++ *eof = 1;
++ return len;
++}
++
++static int xsd_port_read(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int len;
++
++ len = sprintf(page, "%d", xen_store_evtchn);
++ *eof = 1;
++ return len;
++}
++#endif
++
++static int xenbus_probe_init(void)
++{
++ int err = 0;
++ unsigned long page = 0;
++
++ DPRINTK("");
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ /* Register ourselves with the kernel bus subsystem */
++ xenbus_frontend.error = bus_register(&xenbus_frontend.bus);
++ if (xenbus_frontend.error)
++ printk(KERN_WARNING
++ "XENBUS: Error registering frontend bus: %i\n",
++ xenbus_frontend.error);
++ xenbus_backend_bus_register();
++
++ /*
++ * Domain0 doesn't have a store_evtchn or store_mfn yet.
++ */
++ if (is_initial_xendomain()) {
++ struct evtchn_alloc_unbound alloc_unbound;
++
++ /* Allocate page. */
++ page = get_zeroed_page(GFP_KERNEL);
++ if (!page)
++ return -ENOMEM;
++
++ xen_store_mfn = xen_start_info->store_mfn =
++ pfn_to_mfn(virt_to_phys((void *)page) >>
++ PAGE_SHIFT);
++
++ /* Next allocate a local port which xenstored can bind to */
++ alloc_unbound.dom = DOMID_SELF;
++ alloc_unbound.remote_dom = 0;
++
++ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
++ &alloc_unbound);
++ if (err == -ENOSYS)
++ goto err;
++ BUG_ON(err);
++ xen_store_evtchn = xen_start_info->store_evtchn =
++ alloc_unbound.port;
++
++#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
++ /* And finally publish the above info in /proc/xen */
++ xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600);
++ if (xsd_kva_intf) {
++ memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops,
++ sizeof(xsd_kva_fops));
++ xsd_kva_fops.mmap = xsd_kva_mmap;
++ xsd_kva_intf->proc_fops = &xsd_kva_fops;
++ xsd_kva_intf->read_proc = xsd_kva_read;
++ }
++ xsd_port_intf = create_xen_proc_entry("xsd_port", 0400);
++ if (xsd_port_intf)
++ xsd_port_intf->read_proc = xsd_port_read;
++#endif
++ xen_store_interface = mfn_to_virt(xen_store_mfn);
++ } else {
++ xenstored_ready = 1;
++#ifdef CONFIG_XEN
++ xen_store_evtchn = xen_start_info->store_evtchn;
++ xen_store_mfn = xen_start_info->store_mfn;
++ xen_store_interface = mfn_to_virt(xen_store_mfn);
++#else
++ xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN);
++ xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN);
++ xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT,
++ PAGE_SIZE);
++#endif
++ }
++
++
++ xenbus_dev_init();
++
++ /* Initialize the interface to xenstore. */
++ err = xs_init();
++ if (err) {
++ printk(KERN_WARNING
++ "XENBUS: Error initializing xenstore comms: %i\n", err);
++ goto err;
++ }
++
++ /* Register ourselves with the kernel device subsystem */
++ if (!xenbus_frontend.error) {
++ xenbus_frontend.error = device_register(&xenbus_frontend.dev);
++ if (xenbus_frontend.error) {
++ bus_unregister(&xenbus_frontend.bus);
++ printk(KERN_WARNING
++ "XENBUS: Error registering frontend device: %i\n",
++ xenbus_frontend.error);
++ }
++ }
++ xenbus_backend_device_register();
++
++ if (!is_initial_xendomain())
++ xenbus_probe(NULL);
++
++ return 0;
++
++ err:
++ if (page)
++ free_page(page);
++
++ /*
++ * Do not unregister the xenbus front/backend buses here. The buses
++ * must exist because front/backend drivers will use them when they are
++ * registered.
++ */
++
++ return err;
++}
++
++#ifdef CONFIG_XEN
++postcore_initcall(xenbus_probe_init);
++MODULE_LICENSE("Dual BSD/GPL");
++#else
++int xenbus_init(void)
++{
++ return xenbus_probe_init();
++}
++#endif
++
++static int is_disconnected_device(struct device *dev, void *data)
++{
++ struct xenbus_device *xendev = to_xenbus_device(dev);
++ struct device_driver *drv = data;
++
++ /*
++ * A device with no driver will never connect. We care only about
++ * devices which should currently be in the process of connecting.
++ */
++ if (!dev->driver)
++ return 0;
++
++ /* Is this search limited to a particular driver? */
++ if (drv && (dev->driver != drv))
++ return 0;
++
++ return (xendev->state != XenbusStateConnected);
++}
++
++static int exists_disconnected_device(struct device_driver *drv)
++{
++ if (xenbus_frontend.error)
++ return xenbus_frontend.error;
++ return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
++ is_disconnected_device);
++}
++
++static int print_device_status(struct device *dev, void *data)
++{
++ struct xenbus_device *xendev = to_xenbus_device(dev);
++ struct device_driver *drv = data;
++
++ /* Is this operation limited to a particular driver? */
++ if (drv && (dev->driver != drv))
++ return 0;
++
++ if (!dev->driver) {
++ /* Information only: is this too noisy? */
++ printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
++ xendev->nodename);
++ } else if (xendev->state != XenbusStateConnected) {
++ printk(KERN_WARNING "XENBUS: Timeout connecting "
++ "to device: %s (state %d)\n",
++ xendev->nodename, xendev->state);
++ }
++
++ return 0;
++}
++
++/* We only wait for device setup after most initcalls have run. */
++static int ready_to_wait_for_devices;
++
++/*
++ * On a 10 second timeout, wait for all devices currently configured. We need
++ * to do this to guarantee that the filesystems and / or network devices
++ * needed for boot are available, before we can allow the boot to proceed.
++ *
++ * This needs to be on a late_initcall, to happen after the frontend device
++ * drivers have been initialised, but before the root fs is mounted.
++ *
++ * A possible improvement here would be to have the tools add a per-device
++ * flag to the store entry, indicating whether it is needed at boot time.
++ * This would allow people who knew what they were doing to accelerate their
++ * boot slightly, but of course needs tools or manual intervention to set up
++ * those flags correctly.
++ */
++static void wait_for_devices(struct xenbus_driver *xendrv)
++{
++ unsigned long timeout = jiffies + 10*HZ;
++ struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
++
++ if (!ready_to_wait_for_devices || !is_running_on_xen())
++ return;
++
++ while (exists_disconnected_device(drv)) {
++ if (time_after(jiffies, timeout))
++ break;
++ schedule_timeout_interruptible(HZ/10);
++ }
++
++ bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
++ print_device_status);
++}
++
++#ifndef MODULE
++static int __init boot_wait_for_devices(void)
++{
++ if (!xenbus_frontend.error) {
++ ready_to_wait_for_devices = 1;
++ wait_for_devices(NULL);
++ }
++ return 0;
++}
++
++late_initcall(boot_wait_for_devices);
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/xenbus/xenbus_probe.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,75 @@
++/******************************************************************************
++ * xenbus_probe.h
++ *
++ * Talks to Xen Store to figure out what devices we have.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * Copyright (C) 2005 XenSource Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef _XENBUS_PROBE_H
++#define _XENBUS_PROBE_H
++
++#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE)
++extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
++extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
++extern void xenbus_backend_probe_and_watch(void);
++extern void xenbus_backend_bus_register(void);
++extern void xenbus_backend_device_register(void);
++#else
++static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
++static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
++static inline void xenbus_backend_probe_and_watch(void) {}
++static inline void xenbus_backend_bus_register(void) {}
++static inline void xenbus_backend_device_register(void) {}
++#endif
++
++struct xen_bus_type
++{
++ char *root;
++ int error;
++ unsigned int levels;
++ int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
++ int (*probe)(const char *type, const char *dir);
++ struct bus_type bus;
++ struct device dev;
++};
++
++extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
++extern int xenbus_dev_probe(struct device *_dev);
++extern int xenbus_dev_remove(struct device *_dev);
++extern int xenbus_register_driver_common(struct xenbus_driver *drv,
++ struct xen_bus_type *bus);
++extern int xenbus_probe_node(struct xen_bus_type *bus,
++ const char *type,
++ const char *nodename);
++extern int xenbus_probe_devices(struct xen_bus_type *bus);
++
++extern void dev_changed(const char *node, struct xen_bus_type *bus);
++
++#endif
++
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/xenbus/xenbus_probe_backend.c 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,286 @@
++/******************************************************************************
++ * Talks to Xen Store to figure out what devices we have (backend half).
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
++ * Copyright (C) 2005, 2006 XenSource Ltd
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#define DPRINTK(fmt, args...) \
++ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
++ __FUNCTION__, __LINE__, ##args)
++
++#include <linux/kernel.h>
++#include <linux/err.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <linux/fcntl.h>
++#include <linux/mm.h>
++#include <linux/notifier.h>
++#include <linux/kthread.h>
++
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/maddr.h>
++#include <asm/pgtable.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/xen_proc.h>
++#include <xen/evtchn.h>
++#include <xen/features.h>
++#include <xen/hvm.h>
++
++#include "xenbus_comms.h"
++#include "xenbus_probe.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++static int xenbus_uevent_backend(struct device *dev, char **envp,
++ int num_envp, char *buffer, int buffer_size);
++static int xenbus_probe_backend(const char *type, const char *domid);
++
++extern int read_otherend_details(struct xenbus_device *xendev,
++ char *id_node, char *path_node);
++
++static int read_frontend_details(struct xenbus_device *xendev)
++{
++ return read_otherend_details(xendev, "frontend-id", "frontend");
++}
++
++/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
++static int backend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
++{
++ int domid, err;
++ const char *devid, *type, *frontend;
++ unsigned int typelen;
++
++ type = strchr(nodename, '/');
++ if (!type)
++ return -EINVAL;
++ type++;
++ typelen = strcspn(type, "/");
++ if (!typelen || type[typelen] != '/')
++ return -EINVAL;
++
++ devid = strrchr(nodename, '/') + 1;
++
++ err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid,
++ "frontend", NULL, &frontend,
++ NULL);
++ if (err)
++ return err;
++ if (strlen(frontend) == 0)
++ err = -ERANGE;
++ if (!err && !xenbus_exists(XBT_NIL, frontend, ""))
++ err = -ENOENT;
++ kfree(frontend);
++
++ if (err)
++ return err;
++
++ if (snprintf(bus_id, BUS_ID_SIZE,
++ "%.*s-%i-%s", typelen, type, domid, devid) >= BUS_ID_SIZE)
++ return -ENOSPC;
++ return 0;
++}
++
++static struct xen_bus_type xenbus_backend = {
++ .root = "backend",
++ .levels = 3, /* backend/type/<frontend>/<id> */
++ .get_bus_id = backend_bus_id,
++ .probe = xenbus_probe_backend,
++ .bus = {
++ .name = "xen-backend",
++ .match = xenbus_match,
++ .probe = xenbus_dev_probe,
++ .remove = xenbus_dev_remove,
++// .shutdown = xenbus_dev_shutdown,
++ .uevent = xenbus_uevent_backend,
++ },
++ .dev = {
++ .bus_id = "xen-backend",
++ },
++};
++
++static int xenbus_uevent_backend(struct device *dev, char **envp,
++ int num_envp, char *buffer, int buffer_size)
++{
++ struct xenbus_device *xdev;
++ struct xenbus_driver *drv;
++ int i = 0;
++ int length = 0;
++
++ DPRINTK("");
++
++ if (dev == NULL)
++ return -ENODEV;
++
++ xdev = to_xenbus_device(dev);
++ if (xdev == NULL)
++ return -ENODEV;
++
++ /* stuff we want to pass to /sbin/hotplug */
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "XENBUS_TYPE=%s", xdev->devicetype);
++
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "XENBUS_PATH=%s", xdev->nodename);
++
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "XENBUS_BASE_PATH=%s", xenbus_backend.root);
++
++ /* terminate, set to next free slot, shrink available space */
++ envp[i] = NULL;
++ envp = &envp[i];
++ num_envp -= i;
++ buffer = &buffer[length];
++ buffer_size -= length;
++
++ if (dev->driver) {
++ drv = to_xenbus_driver(dev->driver);
++ if (drv && drv->uevent)
++ return drv->uevent(xdev, envp, num_envp, buffer,
++ buffer_size);
++ }
++
++ return 0;
++}
++
++int xenbus_register_backend(struct xenbus_driver *drv)
++{
++ drv->read_otherend_details = read_frontend_details;
++
++ return xenbus_register_driver_common(drv, &xenbus_backend);
++}
++EXPORT_SYMBOL_GPL(xenbus_register_backend);
++
++/* backend/<typename>/<frontend-uuid>/<name> */
++static int xenbus_probe_backend_unit(const char *dir,
++ const char *type,
++ const char *name)
++{
++ char *nodename;
++ int err;
++
++ nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
++ if (!nodename)
++ return -ENOMEM;
++
++ DPRINTK("%s\n", nodename);
++
++ err = xenbus_probe_node(&xenbus_backend, type, nodename);
++ kfree(nodename);
++ return err;
++}
++
++/* backend/<typename>/<frontend-domid> */
++static int xenbus_probe_backend(const char *type, const char *domid)
++{
++ char *nodename;
++ int err = 0;
++ char **dir;
++ unsigned int i, dir_n = 0;
++
++ DPRINTK("");
++
++ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_backend.root, type, domid);
++ if (!nodename)
++ return -ENOMEM;
++
++ dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n);
++ if (IS_ERR(dir)) {
++ kfree(nodename);
++ return PTR_ERR(dir);
++ }
++
++ for (i = 0; i < dir_n; i++) {
++ err = xenbus_probe_backend_unit(nodename, type, dir[i]);
++ if (err)
++ break;
++ }
++ kfree(dir);
++ kfree(nodename);
++ return err;
++}
++
++static void backend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ DPRINTK("");
++
++ dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
++}
++
++static struct xenbus_watch be_watch = {
++ .node = "backend",
++ .callback = backend_changed,
++};
++
++void xenbus_backend_suspend(int (*fn)(struct device *, void *))
++{
++ DPRINTK("");
++ if (!xenbus_backend.error)
++ bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn);
++}
++
++void xenbus_backend_resume(int (*fn)(struct device *, void *))
++{
++ DPRINTK("");
++ if (!xenbus_backend.error)
++ bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn);
++}
++
++void xenbus_backend_probe_and_watch(void)
++{
++ xenbus_probe_devices(&xenbus_backend);
++ register_xenbus_watch(&be_watch);
++}
++
++void xenbus_backend_bus_register(void)
++{
++ xenbus_backend.error = bus_register(&xenbus_backend.bus);
++ if (xenbus_backend.error)
++ printk(KERN_WARNING
++ "XENBUS: Error registering backend bus: %i\n",
++ xenbus_backend.error);
++}
++
++void xenbus_backend_device_register(void)
++{
++ if (xenbus_backend.error)
++ return;
++
++ xenbus_backend.error = device_register(&xenbus_backend.dev);
++ if (xenbus_backend.error) {
++ bus_unregister(&xenbus_backend.bus);
++ printk(KERN_WARNING
++ "XENBUS: Error registering backend device: %i\n",
++ xenbus_backend.error);
++ }
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/xenbus/xenbus_xs.c 2007-08-27 14:02:10.000000000 -0400
+@@ -0,0 +1,880 @@
++/******************************************************************************
++ * xenbus_xs.c
++ *
++ * This is the kernel equivalent of the "xs" library. We don't need everything
++ * and we use xenbus_comms for communication.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/unistd.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/uio.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/err.h>
++#include <linux/slab.h>
++#include <linux/fcntl.h>
++#include <linux/kthread.h>
++#include <linux/rwsem.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <xen/xenbus.h>
++#include "xenbus_comms.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++struct xs_stored_msg {
++ struct list_head list;
++
++ struct xsd_sockmsg hdr;
++
++ union {
++ /* Queued replies. */
++ struct {
++ char *body;
++ } reply;
++
++ /* Queued watch events. */
++ struct {
++ struct xenbus_watch *handle;
++ char **vec;
++ unsigned int vec_size;
++ } watch;
++ } u;
++};
++
++struct xs_handle {
++ /* A list of replies. Currently only one will ever be outstanding. */
++ struct list_head reply_list;
++ spinlock_t reply_lock;
++ wait_queue_head_t reply_waitq;
++
++ /*
++ * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex.
++ * response_mutex is never taken simultaneously with the other three.
++ */
++
++ /* One request at a time. */
++ struct mutex request_mutex;
++
++ /* Protect xenbus reader thread against save/restore. */
++ struct mutex response_mutex;
++
++ /* Protect transactions against save/restore. */
++ struct rw_semaphore transaction_mutex;
++
++ /* Protect watch (de)register against save/restore. */
++ struct rw_semaphore watch_mutex;
++};
++
++static struct xs_handle xs_state;
++
++/* List of registered watches, and a lock to protect it. */
++static LIST_HEAD(watches);
++static DEFINE_SPINLOCK(watches_lock);
++
++/* List of pending watch callback events, and a lock to protect it. */
++static LIST_HEAD(watch_events);
++static DEFINE_SPINLOCK(watch_events_lock);
++
++/*
++ * Details of the xenwatch callback kernel thread. The thread waits on the
++ * watch_events_waitq for work to do (queued on watch_events list). When it
++ * wakes up it acquires the xenwatch_mutex before reading the list and
++ * carrying out work.
++ */
++static pid_t xenwatch_pid;
++/* static */ DEFINE_MUTEX(xenwatch_mutex);
++static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
++
++static int get_error(const char *errorstring)
++{
++ unsigned int i;
++
++ for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
++ if (i == ARRAY_SIZE(xsd_errors) - 1) {
++ printk(KERN_WARNING
++ "XENBUS xen store gave: unknown error %s",
++ errorstring);
++ return EINVAL;
++ }
++ }
++ return xsd_errors[i].errnum;
++}
++
++static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
++{
++ struct xs_stored_msg *msg;
++ char *body;
++
++ spin_lock(&xs_state.reply_lock);
++
++ while (list_empty(&xs_state.reply_list)) {
++ spin_unlock(&xs_state.reply_lock);
++ /* XXX FIXME: Avoid synchronous wait for response here. */
++ wait_event(xs_state.reply_waitq,
++ !list_empty(&xs_state.reply_list));
++ spin_lock(&xs_state.reply_lock);
++ }
++
++ msg = list_entry(xs_state.reply_list.next,
++ struct xs_stored_msg, list);
++ list_del(&msg->list);
++
++ spin_unlock(&xs_state.reply_lock);
++
++ *type = msg->hdr.type;
++ if (len)
++ *len = msg->hdr.len;
++ body = msg->u.reply.body;
++
++ kfree(msg);
++
++ return body;
++}
++
++void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
++{
++ void *ret;
++ struct xsd_sockmsg req_msg = *msg;
++ int err;
++
++ if (req_msg.type == XS_TRANSACTION_START)
++ down_read(&xs_state.transaction_mutex);
++
++ mutex_lock(&xs_state.request_mutex);
++
++ err = xb_write(msg, sizeof(*msg) + msg->len);
++ if (err) {
++ msg->type = XS_ERROR;
++ ret = ERR_PTR(err);
++ } else
++ ret = read_reply(&msg->type, &msg->len);
++
++ mutex_unlock(&xs_state.request_mutex);
++
++ if ((req_msg.type == XS_TRANSACTION_END) ||
++ ((req_msg.type == XS_TRANSACTION_START) &&
++ (msg->type == XS_ERROR)))
++ up_read(&xs_state.transaction_mutex);
++
++ return ret;
++}
++
++/* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */
++static void *xs_talkv(struct xenbus_transaction t,
++ enum xsd_sockmsg_type type,
++ const struct kvec *iovec,
++ unsigned int num_vecs,
++ unsigned int *len)
++{
++ struct xsd_sockmsg msg;
++ void *ret = NULL;
++ unsigned int i;
++ int err;
++
++ msg.tx_id = t.id;
++ msg.req_id = 0;
++ msg.type = type;
++ msg.len = 0;
++ for (i = 0; i < num_vecs; i++)
++ msg.len += iovec[i].iov_len;
++
++ mutex_lock(&xs_state.request_mutex);
++
++ err = xb_write(&msg, sizeof(msg));
++ if (err) {
++ mutex_unlock(&xs_state.request_mutex);
++ return ERR_PTR(err);
++ }
++
++ for (i = 0; i < num_vecs; i++) {
++ err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
++ if (err) {
++ mutex_unlock(&xs_state.request_mutex);
++ return ERR_PTR(err);
++ }
++ }
++
++ ret = read_reply(&msg.type, len);
++
++ mutex_unlock(&xs_state.request_mutex);
++
++ if (IS_ERR(ret))
++ return ret;
++
++ if (msg.type == XS_ERROR) {
++ err = get_error(ret);
++ kfree(ret);
++ return ERR_PTR(-err);
++ }
++
++ if (msg.type != type) {
++ if (printk_ratelimit())
++ printk(KERN_WARNING
++ "XENBUS unexpected type [%d], expected [%d]\n",
++ msg.type, type);
++ kfree(ret);
++ return ERR_PTR(-EINVAL);
++ }
++ return ret;
++}
++
++/* Simplified version of xs_talkv: single message. */
++static void *xs_single(struct xenbus_transaction t,
++ enum xsd_sockmsg_type type,
++ const char *string,
++ unsigned int *len)
++{
++ struct kvec iovec;
++
++ iovec.iov_base = (void *)string;
++ iovec.iov_len = strlen(string) + 1;
++ return xs_talkv(t, type, &iovec, 1, len);
++}
++
++/* Many commands only need an ack, don't care what it says. */
++static int xs_error(char *reply)
++{
++ if (IS_ERR(reply))
++ return PTR_ERR(reply);
++ kfree(reply);
++ return 0;
++}
++
++static unsigned int count_strings(const char *strings, unsigned int len)
++{
++ unsigned int num;
++ const char *p;
++
++ for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
++ num++;
++
++ return num;
++}
++
++/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */
++static char *join(const char *dir, const char *name)
++{
++ char *buffer;
++
++ if (strlen(name) == 0)
++ buffer = kasprintf(GFP_KERNEL, "%s", dir);
++ else
++ buffer = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
++ return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
++}
++
++static char **split(char *strings, unsigned int len, unsigned int *num)
++{
++ char *p, **ret;
++
++ /* Count the strings. */
++ *num = count_strings(strings, len);
++
++ /* Transfer to one big alloc for easy freeing. */
++ ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL);
++ if (!ret) {
++ kfree(strings);
++ return ERR_PTR(-ENOMEM);
++ }
++ memcpy(&ret[*num], strings, len);
++ kfree(strings);
++
++ strings = (char *)&ret[*num];
++ for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
++ ret[(*num)++] = p;
++
++ return ret;
++}
++
++char **xenbus_directory(struct xenbus_transaction t,
++ const char *dir, const char *node, unsigned int *num)
++{
++ char *strings, *path;
++ unsigned int len;
++
++ path = join(dir, node);
++ if (IS_ERR(path))
++ return (char **)path;
++
++ strings = xs_single(t, XS_DIRECTORY, path, &len);
++ kfree(path);
++ if (IS_ERR(strings))
++ return (char **)strings;
++
++ return split(strings, len, num);
++}
++EXPORT_SYMBOL_GPL(xenbus_directory);
++
++/* Check if a path exists. Return 1 if it does. */
++int xenbus_exists(struct xenbus_transaction t,
++ const char *dir, const char *node)
++{
++ char **d;
++ unsigned int dir_n;
++
++ d = xenbus_directory(t, dir, node, &dir_n);
++ if (IS_ERR(d))
++ return 0;
++ kfree(d);
++ return 1;
++}
++EXPORT_SYMBOL_GPL(xenbus_exists);
++
++/* Get the value of a single file.
++ * Returns a kmalloced value: call kfree() on it after use.
++ * len indicates length in bytes.
++ */
++void *xenbus_read(struct xenbus_transaction t,
++ const char *dir, const char *node, unsigned int *len)
++{
++ char *path;
++ void *ret;
++
++ path = join(dir, node);
++ if (IS_ERR(path))
++ return (void *)path;
++
++ ret = xs_single(t, XS_READ, path, len);
++ kfree(path);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_read);
++
++/* Write the value of a single file.
++ * Returns -err on failure.
++ */
++int xenbus_write(struct xenbus_transaction t,
++ const char *dir, const char *node, const char *string)
++{
++ const char *path;
++ struct kvec iovec[2];
++ int ret;
++
++ path = join(dir, node);
++ if (IS_ERR(path))
++ return PTR_ERR(path);
++
++ iovec[0].iov_base = (void *)path;
++ iovec[0].iov_len = strlen(path) + 1;
++ iovec[1].iov_base = (void *)string;
++ iovec[1].iov_len = strlen(string);
++
++ ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
++ kfree(path);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_write);
++
++/* Create a new directory. */
++int xenbus_mkdir(struct xenbus_transaction t,
++ const char *dir, const char *node)
++{
++ char *path;
++ int ret;
++
++ path = join(dir, node);
++ if (IS_ERR(path))
++ return PTR_ERR(path);
++
++ ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
++ kfree(path);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_mkdir);
++
++/* Destroy a file or directory (directories must be empty). */
++int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node)
++{
++ char *path;
++ int ret;
++
++ path = join(dir, node);
++ if (IS_ERR(path))
++ return PTR_ERR(path);
++
++ ret = xs_error(xs_single(t, XS_RM, path, NULL));
++ kfree(path);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_rm);
++
++/* Start a transaction: changes by others will not be seen during this
++ * transaction, and changes will not be visible to others until end.
++ */
++int xenbus_transaction_start(struct xenbus_transaction *t)
++{
++ char *id_str;
++
++ down_read(&xs_state.transaction_mutex);
++
++ id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
++ if (IS_ERR(id_str)) {
++ up_read(&xs_state.transaction_mutex);
++ return PTR_ERR(id_str);
++ }
++
++ t->id = simple_strtoul(id_str, NULL, 0);
++ kfree(id_str);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(xenbus_transaction_start);
++
++/* End a transaction.
++ * If abort is true, the transaction is discarded instead of committed.
++ */
++int xenbus_transaction_end(struct xenbus_transaction t, int abort)
++{
++ char abortstr[2];
++ int err;
++
++ if (abort)
++ strcpy(abortstr, "F");
++ else
++ strcpy(abortstr, "T");
++
++ err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
++
++ up_read(&xs_state.transaction_mutex);
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(xenbus_transaction_end);
++
++/* Single read and scanf: returns -errno or num scanned. */
++int xenbus_scanf(struct xenbus_transaction t,
++ const char *dir, const char *node, const char *fmt, ...)
++{
++ va_list ap;
++ int ret;
++ char *val;
++
++ val = xenbus_read(t, dir, node, NULL);
++ if (IS_ERR(val))
++ return PTR_ERR(val);
++
++ va_start(ap, fmt);
++ ret = vsscanf(val, fmt, ap);
++ va_end(ap);
++ kfree(val);
++ /* Distinctive errno. */
++ if (ret == 0)
++ return -ERANGE;
++ return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_scanf);
++
++/* Single printf and write: returns -errno or 0. */
++int xenbus_printf(struct xenbus_transaction t,
++ const char *dir, const char *node, const char *fmt, ...)
++{
++ va_list ap;
++ int ret;
++#define PRINTF_BUFFER_SIZE 4096
++ char *printf_buffer;
++
++ printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
++ if (printf_buffer == NULL)
++ return -ENOMEM;
++
++ va_start(ap, fmt);
++ ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
++ va_end(ap);
++
++ BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
++ ret = xenbus_write(t, dir, node, printf_buffer);
++
++ kfree(printf_buffer);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_printf);
++
++/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
++int xenbus_gather(struct xenbus_transaction t, const char *dir, ...)
++{
++ va_list ap;
++ const char *name;
++ int ret = 0;
++
++ va_start(ap, dir);
++ while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
++ const char *fmt = va_arg(ap, char *);
++ void *result = va_arg(ap, void *);
++ char *p;
++
++ p = xenbus_read(t, dir, name, NULL);
++ if (IS_ERR(p)) {
++ ret = PTR_ERR(p);
++ break;
++ }
++ if (fmt) {
++ if (sscanf(p, fmt, result) == 0)
++ ret = -EINVAL;
++ kfree(p);
++ } else
++ *(char **)result = p;
++ }
++ va_end(ap);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_gather);
++
++static int xs_watch(const char *path, const char *token)
++{
++ struct kvec iov[2];
++
++ iov[0].iov_base = (void *)path;
++ iov[0].iov_len = strlen(path) + 1;
++ iov[1].iov_base = (void *)token;
++ iov[1].iov_len = strlen(token) + 1;
++
++ return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov,
++ ARRAY_SIZE(iov), NULL));
++}
++
++static int xs_unwatch(const char *path, const char *token)
++{
++ struct kvec iov[2];
++
++ iov[0].iov_base = (char *)path;
++ iov[0].iov_len = strlen(path) + 1;
++ iov[1].iov_base = (char *)token;
++ iov[1].iov_len = strlen(token) + 1;
++
++ return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov,
++ ARRAY_SIZE(iov), NULL));
++}
++
++static struct xenbus_watch *find_watch(const char *token)
++{
++ struct xenbus_watch *i, *cmp;
++
++ cmp = (void *)simple_strtoul(token, NULL, 16);
++
++ list_for_each_entry(i, &watches, list)
++ if (i == cmp)
++ return i;
++
++ return NULL;
++}
++
++/* Register callback to watch this node. */
++int register_xenbus_watch(struct xenbus_watch *watch)
++{
++ /* Pointer in ascii is the token. */
++ char token[sizeof(watch) * 2 + 1];
++ int err;
++
++ sprintf(token, "%lX", (long)watch);
++
++ down_read(&xs_state.watch_mutex);
++
++ spin_lock(&watches_lock);
++ BUG_ON(find_watch(token));
++ list_add(&watch->list, &watches);
++ spin_unlock(&watches_lock);
++
++ err = xs_watch(watch->node, token);
++
++ /* Ignore errors due to multiple registration. */
++ if ((err != 0) && (err != -EEXIST)) {
++ spin_lock(&watches_lock);
++ list_del(&watch->list);
++ spin_unlock(&watches_lock);
++ }
++
++ up_read(&xs_state.watch_mutex);
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(register_xenbus_watch);
++
++void unregister_xenbus_watch(struct xenbus_watch *watch)
++{
++ struct xs_stored_msg *msg, *tmp;
++ char token[sizeof(watch) * 2 + 1];
++ int err;
++
++ sprintf(token, "%lX", (long)watch);
++
++ down_read(&xs_state.watch_mutex);
++
++ spin_lock(&watches_lock);
++ BUG_ON(!find_watch(token));
++ list_del(&watch->list);
++ spin_unlock(&watches_lock);
++
++ err = xs_unwatch(watch->node, token);
++ if (err)
++ printk(KERN_WARNING
++ "XENBUS Failed to release watch %s: %i\n",
++ watch->node, err);
++
++ up_read(&xs_state.watch_mutex);
++
++ /* Cancel pending watch events. */
++ spin_lock(&watch_events_lock);
++ list_for_each_entry_safe(msg, tmp, &watch_events, list) {
++ if (msg->u.watch.handle != watch)
++ continue;
++ list_del(&msg->list);
++ kfree(msg->u.watch.vec);
++ kfree(msg);
++ }
++ spin_unlock(&watch_events_lock);
++
++ /* Flush any currently-executing callback, unless we are it. :-) */
++ if (current->pid != xenwatch_pid) {
++ mutex_lock(&xenwatch_mutex);
++ mutex_unlock(&xenwatch_mutex);
++ }
++}
++EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
++
++void xs_suspend(void)
++{
++ down_write(&xs_state.transaction_mutex);
++ down_write(&xs_state.watch_mutex);
++ mutex_lock(&xs_state.request_mutex);
++ mutex_lock(&xs_state.response_mutex);
++}
++
++void xs_resume(void)
++{
++ struct xenbus_watch *watch;
++ char token[sizeof(watch) * 2 + 1];
++
++ mutex_unlock(&xs_state.response_mutex);
++ mutex_unlock(&xs_state.request_mutex);
++ up_write(&xs_state.transaction_mutex);
++
++ /* No need for watches_lock: the watch_mutex is sufficient. */
++ list_for_each_entry(watch, &watches, list) {
++ sprintf(token, "%lX", (long)watch);
++ xs_watch(watch->node, token);
++ }
++
++ up_write(&xs_state.watch_mutex);
++}
++
++void xs_suspend_cancel(void)
++{
++ mutex_unlock(&xs_state.response_mutex);
++ mutex_unlock(&xs_state.request_mutex);
++ up_write(&xs_state.watch_mutex);
++ up_write(&xs_state.transaction_mutex);
++}
++
++static int xenwatch_handle_callback(void *data)
++{
++ struct xs_stored_msg *msg = data;
++
++ msg->u.watch.handle->callback(msg->u.watch.handle,
++ (const char **)msg->u.watch.vec,
++ msg->u.watch.vec_size);
++
++ kfree(msg->u.watch.vec);
++ kfree(msg);
++
++ /* Kill this kthread if we were spawned just for this callback. */
++ if (current->pid != xenwatch_pid)
++ do_exit(0);
++
++ return 0;
++}
++
++static int xenwatch_thread(void *unused)
++{
++ struct list_head *ent;
++ struct xs_stored_msg *msg;
++
++ for (;;) {
++ wait_event_interruptible(watch_events_waitq,
++ !list_empty(&watch_events));
++
++ if (kthread_should_stop())
++ break;
++
++ mutex_lock(&xenwatch_mutex);
++
++ spin_lock(&watch_events_lock);
++ ent = watch_events.next;
++ if (ent != &watch_events)
++ list_del(ent);
++ spin_unlock(&watch_events_lock);
++
++ if (ent != &watch_events) {
++ msg = list_entry(ent, struct xs_stored_msg, list);
++ if (msg->u.watch.handle->flags & XBWF_new_thread)
++ kthread_run(xenwatch_handle_callback,
++ msg, "xenwatch_cb");
++ else
++ xenwatch_handle_callback(msg);
++ }
++
++ mutex_unlock(&xenwatch_mutex);
++ }
++
++ return 0;
++}
++
++static int process_msg(void)
++{
++ struct xs_stored_msg *msg;
++ char *body;
++ int err;
++
++ /*
++ * We must disallow save/restore while reading a xenstore message.
++ * A partial read across s/r leaves us out of sync with xenstored.
++ */
++ for (;;) {
++ err = xb_wait_for_data_to_read();
++ if (err)
++ return err;
++ mutex_lock(&xs_state.response_mutex);
++ if (xb_data_to_read())
++ break;
++ /* We raced with save/restore: pending data 'disappeared'. */
++ mutex_unlock(&xs_state.response_mutex);
++ }
++
++
++ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
++ if (msg == NULL) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xb_read(&msg->hdr, sizeof(msg->hdr));
++ if (err) {
++ kfree(msg);
++ goto out;
++ }
++
++ body = kmalloc(msg->hdr.len + 1, GFP_KERNEL);
++ if (body == NULL) {
++ kfree(msg);
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xb_read(body, msg->hdr.len);
++ if (err) {
++ kfree(body);
++ kfree(msg);
++ goto out;
++ }
++ body[msg->hdr.len] = '\0';
++
++ if (msg->hdr.type == XS_WATCH_EVENT) {
++ msg->u.watch.vec = split(body, msg->hdr.len,
++ &msg->u.watch.vec_size);
++ if (IS_ERR(msg->u.watch.vec)) {
++ kfree(msg);
++ err = PTR_ERR(msg->u.watch.vec);
++ goto out;
++ }
++
++ spin_lock(&watches_lock);
++ msg->u.watch.handle = find_watch(
++ msg->u.watch.vec[XS_WATCH_TOKEN]);
++ if (msg->u.watch.handle != NULL) {
++ spin_lock(&watch_events_lock);
++ list_add_tail(&msg->list, &watch_events);
++ wake_up(&watch_events_waitq);
++ spin_unlock(&watch_events_lock);
++ } else {
++ kfree(msg->u.watch.vec);
++ kfree(msg);
++ }
++ spin_unlock(&watches_lock);
++ } else {
++ msg->u.reply.body = body;
++ spin_lock(&xs_state.reply_lock);
++ list_add_tail(&msg->list, &xs_state.reply_list);
++ spin_unlock(&xs_state.reply_lock);
++ wake_up(&xs_state.reply_waitq);
++ }
++
++ out:
++ mutex_unlock(&xs_state.response_mutex);
++ return err;
++}
++
++static int xenbus_thread(void *unused)
++{
++ int err;
++
++ for (;;) {
++ err = process_msg();
++ if (err)
++ printk(KERN_WARNING "XENBUS error %d while reading "
++ "message\n", err);
++ if (kthread_should_stop())
++ break;
++ }
++
++ return 0;
++}
++
++int xs_init(void)
++{
++ int err;
++ struct task_struct *task;
++
++ INIT_LIST_HEAD(&xs_state.reply_list);
++ spin_lock_init(&xs_state.reply_lock);
++ init_waitqueue_head(&xs_state.reply_waitq);
++
++ mutex_init(&xs_state.request_mutex);
++ mutex_init(&xs_state.response_mutex);
++ init_rwsem(&xs_state.transaction_mutex);
++ init_rwsem(&xs_state.watch_mutex);
++
++ /* Initialize the shared memory rings to talk to xenstored */
++ err = xb_init_comms();
++ if (err)
++ return err;
++
++ task = kthread_run(xenwatch_thread, NULL, "xenwatch");
++ if (IS_ERR(task))
++ return PTR_ERR(task);
++ xenwatch_pid = task->pid;
++
++ task = kthread_run(xenbus_thread, NULL, "xenbus");
++ if (IS_ERR(task))
++ return PTR_ERR(task);
++
++ return 0;
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/xenoprof/xenoprofile.c 2007-08-27 14:02:03.000000000 -0400
+@@ -0,0 +1,500 @@
++/**
++ * @file xenoprofile.c
++ *
++ * @remark Copyright 2002 OProfile authors
++ * @remark Read the file COPYING
++ *
++ * @author John Levon <levon@movementarian.org>
++ *
++ * Modified by Aravind Menon and Jose Renato Santos for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ *
++ * Separated out arch-generic part
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ */
++
++#include <linux/init.h>
++#include <linux/notifier.h>
++#include <linux/smp.h>
++#include <linux/oprofile.h>
++#include <linux/sysdev.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/vmalloc.h>
++#include <asm/pgtable.h>
++#include <xen/evtchn.h>
++#include <xen/xenoprof.h>
++#include <xen/driver_util.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/xenoprof.h>
++#include "../../../drivers/oprofile/cpu_buffer.h"
++#include "../../../drivers/oprofile/event_buffer.h"
++
++#define MAX_XENOPROF_SAMPLES 16
++
++/* sample buffers shared with Xen */
++xenoprof_buf_t * xenoprof_buf[MAX_VIRT_CPUS];
++/* Shared buffer area */
++struct xenoprof_shared_buffer shared_buffer;
++
++/* Passive sample buffers shared with Xen */
++xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
++/* Passive shared buffer area */
++struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];
++
++static int xenoprof_start(void);
++static void xenoprof_stop(void);
++
++static int xenoprof_enabled = 0;
++static int xenoprof_is_primary = 0;
++static int active_defined;
++
++/* Number of buffers in shared area (one per VCPU) */
++int nbuf;
++/* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
++int ovf_irq[NR_CPUS];
++/* cpu model type string - copied from Xen memory space on XENOPROF_init command */
++char cpu_type[XENOPROF_CPU_TYPE_SIZE];
++
++#ifdef CONFIG_PM
++
++static int xenoprof_suspend(struct sys_device * dev, pm_message_t state)
++{
++ if (xenoprof_enabled == 1)
++ xenoprof_stop();
++ return 0;
++}
++
++
++static int xenoprof_resume(struct sys_device * dev)
++{
++ if (xenoprof_enabled == 1)
++ xenoprof_start();
++ return 0;
++}
++
++
++static struct sysdev_class oprofile_sysclass = {
++ set_kset_name("oprofile"),
++ .resume = xenoprof_resume,
++ .suspend = xenoprof_suspend
++};
++
++
++static struct sys_device device_oprofile = {
++ .id = 0,
++ .cls = &oprofile_sysclass,
++};
++
++
++static int __init init_driverfs(void)
++{
++ int error;
++ if (!(error = sysdev_class_register(&oprofile_sysclass)))
++ error = sysdev_register(&device_oprofile);
++ return error;
++}
++
++
++static void exit_driverfs(void)
++{
++ sysdev_unregister(&device_oprofile);
++ sysdev_class_unregister(&oprofile_sysclass);
++}
++
++#else
++#define init_driverfs() do { } while (0)
++#define exit_driverfs() do { } while (0)
++#endif /* CONFIG_PM */
++
++unsigned long long oprofile_samples = 0;
++unsigned long long p_oprofile_samples = 0;
++
++unsigned int pdomains;
++struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
++
++static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
++{
++ int head, tail, size;
++
++ head = buf->event_head;
++ tail = buf->event_tail;
++ size = buf->event_size;
++
++ if (tail > head) {
++ while (tail < size) {
++ oprofile_add_pc(buf->event_log[tail].eip,
++ buf->event_log[tail].mode,
++ buf->event_log[tail].event);
++ if (!is_passive)
++ oprofile_samples++;
++ else
++ p_oprofile_samples++;
++ tail++;
++ }
++ tail = 0;
++ }
++ while (tail < head) {
++ oprofile_add_pc(buf->event_log[tail].eip,
++ buf->event_log[tail].mode,
++ buf->event_log[tail].event);
++ if (!is_passive)
++ oprofile_samples++;
++ else
++ p_oprofile_samples++;
++ tail++;
++ }
++
++ buf->event_tail = tail;
++}
++
++static void xenoprof_handle_passive(void)
++{
++ int i, j;
++ int flag_domain, flag_switch = 0;
++
++ for (i = 0; i < pdomains; i++) {
++ flag_domain = 0;
++ for (j = 0; j < passive_domains[i].nbuf; j++) {
++ xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
++ if (buf->event_head == buf->event_tail)
++ continue;
++ if (!flag_domain) {
++ if (!oprofile_add_domain_switch(passive_domains[i].
++ domain_id))
++ goto done;
++ flag_domain = 1;
++ }
++ xenoprof_add_pc(buf, 1);
++ flag_switch = 1;
++ }
++ }
++done:
++ if (flag_switch)
++ oprofile_add_domain_switch(COORDINATOR_DOMAIN);
++}
++
++static irqreturn_t
++xenoprof_ovf_interrupt(int irq, void * dev_id, struct pt_regs * regs)
++{
++ struct xenoprof_buf * buf;
++ int cpu;
++ static unsigned long flag;
++
++ cpu = smp_processor_id();
++ buf = xenoprof_buf[cpu];
++
++ xenoprof_add_pc(buf, 0);
++
++ if (xenoprof_is_primary && !test_and_set_bit(0, &flag)) {
++ xenoprof_handle_passive();
++ smp_mb__before_clear_bit();
++ clear_bit(0, &flag);
++ }
++
++ return IRQ_HANDLED;
++}
++
++
++static void unbind_virq(void)
++{
++ int i;
++
++ for_each_online_cpu(i) {
++ if (ovf_irq[i] >= 0) {
++ unbind_from_irqhandler(ovf_irq[i], NULL);
++ ovf_irq[i] = -1;
++ }
++ }
++}
++
++
++static int bind_virq(void)
++{
++ int i, result;
++
++ for_each_online_cpu(i) {
++ result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
++ i,
++ xenoprof_ovf_interrupt,
++ SA_INTERRUPT,
++ "xenoprof",
++ NULL);
++
++ if (result < 0) {
++ unbind_virq();
++ return result;
++ }
++
++ ovf_irq[i] = result;
++ }
++
++ return 0;
++}
++
++
++static void unmap_passive_list(void)
++{
++ int i;
++ for (i = 0; i < pdomains; i++)
++ xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
++ pdomains = 0;
++}
++
++
++static int map_xenoprof_buffer(int max_samples)
++{
++ struct xenoprof_get_buffer get_buffer;
++ struct xenoprof_buf *buf;
++ int ret, i;
++
++ if ( shared_buffer.buffer )
++ return 0;
++
++ get_buffer.max_samples = max_samples;
++ ret = xenoprof_arch_map_shared_buffer(&get_buffer, &shared_buffer);
++ if (ret)
++ return ret;
++ nbuf = get_buffer.nbuf;
++
++ for (i=0; i< nbuf; i++) {
++ buf = (struct xenoprof_buf*)
++ &shared_buffer.buffer[i * get_buffer.bufsize];
++ BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
++ xenoprof_buf[buf->vcpu_id] = buf;
++ }
++
++ return 0;
++}
++
++
++static int xenoprof_setup(void)
++{
++ int ret;
++
++ if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )
++ return ret;
++
++ if ( (ret = bind_virq()) )
++ return ret;
++
++ if (xenoprof_is_primary) {
++ /* Define dom0 as an active domain if not done yet */
++ if (!active_defined) {
++ domid_t domid;
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
++ if (ret)
++ goto err;
++ domid = 0;
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
++ if (ret)
++ goto err;
++ active_defined = 1;
++ }
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
++ if (ret)
++ goto err;
++ xenoprof_arch_counter();
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
++
++ if (ret)
++ goto err;
++ }
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
++ if (ret)
++ goto err;
++
++ xenoprof_enabled = 1;
++ return 0;
++ err:
++ unbind_virq();
++ return ret;
++}
++
++
++static void xenoprof_shutdown(void)
++{
++ xenoprof_enabled = 0;
++
++ HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL);
++
++ if (xenoprof_is_primary) {
++ HYPERVISOR_xenoprof_op(XENOPROF_release_counters, NULL);
++ active_defined = 0;
++ }
++
++ unbind_virq();
++
++ xenoprof_arch_unmap_shared_buffer(&shared_buffer);
++ if (xenoprof_is_primary)
++ unmap_passive_list();
++}
++
++
++static int xenoprof_start(void)
++{
++ int ret = 0;
++
++ if (xenoprof_is_primary)
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
++ if (!ret)
++ xenoprof_arch_start();
++ return ret;
++}
++
++
++static void xenoprof_stop(void)
++{
++ if (xenoprof_is_primary)
++ HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL);
++ xenoprof_arch_stop();
++}
++
++
++static int xenoprof_set_active(int * active_domains,
++ unsigned int adomains)
++{
++ int ret = 0;
++ int i;
++ int set_dom0 = 0;
++ domid_t domid;
++
++ if (!xenoprof_is_primary)
++ return 0;
++
++ if (adomains > MAX_OPROF_DOMAINS)
++ return -E2BIG;
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
++ if (ret)
++ return ret;
++
++ for (i=0; i<adomains; i++) {
++ domid = active_domains[i];
++ if (domid != active_domains[i]) {
++ ret = -EINVAL;
++ goto out;
++ }
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
++ if (ret)
++ goto out;
++ if (active_domains[i] == 0)
++ set_dom0 = 1;
++ }
++ /* dom0 must always be active but may not be in the list */
++ if (!set_dom0) {
++ domid = 0;
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
++ }
++
++out:
++ if (ret)
++ HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
++ active_defined = !ret;
++ return ret;
++}
++
++static int xenoprof_set_passive(int * p_domains,
++ unsigned int pdoms)
++{
++ int ret;
++ int i, j;
++ struct xenoprof_buf *buf;
++
++ if (!xenoprof_is_primary)
++ return 0;
++
++ if (pdoms > MAX_OPROF_DOMAINS)
++ return -E2BIG;
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
++ if (ret)
++ return ret;
++ unmap_passive_list();
++
++ for (i = 0; i < pdoms; i++) {
++ passive_domains[i].domain_id = p_domains[i];
++ passive_domains[i].max_samples = 2048;
++ ret = xenoprof_arch_set_passive(&passive_domains[i],
++ &p_shared_buffer[i]);
++ if (ret)
++ goto out;
++ for (j = 0; j < passive_domains[i].nbuf; j++) {
++ buf = (struct xenoprof_buf *)
++ &p_shared_buffer[i].buffer[j * passive_domains[i].bufsize];
++ BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
++ p_xenoprof_buf[i][buf->vcpu_id] = buf;
++ }
++ }
++
++ pdomains = pdoms;
++ return 0;
++
++out:
++ for (j = 0; j < i; j++)
++ xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[j]);
++
++ return ret;
++}
++
++struct oprofile_operations xenoprof_ops = {
++#ifdef HAVE_XENOPROF_CREATE_FILES
++ .create_files = xenoprof_create_files,
++#endif
++ .set_active = xenoprof_set_active,
++ .set_passive = xenoprof_set_passive,
++ .setup = xenoprof_setup,
++ .shutdown = xenoprof_shutdown,
++ .start = xenoprof_start,
++ .stop = xenoprof_stop
++};
++
++
++/* in order to get driverfs right */
++static int using_xenoprof;
++
++int __init xenoprofile_init(struct oprofile_operations * ops)
++{
++ struct xenoprof_init init;
++ int ret, i;
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
++ if (!ret) {
++ xenoprof_arch_init_counter(&init);
++ xenoprof_is_primary = init.is_primary;
++
++ /* cpu_type is detected by Xen */
++ cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
++ strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
++ xenoprof_ops.cpu_type = cpu_type;
++
++ init_driverfs();
++ using_xenoprof = 1;
++ *ops = xenoprof_ops;
++
++ for (i=0; i<NR_CPUS; i++)
++ ovf_irq[i] = -1;
++
++ active_defined = 0;
++ }
++ printk(KERN_INFO "%s: ret %d, events %d, xenoprof_is_primary %d\n",
++ __func__, ret, init.num_events, xenoprof_is_primary);
++ return ret;
++}
++
++
++void xenoprofile_exit(void)
++{
++ if (using_xenoprof)
++ exit_driverfs();
++
++ xenoprof_arch_unmap_shared_buffer(&shared_buffer);
++ if (xenoprof_is_primary) {
++ unmap_passive_list();
++ HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL);
++ }
++}
diff --git a/trunk/2.6.22/20013_xen3-auto-include-xen-interface.patch1 b/trunk/2.6.22/20013_xen3-auto-include-xen-interface.patch1
new file mode 100644
index 0000000..c89eb88
--- /dev/null
+++ b/trunk/2.6.22/20013_xen3-auto-include-xen-interface.patch1
@@ -0,0 +1,8771 @@
+Subject: xen3 interface headers
+From: http://xenbits.xensource.com/xen-3.1-testing.hg (tip 15042)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+---
+ include/xen/interface/COPYING | 38 +
+ include/xen/interface/acm.h | 228 ++++++++++
+ include/xen/interface/acm_ops.h | 166 +++++++
+ include/xen/interface/arch-ia64.h | 504 +++++++++++++++++++++++
+ include/xen/interface/arch-powerpc.h | 121 +++++
+ include/xen/interface/arch-x86/xen-x86_32.h | 168 +++++++
+ include/xen/interface/arch-x86/xen-x86_64.h | 211 +++++++++
+ include/xen/interface/arch-x86/xen.h | 204 +++++++++
+ include/xen/interface/arch-x86_32.h | 27 +
+ include/xen/interface/arch-x86_64.h | 27 +
+ include/xen/interface/callback.h | 92 ++++
+ include/xen/interface/domctl.h | 478 +++++++++++++++++++++
+ include/xen/interface/elfnote.h | 233 ++++++++++
+ include/xen/interface/elfstructs.h | 527 ++++++++++++++++++++++++
+ include/xen/interface/event_channel.h | 264 ++++++++++++
+ include/xen/interface/features.h | 71 +++
+ include/xen/interface/grant_table.h | 399 ++++++++++++++++++
+ include/xen/interface/hvm/e820.h | 47 ++
+ include/xen/interface/hvm/hvm_info_table.h | 41 +
+ include/xen/interface/hvm/hvm_op.h | 73 +++
+ include/xen/interface/hvm/ioreq.h | 122 +++++
+ include/xen/interface/hvm/params.h | 55 ++
+ include/xen/interface/hvm/save.h | 462 +++++++++++++++++++++
+ include/xen/interface/hvm/vmx_assist.h | 116 +++++
+ include/xen/interface/io/blkif.h | 128 +++++
+ include/xen/interface/io/console.h | 51 ++
+ include/xen/interface/io/fbif.h | 138 ++++++
+ include/xen/interface/io/kbdif.h | 130 +++++
+ include/xen/interface/io/netif.h | 184 ++++++++
+ include/xen/interface/io/pciif.h | 83 +++
+ include/xen/interface/io/protocols.h | 21
+ include/xen/interface/io/ring.h | 299 +++++++++++++
+ include/xen/interface/io/tpmif.h | 77 +++
+ include/xen/interface/io/xenbus.h | 73 +++
+ include/xen/interface/io/xs_wire.h | 117 +++++
+ include/xen/interface/kexec.h | 137 ++++++
+ include/xen/interface/libelf.h | 241 +++++++++++
+ include/xen/interface/memory.h | 281 ++++++++++++
+ include/xen/interface/nmi.h | 78 +++
+ include/xen/interface/physdev.h | 169 +++++++
+ include/xen/interface/platform.h | 143 ++++++
+ include/xen/interface/sched.h | 121 +++++
+ include/xen/interface/sysctl.h | 182 ++++++++
+ include/xen/interface/trace.h | 119 +++++
+ include/xen/interface/vcpu.h | 192 ++++++++
+ include/xen/interface/version.h | 91 ++++
+ include/xen/interface/xen-compat.h | 51 ++
+ include/xen/interface/xen.h | 610 ++++++++++++++++++++++++++++
+ include/xen/interface/xencomm.h | 41 +
+ include/xen/interface/xenoprof.h | 132 ++++++
+ 50 files changed, 8563 insertions(+)
+
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/COPYING 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,38 @@
++XEN NOTICE
++==========
++
++This copyright applies to all files within this subdirectory and its
++subdirectories:
++ include/public/*.h
++ include/public/hvm/*.h
++ include/public/io/*.h
++
++The intention is that these files can be freely copied into the source
++tree of an operating system when porting that OS to run on Xen. Doing
++so does *not* cause the OS to become subject to the terms of the GPL.
++
++All other files in the Xen source distribution are covered by version
++2 of the GNU General Public License except where explicitly stated
++otherwise within individual source files.
++
++ -- Keir Fraser (on behalf of the Xen team)
++
++=====================================================================
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to
++deal in the Software without restriction, including without limitation the
++rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++sell copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++DEALINGS IN THE SOFTWARE.
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/acm.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,228 @@
++/*
++ * acm.h: Xen access control module interface definitions
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Reiner Sailer <sailer@watson.ibm.com>
++ * Copyright (c) 2005, International Business Machines Corporation.
++ */
++
++#ifndef _XEN_PUBLIC_ACM_H
++#define _XEN_PUBLIC_ACM_H
++
++#include "xen.h"
++
++/* If ACM_DEBUG is defined, all hooks should
++ * print a short trace message (comment it out
++ * when not in testing mode).
++ */
++/* #define ACM_DEBUG */
++
++#ifdef ACM_DEBUG
++# define printkd(fmt, args...) printk(fmt,## args)
++#else
++# define printkd(fmt, args...)
++#endif
++
++/* default ssid reference value if not supplied */
++#define ACM_DEFAULT_SSID 0x0
++#define ACM_DEFAULT_LOCAL_SSID 0x0
++
++/* Internal ACM ERROR types */
++#define ACM_OK 0
++#define ACM_UNDEF -1
++#define ACM_INIT_SSID_ERROR -2
++#define ACM_INIT_SOID_ERROR -3
++#define ACM_ERROR -4
++
++/* External ACCESS DECISIONS */
++#define ACM_ACCESS_PERMITTED 0
++#define ACM_ACCESS_DENIED -111
++#define ACM_NULL_POINTER_ERROR -200
++
++/*
++  Error codes reported when trying to test for a new policy.
++  These error codes are reported in an array of tuples where
++  each error code is followed by a parameter describing the error
++  more closely, such as a domain id.
++*/
++#define ACM_EVTCHN_SHARING_VIOLATION 0x100
++#define ACM_GNTTAB_SHARING_VIOLATION 0x101
++#define ACM_DOMAIN_LOOKUP 0x102
++#define ACM_CHWALL_CONFLICT 0x103
++#define ACM_SSIDREF_IN_USE 0x104
++
++
++/* primary policy in lower 4 bits */
++#define ACM_NULL_POLICY 0
++#define ACM_CHINESE_WALL_POLICY 1
++#define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2
++#define ACM_POLICY_UNDEFINED 15
++
++/* combinations have the secondary policy component in the higher 4 bits */
++#define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \
++ ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY)
++
++/* policy: */
++#define ACM_POLICY_NAME(X) \
++ ((X) == (ACM_NULL_POLICY)) ? "NULL" : \
++ ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" : \
++ ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \
++ ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \
++ "UNDEFINED"
++
++/* the following policy versions must be increased
++ * whenever the interpretation of the related
++ * policy's data structure changes
++ */
++#define ACM_POLICY_VERSION 3
++#define ACM_CHWALL_VERSION 1
++#define ACM_STE_VERSION 1
++
++/* defines a ssid reference used by xen */
++typedef uint32_t ssidref_t;
++
++/* hooks that are known to domains */
++#define ACMHOOK_none 0
++#define ACMHOOK_sharing 1
++
++/* -------security policy relevant type definitions-------- */
++
++/* type identifier; compares to "equal" or "not equal" */
++typedef uint16_t domaintype_t;
++
++/* CHINESE WALL POLICY DATA STRUCTURES
++ *
++ * current accumulated conflict type set:
++ * When a domain is started and has a type that is in
++ * a conflict set, the conflicting types are incremented in
++ * the aggregate set. When a domain is destroyed, the
++ * conflicting types to its type are decremented.
++ * If a domain has multiple types, this procedure works over
++ * all those types.
++ *
++ * conflict_aggregate_set[i] holds the number of
++ * running domains that have a conflict with type i.
++ *
++ * running_types[i] holds the number of running domains
++ * that include type i in their ssidref-referenced type set
++ *
++ * conflict_sets[i][j] is "0" if type j has no conflict
++ * with type i and is "1" otherwise.
++ */
++/* high-16 = version, low-16 = check magic */
++#define ACM_MAGIC 0x0001debc
++
++/* Each offset is given in bytes from the start of the
++ * struct it is part of. */
++
++/* V3 of the policy buffer added a version structure */
++struct acm_policy_version
++{
++ uint32_t major;
++ uint32_t minor;
++};
++
++
++/* each buffer consists of all policy information for
++ * the respective policy given in the policy code
++ *
++ * acm_policy_buffer, acm_chwall_policy_buffer,
++ * and acm_ste_policy_buffer need to stay 32-bit aligned
++ * because binary policies are also created with external
++ * tools that assume packed representations (e.g. the java tool)
++ */
++struct acm_policy_buffer {
++ uint32_t policy_version; /* ACM_POLICY_VERSION */
++ uint32_t magic;
++ uint32_t len;
++ uint32_t policy_reference_offset;
++ uint32_t primary_policy_code;
++ uint32_t primary_buffer_offset;
++ uint32_t secondary_policy_code;
++ uint32_t secondary_buffer_offset;
++ struct acm_policy_version xml_pol_version; /* add in V3 */
++};
++
++
++struct acm_policy_reference_buffer {
++ uint32_t len;
++};
++
++struct acm_chwall_policy_buffer {
++ uint32_t policy_version; /* ACM_CHWALL_VERSION */
++ uint32_t policy_code;
++ uint32_t chwall_max_types;
++ uint32_t chwall_max_ssidrefs;
++ uint32_t chwall_max_conflictsets;
++ uint32_t chwall_ssid_offset;
++ uint32_t chwall_conflict_sets_offset;
++ uint32_t chwall_running_types_offset;
++ uint32_t chwall_conflict_aggregate_offset;
++};
++
++struct acm_ste_policy_buffer {
++ uint32_t policy_version; /* ACM_STE_VERSION */
++ uint32_t policy_code;
++ uint32_t ste_max_types;
++ uint32_t ste_max_ssidrefs;
++ uint32_t ste_ssid_offset;
++};
++
++struct acm_stats_buffer {
++ uint32_t magic;
++ uint32_t len;
++ uint32_t primary_policy_code;
++ uint32_t primary_stats_offset;
++ uint32_t secondary_policy_code;
++ uint32_t secondary_stats_offset;
++};
++
++struct acm_ste_stats_buffer {
++ uint32_t ec_eval_count;
++ uint32_t gt_eval_count;
++ uint32_t ec_denied_count;
++ uint32_t gt_denied_count;
++ uint32_t ec_cachehit_count;
++ uint32_t gt_cachehit_count;
++};
++
++struct acm_ssid_buffer {
++ uint32_t len;
++ ssidref_t ssidref;
++ uint32_t policy_reference_offset;
++ uint32_t primary_policy_code;
++ uint32_t primary_max_types;
++ uint32_t primary_types_offset;
++ uint32_t secondary_policy_code;
++ uint32_t secondary_max_types;
++ uint32_t secondary_types_offset;
++};
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
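
For reference, the policy-code packing defined above places the secondary policy in the high nibble and the primary policy in the low nibble, so the combined Chinese Wall + STE code is (2 << 4) | 1 = 0x21. A minimal sketch (not part of the patch; the include path and a userspace build environment for the public headers are assumptions) that checks this and lets ACM_POLICY_NAME() resolve the name:

#include <stdio.h>
#include "xen/interface/acm.h"   /* include path assumed */

int main(void)
{
    unsigned int code = ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY;

    /* (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY
     * == (2 << 4) | 1 == 0x21 */
    printf("code=0x%x name=%s\n", code, ACM_POLICY_NAME(code));
    return 0;
}
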
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/acm_ops.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,166 @@
++/*
++ * acm_ops.h: Xen access control module hypervisor commands
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Reiner Sailer <sailer@watson.ibm.com>
++ * Copyright (c) 2005,2006 International Business Machines Corporation.
++ */
++
++#ifndef __XEN_PUBLIC_ACM_OPS_H__
++#define __XEN_PUBLIC_ACM_OPS_H__
++
++#include "xen.h"
++#include "acm.h"
++
++/*
++ * Make sure you increment the interface version whenever you modify this file!
++ * This makes sure that old versions of acm tools will stop working in a
++ * well-defined way (rather than crashing the machine, for instance).
++ */
++#define ACM_INTERFACE_VERSION 0xAAAA0009
++
++/************************************************************************/
++
++/*
++ * Prototype for this hypercall is:
++ * int acm_op(int cmd, void *args)
++ * @cmd == ACMOP_??? (access control module operation).
++ * @args == Operation-specific extra arguments (NULL if none).
++ */
++
++
++#define ACMOP_setpolicy 1
++struct acm_setpolicy {
++ /* IN */
++ uint32_t interface_version;
++ XEN_GUEST_HANDLE_64(void) pushcache;
++ uint32_t pushcache_size;
++};
++
++
++#define ACMOP_getpolicy 2
++struct acm_getpolicy {
++ /* IN */
++ uint32_t interface_version;
++ XEN_GUEST_HANDLE_64(void) pullcache;
++ uint32_t pullcache_size;
++};
++
++
++#define ACMOP_dumpstats 3
++struct acm_dumpstats {
++ /* IN */
++ uint32_t interface_version;
++ XEN_GUEST_HANDLE_64(void) pullcache;
++ uint32_t pullcache_size;
++};
++
++
++#define ACMOP_getssid 4
++#define ACM_GETBY_ssidref 1
++#define ACM_GETBY_domainid 2
++struct acm_getssid {
++ /* IN */
++ uint32_t interface_version;
++ uint32_t get_ssid_by; /* ACM_GETBY_* */
++ union {
++ domaintype_t domainid;
++ ssidref_t ssidref;
++ } id;
++ XEN_GUEST_HANDLE_64(void) ssidbuf;
++ uint32_t ssidbuf_size;
++};
++
++#define ACMOP_getdecision 5
++struct acm_getdecision {
++ /* IN */
++ uint32_t interface_version;
++ uint32_t get_decision_by1; /* ACM_GETBY_* */
++ uint32_t get_decision_by2; /* ACM_GETBY_* */
++ union {
++ domaintype_t domainid;
++ ssidref_t ssidref;
++ } id1;
++ union {
++ domaintype_t domainid;
++ ssidref_t ssidref;
++ } id2;
++ uint32_t hook;
++ /* OUT */
++ uint32_t acm_decision;
++};
++
++
++#define ACMOP_chgpolicy 6
++struct acm_change_policy {
++ /* IN */
++ uint32_t interface_version;
++ XEN_GUEST_HANDLE_64(void) policy_pushcache;
++ uint32_t policy_pushcache_size;
++ XEN_GUEST_HANDLE_64(void) del_array;
++ uint32_t delarray_size;
++ XEN_GUEST_HANDLE_64(void) chg_array;
++ uint32_t chgarray_size;
++ /* OUT */
++ /* array with error code */
++ XEN_GUEST_HANDLE_64(void) err_array;
++ uint32_t errarray_size;
++};
++
++#define ACMOP_relabeldoms 7
++struct acm_relabel_doms {
++ /* IN */
++ uint32_t interface_version;
++ XEN_GUEST_HANDLE_64(void) relabel_map;
++ uint32_t relabel_map_size;
++ /* OUT */
++ XEN_GUEST_HANDLE_64(void) err_array;
++ uint32_t errarray_size;
++};
++
++/* future interface to Xen */
++struct xen_acmctl {
++ uint32_t cmd;
++ uint32_t interface_version;
++ union {
++ struct acm_setpolicy setpolicy;
++ struct acm_getpolicy getpolicy;
++ struct acm_dumpstats dumpstats;
++ struct acm_getssid getssid;
++ struct acm_getdecision getdecision;
++ struct acm_change_policy change_policy;
++ struct acm_relabel_doms relabel_doms;
++ } u;
++};
++
++typedef struct xen_acmctl xen_acmctl_t;
++DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t);
++
++#endif /* __XEN_PUBLIC_ACM_OPS_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
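
Tying the pieces together: per the prototype documented above (int acm_op(int cmd, void *args)), a caller fills one of these structures and passes it as the extra-argument pointer. Below is a rough sketch of an access-decision query under the sharing hook; the acm_op() wrapper is hypothetical (real callers go through their privcmd/hypercall plumbing) and the include path is assumed.

#include <stdint.h>
#include <string.h>
#include "xen/interface/acm_ops.h"   /* include path assumed */

extern int acm_op(int cmd, void *args);   /* hypothetical hypercall wrapper */

/* Returns 1 if the two domains may share (event channels / grant tables),
 * 0 if denied, -1 on hypercall error. */
static int may_share(uint16_t dom_a, uint16_t dom_b)
{
    struct acm_getdecision op;

    memset(&op, 0, sizeof(op));
    op.interface_version = ACM_INTERFACE_VERSION;
    op.get_decision_by1  = ACM_GETBY_domainid;
    op.get_decision_by2  = ACM_GETBY_domainid;
    op.id1.domainid      = dom_a;
    op.id2.domainid      = dom_b;
    op.hook              = ACMHOOK_sharing;

    if (acm_op(ACMOP_getdecision, &op) != 0)
        return -1;
    return op.acm_decision == ACM_ACCESS_PERMITTED;
}
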
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/arch-ia64.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,504 @@
++/******************************************************************************
++ * arch-ia64/hypervisor-if.h
++ *
++ * Guest OS interface to IA64 Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __HYPERVISOR_IF_IA64_H__
++#define __HYPERVISOR_IF_IA64_H__
++
++/* Structural guest handles introduced in 0x00030201. */
++#if __XEN_INTERFACE_VERSION__ >= 0x00030201
++#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
++ typedef struct { type *p; } __guest_handle_ ## name
++#else
++#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
++ typedef type * __guest_handle_ ## name
++#endif
++
++#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
++#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
++#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
++#ifdef __XEN_TOOLS__
++#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
++#endif
++
++#ifndef __ASSEMBLY__
++/* Guest handles for primitive C types. */
++__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
++__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
++__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
++__DEFINE_XEN_GUEST_HANDLE(u64, unsigned long);
++DEFINE_XEN_GUEST_HANDLE(char);
++DEFINE_XEN_GUEST_HANDLE(int);
++DEFINE_XEN_GUEST_HANDLE(long);
++DEFINE_XEN_GUEST_HANDLE(void);
++
++typedef unsigned long xen_pfn_t;
++DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
++#endif
++
++/* Arch specific VIRQs definition */
++#define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */
++#define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */
++#define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */
++
++/* Arch specific callback irq definition */
++/* using Requester-ID(RID) as callback irq */
++#define IA64_CALLBACK_IRQ_RID (1 << 31)
++
++/* Maximum number of virtual CPUs in multi-processor guests. */
++/* WARNING: before changing this, check that shared_info fits on a page */
++#define MAX_VIRT_CPUS 64
++
++#ifndef __ASSEMBLY__
++
++typedef unsigned long xen_ulong_t;
++
++#define INVALID_MFN (~0UL)
++
++#define MEM_G (1UL << 30)
++#define MEM_M (1UL << 20)
++
++#define MMIO_START (3 * MEM_G)
++#define MMIO_SIZE (512 * MEM_M)
++
++#define VGA_IO_START 0xA0000UL
++#define VGA_IO_SIZE 0x20000
++
++#define LEGACY_IO_START (MMIO_START + MMIO_SIZE)
++#define LEGACY_IO_SIZE (64*MEM_M)
++
++#define IO_PAGE_START (LEGACY_IO_START + LEGACY_IO_SIZE)
++#define IO_PAGE_SIZE PAGE_SIZE
++
++#define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE)
++#define STORE_PAGE_SIZE PAGE_SIZE
++
++#define BUFFER_IO_PAGE_START (STORE_PAGE_START+PAGE_SIZE)
++#define BUFFER_IO_PAGE_SIZE PAGE_SIZE
++
++#define IO_SAPIC_START 0xfec00000UL
++#define IO_SAPIC_SIZE 0x100000
++
++#define PIB_START 0xfee00000UL
++#define PIB_SIZE 0x200000
++
++#define GFW_START (4*MEM_G -16*MEM_M)
++#define GFW_SIZE (16*MEM_M)
++
++struct pt_fpreg {
++ union {
++ unsigned long bits[2];
++ long double __dummy; /* force 16-byte alignment */
++ } u;
++};
++
++struct cpu_user_regs {
++ /* The following registers are saved by SAVE_MIN: */
++ unsigned long b6; /* scratch */
++ unsigned long b7; /* scratch */
++
++ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
++ unsigned long ar_ssd; /* reserved for future use (scratch) */
++
++ unsigned long r8; /* scratch (return value register 0) */
++ unsigned long r9; /* scratch (return value register 1) */
++ unsigned long r10; /* scratch (return value register 2) */
++ unsigned long r11; /* scratch (return value register 3) */
++
++ unsigned long cr_ipsr; /* interrupted task's psr */
++ unsigned long cr_iip; /* interrupted task's instruction pointer */
++ unsigned long cr_ifs; /* interrupted task's function state */
++
++ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
++ unsigned long ar_pfs; /* prev function state */
++ unsigned long ar_rsc; /* RSE configuration */
++ /* The following two are valid only if cr_ipsr.cpl > 0: */
++ unsigned long ar_rnat; /* RSE NaT */
++ unsigned long ar_bspstore; /* RSE bspstore */
++
++ unsigned long pr; /* 64 predicate registers (1 bit each) */
++ unsigned long b0; /* return pointer (bp) */
++ unsigned long loadrs; /* size of dirty partition << 16 */
++
++ unsigned long r1; /* the gp pointer */
++ unsigned long r12; /* interrupted task's memory stack pointer */
++ unsigned long r13; /* thread pointer */
++
++ unsigned long ar_fpsr; /* floating point status (preserved) */
++ unsigned long r15; /* scratch */
++
++ /* The remaining registers are NOT saved for system calls. */
++
++ unsigned long r14; /* scratch */
++ unsigned long r2; /* scratch */
++ unsigned long r3; /* scratch */
++ unsigned long r16; /* scratch */
++ unsigned long r17; /* scratch */
++ unsigned long r18; /* scratch */
++ unsigned long r19; /* scratch */
++ unsigned long r20; /* scratch */
++ unsigned long r21; /* scratch */
++ unsigned long r22; /* scratch */
++ unsigned long r23; /* scratch */
++ unsigned long r24; /* scratch */
++ unsigned long r25; /* scratch */
++ unsigned long r26; /* scratch */
++ unsigned long r27; /* scratch */
++ unsigned long r28; /* scratch */
++ unsigned long r29; /* scratch */
++ unsigned long r30; /* scratch */
++ unsigned long r31; /* scratch */
++ unsigned long ar_ccv; /* compare/exchange value (scratch) */
++
++ /*
++ * Floating point registers that the kernel considers scratch:
++ */
++ struct pt_fpreg f6; /* scratch */
++ struct pt_fpreg f7; /* scratch */
++ struct pt_fpreg f8; /* scratch */
++ struct pt_fpreg f9; /* scratch */
++ struct pt_fpreg f10; /* scratch */
++ struct pt_fpreg f11; /* scratch */
++ unsigned long r4; /* preserved */
++ unsigned long r5; /* preserved */
++ unsigned long r6; /* preserved */
++ unsigned long r7; /* preserved */
++ unsigned long eml_unat; /* used for emulating instruction */
++ unsigned long pad0; /* alignment pad */
++
++};
++typedef struct cpu_user_regs cpu_user_regs_t;
++
++union vac {
++ unsigned long value;
++ struct {
++ int a_int:1;
++ int a_from_int_cr:1;
++ int a_to_int_cr:1;
++ int a_from_psr:1;
++ int a_from_cpuid:1;
++ int a_cover:1;
++ int a_bsw:1;
++ long reserved:57;
++ };
++};
++typedef union vac vac_t;
++
++union vdc {
++ unsigned long value;
++ struct {
++ int d_vmsw:1;
++ int d_extint:1;
++ int d_ibr_dbr:1;
++ int d_pmc:1;
++ int d_to_pmd:1;
++ int d_itm:1;
++ long reserved:58;
++ };
++};
++typedef union vdc vdc_t;
++
++struct mapped_regs {
++ union vac vac;
++ union vdc vdc;
++ unsigned long virt_env_vaddr;
++ unsigned long reserved1[29];
++ unsigned long vhpi;
++ unsigned long reserved2[95];
++ union {
++ unsigned long vgr[16];
++ unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
++ };
++ union {
++ unsigned long vbgr[16];
++ unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
++ };
++ unsigned long vnat;
++ unsigned long vbnat;
++ unsigned long vcpuid[5];
++ unsigned long reserved3[11];
++ unsigned long vpsr;
++ unsigned long vpr;
++ unsigned long reserved4[76];
++ union {
++ unsigned long vcr[128];
++ struct {
++ unsigned long dcr; // CR0
++ unsigned long itm;
++ unsigned long iva;
++ unsigned long rsv1[5];
++ unsigned long pta; // CR8
++ unsigned long rsv2[7];
++ unsigned long ipsr; // CR16
++ unsigned long isr;
++ unsigned long rsv3;
++ unsigned long iip;
++ unsigned long ifa;
++ unsigned long itir;
++ unsigned long iipa;
++ unsigned long ifs;
++ unsigned long iim; // CR24
++ unsigned long iha;
++ unsigned long rsv4[38];
++ unsigned long lid; // CR64
++ unsigned long ivr;
++ unsigned long tpr;
++ unsigned long eoi;
++ unsigned long irr[4];
++ unsigned long itv; // CR72
++ unsigned long pmv;
++ unsigned long cmcv;
++ unsigned long rsv5[5];
++ unsigned long lrr0; // CR80
++ unsigned long lrr1;
++ unsigned long rsv6[46];
++ };
++ };
++ union {
++ unsigned long reserved5[128];
++ struct {
++ unsigned long precover_ifs;
++ unsigned long unat; // not sure if this is needed until NaT arch is done
++ int interrupt_collection_enabled; // virtual psr.ic
++ /* virtual interrupt deliverable flag is evtchn_upcall_mask in
++ * shared info area now. interrupt_mask_addr is the address
++ * of evtchn_upcall_mask for current vcpu
++ */
++ unsigned char *interrupt_mask_addr;
++ int pending_interruption;
++ unsigned char vpsr_pp;
++ unsigned char reserved5_2[3];
++ unsigned long reserved5_1[4];
++ int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual
++ int banknum; // 0 or 1, which virtual register bank is active
++ unsigned long rrs[8]; // region registers
++ unsigned long krs[8]; // kernel registers
++ unsigned long pkrs[8]; // protection key registers
++ unsigned long tmp[8]; // temp registers (e.g. for hyperprivops)
++ };
++ };
++};
++typedef struct mapped_regs mapped_regs_t;
++
++struct vpd {
++ struct mapped_regs vpd_low;
++ unsigned long reserved6[3456];
++ unsigned long vmm_avail[128];
++ unsigned long reserved7[4096];
++};
++typedef struct vpd vpd_t;
++
++struct arch_vcpu_info {
++};
++typedef struct arch_vcpu_info arch_vcpu_info_t;
++
++struct arch_shared_info {
++ /* PFN of the start_info page. */
++ unsigned long start_info_pfn;
++
++ /* Interrupt vector for event channel. */
++ int evtchn_vector;
++
++ uint64_t pad[32];
++};
++typedef struct arch_shared_info arch_shared_info_t;
++
++typedef unsigned long xen_callback_t;
++
++struct ia64_tr_entry {
++ unsigned long pte;
++ unsigned long itir;
++ unsigned long vadr;
++ unsigned long rid;
++};
++
++struct vcpu_extra_regs {
++ struct ia64_tr_entry itrs[8];
++ struct ia64_tr_entry dtrs[8];
++ unsigned long iva;
++ unsigned long dcr;
++ unsigned long event_callback_ip;
++};
++
++struct vcpu_guest_context {
++#define VGCF_EXTRA_REGS (1<<1) /* Get/Set extra regs. */
++ unsigned long flags; /* VGCF_* flags */
++
++ struct cpu_user_regs user_regs;
++ struct vcpu_extra_regs extra_regs;
++ unsigned long privregs_pfn;
++};
++typedef struct vcpu_guest_context vcpu_guest_context_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
++
++/* dom0 vp op */
++#define __HYPERVISOR_ia64_dom0vp_op __HYPERVISOR_arch_0
++/* Map io space in machine address to dom0 physical address space.
++   Currently the assigned physical address equals the machine address. */
++#define IA64_DOM0VP_ioremap 0
++
++/* Convert a pseudo physical page frame number to the corresponding
++   machine page frame number. If no page is assigned, INVALID_MFN or
++   GPFN_INV_MASK is returned depending on the domain's non-vti/vti mode. */
++#define IA64_DOM0VP_phystomach 1
++
++/* Convert a machine page frame number to the corresponding pseudo physical
++ page frame number of the caller domain. */
++#define IA64_DOM0VP_machtophys 3
++
++/* Reserved for future use. */
++#define IA64_DOM0VP_iounmap 4
++
++/* Unmap and free pages contained in the specified pseudo physical region. */
++#define IA64_DOM0VP_zap_physmap 5
++
++/* Assign machine page frame to dom0's pseudo physical address space. */
++#define IA64_DOM0VP_add_physmap 6
++
++/* expose the p2m table into domain */
++#define IA64_DOM0VP_expose_p2m 7
++
++/* xen perfmon */
++#define IA64_DOM0VP_perfmon 8
++
++/* gmfn version of IA64_DOM0VP_add_physmap */
++#define IA64_DOM0VP_add_physmap_with_gmfn 9
++
++// flags for page assignment to pseudo physical address space
++#define _ASSIGN_readonly 0
++#define ASSIGN_readonly (1UL << _ASSIGN_readonly)
++#define ASSIGN_writable (0UL << _ASSIGN_readonly) // dummy flag
++/* Internal only: memory attribute must be WC/UC/UCE. */
++#define _ASSIGN_nocache 1
++#define ASSIGN_nocache (1UL << _ASSIGN_nocache)
++// tlb tracking
++#define _ASSIGN_tlb_track 2
++#define ASSIGN_tlb_track (1UL << _ASSIGN_tlb_track)
++/* Internal only: associated with PGC_allocated bit */
++#define _ASSIGN_pgc_allocated 3
++#define ASSIGN_pgc_allocated (1UL << _ASSIGN_pgc_allocated)
++
++/* This structure has the same layout as struct ia64_boot_param, defined in
++ <asm/system.h>. It is redefined here to ease use. */
++struct xen_ia64_boot_param {
++ unsigned long command_line; /* physical address of cmd line args */
++ unsigned long efi_systab; /* physical address of EFI system table */
++ unsigned long efi_memmap; /* physical address of EFI memory map */
++ unsigned long efi_memmap_size; /* size of EFI memory map */
++ unsigned long efi_memdesc_size; /* size of an EFI memory map descriptor */
++ unsigned int efi_memdesc_version; /* memory descriptor version */
++ struct {
++ unsigned short num_cols; /* number of columns on console. */
++ unsigned short num_rows; /* number of rows on console. */
++ unsigned short orig_x; /* cursor's x position */
++ unsigned short orig_y; /* cursor's y position */
++ } console_info;
++ unsigned long fpswa; /* physical address of the fpswa interface */
++ unsigned long initrd_start;
++ unsigned long initrd_size;
++ unsigned long domain_start; /* va where the boot time domain begins */
++ unsigned long domain_size; /* how big is the boot domain */
++};
++
++#endif /* !__ASSEMBLY__ */
++
++/* Size of the shared_info area (this is not related to page size). */
++#define XSI_SHIFT 14
++#define XSI_SIZE (1 << XSI_SHIFT)
++/* Log size of mapped_regs area (64 KB - only 4KB is used). */
++#define XMAPPEDREGS_SHIFT 12
++#define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT)
++/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */
++#define XMAPPEDREGS_OFS XSI_SIZE
++
++/* Hyperprivops. */
++#define HYPERPRIVOP_START 0x1
++#define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0)
++#define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1)
++#define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2)
++#define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3)
++#define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4)
++#define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5)
++#define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6)
++#define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7)
++#define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8)
++#define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9)
++#define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa)
++#define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb)
++#define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc)
++#define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd)
++#define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe)
++#define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf)
++#define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10)
++#define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11)
++#define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12)
++#define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13)
++#define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14)
++#define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15)
++#define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16)
++#define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17)
++#define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18)
++#define HYPERPRIVOP_MAX (0x19)
++
++/* Fast and light hypercalls. */
++#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1
++
++/* Xencomm macros. */
++#define XENCOMM_INLINE_MASK 0xf800000000000000UL
++#define XENCOMM_INLINE_FLAG 0x8000000000000000UL
++
++#define XENCOMM_IS_INLINE(addr) \
++ (((unsigned long)(addr) & XENCOMM_INLINE_MASK) == XENCOMM_INLINE_FLAG)
++#define XENCOMM_INLINE_ADDR(addr) \
++ ((unsigned long)(addr) & ~XENCOMM_INLINE_MASK)
++
++/* xen perfmon */
++#ifdef XEN
++#ifndef __ASSEMBLY__
++#ifndef _ASM_IA64_PERFMON_H
++
++#include <xen/list.h> // asm/perfmon.h requires struct list_head
++#include <asm/perfmon.h>
++// for PFM_xxx and pfarg_features_t, pfarg_context_t, pfarg_reg_t, pfarg_load_t
++
++#endif /* _ASM_IA64_PERFMON_H */
++
++DEFINE_XEN_GUEST_HANDLE(pfarg_features_t);
++DEFINE_XEN_GUEST_HANDLE(pfarg_context_t);
++DEFINE_XEN_GUEST_HANDLE(pfarg_reg_t);
++DEFINE_XEN_GUEST_HANDLE(pfarg_load_t);
++#endif /* __ASSEMBLY__ */
++#endif /* XEN */
++
++#endif /* __HYPERVISOR_IF_IA64_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
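
To illustrate the xencomm macros above: bit 63 flags an "inline" descriptor, and the top five bits are masked off when recovering the offset. A minimal round-trip sketch (include path assumed, 64-bit build assumed since these are 64-bit constants, and the 0x1234 offset is arbitrary):

#include <stdint.h>
#include "xen/interface/arch-ia64.h"   /* include path assumed */

static int xencomm_inline_roundtrip(void)
{
    unsigned long desc = XENCOMM_INLINE_FLAG | 0x1234UL;

    /* (desc & 0xf800000000000000UL) == 0x8000000000000000UL -> inline */
    if (!XENCOMM_IS_INLINE(desc))
        return 0;

    /* Stripping the mask bits recovers the offset: 0x1234 */
    return XENCOMM_INLINE_ADDR(desc) == 0x1234UL;
}
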
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/arch-powerpc.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,121 @@
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) IBM Corp. 2005, 2006
++ *
++ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
++ */
++
++#ifndef __XEN_PUBLIC_ARCH_PPC_64_H__
++#define __XEN_PUBLIC_ARCH_PPC_64_H__
++
++#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
++ typedef struct { \
++ int __pad[(sizeof (long long) - sizeof (void *)) / sizeof (int)]; \
++ type *p; \
++ } __attribute__((__aligned__(8))) __guest_handle_ ## name
++
++#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
++#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
++#define set_xen_guest_handle(hnd, val) \
++ do { \
++ if (sizeof ((hnd).__pad)) \
++ (hnd).__pad[0] = 0; \
++ (hnd).p = val; \
++ } while (0)
++
++#ifdef __XEN_TOOLS__
++#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
++#endif
++
++#ifndef __ASSEMBLY__
++/* Guest handles for primitive C types. */
++__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
++__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
++__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
++DEFINE_XEN_GUEST_HANDLE(char);
++DEFINE_XEN_GUEST_HANDLE(int);
++DEFINE_XEN_GUEST_HANDLE(long);
++DEFINE_XEN_GUEST_HANDLE(void);
++
++typedef unsigned long long xen_pfn_t;
++DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
++#endif
++
++/*
++ * Pointers and other address fields inside interface structures are padded to
++ * 64 bits. This means that field alignments aren't different between 32- and
++ * 64-bit architectures.
++ */
++/* NB. Multi-level macro ensures __LINE__ is expanded before concatenation. */
++#define __MEMORY_PADDING(_X)
++#define _MEMORY_PADDING(_X) __MEMORY_PADDING(_X)
++#define MEMORY_PADDING _MEMORY_PADDING(__LINE__)
++
++/* And the trap vector is... */
++#define TRAP_INSTR "li 0,-1; sc" /* XXX just "sc"? */
++
++#ifndef __ASSEMBLY__
++
++#define XENCOMM_INLINE_FLAG (1UL << 63)
++
++typedef uint64_t xen_ulong_t;
++
++/* User-accessible registers: need to be saved/restored for every nested Xen
++ * invocation. */
++struct cpu_user_regs
++{
++ uint64_t gprs[32];
++ uint64_t lr;
++ uint64_t ctr;
++ uint64_t srr0;
++ uint64_t srr1;
++ uint64_t pc;
++ uint64_t msr;
++ uint64_t fpscr;
++ uint64_t xer;
++ uint64_t hid4;
++ uint32_t cr;
++ uint32_t entry_vector;
++};
++typedef struct cpu_user_regs cpu_user_regs_t;
++
++typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* XXX timebase */
++
++/* ONLY used to communicate with dom0! See also struct exec_domain. */
++struct vcpu_guest_context {
++ cpu_user_regs_t user_regs; /* User-level CPU registers */
++ uint64_t sdr1; /* Pagetable base */
++ /* XXX etc */
++};
++typedef struct vcpu_guest_context vcpu_guest_context_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
++
++struct arch_shared_info {
++ uint64_t pad[32];
++};
++
++struct arch_vcpu_info {
++};
++
++/* Support for multi-processor guests. */
++#define MAX_VIRT_CPUS 32
++#endif
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/arch-x86/xen-x86_32.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,168 @@
++/******************************************************************************
++ * xen-x86_32.h
++ *
++ * Guest OS interface to x86 32-bit Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2007, K A Fraser
++ */
++
++#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
++#define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
++
++/*
++ * Hypercall interface:
++ * Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5)
++ * Output: %eax
++ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
++ * call hypercall_page + hypercall-number * 32
++ * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx)
++ */
++
++#if __XEN_INTERFACE_VERSION__ < 0x00030203
++/*
++ * Legacy hypercall interface:
++ * As above, except the entry sequence to the hypervisor is:
++ * mov $hypercall-number*32,%eax ; int $0x82
++ */
++#define TRAP_INSTR "int $0x82"
++#endif
++
++/*
++ * These flat segments are in the Xen-private section of every GDT. Since these
++ * are also present in the initial GDT, many OSes will be able to avoid
++ * installing their own GDT.
++ */
++#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
++#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
++#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
++#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
++#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
++#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
++
++#define FLAT_KERNEL_CS FLAT_RING1_CS
++#define FLAT_KERNEL_DS FLAT_RING1_DS
++#define FLAT_KERNEL_SS FLAT_RING1_SS
++#define FLAT_USER_CS FLAT_RING3_CS
++#define FLAT_USER_DS FLAT_RING3_DS
++#define FLAT_USER_SS FLAT_RING3_SS
++
++/*
++ * Virtual addresses beyond this are not modifiable by guest OSes. The
++ * machine->physical mapping table starts at this address, read-only.
++ */
++#ifdef CONFIG_X86_PAE
++#define __HYPERVISOR_VIRT_START 0xF5800000
++#define __MACH2PHYS_VIRT_START 0xF5800000
++#define __MACH2PHYS_VIRT_END 0xF6800000
++#else
++#define __HYPERVISOR_VIRT_START 0xFC000000
++#define __MACH2PHYS_VIRT_START 0xFC000000
++#define __MACH2PHYS_VIRT_END 0xFC400000
++#endif
++
++#ifndef HYPERVISOR_VIRT_START
++#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
++#endif
++
++#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
++#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
++#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2)
++#ifndef machine_to_phys_mapping
++#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START)
++#endif
++
++/* 32-/64-bit invariability for control interfaces (domctl/sysctl). */
++#if defined(__XEN__) || defined(__XEN_TOOLS__)
++#undef __DEFINE_XEN_GUEST_HANDLE
++#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
++ typedef struct { type *p; } \
++ __guest_handle_ ## name; \
++ typedef struct { union { type *p; uint64_aligned_t q; }; } \
++ __guest_handle_64_ ## name
++#undef set_xen_guest_handle
++#define set_xen_guest_handle(hnd, val) \
++ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \
++ (hnd).p = val; \
++ } while ( 0 )
++#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
++#define XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name
++#endif
++
++#ifndef __ASSEMBLY__
++
++struct cpu_user_regs {
++ uint32_t ebx;
++ uint32_t ecx;
++ uint32_t edx;
++ uint32_t esi;
++ uint32_t edi;
++ uint32_t ebp;
++ uint32_t eax;
++ uint16_t error_code; /* private */
++ uint16_t entry_vector; /* private */
++ uint32_t eip;
++ uint16_t cs;
++ uint8_t saved_upcall_mask;
++ uint8_t _pad0;
++ uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
++ uint32_t esp;
++ uint16_t ss, _pad1;
++ uint16_t es, _pad2;
++ uint16_t ds, _pad3;
++ uint16_t fs, _pad4;
++ uint16_t gs, _pad5;
++};
++typedef struct cpu_user_regs cpu_user_regs_t;
++DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
++
++/*
++ * Page-directory addresses above 4GB do not fit into architectural %cr3.
++ * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
++ * must use the following accessor macros to pack/unpack valid MFNs.
++ */
++#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
++#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
++
++struct arch_vcpu_info {
++ unsigned long cr2;
++ unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */
++};
++typedef struct arch_vcpu_info arch_vcpu_info_t;
++
++struct xen_callback {
++ unsigned long cs;
++ unsigned long eip;
++};
++typedef struct xen_callback xen_callback_t;
++
++#endif /* !__ASSEMBLY__ */
++
++#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
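
A quick check of the cr3 packing above: the PAE MFN is rotated so the bits that do not fit in the top 20 bits of %cr3 land in its low 12 bits, and unpacking rotates them back. A minimal sketch (include path assumed; the MFN value is an arbitrary example; this applies to a 32-bit/PAE guest build, where xen.h pulls in these definitions):

#include <assert.h>
#include "xen/interface/xen.h"   /* include path assumed; x86_32 build */

int main(void)
{
    unsigned long mfn = 0x123456;   /* frame at physical 0x123456000, above 4GB */
    unsigned long cr3 = xen_pfn_to_cr3(mfn);

    /* ((0x123456 << 12) | (0x123456 >> 20)) in 32-bit arithmetic == 0x23456001 */
    assert(cr3 == 0x23456001UL);
    assert(xen_cr3_to_pfn(cr3) == mfn);
    return 0;
}
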
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/arch-x86/xen-x86_64.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,211 @@
++/******************************************************************************
++ * xen-x86_64.h
++ *
++ * Guest OS interface to x86 64-bit Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2006, K A Fraser
++ */
++
++#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
++#define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
++
++/*
++ * Hypercall interface:
++ * Input: %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5)
++ * Output: %rax
++ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
++ * call hypercall_page + hypercall-number * 32
++ * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi)
++ */
++
++#if __XEN_INTERFACE_VERSION__ < 0x00030203
++/*
++ * Legacy hypercall interface:
++ * As above, except the entry sequence to the hypervisor is:
++ * mov $hypercall-number*32,%eax ; syscall
++ * Clobbered: %rcx, %r11, argument registers (as above)
++ */
++#define TRAP_INSTR "syscall"
++#endif
++
++/*
++ * 64-bit segment selectors
++ * These flat segments are in the Xen-private section of every GDT. Since these
++ * are also present in the initial GDT, many OSes will be able to avoid
++ * installing their own GDT.
++ */
++
++#define FLAT_RING3_CS32 0xe023 /* GDT index 260 */
++#define FLAT_RING3_CS64 0xe033 /* GDT index 261 */
++#define FLAT_RING3_DS32 0xe02b /* GDT index 262 */
++#define FLAT_RING3_DS64 0x0000 /* NULL selector */
++#define FLAT_RING3_SS32 0xe02b /* GDT index 262 */
++#define FLAT_RING3_SS64 0xe02b /* GDT index 262 */
++
++#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
++#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
++#define FLAT_KERNEL_DS FLAT_KERNEL_DS64
++#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
++#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
++#define FLAT_KERNEL_CS FLAT_KERNEL_CS64
++#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
++#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
++#define FLAT_KERNEL_SS FLAT_KERNEL_SS64
++
++#define FLAT_USER_DS64 FLAT_RING3_DS64
++#define FLAT_USER_DS32 FLAT_RING3_DS32
++#define FLAT_USER_DS FLAT_USER_DS64
++#define FLAT_USER_CS64 FLAT_RING3_CS64
++#define FLAT_USER_CS32 FLAT_RING3_CS32
++#define FLAT_USER_CS FLAT_USER_CS64
++#define FLAT_USER_SS64 FLAT_RING3_SS64
++#define FLAT_USER_SS32 FLAT_RING3_SS32
++#define FLAT_USER_SS FLAT_USER_SS64
++
++#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
++#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
++#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
++#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
++
++#ifndef HYPERVISOR_VIRT_START
++#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
++#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
++#endif
++
++#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
++#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
++#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
++#ifndef machine_to_phys_mapping
++#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
++#endif
++
++#ifndef __ASSEMBLY__
++
++/*
++ * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
++ * @which == SEGBASE_* ; @base == 64-bit base address
++ * Returns 0 on success.
++ */
++#define SEGBASE_FS 0
++#define SEGBASE_GS_USER 1
++#define SEGBASE_GS_KERNEL 2
++#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
++
++/*
++ * int HYPERVISOR_iret(void)
++ * All arguments are on the kernel stack, in the following format.
++ * Never returns if successful. Current kernel context is lost.
++ * The saved CS is mapped as follows:
++ * RING0 -> RING3 kernel mode.
++ * RING1 -> RING3 kernel mode.
++ * RING2 -> RING3 kernel mode.
++ * RING3 -> RING3 user mode.
++ * However, RING0 indicates that the guest kernel should return to itself
++ * directly with
++ * orb $3,1*8(%rsp)
++ * iretq
++ * If flags contains VGCF_in_syscall:
++ * Restore RAX, RIP, RFLAGS, RSP.
++ * Discard R11, RCX, CS, SS.
++ * Otherwise:
++ * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
++ * All other registers are saved on hypercall entry and restored to user.
++ */
++/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
++#define _VGCF_in_syscall 8
++#define VGCF_in_syscall (1<<_VGCF_in_syscall)
++#define VGCF_IN_SYSCALL VGCF_in_syscall
++struct iret_context {
++ /* Top of stack (%rsp at point of hypercall). */
++ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
++ /* Bottom of iret stack frame. */
++};
++
++#ifdef __GNUC__
++/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
++#define __DECL_REG(name) union { \
++ uint64_t r ## name, e ## name; \
++ uint32_t _e ## name; \
++}
++#else
++/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
++#define __DECL_REG(name) uint64_t r ## name
++#endif
++
++struct cpu_user_regs {
++ uint64_t r15;
++ uint64_t r14;
++ uint64_t r13;
++ uint64_t r12;
++ __DECL_REG(bp);
++ __DECL_REG(bx);
++ uint64_t r11;
++ uint64_t r10;
++ uint64_t r9;
++ uint64_t r8;
++ __DECL_REG(ax);
++ __DECL_REG(cx);
++ __DECL_REG(dx);
++ __DECL_REG(si);
++ __DECL_REG(di);
++ uint32_t error_code; /* private */
++ uint32_t entry_vector; /* private */
++ __DECL_REG(ip);
++ uint16_t cs, _pad0[1];
++ uint8_t saved_upcall_mask;
++ uint8_t _pad1[3];
++ __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */
++ __DECL_REG(sp);
++ uint16_t ss, _pad2[3];
++ uint16_t es, _pad3[3];
++ uint16_t ds, _pad4[3];
++ uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */
++ uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */
++};
++typedef struct cpu_user_regs cpu_user_regs_t;
++DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
++
++#undef __DECL_REG
++
++#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
++#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
++
++struct arch_vcpu_info {
++ unsigned long cr2;
++ unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
++};
++typedef struct arch_vcpu_info arch_vcpu_info_t;
++
++typedef unsigned long xen_callback_t;
++
++#endif /* !__ASSEMBLY__ */
++
++#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
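
Under gcc, the __DECL_REG() unions above make the r-names and legacy e-names aliases of the same 64-bit slot, while the underscored _e-name exposes only the low 32 bits. A small sketch (include path assumed; the low-word view assumes the usual little-endian layout of an x86_64 build):

#include <assert.h>
#include "xen/interface/xen.h"   /* include path assumed; x86_64 build */

int main(void)
{
    struct cpu_user_regs regs = { 0 };

    regs.rax = 0x1122334455667788ULL;
    assert(regs.eax  == 0x1122334455667788ULL);  /* alias of the same field */
    assert(regs._eax == 0x55667788U);            /* low 32 bits, little-endian */
    return 0;
}
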
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/arch-x86/xen.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,204 @@
++/******************************************************************************
++ * arch-x86/xen.h
++ *
++ * Guest OS interface to x86 Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2006, K A Fraser
++ */
++
++#ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__
++#define __XEN_PUBLIC_ARCH_X86_XEN_H__
++
++/* Structural guest handles introduced in 0x00030201. */
++#if __XEN_INTERFACE_VERSION__ >= 0x00030201
++#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
++ typedef struct { type *p; } __guest_handle_ ## name
++#else
++#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
++ typedef type * __guest_handle_ ## name
++#endif
++
++#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
++#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
++#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
++#ifdef __XEN_TOOLS__
++#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
++#endif
++
++#if defined(__i386__)
++#include "xen-x86_32.h"
++#elif defined(__x86_64__)
++#include "xen-x86_64.h"
++#endif
++
++#ifndef __ASSEMBLY__
++/* Guest handles for primitive C types. */
++__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
++__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
++__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
++DEFINE_XEN_GUEST_HANDLE(char);
++DEFINE_XEN_GUEST_HANDLE(int);
++DEFINE_XEN_GUEST_HANDLE(long);
++DEFINE_XEN_GUEST_HANDLE(void);
++
++typedef unsigned long xen_pfn_t;
++DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
++#define PRI_xen_pfn "lx"
++#endif
++
++/*
++ * SEGMENT DESCRIPTOR TABLES
++ */
++/*
++ * A number of GDT entries are reserved by Xen. These are not situated at the
++ * start of the GDT because some stupid OSes export hard-coded selector values
++ * in their ABI. These hard-coded values are always near the start of the GDT,
++ * so Xen places itself out of the way, at the far end of the GDT.
++ */
++#define FIRST_RESERVED_GDT_PAGE 14
++#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
++#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
++
++/* Maximum number of virtual CPUs in multi-processor guests. */
++#define MAX_VIRT_CPUS 32
++
++#ifndef __ASSEMBLY__
++
++typedef unsigned long xen_ulong_t;
++
++/*
++ * Send an array of these to HYPERVISOR_set_trap_table().
++ * The privilege level specifies which modes may enter a trap via a software
++ * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
++ * privilege levels as follows:
++ * Level == 0: No one may enter
++ * Level == 1: Kernel may enter
++ * Level == 2: Kernel may enter
++ * Level == 3: Everyone may enter
++ */
++#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
++#define TI_GET_IF(_ti) ((_ti)->flags & 4)
++#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
++#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
++struct trap_info {
++ uint8_t vector; /* exception vector */
++ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
++ uint16_t cs; /* code selector */
++ unsigned long address; /* code offset */
++};
++typedef struct trap_info trap_info_t;
++DEFINE_XEN_GUEST_HANDLE(trap_info_t);
++
++typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
++
++/*
++ * The following is all CPU context. Note that the fpu_ctxt block is filled
++ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
++ */
++struct vcpu_guest_context {
++ /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
++ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
++#define VGCF_I387_VALID (1<<0)
++#define VGCF_IN_KERNEL (1<<2)
++#define _VGCF_i387_valid 0
++#define VGCF_i387_valid (1<<_VGCF_i387_valid)
++#define _VGCF_in_kernel 2
++#define VGCF_in_kernel (1<<_VGCF_in_kernel)
++#define _VGCF_failsafe_disables_events 3
++#define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events)
++#define _VGCF_syscall_disables_events 4
++#define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events)
++#define _VGCF_online 5
++#define VGCF_online (1<<_VGCF_online)
++ unsigned long flags; /* VGCF_* flags */
++ struct cpu_user_regs user_regs; /* User-level CPU registers */
++ struct trap_info trap_ctxt[256]; /* Virtual IDT */
++ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
++ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
++ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
++ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
++ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
++ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
++#ifdef __i386__
++ unsigned long event_callback_cs; /* CS:EIP of event callback */
++ unsigned long event_callback_eip;
++ unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
++ unsigned long failsafe_callback_eip;
++#else
++ unsigned long event_callback_eip;
++ unsigned long failsafe_callback_eip;
++#ifdef __XEN__
++ union {
++ unsigned long syscall_callback_eip;
++ struct {
++ unsigned int event_callback_cs; /* compat CS of event cb */
++ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */
++ };
++ };
++#else
++ unsigned long syscall_callback_eip;
++#endif
++#endif
++ unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
++#ifdef __x86_64__
++ /* Segment base addresses. */
++ uint64_t fs_base;
++ uint64_t gs_base_kernel;
++ uint64_t gs_base_user;
++#endif
++};
++typedef struct vcpu_guest_context vcpu_guest_context_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
++
++struct arch_shared_info {
++ unsigned long max_pfn; /* max pfn that appears in table */
++ /* Frame containing list of mfns containing list of mfns containing p2m. */
++ xen_pfn_t pfn_to_mfn_frame_list_list;
++ unsigned long nmi_reason;
++ uint64_t pad[32];
++};
++typedef struct arch_shared_info arch_shared_info_t;
++
++#endif /* !__ASSEMBLY__ */
++
++/*
++ * Prefix forces emulation of some non-trapping instructions.
++ * Currently only CPUID.
++ */
++#ifdef __ASSEMBLY__
++#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
++#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
++#else
++#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
++#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
++#endif
++
++#endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
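
As a sketch of how the TI_* helpers above are meant to be used, a guest kernel building its virtual IDT entry for a legacy int 0x80 system-call vector might do something like the following. The handler symbol is hypothetical, and handing the finished table to HYPERVISOR_set_trap_table() is left to the guest's hypercall wrappers (include path assumed):

#include "xen/interface/xen.h"   /* include path assumed */

extern void int80_entry(void);   /* hypothetical guest handler */

static void build_int80_trap(struct trap_info *ti)
{
    ti->vector  = 0x80;
    ti->flags   = 0;
    ti->cs      = FLAT_KERNEL_CS;
    ti->address = (unsigned long)int80_entry;
    TI_SET_DPL(ti, 3);   /* ring 3 may enter via the software interrupt */
    TI_SET_IF(ti, 1);    /* request the "clear event enable" behaviour on entry */
}
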
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/arch-x86_32.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,27 @@
++/******************************************************************************
++ * arch-x86_32.h
++ *
++ * Guest OS interface to x86 32-bit Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2006, K A Fraser
++ */
++
++#include "arch-x86/xen.h"
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/arch-x86_64.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,27 @@
++/******************************************************************************
++ * arch-x86_64.h
++ *
++ * Guest OS interface to x86 64-bit Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2006, K A Fraser
++ */
++
++#include "arch-x86/xen.h"
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/callback.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,92 @@
++/******************************************************************************
++ * callback.h
++ *
++ * Register guest OS callbacks with Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2006, Ian Campbell
++ */
++
++#ifndef __XEN_PUBLIC_CALLBACK_H__
++#define __XEN_PUBLIC_CALLBACK_H__
++
++#include "xen.h"
++
++/*
++ * Prototype for this hypercall is:
++ * long callback_op(int cmd, void *extra_args)
++ * @cmd == CALLBACKOP_??? (callback operation).
++ * @extra_args == Operation-specific extra arguments (NULL if none).
++ */
++
++#define CALLBACKTYPE_event 0
++#define CALLBACKTYPE_failsafe 1
++#define CALLBACKTYPE_syscall 2 /* x86_64 only */
++/*
++ * sysenter is only available on x86_32 with the
++ * supervisor_mode_kernel option enabled.
++ */
++#define CALLBACKTYPE_sysenter 3
++#define CALLBACKTYPE_nmi 4
++
++/*
++ * Disable event delivery during callback? This flag is ignored for event and
++ * NMI callbacks: event delivery is unconditionally disabled.
++ */
++#define _CALLBACKF_mask_events 0
++#define CALLBACKF_mask_events (1U << _CALLBACKF_mask_events)
++
++/*
++ * Register a callback.
++ */
++#define CALLBACKOP_register 0
++struct callback_register {
++ uint16_t type;
++ uint16_t flags;
++ xen_callback_t address;
++};
++typedef struct callback_register callback_register_t;
++DEFINE_XEN_GUEST_HANDLE(callback_register_t);
++
++/*
++ * Unregister a callback.
++ *
++ * Not all callbacks can be unregistered. -EINVAL will be returned if
++ * you attempt to unregister such a callback.
++ */
++#define CALLBACKOP_unregister 1
++struct callback_unregister {
++ uint16_t type;
++ uint16_t _unused;
++};
++typedef struct callback_unregister callback_unregister_t;
++DEFINE_XEN_GUEST_HANDLE(callback_unregister_t);
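++
++/*
++ * Example (illustrative sketch, not part of this interface): registering
++ * the event upcall from a guest.  HYPERVISOR_callback_op() is the usual
++ * hypercall wrapper and event_entry is a guest-supplied entry point; both
++ * are assumptions here, not defined by this header.  xen_callback_t is
++ * architecture specific; the x86_64 form (a plain virtual address) is
++ * shown.  For event callbacks the flags field is ignored, so it is zero.
++ *
++ *     struct callback_register event = {
++ *         .type    = CALLBACKTYPE_event,
++ *         .flags   = 0,
++ *         .address = (unsigned long)event_entry,
++ *     };
++ *     if (HYPERVISOR_callback_op(CALLBACKOP_register, &event) != 0)
++ *         panic("event callback registration failed");
++ */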
++
++#endif /* __XEN_PUBLIC_CALLBACK_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/domctl.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,478 @@
++/******************************************************************************
++ * domctl.h
++ *
++ * Domain management operations. For use by node control stack.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2002-2003, B Dragovic
++ * Copyright (c) 2002-2006, K Fraser
++ */
++
++#ifndef __XEN_PUBLIC_DOMCTL_H__
++#define __XEN_PUBLIC_DOMCTL_H__
++
++#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
++#error "domctl operations are intended for use by node control tools only"
++#endif
++
++#include "xen.h"
++
++#define XEN_DOMCTL_INTERFACE_VERSION 0x00000005
++
++struct xenctl_cpumap {
++ XEN_GUEST_HANDLE_64(uint8_t) bitmap;
++ uint32_t nr_cpus;
++};
++
++/*
++ * NB. xen_domctl.domain is an IN/OUT parameter for this operation.
++ * If it is specified as zero, an id is auto-allocated and returned.
++ */
++#define XEN_DOMCTL_createdomain 1
++struct xen_domctl_createdomain {
++ /* IN parameters */
++ uint32_t ssidref;
++ xen_domain_handle_t handle;
++ /* Is this an HVM guest (as opposed to a PV guest)? */
++#define _XEN_DOMCTL_CDF_hvm_guest 0
++#define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest)
++ uint32_t flags;
++};
++typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
++
++#define XEN_DOMCTL_destroydomain 2
++#define XEN_DOMCTL_pausedomain 3
++#define XEN_DOMCTL_unpausedomain 4
++#define XEN_DOMCTL_resumedomain 27
++
++#define XEN_DOMCTL_getdomaininfo 5
++struct xen_domctl_getdomaininfo {
++ /* OUT variables. */
++ domid_t domain; /* Also echoed in domctl.domain */
++ /* Domain is scheduled to die. */
++#define _XEN_DOMINF_dying 0
++#define XEN_DOMINF_dying (1U<<_XEN_DOMINF_dying)
++ /* Domain is an HVM guest (as opposed to a PV guest). */
++#define _XEN_DOMINF_hvm_guest 1
++#define XEN_DOMINF_hvm_guest (1U<<_XEN_DOMINF_hvm_guest)
++ /* The guest OS has shut down. */
++#define _XEN_DOMINF_shutdown 2
++#define XEN_DOMINF_shutdown (1U<<_XEN_DOMINF_shutdown)
++ /* Currently paused by control software. */
++#define _XEN_DOMINF_paused 3
++#define XEN_DOMINF_paused (1U<<_XEN_DOMINF_paused)
++ /* Currently blocked pending an event. */
++#define _XEN_DOMINF_blocked 4
++#define XEN_DOMINF_blocked (1U<<_XEN_DOMINF_blocked)
++ /* Domain is currently running. */
++#define _XEN_DOMINF_running 5
++#define XEN_DOMINF_running (1U<<_XEN_DOMINF_running)
++ /* CPU to which this domain is bound. */
++#define XEN_DOMINF_cpumask 255
++#define XEN_DOMINF_cpushift 8
++ /* XEN_DOMINF_shutdown guest-supplied code. */
++#define XEN_DOMINF_shutdownmask 255
++#define XEN_DOMINF_shutdownshift 16
++ uint32_t flags; /* XEN_DOMINF_* */
++ uint64_aligned_t tot_pages;
++ uint64_aligned_t max_pages;
++ uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */
++ uint64_aligned_t cpu_time;
++ uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
++ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
++ uint32_t ssidref;
++ xen_domain_handle_t handle;
++};
++typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t);
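++
++/*
++ * Example (illustrative sketch): decoding the packed 'flags' word returned
++ * by XEN_DOMCTL_getdomaininfo, for a hypothetical already-filled
++ * struct xen_domctl_getdomaininfo named 'info'.
++ *
++ *     int is_hvm    = !!(info.flags & XEN_DOMINF_hvm_guest);
++ *     int shut_down = !!(info.flags & XEN_DOMINF_shutdown);
++ *     unsigned int cpu  = (info.flags >> XEN_DOMINF_cpushift)
++ *                         & XEN_DOMINF_cpumask;
++ *     unsigned int code = (info.flags >> XEN_DOMINF_shutdownshift)
++ *                         & XEN_DOMINF_shutdownmask;
++ *
++ * 'code' is the guest-supplied shutdown reason and is only meaningful
++ * when XEN_DOMINF_shutdown is set.
++ */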
++
++
++#define XEN_DOMCTL_getmemlist 6
++struct xen_domctl_getmemlist {
++ /* IN variables. */
++ /* Max entries to write to output buffer. */
++ uint64_aligned_t max_pfns;
++ /* Start index in guest's page list. */
++ uint64_aligned_t start_pfn;
++ XEN_GUEST_HANDLE_64(uint64_t) buffer;
++ /* OUT variables. */
++ uint64_aligned_t num_pfns;
++};
++typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t);
++
++
++#define XEN_DOMCTL_getpageframeinfo 7
++
++#define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28
++#define XEN_DOMCTL_PFINFO_NOTAB (0x0U<<28)
++#define XEN_DOMCTL_PFINFO_L1TAB (0x1U<<28)
++#define XEN_DOMCTL_PFINFO_L2TAB (0x2U<<28)
++#define XEN_DOMCTL_PFINFO_L3TAB (0x3U<<28)
++#define XEN_DOMCTL_PFINFO_L4TAB (0x4U<<28)
++#define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28)
++#define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
++#define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */
++#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
++
++struct xen_domctl_getpageframeinfo {
++ /* IN variables. */
++ uint64_aligned_t gmfn; /* GMFN to query */
++ /* OUT variables. */
++ /* Is the page PINNED to a type? */
++ uint32_t type; /* see above type defs */
++};
++typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t);
++
++
++#define XEN_DOMCTL_getpageframeinfo2 8
++struct xen_domctl_getpageframeinfo2 {
++ /* IN variables. */
++ uint64_aligned_t num;
++ /* IN/OUT variables. */
++ XEN_GUEST_HANDLE_64(uint32_t) array;
++};
++typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t);
++
++
++/*
++ * Control shadow pagetables operation
++ */
++#define XEN_DOMCTL_shadow_op 10
++
++/* Disable shadow mode. */
++#define XEN_DOMCTL_SHADOW_OP_OFF 0
++
++/* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */
++#define XEN_DOMCTL_SHADOW_OP_ENABLE 32
++
++/* Log-dirty bitmap operations. */
++ /* Return the bitmap and clean internal copy for next round. */
++#define XEN_DOMCTL_SHADOW_OP_CLEAN 11
++ /* Return the bitmap but do not modify internal copy. */
++#define XEN_DOMCTL_SHADOW_OP_PEEK 12
++
++/* Memory allocation accessors. */
++#define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION 30
++#define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION 31
++
++/* Legacy enable operations. */
++ /* Equiv. to ENABLE with no mode flags. */
++#define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST 1
++ /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */
++#define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY 2
++ /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */
++#define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE 3
++
++/* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */
++ /*
++ * Shadow pagetables are refcounted: guest does not use explicit mmu
++ * operations nor write-protect its pagetables.
++ */
++#define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT (1 << 1)
++ /*
++ * Log pages in a bitmap as they are dirtied.
++ * Used for live relocation to determine which pages must be re-sent.
++ */
++#define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2)
++ /*
++ * Automatically translate GPFNs into MFNs.
++ */
++#define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3)
++ /*
++ * Xen does not steal virtual address space from the guest.
++ * Requires HVM support.
++ */
++#define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL (1 << 4)
++
++struct xen_domctl_shadow_op_stats {
++ uint32_t fault_count;
++ uint32_t dirty_count;
++};
++typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t);
++
++struct xen_domctl_shadow_op {
++ /* IN variables. */
++ uint32_t op; /* XEN_DOMCTL_SHADOW_OP_* */
++
++ /* OP_ENABLE */
++ uint32_t mode; /* XEN_DOMCTL_SHADOW_ENABLE_* */
++
++ /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */
++ uint32_t mb; /* Shadow memory allocation in MB */
++
++ /* OP_PEEK / OP_CLEAN */
++ XEN_GUEST_HANDLE_64(uint8_t) dirty_bitmap;
++ uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */
++ struct xen_domctl_shadow_op_stats stats;
++};
++typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t);
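++
++/*
++ * Example (illustrative sketch): one log-dirty round as a control tool
++ * might issue it, using the xen_domctl envelope defined at the end of
++ * this header.  It assumes log-dirty mode is already enabled, 'bitmap'
++ * is a buffer of at least (nr_pages + 7) / 8 bytes already wrapped in a
++ * guest handle via set_xen_guest_handle(), and do_domctl() and
++ * process_dirty_bitmap() are hypothetical helpers (do_domctl() fills in
++ * interface_version and issues the hypercall).
++ *
++ *     struct xen_domctl domctl = { 0 };
++ *     domctl.cmd               = XEN_DOMCTL_shadow_op;
++ *     domctl.domain            = domid;
++ *     domctl.u.shadow_op.op    = XEN_DOMCTL_SHADOW_OP_CLEAN;
++ *     domctl.u.shadow_op.pages = nr_pages;
++ *     set_xen_guest_handle(domctl.u.shadow_op.dirty_bitmap, bitmap);
++ *     if (do_domctl(&domctl) == 0)
++ *         process_dirty_bitmap(bitmap, domctl.u.shadow_op.pages);
++ *
++ * Pages dirtied since the previous round are set in 'bitmap';
++ * stats.dirty_count reports how many.
++ */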
++
++
++#define XEN_DOMCTL_max_mem 11
++struct xen_domctl_max_mem {
++ /* IN variables. */
++ uint64_aligned_t max_memkb;
++};
++typedef struct xen_domctl_max_mem xen_domctl_max_mem_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t);
++
++
++#define XEN_DOMCTL_setvcpucontext 12
++#define XEN_DOMCTL_getvcpucontext 13
++struct xen_domctl_vcpucontext {
++ uint32_t vcpu; /* IN */
++ XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */
++};
++typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t);
++
++
++#define XEN_DOMCTL_getvcpuinfo 14
++struct xen_domctl_getvcpuinfo {
++ /* IN variables. */
++ uint32_t vcpu;
++ /* OUT variables. */
++ uint8_t online; /* currently online (not hotplugged)? */
++ uint8_t blocked; /* blocked waiting for an event? */
++ uint8_t running; /* currently scheduled on its CPU? */
++ uint64_aligned_t cpu_time; /* total cpu time consumed (ns) */
++ uint32_t cpu; /* current mapping */
++};
++typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t);
++
++
++/* Get/set which physical cpus a vcpu can execute on. */
++#define XEN_DOMCTL_setvcpuaffinity 9
++#define XEN_DOMCTL_getvcpuaffinity 25
++struct xen_domctl_vcpuaffinity {
++ uint32_t vcpu; /* IN */
++ struct xenctl_cpumap cpumap; /* IN/OUT */
++};
++typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
++
++
++#define XEN_DOMCTL_max_vcpus 15
++struct xen_domctl_max_vcpus {
++ uint32_t max; /* maximum number of vcpus */
++};
++typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
++
++
++#define XEN_DOMCTL_scheduler_op 16
++/* Scheduler types. */
++#define XEN_SCHEDULER_SEDF 4
++#define XEN_SCHEDULER_CREDIT 5
++/* Set or get info? */
++#define XEN_DOMCTL_SCHEDOP_putinfo 0
++#define XEN_DOMCTL_SCHEDOP_getinfo 1
++struct xen_domctl_scheduler_op {
++ uint32_t sched_id; /* XEN_SCHEDULER_* */
++ uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */
++ union {
++ struct xen_domctl_sched_sedf {
++ uint64_aligned_t period;
++ uint64_aligned_t slice;
++ uint64_aligned_t latency;
++ uint32_t extratime;
++ uint32_t weight;
++ } sedf;
++ struct xen_domctl_sched_credit {
++ uint16_t weight;
++ uint16_t cap;
++ } credit;
++ } u;
++};
++typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t);
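++
++/*
++ * Example (illustrative sketch): giving a domain twice the default credit
++ * weight and leaving it uncapped.  do_domctl() is the same hypothetical
++ * hypercall helper as above; the xen_domctl envelope is defined at the
++ * end of this header.
++ *
++ *     struct xen_domctl domctl = { 0 };
++ *     domctl.cmd                     = XEN_DOMCTL_scheduler_op;
++ *     domctl.domain                  = domid;
++ *     domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT;
++ *     domctl.u.scheduler_op.cmd      = XEN_DOMCTL_SCHEDOP_putinfo;
++ *     domctl.u.scheduler_op.u.credit.weight = 512;
++ *     domctl.u.scheduler_op.u.credit.cap    = 0;
++ *     (void)do_domctl(&domctl);
++ *
++ * A weight of 512 is twice the credit scheduler's default of 256; a cap
++ * of 0 means the domain is not capped.
++ */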
++
++
++#define XEN_DOMCTL_setdomainhandle 17
++struct xen_domctl_setdomainhandle {
++ xen_domain_handle_t handle;
++};
++typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t);
++
++
++#define XEN_DOMCTL_setdebugging 18
++struct xen_domctl_setdebugging {
++ uint8_t enable;
++};
++typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t);
++
++
++#define XEN_DOMCTL_irq_permission 19
++struct xen_domctl_irq_permission {
++ uint8_t pirq;
++ uint8_t allow_access; /* flag to specify enable/disable of IRQ access */
++};
++typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t);
++
++
++#define XEN_DOMCTL_iomem_permission 20
++struct xen_domctl_iomem_permission {
++ uint64_aligned_t first_mfn;/* first page (physical page number) in range */
++ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */
++ uint8_t allow_access; /* allow (!0) or deny (0) access to range? */
++};
++typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t);
++
++
++#define XEN_DOMCTL_ioport_permission 21
++struct xen_domctl_ioport_permission {
++    uint32_t first_port;              /* first port in range */
++ uint32_t nr_ports; /* size of port range */
++ uint8_t allow_access; /* allow or deny access to range? */
++};
++typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t);
++
++
++#define XEN_DOMCTL_hypercall_init 22
++struct xen_domctl_hypercall_init {
++ uint64_aligned_t gmfn; /* GMFN to be initialised */
++};
++typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t);
++
++
++#define XEN_DOMCTL_arch_setup 23
++#define _XEN_DOMAINSETUP_hvm_guest 0
++#define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest)
++#define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */
++#define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query)
++typedef struct xen_domctl_arch_setup {
++ uint64_aligned_t flags; /* XEN_DOMAINSETUP_* */
++#ifdef __ia64__
++ uint64_aligned_t bp; /* mpaddr of boot param area */
++ uint64_aligned_t maxmem; /* Highest memory address for MDT. */
++ uint64_aligned_t xsi_va; /* Xen shared_info area virtual address. */
++ uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */
++#endif
++} xen_domctl_arch_setup_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t);
++
++
++#define XEN_DOMCTL_settimeoffset 24
++struct xen_domctl_settimeoffset {
++ int32_t time_offset_seconds; /* applied to domain wallclock time */
++};
++typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
++
++
++#define XEN_DOMCTL_gethvmcontext 33
++#define XEN_DOMCTL_sethvmcontext 34
++typedef struct xen_domctl_hvmcontext {
++ uint32_t size; /* IN/OUT: size of buffer / bytes filled */
++ XEN_GUEST_HANDLE_64(uint8_t) buffer; /* IN/OUT: data, or call
++ * gethvmcontext with NULL
++ * buffer to get size
++ * req'd */
++} xen_domctl_hvmcontext_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);
++
++
++#define XEN_DOMCTL_set_address_size 35
++#define XEN_DOMCTL_get_address_size 36
++typedef struct xen_domctl_address_size {
++ uint32_t size;
++} xen_domctl_address_size_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t);
++
++
++#define XEN_DOMCTL_real_mode_area 26
++struct xen_domctl_real_mode_area {
++ uint32_t log; /* log2 of Real Mode Area size */
++};
++typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t);
++
++
++#define XEN_DOMCTL_sendtrigger 28
++#define XEN_DOMCTL_SENDTRIGGER_NMI 0
++#define XEN_DOMCTL_SENDTRIGGER_RESET 1
++#define XEN_DOMCTL_SENDTRIGGER_INIT 2
++struct xen_domctl_sendtrigger {
++ uint32_t trigger; /* IN */
++ uint32_t vcpu; /* IN */
++};
++typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t);
++
++
++struct xen_domctl {
++ uint32_t cmd;
++ uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
++ domid_t domain;
++ union {
++ struct xen_domctl_createdomain createdomain;
++ struct xen_domctl_getdomaininfo getdomaininfo;
++ struct xen_domctl_getmemlist getmemlist;
++ struct xen_domctl_getpageframeinfo getpageframeinfo;
++ struct xen_domctl_getpageframeinfo2 getpageframeinfo2;
++ struct xen_domctl_vcpuaffinity vcpuaffinity;
++ struct xen_domctl_shadow_op shadow_op;
++ struct xen_domctl_max_mem max_mem;
++ struct xen_domctl_vcpucontext vcpucontext;
++ struct xen_domctl_getvcpuinfo getvcpuinfo;
++ struct xen_domctl_max_vcpus max_vcpus;
++ struct xen_domctl_scheduler_op scheduler_op;
++ struct xen_domctl_setdomainhandle setdomainhandle;
++ struct xen_domctl_setdebugging setdebugging;
++ struct xen_domctl_irq_permission irq_permission;
++ struct xen_domctl_iomem_permission iomem_permission;
++ struct xen_domctl_ioport_permission ioport_permission;
++ struct xen_domctl_hypercall_init hypercall_init;
++ struct xen_domctl_arch_setup arch_setup;
++ struct xen_domctl_settimeoffset settimeoffset;
++ struct xen_domctl_real_mode_area real_mode_area;
++ struct xen_domctl_hvmcontext hvmcontext;
++ struct xen_domctl_address_size address_size;
++ struct xen_domctl_sendtrigger sendtrigger;
++ uint8_t pad[128];
++ } u;
++};
++typedef struct xen_domctl xen_domctl_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_t);
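++
++/*
++ * Example (illustrative sketch): the simplest complete domctl, pausing a
++ * domain.  HYPERVISOR_domctl() stands in for whatever wrapper the control
++ * stack provides (libxc's internal do_domctl(), for instance) and is not
++ * defined by this header.
++ *
++ *     struct xen_domctl domctl = { 0 };
++ *     domctl.cmd               = XEN_DOMCTL_pausedomain;
++ *     domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
++ *     domctl.domain            = domid;
++ *     if (HYPERVISOR_domctl(&domctl) != 0)
++ *         handle_error();
++ *
++ * The hypervisor rejects domctls issued by insufficiently privileged
++ * domains.
++ */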
++
++#endif /* __XEN_PUBLIC_DOMCTL_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/elfnote.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,233 @@
++/******************************************************************************
++ * elfnote.h
++ *
++ * Definitions used for the Xen ELF notes.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2006, Ian Campbell, XenSource Ltd.
++ */
++
++#ifndef __XEN_PUBLIC_ELFNOTE_H__
++#define __XEN_PUBLIC_ELFNOTE_H__
++
++/*
++ * The notes should live in a PT_NOTE segment and have "Xen" in the
++ * name field.
++ *
++ * Numeric types are either 4 or 8 bytes depending on the content of
++ * the desc field.
++ *
++ * LEGACY indicated the fields in the legacy __xen_guest string which
++ * this a note type replaces.
++ */
++
++/*
++ * NAME=VALUE pair (string).
++ */
++#define XEN_ELFNOTE_INFO 0
++
++/*
++ * The virtual address of the entry point (numeric).
++ *
++ * LEGACY: VIRT_ENTRY
++ */
++#define XEN_ELFNOTE_ENTRY 1
++
++/* The virtual address of the hypercall transfer page (numeric).
++ *
++ * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page
++ * number not a virtual address)
++ */
++#define XEN_ELFNOTE_HYPERCALL_PAGE 2
++
++/* The virtual address where the kernel image should be mapped (numeric).
++ *
++ * Defaults to 0.
++ *
++ * LEGACY: VIRT_BASE
++ */
++#define XEN_ELFNOTE_VIRT_BASE 3
++
++/*
++ * The offset of the ELF paddr field from the actual required
++ * pseudo-physical address (numeric).
++ *
++ * This is used to maintain backwards compatibility with older kernels
++ * which wrote __PAGE_OFFSET into that field. This field defaults to 0
++ * if not present.
++ *
++ * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE)
++ */
++#define XEN_ELFNOTE_PADDR_OFFSET 4
++
++/*
++ * The version of Xen that we work with (string).
++ *
++ * LEGACY: XEN_VER
++ */
++#define XEN_ELFNOTE_XEN_VERSION 5
++
++/*
++ * The name of the guest operating system (string).
++ *
++ * LEGACY: GUEST_OS
++ */
++#define XEN_ELFNOTE_GUEST_OS 6
++
++/*
++ * The version of the guest operating system (string).
++ *
++ * LEGACY: GUEST_VER
++ */
++#define XEN_ELFNOTE_GUEST_VERSION 7
++
++/*
++ * The loader type (string).
++ *
++ * LEGACY: LOADER
++ */
++#define XEN_ELFNOTE_LOADER 8
++
++/*
++ * The kernel supports PAE (x86/32 only, string = "yes", "no" or
++ * "bimodal").
++ *
++ * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting
++ * may be given as "yes,bimodal" which will cause older Xen to treat
++ * this kernel as PAE.
++ *
++ * LEGACY: PAE (n.b. The legacy interface included a provision to
++ * indicate 'extended-cr3' support allowing L3 page tables to be
++ * placed above 4G. It is assumed that any kernel new enough to use
++ * these ELF notes will include this and therefore "yes" here is
++ * equivalent to "yes[extended-cr3]" in the __xen_guest interface.)
++ */
++#define XEN_ELFNOTE_PAE_MODE 9
++
++/*
++ * The features supported/required by this kernel (string).
++ *
++ * The string must consist of a list of feature names (as given in
++ * features.h, without the "XENFEAT_" prefix) separated by '|'
++ * characters. If a feature is required for the kernel to function
++ * then the feature name must be preceded by a '!' character.
++ *
++ * LEGACY: FEATURES
++ */
++#define XEN_ELFNOTE_FEATURES 10
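++
++/*
++ * Example (illustrative sketch) of a features string using the syntax
++ * above; the particular names are only for illustration:
++ *
++ *     "pae_pgdir_above_4gb|!writable_page_tables"
++ *
++ * advertises pae_pgdir_above_4gb support and declares that the kernel
++ * cannot run unless writable_page_tables is provided.
++ */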
++
++/*
++ * The kernel requires the symbol table to be loaded (string = "yes" or "no").
++ *
++ * LEGACY: BSD_SYMTAB (n.b. The legacy interface treated the presence or
++ * absence of this string as a boolean flag rather than requiring "yes" or
++ * "no".)
++ */
++#define XEN_ELFNOTE_BSD_SYMTAB 11
++
++/*
++ * The lowest address the hypervisor hole can begin at (numeric).
++ *
++ * This must not be set higher than HYPERVISOR_VIRT_START. Its presence
++ * also indicates to the hypervisor that the kernel can deal with the
++ * hole starting at a higher address.
++ */
++#define XEN_ELFNOTE_HV_START_LOW 12
++
++/*
++ * List of maddr_t-sized mask/value pairs describing how to recognize
++ * (non-present) L1 page table entries carrying valid MFNs (numeric).
++ */
++#define XEN_ELFNOTE_L1_MFN_VALID 13
++
++/*
++ * Whether or not the guest supports cooperative suspend cancellation.
++ */
++#define XEN_ELFNOTE_SUSPEND_CANCEL 14
++
++/*
++ * The number of the highest elfnote defined.
++ */
++#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUSPEND_CANCEL
++
++/*
++ * System information exported through crash notes.
++ *
++ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO
++ * note in case of a system crash. This note will contain various
++ * information about the system, see xen/include/xen/elfcore.h.
++ */
++#define XEN_ELFNOTE_CRASH_INFO 0x1000001
++
++/*
++ * System registers exported through crash notes.
++ *
++ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS
++ * note per cpu in case of a system crash. This note is architecture
++ * specific and will contain registers not saved in the "CORE" note.
++ * See xen/include/xen/elfcore.h for more information.
++ */
++#define XEN_ELFNOTE_CRASH_REGS 0x1000002
++
++
++/*
++ * xen dump-core none note.
++ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE
++ * in its dump file to indicate that the file is xen dump-core
++ * file. This note doesn't have any other information.
++ * See tools/libxc/xc_core.h for more information.
++ */
++#define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000
++
++/*
++ * xen dump-core header note.
++ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER
++ * in its dump file.
++ * See tools/libxc/xc_core.h for more information.
++ */
++#define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001
++
++/*
++ * xen dump-core xen version note.
++ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION
++ * in its dump file. It contains the xen version obtained via the
++ * XENVER hypercall.
++ * See tools/libxc/xc_core.h for more information.
++ */
++#define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002
++
++/*
++ * xen dump-core format version note.
++ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION
++ * in its dump file. It contains a format version identifier.
++ * See tools/libxc/xc_core.h for more information.
++ */
++#define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003
++
++#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/elfstructs.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,527 @@
++#ifndef __XEN_PUBLIC_ELFSTRUCTS_H__
++#define __XEN_PUBLIC_ELFSTRUCTS_H__ 1
++/*
++ * Copyright (c) 1995, 1996 Erik Theisen. All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * 3. The name of the author may not be used to endorse or promote products
++ * derived from this software without specific prior written permission
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++typedef uint8_t Elf_Byte;
++
++typedef uint32_t Elf32_Addr; /* Unsigned program address */
++typedef uint32_t Elf32_Off; /* Unsigned file offset */
++typedef int32_t Elf32_Sword; /* Signed large integer */
++typedef uint32_t Elf32_Word; /* Unsigned large integer */
++typedef uint16_t Elf32_Half; /* Unsigned medium integer */
++
++typedef uint64_t Elf64_Addr;
++typedef uint64_t Elf64_Off;
++typedef int32_t Elf64_Shalf;
++
++typedef int32_t Elf64_Sword;
++typedef uint32_t Elf64_Word;
++
++typedef int64_t Elf64_Sxword;
++typedef uint64_t Elf64_Xword;
++
++typedef uint32_t Elf64_Half;
++typedef uint16_t Elf64_Quarter;
++
++/*
++ * e_ident[] identification indexes
++ * See http://www.caldera.com/developers/gabi/2000-07-17/ch4.eheader.html
++ */
++#define EI_MAG0 0 /* file ID */
++#define EI_MAG1 1 /* file ID */
++#define EI_MAG2 2 /* file ID */
++#define EI_MAG3 3 /* file ID */
++#define EI_CLASS 4 /* file class */
++#define EI_DATA 5 /* data encoding */
++#define EI_VERSION 6 /* ELF header version */
++#define EI_OSABI 7 /* OS/ABI ID */
++#define EI_ABIVERSION 8 /* ABI version */
++#define EI_PAD 9 /* start of pad bytes */
++#define EI_NIDENT 16 /* Size of e_ident[] */
++
++/* e_ident[] magic number */
++#define ELFMAG0 0x7f /* e_ident[EI_MAG0] */
++#define ELFMAG1 'E' /* e_ident[EI_MAG1] */
++#define ELFMAG2 'L' /* e_ident[EI_MAG2] */
++#define ELFMAG3 'F' /* e_ident[EI_MAG3] */
++#define ELFMAG "\177ELF" /* magic */
++#define SELFMAG 4 /* size of magic */
++
++/* e_ident[] file class */
++#define ELFCLASSNONE 0 /* invalid */
++#define ELFCLASS32 1 /* 32-bit objs */
++#define ELFCLASS64 2 /* 64-bit objs */
++#define ELFCLASSNUM 3 /* number of classes */
++
++/* e_ident[] data encoding */
++#define ELFDATANONE 0 /* invalid */
++#define ELFDATA2LSB 1 /* Little-Endian */
++#define ELFDATA2MSB 2 /* Big-Endian */
++#define ELFDATANUM 3 /* number of data encode defines */
++
++/* e_ident[] Operating System/ABI */
++#define ELFOSABI_SYSV 0 /* UNIX System V ABI */
++#define ELFOSABI_HPUX 1 /* HP-UX operating system */
++#define ELFOSABI_NETBSD 2 /* NetBSD */
++#define ELFOSABI_LINUX 3 /* GNU/Linux */
++#define ELFOSABI_HURD 4 /* GNU/Hurd */
++#define ELFOSABI_86OPEN 5 /* 86Open common IA32 ABI */
++#define ELFOSABI_SOLARIS 6 /* Solaris */
++#define ELFOSABI_MONTEREY 7 /* Monterey */
++#define ELFOSABI_IRIX 8 /* IRIX */
++#define ELFOSABI_FREEBSD 9 /* FreeBSD */
++#define ELFOSABI_TRU64 10 /* TRU64 UNIX */
++#define ELFOSABI_MODESTO 11 /* Novell Modesto */
++#define ELFOSABI_OPENBSD 12 /* OpenBSD */
++#define ELFOSABI_ARM 97 /* ARM */
++#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */
++
++/* e_ident */
++#define IS_ELF(ehdr) ((ehdr).e_ident[EI_MAG0] == ELFMAG0 && \
++ (ehdr).e_ident[EI_MAG1] == ELFMAG1 && \
++ (ehdr).e_ident[EI_MAG2] == ELFMAG2 && \
++ (ehdr).e_ident[EI_MAG3] == ELFMAG3)
++
++/* ELF Header */
++typedef struct elfhdr {
++ unsigned char e_ident[EI_NIDENT]; /* ELF Identification */
++ Elf32_Half e_type; /* object file type */
++ Elf32_Half e_machine; /* machine */
++ Elf32_Word e_version; /* object file version */
++ Elf32_Addr e_entry; /* virtual entry point */
++ Elf32_Off e_phoff; /* program header table offset */
++ Elf32_Off e_shoff; /* section header table offset */
++ Elf32_Word e_flags; /* processor-specific flags */
++ Elf32_Half e_ehsize; /* ELF header size */
++ Elf32_Half e_phentsize; /* program header entry size */
++ Elf32_Half e_phnum; /* number of program header entries */
++ Elf32_Half e_shentsize; /* section header entry size */
++ Elf32_Half e_shnum; /* number of section header entries */
++ Elf32_Half e_shstrndx; /* section header table's "section
++ header string table" entry offset */
++} Elf32_Ehdr;
++
++typedef struct {
++ unsigned char e_ident[EI_NIDENT]; /* Id bytes */
++ Elf64_Quarter e_type; /* file type */
++ Elf64_Quarter e_machine; /* machine type */
++ Elf64_Half e_version; /* version number */
++ Elf64_Addr e_entry; /* entry point */
++ Elf64_Off e_phoff; /* Program hdr offset */
++ Elf64_Off e_shoff; /* Section hdr offset */
++ Elf64_Half e_flags; /* Processor flags */
++ Elf64_Quarter e_ehsize; /* sizeof ehdr */
++ Elf64_Quarter e_phentsize; /* Program header entry size */
++ Elf64_Quarter e_phnum; /* Number of program headers */
++ Elf64_Quarter e_shentsize; /* Section header entry size */
++ Elf64_Quarter e_shnum; /* Number of section headers */
++ Elf64_Quarter e_shstrndx; /* String table index */
++} Elf64_Ehdr;
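++
++/*
++ * Example (illustrative sketch): validating a candidate image before
++ * trusting any other header field.  'image' is a hypothetical pointer to
++ * the start of the loaded file.
++ *
++ *     const Elf32_Ehdr *ehdr = (const Elf32_Ehdr *)image;
++ *     if (!IS_ELF(*ehdr) || ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
++ *         ehdr->e_ident[EI_DATA] != ELFDATA2LSB)
++ *         return -1;
++ */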
++
++/* e_type */
++#define ET_NONE 0 /* No file type */
++#define ET_REL 1 /* relocatable file */
++#define ET_EXEC 2 /* executable file */
++#define ET_DYN 3 /* shared object file */
++#define ET_CORE 4 /* core file */
++#define ET_NUM 5 /* number of types */
++#define ET_LOPROC 0xff00 /* reserved range for processor */
++#define ET_HIPROC 0xffff /* specific e_type */
++
++/* e_machine */
++#define EM_NONE 0 /* No Machine */
++#define EM_M32 1 /* AT&T WE 32100 */
++#define EM_SPARC 2 /* SPARC */
++#define EM_386 3 /* Intel 80386 */
++#define EM_68K 4 /* Motorola 68000 */
++#define EM_88K 5 /* Motorola 88000 */
++#define EM_486 6 /* Intel 80486 - unused? */
++#define EM_860 7 /* Intel 80860 */
++#define EM_MIPS 8 /* MIPS R3000 Big-Endian only */
++/*
++ * Don't know if EM_MIPS_RS4_BE,
++ * EM_SPARC64, EM_PARISC,
++ * or EM_PPC are ABI compliant
++ */
++#define EM_MIPS_RS4_BE 10 /* MIPS R4000 Big-Endian */
++#define EM_SPARC64      11              /* SPARC v9 64-bit unofficial */
++#define EM_PARISC 15 /* HPPA */
++#define EM_SPARC32PLUS 18 /* Enhanced instruction set SPARC */
++#define EM_PPC 20 /* PowerPC */
++#define EM_PPC64 21 /* PowerPC 64-bit */
++#define EM_ARM 40 /* Advanced RISC Machines ARM */
++#define EM_ALPHA 41 /* DEC ALPHA */
++#define EM_SPARCV9 43 /* SPARC version 9 */
++#define EM_ALPHA_EXP 0x9026 /* DEC ALPHA */
++#define EM_IA_64 50 /* Intel Merced */
++#define EM_X86_64 62 /* AMD x86-64 architecture */
++#define EM_VAX 75 /* DEC VAX */
++
++/* Version */
++#define EV_NONE 0 /* Invalid */
++#define EV_CURRENT 1 /* Current */
++#define EV_NUM 2 /* number of versions */
++
++/* Section Header */
++typedef struct {
++ Elf32_Word sh_name; /* name - index into section header
++ string table section */
++ Elf32_Word sh_type; /* type */
++ Elf32_Word sh_flags; /* flags */
++ Elf32_Addr sh_addr; /* address */
++ Elf32_Off sh_offset; /* file offset */
++ Elf32_Word sh_size; /* section size */
++ Elf32_Word sh_link; /* section header table index link */
++ Elf32_Word sh_info; /* extra information */
++ Elf32_Word sh_addralign; /* address alignment */
++ Elf32_Word sh_entsize; /* section entry size */
++} Elf32_Shdr;
++
++typedef struct {
++ Elf64_Half sh_name; /* section name */
++ Elf64_Half sh_type; /* section type */
++ Elf64_Xword sh_flags; /* section flags */
++ Elf64_Addr sh_addr; /* virtual address */
++ Elf64_Off sh_offset; /* file offset */
++ Elf64_Xword sh_size; /* section size */
++ Elf64_Half sh_link; /* link to another */
++ Elf64_Half sh_info; /* misc info */
++ Elf64_Xword sh_addralign; /* memory alignment */
++ Elf64_Xword sh_entsize; /* table entry size */
++} Elf64_Shdr;
++
++/* Special Section Indexes */
++#define SHN_UNDEF 0 /* undefined */
++#define SHN_LORESERVE 0xff00 /* lower bounds of reserved indexes */
++#define SHN_LOPROC 0xff00 /* reserved range for processor */
++#define SHN_HIPROC 0xff1f /* specific section indexes */
++#define SHN_ABS 0xfff1 /* absolute value */
++#define SHN_COMMON 0xfff2 /* common symbol */
++#define SHN_HIRESERVE 0xffff /* upper bounds of reserved indexes */
++
++/* sh_type */
++#define SHT_NULL 0 /* inactive */
++#define SHT_PROGBITS 1 /* program defined information */
++#define SHT_SYMTAB 2 /* symbol table section */
++#define SHT_STRTAB 3 /* string table section */
++#define SHT_RELA 4 /* relocation section with addends*/
++#define SHT_HASH 5 /* symbol hash table section */
++#define SHT_DYNAMIC 6 /* dynamic section */
++#define SHT_NOTE 7 /* note section */
++#define SHT_NOBITS 8 /* no space section */
++#define SHT_REL         9               /* relocation section without addends */
++#define SHT_SHLIB 10 /* reserved - purpose unknown */
++#define SHT_DYNSYM 11 /* dynamic symbol table section */
++#define SHT_NUM 12 /* number of section types */
++#define SHT_LOPROC 0x70000000 /* reserved range for processor */
++#define SHT_HIPROC 0x7fffffff /* specific section header types */
++#define SHT_LOUSER 0x80000000 /* reserved range for application */
++#define SHT_HIUSER 0xffffffff /* specific indexes */
++
++/* Section names */
++#define ELF_BSS ".bss" /* uninitialized data */
++#define ELF_DATA ".data" /* initialized data */
++#define ELF_DEBUG ".debug" /* debug */
++#define ELF_DYNAMIC ".dynamic" /* dynamic linking information */
++#define ELF_DYNSTR ".dynstr" /* dynamic string table */
++#define ELF_DYNSYM ".dynsym" /* dynamic symbol table */
++#define ELF_FINI ".fini" /* termination code */
++#define ELF_GOT ".got" /* global offset table */
++#define ELF_HASH ".hash" /* symbol hash table */
++#define ELF_INIT ".init" /* initialization code */
++#define ELF_REL_DATA ".rel.data" /* relocation data */
++#define ELF_REL_FINI ".rel.fini" /* relocation termination code */
++#define ELF_REL_INIT ".rel.init" /* relocation initialization code */
++#define ELF_REL_DYN     ".rel.dyn"      /* relocation dynamic link info */
++#define ELF_REL_RODATA ".rel.rodata" /* relocation read-only data */
++#define ELF_REL_TEXT ".rel.text" /* relocation code */
++#define ELF_RODATA ".rodata" /* read-only data */
++#define ELF_SHSTRTAB ".shstrtab" /* section header string table */
++#define ELF_STRTAB ".strtab" /* string table */
++#define ELF_SYMTAB ".symtab" /* symbol table */
++#define ELF_TEXT ".text" /* code */
++
++
++/* Section Attribute Flags - sh_flags */
++#define SHF_WRITE 0x1 /* Writable */
++#define SHF_ALLOC 0x2 /* occupies memory */
++#define SHF_EXECINSTR 0x4 /* executable */
++#define SHF_MASKPROC 0xf0000000 /* reserved bits for processor */
++ /* specific section attributes */
++
++/* Symbol Table Entry */
++typedef struct elf32_sym {
++ Elf32_Word st_name; /* name - index into string table */
++ Elf32_Addr st_value; /* symbol value */
++ Elf32_Word st_size; /* symbol size */
++ unsigned char st_info; /* type and binding */
++ unsigned char st_other; /* 0 - no defined meaning */
++ Elf32_Half st_shndx; /* section header index */
++} Elf32_Sym;
++
++typedef struct {
++ Elf64_Half st_name; /* Symbol name index in str table */
++ Elf_Byte st_info; /* type / binding attrs */
++ Elf_Byte st_other; /* unused */
++ Elf64_Quarter st_shndx; /* section index of symbol */
++ Elf64_Xword st_value; /* value of symbol */
++ Elf64_Xword st_size; /* size of symbol */
++} Elf64_Sym;
++
++/* Symbol table index */
++#define STN_UNDEF 0 /* undefined */
++
++/* Extract symbol info - st_info */
++#define ELF32_ST_BIND(x) ((x) >> 4)
++#define ELF32_ST_TYPE(x) (((unsigned int) x) & 0xf)
++#define ELF32_ST_INFO(b,t) (((b) << 4) + ((t) & 0xf))
++
++#define ELF64_ST_BIND(x) ((x) >> 4)
++#define ELF64_ST_TYPE(x) (((unsigned int) x) & 0xf)
++#define ELF64_ST_INFO(b,t) (((b) << 4) + ((t) & 0xf))
++
++/* Symbol Binding - ELF32_ST_BIND - st_info */
++#define STB_LOCAL 0 /* Local symbol */
++#define STB_GLOBAL 1 /* Global symbol */
++#define STB_WEAK 2 /* like global - lower precedence */
++#define STB_NUM 3 /* number of symbol bindings */
++#define STB_LOPROC 13 /* reserved range for processor */
++#define STB_HIPROC 15 /* specific symbol bindings */
++
++/* Symbol type - ELF32_ST_TYPE - st_info */
++#define STT_NOTYPE 0 /* not specified */
++#define STT_OBJECT 1 /* data object */
++#define STT_FUNC 2 /* function */
++#define STT_SECTION 3 /* section */
++#define STT_FILE 4 /* file */
++#define STT_NUM 5 /* number of symbol types */
++#define STT_LOPROC 13 /* reserved range for processor */
++#define STT_HIPROC 15 /* specific symbol types */
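++
++/*
++ * Example (illustrative sketch): packing and unpacking st_info with the
++ * macros above.
++ *
++ *     unsigned char info = ELF32_ST_INFO(STB_GLOBAL, STT_FUNC);
++ *     int is_global_func = (ELF32_ST_BIND(info) == STB_GLOBAL &&
++ *                           ELF32_ST_TYPE(info) == STT_FUNC);
++ */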
++
++/* Relocation entry with implicit addend */
++typedef struct {
++ Elf32_Addr r_offset; /* offset of relocation */
++ Elf32_Word r_info; /* symbol table index and type */
++} Elf32_Rel;
++
++/* Relocation entry with explicit addend */
++typedef struct {
++ Elf32_Addr r_offset; /* offset of relocation */
++ Elf32_Word r_info; /* symbol table index and type */
++ Elf32_Sword r_addend;
++} Elf32_Rela;
++
++/* Extract relocation info - r_info */
++#define ELF32_R_SYM(i) ((i) >> 8)
++#define ELF32_R_TYPE(i) ((unsigned char) (i))
++#define ELF32_R_INFO(s,t) (((s) << 8) + (unsigned char)(t))
++
++typedef struct {
++ Elf64_Xword r_offset; /* where to do it */
++ Elf64_Xword r_info; /* index & type of relocation */
++} Elf64_Rel;
++
++typedef struct {
++ Elf64_Xword r_offset; /* where to do it */
++ Elf64_Xword r_info; /* index & type of relocation */
++ Elf64_Sxword r_addend; /* adjustment value */
++} Elf64_Rela;
++
++#define ELF64_R_SYM(info) ((info) >> 32)
++#define ELF64_R_TYPE(info) ((info) & 0xFFFFFFFF)
++#define ELF64_R_INFO(s,t) (((s) << 32) + (u_int32_t)(t))
++
++/* Program Header */
++typedef struct {
++ Elf32_Word p_type; /* segment type */
++ Elf32_Off p_offset; /* segment offset */
++ Elf32_Addr p_vaddr; /* virtual address of segment */
++ Elf32_Addr p_paddr; /* physical address - ignored? */
++ Elf32_Word p_filesz; /* number of bytes in file for seg. */
++ Elf32_Word p_memsz; /* number of bytes in mem. for seg. */
++ Elf32_Word p_flags; /* flags */
++ Elf32_Word p_align; /* memory alignment */
++} Elf32_Phdr;
++
++typedef struct {
++ Elf64_Half p_type; /* entry type */
++ Elf64_Half p_flags; /* flags */
++ Elf64_Off p_offset; /* offset */
++ Elf64_Addr p_vaddr; /* virtual address */
++ Elf64_Addr p_paddr; /* physical address */
++ Elf64_Xword p_filesz; /* file size */
++ Elf64_Xword p_memsz; /* memory size */
++ Elf64_Xword p_align; /* memory & file alignment */
++} Elf64_Phdr;
++
++/* Segment types - p_type */
++#define PT_NULL 0 /* unused */
++#define PT_LOAD 1 /* loadable segment */
++#define PT_DYNAMIC 2 /* dynamic linking section */
++#define PT_INTERP 3 /* the RTLD */
++#define PT_NOTE 4 /* auxiliary information */
++#define PT_SHLIB 5 /* reserved - purpose undefined */
++#define PT_PHDR 6 /* program header */
++#define PT_NUM 7 /* Number of segment types */
++#define PT_LOPROC 0x70000000 /* reserved range for processor */
++#define PT_HIPROC 0x7fffffff /* specific segment types */
++
++/* Segment flags - p_flags */
++#define PF_X 0x1 /* Executable */
++#define PF_W 0x2 /* Writable */
++#define PF_R 0x4 /* Readable */
++#define PF_MASKPROC 0xf0000000 /* reserved bits for processor */
++ /* specific segment flags */
++
++/* Dynamic structure */
++typedef struct {
++ Elf32_Sword d_tag; /* controls meaning of d_val */
++ union {
++ Elf32_Word d_val; /* Multiple meanings - see d_tag */
++ Elf32_Addr d_ptr; /* program virtual address */
++ } d_un;
++} Elf32_Dyn;
++
++typedef struct {
++ Elf64_Xword d_tag; /* controls meaning of d_val */
++ union {
++ Elf64_Addr d_ptr;
++ Elf64_Xword d_val;
++ } d_un;
++} Elf64_Dyn;
++
++/* Dynamic Array Tags - d_tag */
++#define DT_NULL 0 /* marks end of _DYNAMIC array */
++#define DT_NEEDED 1 /* string table offset of needed lib */
++#define DT_PLTRELSZ 2 /* size of relocation entries in PLT */
++#define DT_PLTGOT 3 /* address PLT/GOT */
++#define DT_HASH 4 /* address of symbol hash table */
++#define DT_STRTAB 5 /* address of string table */
++#define DT_SYMTAB 6 /* address of symbol table */
++#define DT_RELA 7 /* address of relocation table */
++#define DT_RELASZ 8 /* size of relocation table */
++#define DT_RELAENT 9 /* size of relocation entry */
++#define DT_STRSZ 10 /* size of string table */
++#define DT_SYMENT 11 /* size of symbol table entry */
++#define DT_INIT 12 /* address of initialization func. */
++#define DT_FINI 13 /* address of termination function */
++#define DT_SONAME 14 /* string table offset of shared obj */
++#define DT_RPATH 15 /* string table offset of library
++ search path */
++#define DT_SYMBOLIC 16 /* start sym search in shared obj. */
++#define DT_REL 17 /* address of rel. tbl. w addends */
++#define DT_RELSZ 18 /* size of DT_REL relocation table */
++#define DT_RELENT 19 /* size of DT_REL relocation entry */
++#define DT_PLTREL 20 /* PLT referenced relocation entry */
++#define DT_DEBUG        21              /* debugger */
++#define DT_TEXTREL 22 /* Allow rel. mod. to unwritable seg */
++#define DT_JMPREL 23 /* add. of PLT's relocation entries */
++#define DT_BIND_NOW 24 /* Bind now regardless of env setting */
++#define DT_NUM 25 /* Number used. */
++#define DT_LOPROC 0x70000000 /* reserved range for processor */
++#define DT_HIPROC 0x7fffffff /* specific dynamic array tags */
++
++/* Standard ELF hashing function */
++unsigned int elf_hash(const unsigned char *name);
++
++/*
++ * Note Definitions
++ */
++typedef struct {
++ Elf32_Word namesz;
++ Elf32_Word descsz;
++ Elf32_Word type;
++} Elf32_Note;
++
++typedef struct {
++ Elf64_Half namesz;
++ Elf64_Half descsz;
++ Elf64_Half type;
++} Elf64_Note;
++
++
++#if defined(ELFSIZE)
++#define CONCAT(x,y) __CONCAT(x,y)
++#define ELFNAME(x) CONCAT(elf,CONCAT(ELFSIZE,CONCAT(_,x)))
++#define ELFNAME2(x,y) CONCAT(x,CONCAT(_elf,CONCAT(ELFSIZE,CONCAT(_,y))))
++#define ELFNAMEEND(x) CONCAT(x,CONCAT(_elf,ELFSIZE))
++#define ELFDEFNNAME(x) CONCAT(ELF,CONCAT(ELFSIZE,CONCAT(_,x)))
++#endif
++
++#if defined(ELFSIZE) && (ELFSIZE == 32)
++#define Elf_Ehdr Elf32_Ehdr
++#define Elf_Phdr Elf32_Phdr
++#define Elf_Shdr Elf32_Shdr
++#define Elf_Sym Elf32_Sym
++#define Elf_Rel Elf32_Rel
++#define Elf_RelA Elf32_Rela
++#define Elf_Dyn Elf32_Dyn
++#define Elf_Word Elf32_Word
++#define Elf_Sword Elf32_Sword
++#define Elf_Addr Elf32_Addr
++#define Elf_Off Elf32_Off
++#define Elf_Nhdr Elf32_Nhdr
++#define Elf_Note Elf32_Note
++
++#define ELF_R_SYM ELF32_R_SYM
++#define ELF_R_TYPE ELF32_R_TYPE
++#define ELF_R_INFO ELF32_R_INFO
++#define ELFCLASS ELFCLASS32
++
++#define ELF_ST_BIND ELF32_ST_BIND
++#define ELF_ST_TYPE ELF32_ST_TYPE
++#define ELF_ST_INFO ELF32_ST_INFO
++
++#define AuxInfo Aux32Info
++#elif defined(ELFSIZE) && (ELFSIZE == 64)
++#define Elf_Ehdr Elf64_Ehdr
++#define Elf_Phdr Elf64_Phdr
++#define Elf_Shdr Elf64_Shdr
++#define Elf_Sym Elf64_Sym
++#define Elf_Rel Elf64_Rel
++#define Elf_RelA Elf64_Rela
++#define Elf_Dyn Elf64_Dyn
++#define Elf_Word Elf64_Word
++#define Elf_Sword Elf64_Sword
++#define Elf_Addr Elf64_Addr
++#define Elf_Off Elf64_Off
++#define Elf_Nhdr Elf64_Nhdr
++#define Elf_Note Elf64_Note
++
++#define ELF_R_SYM ELF64_R_SYM
++#define ELF_R_TYPE ELF64_R_TYPE
++#define ELF_R_INFO ELF64_R_INFO
++#define ELFCLASS ELFCLASS64
++
++#define ELF_ST_BIND ELF64_ST_BIND
++#define ELF_ST_TYPE ELF64_ST_TYPE
++#define ELF_ST_INFO ELF64_ST_INFO
++
++#define AuxInfo Aux64Info
++#endif
++
++#endif /* __XEN_PUBLIC_ELFSTRUCTS_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/event_channel.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,264 @@
++/******************************************************************************
++ * event_channel.h
++ *
++ * Event channels between domains.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2003-2004, K A Fraser.
++ */
++
++#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
++#define __XEN_PUBLIC_EVENT_CHANNEL_H__
++
++/*
++ * Prototype for this hypercall is:
++ * int event_channel_op(int cmd, void *args)
++ * @cmd == EVTCHNOP_??? (event-channel operation).
++ * @args == Operation-specific extra arguments (NULL if none).
++ */
++
++typedef uint32_t evtchn_port_t;
++DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
++
++/*
++ * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
++ * accepting interdomain bindings from domain <remote_dom>. A fresh port
++ * is allocated in <dom> and returned as <port>.
++ * NOTES:
++ * 1. If the caller is unprivileged then <dom> must be DOMID_SELF.
++ * 2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
++ */
++#define EVTCHNOP_alloc_unbound 6
++struct evtchn_alloc_unbound {
++ /* IN parameters */
++ domid_t dom, remote_dom;
++ /* OUT parameters */
++ evtchn_port_t port;
++};
++typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
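++
++/*
++ * Example (illustrative sketch): a backend in the calling domain offering
++ * a port for a frontend in domain 'frontend_domid' to bind to later.
++ * HYPERVISOR_event_channel_op() is the usual guest hypercall wrapper and
++ * advertise_port() is a stand-in for publishing the port to the peer
++ * (typically via xenstore); neither is defined by this header.
++ *
++ *     struct evtchn_alloc_unbound alloc = {
++ *         .dom        = DOMID_SELF,
++ *         .remote_dom = frontend_domid,
++ *     };
++ *     if (HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc) == 0)
++ *         advertise_port(alloc.port);
++ */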
++
++/*
++ * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
++ * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
++ * a port that is unbound and marked as accepting bindings from the calling
++ * domain. A fresh port is allocated in the calling domain and returned as
++ * <local_port>.
++ * NOTES:
++ * 1. <remote_dom> may be DOMID_SELF, allowing loopback connections.
++ */
++#define EVTCHNOP_bind_interdomain 0
++struct evtchn_bind_interdomain {
++ /* IN parameters. */
++ domid_t remote_dom;
++ evtchn_port_t remote_port;
++ /* OUT parameters. */
++ evtchn_port_t local_port;
++};
++typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t;
++
++/*
++ * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
++ * vcpu.
++ * NOTES:
++ * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list
++ * in xen.h for the classification of each VIRQ.
++ * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be
++ * re-bound via EVTCHNOP_bind_vcpu.
++ * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu.
++ * The allocated event channel is bound to the specified vcpu and the
++ * binding cannot be changed.
++ */
++#define EVTCHNOP_bind_virq 1
++struct evtchn_bind_virq {
++ /* IN parameters. */
++ uint32_t virq;
++ uint32_t vcpu;
++ /* OUT parameters. */
++ evtchn_port_t port;
++};
++typedef struct evtchn_bind_virq evtchn_bind_virq_t;
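++
++/*
++ * Example (illustrative sketch): binding the per-vcpu timer VIRQ on the
++ * current vcpu.  VIRQ_TIMER comes from xen.h; HYPERVISOR_event_channel_op(),
++ * this_vcpu_id() and setup_timer_handler() are assumed helpers, not part
++ * of this header.
++ *
++ *     struct evtchn_bind_virq bind = {
++ *         .virq = VIRQ_TIMER,
++ *         .vcpu = this_vcpu_id(),
++ *     };
++ *     if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind) == 0)
++ *         setup_timer_handler(bind.port);
++ */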
++
++/*
++ * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>.
++ * NOTES:
++ * 1. A physical IRQ may be bound to at most one event channel per domain.
++ * 2. Only a sufficiently-privileged domain may bind to a physical IRQ.
++ */
++#define EVTCHNOP_bind_pirq 2
++struct evtchn_bind_pirq {
++ /* IN parameters. */
++ uint32_t pirq;
++#define BIND_PIRQ__WILL_SHARE 1
++ uint32_t flags; /* BIND_PIRQ__* */
++ /* OUT parameters. */
++ evtchn_port_t port;
++};
++typedef struct evtchn_bind_pirq evtchn_bind_pirq_t;
++
++/*
++ * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
++ * NOTES:
++ * 1. The allocated event channel is bound to the specified vcpu. The binding
++ * may not be changed.
++ */
++#define EVTCHNOP_bind_ipi 7
++struct evtchn_bind_ipi {
++ uint32_t vcpu;
++ /* OUT parameters. */
++ evtchn_port_t port;
++};
++typedef struct evtchn_bind_ipi evtchn_bind_ipi_t;
++
++/*
++ * EVTCHNOP_close: Close a local event channel <port>. If the channel is
++ * interdomain then the remote end is placed in the unbound state
++ * (EVTCHNSTAT_unbound), awaiting a new connection.
++ */
++#define EVTCHNOP_close 3
++struct evtchn_close {
++ /* IN parameters. */
++ evtchn_port_t port;
++};
++typedef struct evtchn_close evtchn_close_t;
++
++/*
++ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
++ * endpoint is <port>.
++ */
++#define EVTCHNOP_send 4
++struct evtchn_send {
++ /* IN parameters. */
++ evtchn_port_t port;
++};
++typedef struct evtchn_send evtchn_send_t;
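++
++/*
++ * Example (illustrative sketch): notifying the remote end of an
++ * already-bound channel.  HYPERVISOR_event_channel_op() is an assumed
++ * hypercall wrapper, as above.
++ *
++ *     struct evtchn_send send = { .port = local_port };
++ *     (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
++ */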
++
++/*
++ * EVTCHNOP_status: Get the current status of the communication channel which
++ * has an endpoint at <dom, port>.
++ * NOTES:
++ * 1. <dom> may be specified as DOMID_SELF.
++ * 2. Only a sufficiently-privileged domain may obtain the status of an event
++ * channel for which <dom> is not DOMID_SELF.
++ */
++#define EVTCHNOP_status 5
++struct evtchn_status {
++ /* IN parameters */
++ domid_t dom;
++ evtchn_port_t port;
++ /* OUT parameters */
++#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
++#define EVTCHNSTAT_unbound      1  /* Channel is awaiting interdom connection.*/
++#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
++#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
++#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
++#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
++ uint32_t status;
++ uint32_t vcpu; /* VCPU to which this channel is bound. */
++ union {
++ struct {
++ domid_t dom;
++ } unbound; /* EVTCHNSTAT_unbound */
++ struct {
++ domid_t dom;
++ evtchn_port_t port;
++ } interdomain; /* EVTCHNSTAT_interdomain */
++ uint32_t pirq; /* EVTCHNSTAT_pirq */
++ uint32_t virq; /* EVTCHNSTAT_virq */
++ } u;
++};
++typedef struct evtchn_status evtchn_status_t;
++
++/*
++ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
++ * event is pending.
++ * NOTES:
++ * 1. IPI-bound channels always notify the vcpu specified at bind time.
++ * This binding cannot be changed.
++ * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time.
++ * This binding cannot be changed.
++ * 3. All other channels notify vcpu0 by default. This default is set when
++ * the channel is allocated (a port that is freed and subsequently reused
++ * has its binding reset to vcpu0).
++ */
++#define EVTCHNOP_bind_vcpu 8
++struct evtchn_bind_vcpu {
++ /* IN parameters. */
++ evtchn_port_t port;
++ uint32_t vcpu;
++};
++typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t;
++
++/*
++ * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
++ * a notification to the appropriate VCPU if an event is pending.
++ */
++#define EVTCHNOP_unmask 9
++struct evtchn_unmask {
++ /* IN parameters. */
++ evtchn_port_t port;
++};
++typedef struct evtchn_unmask evtchn_unmask_t;
++
++/*
++ * EVTCHNOP_reset: Close all event channels associated with specified domain.
++ * NOTES:
++ * 1. <dom> may be specified as DOMID_SELF.
++ * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
++ */
++#define EVTCHNOP_reset 10
++struct evtchn_reset {
++ /* IN parameters. */
++ domid_t dom;
++};
++typedef struct evtchn_reset evtchn_reset_t;
++
++/*
++ * Argument to event_channel_op_compat() hypercall. Superseded by the new
++ * event_channel_op() hypercall since 0x00030202.
++ */
++struct evtchn_op {
++ uint32_t cmd; /* EVTCHNOP_* */
++ union {
++ struct evtchn_alloc_unbound alloc_unbound;
++ struct evtchn_bind_interdomain bind_interdomain;
++ struct evtchn_bind_virq bind_virq;
++ struct evtchn_bind_pirq bind_pirq;
++ struct evtchn_bind_ipi bind_ipi;
++ struct evtchn_close close;
++ struct evtchn_send send;
++ struct evtchn_status status;
++ struct evtchn_bind_vcpu bind_vcpu;
++ struct evtchn_unmask unmask;
++ } u;
++};
++typedef struct evtchn_op evtchn_op_t;
++DEFINE_XEN_GUEST_HANDLE(evtchn_op_t);
++
++#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/features.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,71 @@
++/******************************************************************************
++ * features.h
++ *
++ * Feature flags, reported by XENVER_get_features.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2006, Keir Fraser <keir@xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_FEATURES_H__
++#define __XEN_PUBLIC_FEATURES_H__
++
++/*
++ * If set, the guest does not need to write-protect its pagetables, and can
++ * update them via direct writes.
++ */
++#define XENFEAT_writable_page_tables 0
++
++/*
++ * If set, the guest does not need to write-protect its segment descriptor
++ * tables, and can update them via direct writes.
++ */
++#define XENFEAT_writable_descriptor_tables 1
++
++/*
++ * If set, translation between the guest's 'pseudo-physical' address space
++ * and the host's machine address space is handled by the hypervisor. In this
++ * mode the guest does not need to perform phys-to/from-machine translations
++ * when performing page table operations.
++ */
++#define XENFEAT_auto_translated_physmap 2
++
++/* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */
++#define XENFEAT_supervisor_mode_kernel 3
++
++/*
++ * If set, the guest does not need to allocate x86 PAE page directories
++ * below 4GB. This flag is usually implied by auto_translated_physmap.
++ */
++#define XENFEAT_pae_pgdir_above_4gb 4
++
++#define XENFEAT_NR_SUBMAPS 1
++
++#endif /* __XEN_PUBLIC_FEATURES_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
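[Editorial illustration, not part of this patch: the XENFEAT_* values above are bit positions within a submap word reported by XENVER_get_features. A small standalone sketch of testing a feature bit, assuming the submap word has already been fetched by some other means.]

/* Illustrative only: testing a XENFEAT_* bit in a submap word assumed to
 * have been obtained via the XENVER_get_features hypercall. */
#include <stdint.h>
#include <stdio.h>

#define XENFEAT_writable_page_tables    0
#define XENFEAT_auto_translated_physmap 2

static int feature_set(uint32_t submap, unsigned int feat)
{
    return (submap >> feat) & 1;
}

int main(void)
{
    uint32_t submap = 0x5;   /* example value, not a real query result */
    printf("writable page tables:    %d\n",
           feature_set(submap, XENFEAT_writable_page_tables));
    printf("auto-translated physmap: %d\n",
           feature_set(submap, XENFEAT_auto_translated_physmap));
    return 0;
}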
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/grant_table.h 2007-08-27 14:02:10.000000000 -0400
+@@ -0,0 +1,399 @@
++/******************************************************************************
++ * grant_table.h
++ *
++ * Interface for granting foreign access to page frames, and receiving
++ * page-ownership transfers.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004, K A Fraser
++ */
++
++#ifndef __XEN_PUBLIC_GRANT_TABLE_H__
++#define __XEN_PUBLIC_GRANT_TABLE_H__
++
++
++/***********************************
++ * GRANT TABLE REPRESENTATION
++ */
++
++/* Some rough guidelines on accessing and updating grant-table entries
++ * in a concurrency-safe manner. For more information, Linux contains a
++ * reference implementation for guest OSes (arch/xen/kernel/grant_table.c).
++ *
++ * NB. WMB is a no-op on current-generation x86 processors. However, a
++ * compiler barrier will still be required.
++ *
++ * Introducing a valid entry into the grant table:
++ * 1. Write ent->domid.
++ * 2. Write ent->frame:
++ * GTF_permit_access: Frame to which access is permitted.
++ * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
++ * frame, or zero if none.
++ * 3. Write memory barrier (WMB).
++ * 4. Write ent->flags, inc. valid type.
++ *
++ * Invalidating an unused GTF_permit_access entry:
++ * 1. flags = ent->flags.
++ * 2. Observe that !(flags & (GTF_reading|GTF_writing)).
++ * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
++ * NB. No need for WMB as reuse of entry is control-dependent on success of
++ * step 3, and all architectures guarantee ordering of ctrl-dep writes.
++ *
++ * Invalidating an in-use GTF_permit_access entry:
++ * This cannot be done directly. Request assistance from the domain controller
++ * which can set a timeout on the use of a grant entry and take necessary
++ * action. (NB. This is not yet implemented!).
++ *
++ * Invalidating an unused GTF_accept_transfer entry:
++ * 1. flags = ent->flags.
++ * 2. Observe that !(flags & GTF_transfer_committed). [*]
++ * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
++ * NB. No need for WMB as reuse of entry is control-dependent on success of
++ * step 3, and all architectures guarantee ordering of ctrl-dep writes.
++ * [*] If GTF_transfer_committed is set then the grant entry is 'committed'.
++ * The guest must /not/ modify the grant entry until the address of the
++ * transferred frame is written. It is safe for the guest to spin waiting
++ * for this to occur (detect by observing GTF_transfer_completed in
++ * ent->flags).
++ *
++ * Invalidating a committed GTF_accept_transfer entry:
++ * 1. Wait for (ent->flags & GTF_transfer_completed).
++ *
++ * Changing a GTF_permit_access from writable to read-only:
++ * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing.
++ *
++ * Changing a GTF_permit_access from read-only to writable:
++ * Use SMP-safe bit-setting instruction.
++ */
++
++/*
++ * A grant table comprises a packed array of grant entries in one or more
++ * page frames shared between Xen and a guest.
++ * [XEN]: This field is written by Xen and read by the sharing guest.
++ * [GST]: This field is written by the guest and read by Xen.
++ */
++struct grant_entry {
++ /* GTF_xxx: various type and flag information. [XEN,GST] */
++ uint16_t flags;
++ /* The domain being granted foreign privileges. [GST] */
++ domid_t domid;
++ /*
++ * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
++ * GTF_accept_transfer: Frame whose ownership is transferred by @domid. [XEN]
++ */
++ uint32_t frame;
++};
++typedef struct grant_entry grant_entry_t;
++
++/*
++ * Type of grant entry.
++ * GTF_invalid: This grant entry grants no privileges.
++ * GTF_permit_access: Allow @domid to map/access @frame.
++ * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
++ * to this guest. Xen writes the page number to @frame.
++ */
++#define GTF_invalid (0U<<0)
++#define GTF_permit_access (1U<<0)
++#define GTF_accept_transfer (2U<<0)
++#define GTF_type_mask (3U<<0)
++
++/*
++ * Subflags for GTF_permit_access.
++ * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
++ * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
++ * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
++ */
++#define _GTF_readonly (2)
++#define GTF_readonly (1U<<_GTF_readonly)
++#define _GTF_reading (3)
++#define GTF_reading (1U<<_GTF_reading)
++#define _GTF_writing (4)
++#define GTF_writing (1U<<_GTF_writing)
++
++/*
++ * Subflags for GTF_accept_transfer:
++ * GTF_transfer_committed: Xen sets this flag to indicate that it is committed
++ * to transferring ownership of a page frame. When a guest sees this flag
++ * it must /not/ modify the grant entry until GTF_transfer_completed is
++ * set by Xen.
++ * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag
++ * after reading GTF_transfer_committed. Xen will always write the frame
++ * address, followed by ORing this flag, in a timely manner.
++ */
++#define _GTF_transfer_committed (2)
++#define GTF_transfer_committed (1U<<_GTF_transfer_committed)
++#define _GTF_transfer_completed (3)
++#define GTF_transfer_completed (1U<<_GTF_transfer_completed)
++
++
++/***********************************
++ * GRANT TABLE QUERIES AND USES
++ */
++
++/*
++ * Reference to a grant entry in a specified domain's grant table.
++ */
++typedef uint32_t grant_ref_t;
++
++/*
++ * Handle to track a mapping created via a grant reference.
++ */
++typedef uint32_t grant_handle_t;
++
++/*
++ * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
++ * by devices and/or host CPUs. If successful, <handle> is a tracking number
++ * that must be presented later to destroy the mapping(s). On error, <handle>
++ * is a negative status code.
++ * NOTES:
++ * 1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
++ * via which I/O devices may access the granted frame.
++ * 2. If GNTMAP_host_map is specified then a mapping will be added at
++ * either a host virtual address in the current address space, or at
++ * a PTE at the specified machine address. The type of mapping to
++ * perform is selected through the GNTMAP_contains_pte flag, and the
++ * address is specified in <host_addr>.
++ * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
++ * host mapping is destroyed by other means then it is *NOT* guaranteed
++ * to be accounted to the correct grant reference!
++ */
++#define GNTTABOP_map_grant_ref 0
++struct gnttab_map_grant_ref {
++ /* IN parameters. */
++ uint64_t host_addr;
++ uint32_t flags; /* GNTMAP_* */
++ grant_ref_t ref;
++ domid_t dom;
++ /* OUT parameters. */
++ int16_t status; /* GNTST_* */
++ grant_handle_t handle;
++ uint64_t dev_bus_addr;
++};
++typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t);
++
++/*
++ * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
++ * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
++ * field is ignored. If non-zero, it must refer to a device/host mapping
++ * that is tracked by <handle>.
++ * NOTES:
++ * 1. The call may fail in an undefined manner if either mapping is not
++ * tracked by <handle>.
++ * 2. After executing a batch of unmaps, it is guaranteed that no stale
++ * mappings will remain in the device or host TLBs.
++ */
++#define GNTTABOP_unmap_grant_ref 1
++struct gnttab_unmap_grant_ref {
++ /* IN parameters. */
++ uint64_t host_addr;
++ uint64_t dev_bus_addr;
++ grant_handle_t handle;
++ /* OUT parameters. */
++ int16_t status; /* GNTST_* */
++};
++typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t);
++
++/*
++ * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
++ * <nr_frames> pages. The frame addresses are written to the <frame_list>.
++ * Only <nr_frames> addresses are written, even if the table is larger.
++ * NOTES:
++ * 1. <dom> may be specified as DOMID_SELF.
++ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
++ * 3. Xen may not support more than a single grant-table page per domain.
++ */
++#define GNTTABOP_setup_table 2
++struct gnttab_setup_table {
++ /* IN parameters. */
++ domid_t dom;
++ uint32_t nr_frames;
++ /* OUT parameters. */
++ int16_t status; /* GNTST_* */
++ XEN_GUEST_HANDLE(ulong) frame_list;
++};
++typedef struct gnttab_setup_table gnttab_setup_table_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
++
++/*
++ * GNTTABOP_dump_table: Dump the contents of the grant table to the
++ * xen console. Debugging use only.
++ */
++#define GNTTABOP_dump_table 3
++struct gnttab_dump_table {
++ /* IN parameters. */
++ domid_t dom;
++ /* OUT parameters. */
++ int16_t status; /* GNTST_* */
++};
++typedef struct gnttab_dump_table gnttab_dump_table_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);
++
++/*
++ * GNTTABOP_transfer: Transfer <frame> to a foreign domain. The
++ * foreign domain has previously registered its interest in the transfer via
++ * <domid, ref>.
++ *
++ * Note that, even if the transfer fails, the specified page no longer belongs
++ * to the calling domain *unless* the error is GNTST_bad_page.
++ */
++#define GNTTABOP_transfer 4
++struct gnttab_transfer {
++ /* IN parameters. */
++ xen_pfn_t mfn;
++ domid_t domid;
++ grant_ref_t ref;
++ /* OUT parameters. */
++ int16_t status;
++};
++typedef struct gnttab_transfer gnttab_transfer_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t);
++
++
++/*
++ * GNTTABOP_copy: Hypervisor-based copy.
++ * Source and destination can be either MFNs or, for foreign domains,
++ * grant references. The foreign domain has to grant read/write access
++ * in its grant table.
++ *
++ * The flags specify what type source and destinations are (either MFN
++ * or grant reference).
++ *
++ * Note that this can also be used to copy data between two domains
++ * via a third party if the source and destination domains had previously
++ * granted appropriate access to their pages to the third party.
++ *
++ * source_offset specifies an offset in the source frame, dest_offset
++ * the offset in the target frame and len specifies the number of
++ * bytes to be copied.
++ */
++
++#define _GNTCOPY_source_gref (0)
++#define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref)
++#define _GNTCOPY_dest_gref (1)
++#define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref)
++
++#define GNTTABOP_copy 5
++typedef struct gnttab_copy {
++ /* IN parameters. */
++ struct {
++ union {
++ grant_ref_t ref;
++ xen_pfn_t gmfn;
++ } u;
++ domid_t domid;
++ uint16_t offset;
++ } source, dest;
++ uint16_t len;
++ uint16_t flags; /* GNTCOPY_* */
++ /* OUT parameters. */
++ int16_t status;
++} gnttab_copy_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t);
++
++/*
++ * GNTTABOP_query_size: Query the current and maximum sizes of the shared
++ * grant table.
++ * NOTES:
++ * 1. <dom> may be specified as DOMID_SELF.
++ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
++ */
++#define GNTTABOP_query_size 6
++struct gnttab_query_size {
++ /* IN parameters. */
++ domid_t dom;
++ /* OUT parameters. */
++ uint32_t nr_frames;
++ uint32_t max_nr_frames;
++ int16_t status; /* GNTST_* */
++};
++typedef struct gnttab_query_size gnttab_query_size_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
++
++
++/*
++ * Bitfield values for update_pin_status.flags.
++ */
++ /* Map the grant entry for access by I/O devices. */
++#define _GNTMAP_device_map (0)
++#define GNTMAP_device_map (1<<_GNTMAP_device_map)
++ /* Map the grant entry for access by host CPUs. */
++#define _GNTMAP_host_map (1)
++#define GNTMAP_host_map (1<<_GNTMAP_host_map)
++ /* Accesses to the granted frame will be restricted to read-only access. */
++#define _GNTMAP_readonly (2)
++#define GNTMAP_readonly (1<<_GNTMAP_readonly)
++ /*
++ * GNTMAP_host_map subflag:
++ * 0 => The host mapping is usable only by the guest OS.
++ * 1 => The host mapping is usable by guest OS + current application.
++ */
++#define _GNTMAP_application_map (3)
++#define GNTMAP_application_map (1<<_GNTMAP_application_map)
++
++ /*
++ * GNTMAP_contains_pte subflag:
++ * 0 => This map request contains a host virtual address.
++ * 1 => This map request contains the machine address of the PTE to update.
++ */
++#define _GNTMAP_contains_pte (4)
++#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
++
++/*
++ * Values for error status returns. All errors are -ve.
++ */
++#define GNTST_okay (0) /* Normal return. */
++#define GNTST_general_error (-1) /* General undefined error. */
++#define GNTST_bad_domain (-2) /* Unrecognised domain id. */
++#define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */
++#define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */
++#define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */
++#define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/
++#define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */
++#define GNTST_permission_denied (-8) /* Not enough privilege for operation. */
++#define GNTST_bad_page (-9) /* Specified page was invalid for op. */
++#define GNTST_bad_copy_arg (-10) /* Copy arguments cross page boundary. */
++
++#define GNTTABOP_error_msgs { \
++ "okay", \
++ "undefined error", \
++ "unrecognised domain id", \
++ "invalid grant reference", \
++ "invalid mapping handle", \
++ "invalid virtual address", \
++ "invalid device address", \
++ "no spare translation slot in the I/O MMU", \
++ "permission denied", \
++ "bad page", \
++ "copy arguments cross page boundary" \
++}
++
++#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
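[Editorial illustration, not part of this patch: the "Introducing a valid entry" protocol described near the top of grant_table.h above (write domid, then frame, then a write barrier, then flags) can be sketched as follows. wmb() here is a placeholder for whatever write-barrier primitive the guest kernel provides.]

/* Illustrative sketch of the grant-entry update ordering documented in
 * grant_table.h above.  The structure mirrors struct grant_entry and
 * wmb() is a stand-in write barrier. */
#include <stdint.h>

typedef uint16_t domid_t;

#define GTF_permit_access (1U << 0)
#define GTF_readonly      (1U << 2)

struct grant_entry {
    uint16_t flags;
    domid_t  domid;
    uint32_t frame;
};

#define wmb() __sync_synchronize()   /* placeholder write barrier */

static void grant_frame_to(struct grant_entry *ent, domid_t dom,
                           uint32_t frame, int readonly)
{
    ent->domid = dom;                            /* step 1 */
    ent->frame = frame;                          /* step 2 */
    wmb();                                       /* step 3 */
    ent->flags = GTF_permit_access |
                 (readonly ? GTF_readonly : 0);  /* step 4: entry becomes valid */
}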
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/hvm/e820.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,47 @@
++
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_E820_H__
++#define __XEN_PUBLIC_HVM_E820_H__
++
++/* PC BIOS standard E820 types. */
++#define E820_RAM 1
++#define E820_RESERVED 2
++#define E820_ACPI 3
++#define E820_NVS 4
++
++/* E820 location in HVM virtual address space. */
++#define E820_MAP_PAGE 0x00090000
++#define E820_MAP_NR_OFFSET 0x000001E8
++#define E820_MAP_OFFSET 0x000002D0
++
++struct e820entry {
++ uint64_t addr;
++ uint64_t size;
++ uint32_t type;
++} __attribute__((packed));
++
++#define HVM_BELOW_4G_RAM_END 0xF0000000
++
++#define HVM_BELOW_4G_MMIO_START HVM_BELOW_4G_RAM_END
++#define HVM_BELOW_4G_MMIO_LENGTH ((1ULL << 32) - HVM_BELOW_4G_MMIO_START)
++
++#endif /* __XEN_PUBLIC_HVM_E820_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/hvm/hvm_info_table.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,41 @@
++/******************************************************************************
++ * hvm/hvm_info_table.h
++ *
++ * HVM parameter and information table, written into guest memory map.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
++#define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
++
++#define HVM_INFO_PFN 0x09F
++#define HVM_INFO_OFFSET 0x800
++#define HVM_INFO_PADDR ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET)
++
++struct hvm_info_table {
++ char signature[8]; /* "HVM INFO" */
++ uint32_t length;
++ uint8_t checksum;
++ uint8_t acpi_enabled;
++ uint8_t apic_mode;
++ uint32_t nr_vcpus;
++};
++
++#endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/hvm/hvm_op.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,73 @@
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
++#define __XEN_PUBLIC_HVM_HVM_OP_H__
++
++/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
++#define HVMOP_set_param 0
++#define HVMOP_get_param 1
++struct xen_hvm_param {
++ domid_t domid; /* IN */
++ uint32_t index; /* IN */
++ uint64_t value; /* IN/OUT */
++};
++typedef struct xen_hvm_param xen_hvm_param_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
++
++/* Set the logical level of one of a domain's PCI INTx wires. */
++#define HVMOP_set_pci_intx_level 2
++struct xen_hvm_set_pci_intx_level {
++ /* Domain to be updated. */
++ domid_t domid;
++ /* PCI INTx identification in PCI topology (domain:bus:device:intx). */
++ uint8_t domain, bus, device, intx;
++ /* Assertion level (0 = unasserted, 1 = asserted). */
++ uint8_t level;
++};
++typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t);
++
++/* Set the logical level of one of a domain's ISA IRQ wires. */
++#define HVMOP_set_isa_irq_level 3
++struct xen_hvm_set_isa_irq_level {
++ /* Domain to be updated. */
++ domid_t domid;
++ /* ISA device identification, by ISA IRQ (0-15). */
++ uint8_t isa_irq;
++ /* Assertion level (0 = unasserted, 1 = asserted). */
++ uint8_t level;
++};
++typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t);
++
++#define HVMOP_set_pci_link_route 4
++struct xen_hvm_set_pci_link_route {
++ /* Domain to be updated. */
++ domid_t domid;
++ /* PCI link identifier (0-3). */
++ uint8_t link;
++ /* ISA IRQ (1-15), or 0 (disable link). */
++ uint8_t isa_irq;
++};
++typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
++
++#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
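[Editorial illustration, not part of this patch: a minimal sketch of filling struct xen_hvm_set_isa_irq_level for HVMOP_set_isa_irq_level. The hypercall wrapper name is hypothetical.]

/* Illustrative only: asserting an emulated ISA IRQ line via
 * HVMOP_set_isa_irq_level.  hypervisor_hvm_op() is a hypothetical
 * stand-in for the caller's real hypercall wrapper. */
#include <stdint.h>

typedef uint16_t domid_t;

#define HVMOP_set_isa_irq_level 3

struct xen_hvm_set_isa_irq_level {
    domid_t  domid;    /* domain to be updated */
    uint8_t  isa_irq;  /* ISA IRQ number, 0-15 */
    uint8_t  level;    /* 0 = deassert, 1 = assert */
};

extern int hypervisor_hvm_op(unsigned int op, void *arg);

static int assert_isa_irq(domid_t dom, uint8_t irq)
{
    struct xen_hvm_set_isa_irq_level a = {
        .domid = dom, .isa_irq = irq, .level = 1
    };
    return hypervisor_hvm_op(HVMOP_set_isa_irq_level, &a);
}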
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/hvm/ioreq.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,122 @@
++/*
++ * ioreq.h: I/O request definitions for device models
++ * Copyright (c) 2004, Intel Corporation.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _IOREQ_H_
++#define _IOREQ_H_
++
++#define IOREQ_READ 1
++#define IOREQ_WRITE 0
++
++#define STATE_IOREQ_NONE 0
++#define STATE_IOREQ_READY 1
++#define STATE_IOREQ_INPROCESS 2
++#define STATE_IORESP_READY 3
++
++#define IOREQ_TYPE_PIO 0 /* pio */
++#define IOREQ_TYPE_COPY 1 /* mmio ops */
++#define IOREQ_TYPE_AND 2
++#define IOREQ_TYPE_OR 3
++#define IOREQ_TYPE_XOR 4
++#define IOREQ_TYPE_XCHG 5
++#define IOREQ_TYPE_ADD 6
++#define IOREQ_TYPE_TIMEOFFSET 7
++#define IOREQ_TYPE_INVALIDATE 8 /* mapcache */
++#define IOREQ_TYPE_SUB 9
++
++/*
++ * VMExit dispatcher should cooperate with instruction decoder to
++ * prepare this structure and notify service OS and DM by sending
++ * virq
++ */
++struct ioreq {
++ uint64_t addr; /* physical address */
++ uint64_t size; /* size in bytes */
++ uint64_t count; /* for rep prefixes */
++ uint64_t data; /* data (or paddr of data) */
++ uint8_t state:4;
++ uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr
++ * of the real data to use. */
++ uint8_t dir:1; /* 1=read, 0=write */
++ uint8_t df:1;
++ uint8_t type; /* I/O type */
++ uint8_t _pad0[6];
++ uint64_t io_count; /* How many I/O operations have been done on this vcpu */
++};
++typedef struct ioreq ioreq_t;
++
++struct vcpu_iodata {
++ struct ioreq vp_ioreq;
++ /* Event channel port, used for notifications to/from the device model. */
++ uint32_t vp_eport;
++ uint32_t _pad0;
++};
++typedef struct vcpu_iodata vcpu_iodata_t;
++
++struct shared_iopage {
++ struct vcpu_iodata vcpu_iodata[1];
++};
++typedef struct shared_iopage shared_iopage_t;
++
++#define IOREQ_BUFFER_SLOT_NUM 80
++struct buffered_iopage {
++ unsigned int read_pointer;
++ unsigned int write_pointer;
++ ioreq_t ioreq[IOREQ_BUFFER_SLOT_NUM];
++}; /* NB. Size of this structure must be no greater than one page. */
++typedef struct buffered_iopage buffered_iopage_t;
++
++#if defined(__ia64__)
++struct pio_buffer {
++ uint32_t page_offset;
++ uint32_t pointer;
++ uint32_t data_end;
++ uint32_t buf_size;
++ void *opaque;
++};
++
++#define PIO_BUFFER_IDE_PRIMARY 0 /* I/O port = 0x1F0 */
++#define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */
++#define PIO_BUFFER_ENTRY_NUM 2
++struct buffered_piopage {
++ struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM];
++ uint8_t buffer[1];
++};
++#endif /* defined(__ia64__) */
++
++#if defined(__i386__) || defined(__x86_64__)
++#define ACPI_PM1A_EVT_BLK_ADDRESS 0x0000000000001f40
++#define ACPI_PM1A_CNT_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04)
++#define ACPI_PM_TMR_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08)
++#endif /* defined(__i386__) || defined(__x86_64__) */
++
++#endif /* _IOREQ_H_ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
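[Editorial illustration, not part of this patch: a sketch of how a device model might interpret one port-I/O request described by the ioreq layout above; direction, size and address select what to emulate. The emulate_port_read/emulate_port_write helpers are hypothetical, and only the fields used here are declared.]

/* Illustrative decode of a single PIO request using a subset of the
 * struct ioreq fields defined above.  The emulate_* helpers are
 * hypothetical device-model callbacks. */
#include <stdint.h>

#define IOREQ_READ     1
#define IOREQ_TYPE_PIO 0

struct ioreq_view {                 /* subset of struct ioreq used here */
    uint64_t addr, size, data;
    uint8_t  dir, type;
};

extern uint64_t emulate_port_read(uint16_t port, unsigned int size);
extern void     emulate_port_write(uint16_t port, unsigned int size,
                                   uint64_t val);

static void handle_pio(struct ioreq_view *req)
{
    if (req->type != IOREQ_TYPE_PIO)
        return;                                  /* only PIO handled here */
    if (req->dir == IOREQ_READ)
        req->data = emulate_port_read((uint16_t)req->addr,
                                      (unsigned int)req->size);
    else
        emulate_port_write((uint16_t)req->addr,
                           (unsigned int)req->size, req->data);
}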
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/hvm/params.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,55 @@
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
++#define __XEN_PUBLIC_HVM_PARAMS_H__
++
++#include "hvm_op.h"
++
++/*
++ * Parameter space for HVMOP_{set,get}_param.
++ */
++
++/*
++ * How should CPU0 event-channel notifications be delivered?
++ * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt).
++ * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
++ * Domain = val[47:32], Bus = val[31:16],
++ * DevFn = val[15: 8], IntX = val[ 1: 0]
++ * If val == 0 then CPU0 event-channel notifications are not delivered.
++ */
++#define HVM_PARAM_CALLBACK_IRQ 0
++
++/*
++ * These are not used by Xen. They are here for convenience of HVM-guest
++ * xenbus implementations.
++ */
++#define HVM_PARAM_STORE_PFN 1
++#define HVM_PARAM_STORE_EVTCHN 2
++
++#define HVM_PARAM_PAE_ENABLED 4
++
++#define HVM_PARAM_IOREQ_PFN 5
++
++#define HVM_PARAM_BUFIOREQ_PFN 6
++
++#define HVM_NR_PARAMS 7
++
++#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
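[Editorial illustration, not part of this patch: the HVM_PARAM_CALLBACK_IRQ encoding documented above packs the delivery method into the top byte of the value. A standalone sketch of building the two documented forms.]

/* Illustrative only: building HVM_PARAM_CALLBACK_IRQ values per the bit
 * layout documented in params.h above (val[63:56] selects the method). */
#include <stdint.h>
#include <stdio.h>

static uint64_t callback_via_gsi(uint32_t gsi)
{
    return ((uint64_t)0 << 56) | gsi;            /* val[63:56] == 0 */
}

static uint64_t callback_via_pci_intx(uint16_t domain, uint16_t bus,
                                      uint8_t devfn, uint8_t intx)
{
    return ((uint64_t)1 << 56) |                 /* val[63:56] == 1 */
           ((uint64_t)domain << 32) |            /* val[47:32]       */
           ((uint64_t)bus    << 16) |            /* val[31:16]       */
           ((uint64_t)devfn  <<  8) |            /* val[15:8]        */
           (uint64_t)(intx & 3);                 /* val[1:0]         */
}

int main(void)
{
    printf("GSI 9        -> 0x%016llx\n",
           (unsigned long long)callback_via_gsi(9));
    printf("00:03.0 INTA -> 0x%016llx\n",
           (unsigned long long)callback_via_pci_intx(0, 0, 3 << 3, 0));
    return 0;
}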
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/hvm/save.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,462 @@
++/*
++ * hvm/save.h
++ *
++ * Structure definitions for HVM state that is held by Xen and must
++ * be saved along with the domain's memory and device-model state.
++ *
++ *
++ * Copyright (c) 2007 XenSource Ltd.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_SAVE_H__
++#define __XEN_PUBLIC_HVM_SAVE_H__
++
++/*
++ * Structures in this header *must* have the same layout in 32bit
++ * and 64bit environments: this means that all fields must be explicitly
++ * sized types and aligned to their sizes, and the structs must be
++ * a multiple of eight bytes long.
++ *
++ * Only the state necessary for saving and restoring (i.e. fields
++ * that are analogous to actual hardware state) should go in this file.
++ * Internal mechanisms should be kept in Xen-private headers.
++ */
++
++/*
++ * Each entry is preceded by a descriptor giving its type and length
++ */
++struct hvm_save_descriptor {
++ uint16_t typecode; /* Used to demux the various types below */
++ uint16_t instance; /* Further demux within a type */
++ uint32_t length; /* In bytes, *not* including this descriptor */
++};
++
++
++/*
++ * Each entry has a datatype associated with it: for example, the CPU state
++ * is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU),
++ * and is identified by a descriptor with typecode HVM_SAVE_CODE(CPU).
++ * DECLARE_HVM_SAVE_TYPE binds these things together with some type-system
++ * ugliness.
++ */
++
++#define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \
++ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; }
++
++#define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t)
++#define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x)))
++#define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c))
++
++
++/*
++ * Save/restore header: general info about the save file.
++ */
++
++#define HVM_FILE_MAGIC 0x54381286
++#define HVM_FILE_VERSION 0x00000001
++
++struct hvm_save_header {
++ uint32_t magic; /* Must be HVM_FILE_MAGIC */
++ uint32_t version; /* File format version */
++ uint64_t changeset; /* Version of Xen that saved this file */
++ uint32_t cpuid; /* CPUID[0x01][%eax] on the saving machine */
++ uint32_t pad0;
++};
++
++DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
++
++
++/*
++ * Processor
++ */
++
++struct hvm_hw_cpu {
++ uint8_t fpu_regs[512];
++
++ uint64_t rax;
++ uint64_t rbx;
++ uint64_t rcx;
++ uint64_t rdx;
++ uint64_t rbp;
++ uint64_t rsi;
++ uint64_t rdi;
++ uint64_t rsp;
++ uint64_t r8;
++ uint64_t r9;
++ uint64_t r10;
++ uint64_t r11;
++ uint64_t r12;
++ uint64_t r13;
++ uint64_t r14;
++ uint64_t r15;
++
++ uint64_t rip;
++ uint64_t rflags;
++
++ uint64_t cr0;
++ uint64_t cr2;
++ uint64_t cr3;
++ uint64_t cr4;
++
++ uint64_t dr0;
++ uint64_t dr1;
++ uint64_t dr2;
++ uint64_t dr3;
++ uint64_t dr6;
++ uint64_t dr7;
++
++ uint32_t cs_sel;
++ uint32_t ds_sel;
++ uint32_t es_sel;
++ uint32_t fs_sel;
++ uint32_t gs_sel;
++ uint32_t ss_sel;
++ uint32_t tr_sel;
++ uint32_t ldtr_sel;
++
++ uint32_t cs_limit;
++ uint32_t ds_limit;
++ uint32_t es_limit;
++ uint32_t fs_limit;
++ uint32_t gs_limit;
++ uint32_t ss_limit;
++ uint32_t tr_limit;
++ uint32_t ldtr_limit;
++ uint32_t idtr_limit;
++ uint32_t gdtr_limit;
++
++ uint64_t cs_base;
++ uint64_t ds_base;
++ uint64_t es_base;
++ uint64_t fs_base;
++ uint64_t gs_base;
++ uint64_t ss_base;
++ uint64_t tr_base;
++ uint64_t ldtr_base;
++ uint64_t idtr_base;
++ uint64_t gdtr_base;
++
++ uint32_t cs_arbytes;
++ uint32_t ds_arbytes;
++ uint32_t es_arbytes;
++ uint32_t fs_arbytes;
++ uint32_t gs_arbytes;
++ uint32_t ss_arbytes;
++ uint32_t tr_arbytes;
++ uint32_t ldtr_arbytes;
++
++ uint32_t sysenter_cs;
++ uint32_t padding0;
++
++ uint64_t sysenter_esp;
++ uint64_t sysenter_eip;
++
++ /* msr for em64t */
++ uint64_t shadow_gs;
++
++ /* msr content saved/restored. */
++ uint64_t msr_flags;
++ uint64_t msr_lstar;
++ uint64_t msr_star;
++ uint64_t msr_cstar;
++ uint64_t msr_syscall_mask;
++ uint64_t msr_efer;
++
++ /* guest's idea of what rdtsc() would return */
++ uint64_t tsc;
++
++ /* pending event, if any */
++ union {
++ uint32_t pending_event;
++ struct {
++ uint8_t pending_vector:8;
++ uint8_t pending_type:3;
++ uint8_t pending_error_valid:1;
++ uint32_t pending_reserved:19;
++ uint8_t pending_valid:1;
++ };
++ };
++ /* error code for pending event */
++ uint32_t error_code;
++};
++
++DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_cpu);
++
++
++/*
++ * PIC
++ */
++
++struct hvm_hw_vpic {
++ /* IR line bitmasks. */
++ uint8_t irr;
++ uint8_t imr;
++ uint8_t isr;
++
++ /* Line IRx maps to IRQ irq_base+x */
++ uint8_t irq_base;
++
++ /*
++ * Where are we in ICW2-4 initialisation (0 means no init in progress)?
++ * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1).
++ * Bit 2: ICW1.IC4 (1 == ICW4 included in init sequence)
++ * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence)
++ */
++ uint8_t init_state:4;
++
++ /* IR line with highest priority. */
++ uint8_t priority_add:4;
++
++ /* Reads from A=0 obtain ISR or IRR? */
++ uint8_t readsel_isr:1;
++
++ /* Reads perform a polling read? */
++ uint8_t poll:1;
++
++ /* Automatically clear IRQs from the ISR during INTA? */
++ uint8_t auto_eoi:1;
++
++ /* Automatically rotate IRQ priorities during AEOI? */
++ uint8_t rotate_on_auto_eoi:1;
++
++ /* Exclude slave inputs when considering in-service IRQs? */
++ uint8_t special_fully_nested_mode:1;
++
++ /* Special mask mode excludes masked IRs from AEOI and priority checks. */
++ uint8_t special_mask_mode:1;
++
++ /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */
++ uint8_t is_master:1;
++
++ /* Edge/trigger selection. */
++ uint8_t elcr;
++
++ /* Virtual INT output. */
++ uint8_t int_output;
++};
++
++DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic);
++
++
++/*
++ * IO-APIC
++ */
++
++#ifdef __ia64__
++#define VIOAPIC_IS_IOSAPIC 1
++#define VIOAPIC_NUM_PINS 24
++#else
++#define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */
++#endif
++
++struct hvm_hw_vioapic {
++ uint64_t base_address;
++ uint32_t ioregsel;
++ uint32_t id;
++ union vioapic_redir_entry
++ {
++ uint64_t bits;
++ struct {
++ uint8_t vector;
++ uint8_t delivery_mode:3;
++ uint8_t dest_mode:1;
++ uint8_t delivery_status:1;
++ uint8_t polarity:1;
++ uint8_t remote_irr:1;
++ uint8_t trig_mode:1;
++ uint8_t mask:1;
++ uint8_t reserve:7;
++#if !VIOAPIC_IS_IOSAPIC
++ uint8_t reserved[4];
++ uint8_t dest_id;
++#else
++ uint8_t reserved[3];
++ uint16_t dest_id;
++#endif
++ } fields;
++ } redirtbl[VIOAPIC_NUM_PINS];
++};
++
++DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic);
++
++
++/*
++ * LAPIC
++ */
++
++struct hvm_hw_lapic {
++ uint64_t apic_base_msr;
++ uint32_t disabled; /* VLAPIC_xx_DISABLED */
++ uint32_t timer_divisor;
++};
++
++DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic);
++
++struct hvm_hw_lapic_regs {
++ /* A 4k page of register state */
++ uint8_t data[0x400];
++};
++
++DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs);
++
++
++/*
++ * IRQs
++ */
++
++struct hvm_hw_pci_irqs {
++ /*
++ * Virtual interrupt wires for a single PCI bus.
++ * Indexed by: device*4 + INTx#.
++ */
++ union {
++ DECLARE_BITMAP(i, 32*4);
++ uint64_t pad[2];
++ };
++};
++
++DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs);
++
++struct hvm_hw_isa_irqs {
++ /*
++ * Virtual interrupt wires for ISA devices.
++ * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing).
++ */
++ union {
++ DECLARE_BITMAP(i, 16);
++ uint64_t pad[1];
++ };
++};
++
++DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs);
++
++struct hvm_hw_pci_link {
++ /*
++ * PCI-ISA interrupt router.
++ * Each PCI <device:INTx#> is 'wire-ORed' into one of four links using
++ * the traditional 'barber's pole' mapping ((device + INTx#) & 3).
++ * The router provides a programmable mapping from each link to a GSI.
++ */
++ uint8_t route[4];
++ uint8_t pad0[4];
++};
++
++DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link);
++
++/*
++ * PIT
++ */
++
++struct hvm_hw_pit {
++ struct hvm_hw_pit_channel {
++ uint32_t count; /* can be 65536 */
++ uint16_t latched_count;
++ uint8_t count_latched;
++ uint8_t status_latched;
++ uint8_t status;
++ uint8_t read_state;
++ uint8_t write_state;
++ uint8_t write_latch;
++ uint8_t rw_mode;
++ uint8_t mode;
++ uint8_t bcd; /* not supported */
++ uint8_t gate; /* timer start */
++ } channels[3]; /* 3 x 16 bytes */
++ uint32_t speaker_data_on;
++ uint32_t pad0;
++};
++
++DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit);
++
++
++/*
++ * RTC
++ */
++
++#define RTC_CMOS_SIZE 14
++struct hvm_hw_rtc {
++ /* CMOS bytes */
++ uint8_t cmos_data[RTC_CMOS_SIZE];
++ /* Index register for 2-part operations */
++ uint8_t cmos_index;
++ uint8_t pad0;
++};
++
++DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc);
++
++
++/*
++ * HPET
++ */
++
++#define HPET_TIMER_NUM 3 /* 3 timers supported now */
++struct hvm_hw_hpet {
++ /* Memory-mapped, software visible registers */
++ uint64_t capability; /* capabilities */
++ uint64_t res0; /* reserved */
++ uint64_t config; /* configuration */
++ uint64_t res1; /* reserved */
++ uint64_t isr; /* interrupt status reg */
++ uint64_t res2[25]; /* reserved */
++ uint64_t mc64; /* main counter */
++ uint64_t res3; /* reserved */
++ struct { /* timers */
++ uint64_t config; /* configuration/cap */
++ uint64_t cmp; /* comparator */
++ uint64_t fsb; /* FSB route, not supported now */
++ uint64_t res4; /* reserved */
++ } timers[HPET_TIMER_NUM];
++ uint64_t res5[4*(24-HPET_TIMER_NUM)]; /* reserved, up to 0x3ff */
++
++ /* Hidden register state */
++ uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */
++};
++
++DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet);
++
++
++/*
++ * PM timer
++ */
++
++struct hvm_hw_pmtimer {
++ uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */
++ uint16_t pm1a_sts; /* PM1a_EVT_BLK.PM1a_STS: status register */
++ uint16_t pm1a_en; /* PM1a_EVT_BLK.PM1a_EN: enable register */
++};
++
++DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer);
++
++/*
++ * Largest type-code in use
++ */
++#define HVM_SAVE_CODE_MAX 13
++
++
++/*
++ * The series of save records is terminated by a zero-type, zero-length
++ * descriptor.
++ */
++
++struct hvm_save_end {};
++DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end);
++
++#endif /* __XEN_PUBLIC_HVM_SAVE_H__ */
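[Editorial illustration, not part of this patch: DECLARE_HVM_SAVE_TYPE above encodes the typecode as the length of a char array so that HVM_SAVE_CODE and HVM_SAVE_LENGTH reduce to sizeof expressions. A standalone sketch reproducing the trick for a toy record type; typeof is a GCC extension, as used by the header itself.]

/* Illustrative only: reproducing the HVM_SAVE_TYPE/CODE/LENGTH macro
 * trick from save.h above for a toy record type. */
#include <stdint.h>
#include <stdio.h>

#define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \
    struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; }

#define HVM_SAVE_TYPE(_x)   typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t)
#define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x)))
#define HVM_SAVE_CODE(_x)   (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c))

struct toy_record { uint64_t a, b; };        /* stand-in for an hvm_hw_* type */
DECLARE_HVM_SAVE_TYPE(TOY, 42, struct toy_record);

int main(void)
{
    printf("code   = %zu\n", HVM_SAVE_CODE(TOY));    /* prints 42 */
    printf("length = %zu\n", HVM_SAVE_LENGTH(TOY));  /* prints 16 */
    return 0;
}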
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/hvm/vmx_assist.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,116 @@
++/*
++ * vmx_assist.h: Context definitions for the VMXASSIST world switch.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Leendert van Doorn, leendert@watson.ibm.com
++ * Copyright (c) 2005, International Business Machines Corporation.
++ */
++
++#ifndef _VMX_ASSIST_H_
++#define _VMX_ASSIST_H_
++
++#define VMXASSIST_BASE 0xD0000
++#define VMXASSIST_MAGIC 0x17101966
++#define VMXASSIST_MAGIC_OFFSET (VMXASSIST_BASE+8)
++
++#define VMXASSIST_NEW_CONTEXT (VMXASSIST_BASE + 12)
++#define VMXASSIST_OLD_CONTEXT (VMXASSIST_NEW_CONTEXT + 4)
++
++#ifndef __ASSEMBLY__
++
++union vmcs_arbytes {
++ struct arbyte_fields {
++ unsigned int seg_type : 4,
++ s : 1,
++ dpl : 2,
++ p : 1,
++ reserved0 : 4,
++ avl : 1,
++ reserved1 : 1,
++ default_ops_size: 1,
++ g : 1,
++ null_bit : 1,
++ reserved2 : 15;
++ } fields;
++ unsigned int bytes;
++};
++
++/*
++ * World switch state
++ */
++struct vmx_assist_context {
++ uint32_t eip; /* execution pointer */
++ uint32_t esp; /* stack pointer */
++ uint32_t eflags; /* flags register */
++ uint32_t cr0;
++ uint32_t cr3; /* page table directory */
++ uint32_t cr4;
++ uint32_t idtr_limit; /* idt */
++ uint32_t idtr_base;
++ uint32_t gdtr_limit; /* gdt */
++ uint32_t gdtr_base;
++ uint32_t cs_sel; /* cs selector */
++ uint32_t cs_limit;
++ uint32_t cs_base;
++ union vmcs_arbytes cs_arbytes;
++ uint32_t ds_sel; /* ds selector */
++ uint32_t ds_limit;
++ uint32_t ds_base;
++ union vmcs_arbytes ds_arbytes;
++ uint32_t es_sel; /* es selector */
++ uint32_t es_limit;
++ uint32_t es_base;
++ union vmcs_arbytes es_arbytes;
++ uint32_t ss_sel; /* ss selector */
++ uint32_t ss_limit;
++ uint32_t ss_base;
++ union vmcs_arbytes ss_arbytes;
++ uint32_t fs_sel; /* fs selector */
++ uint32_t fs_limit;
++ uint32_t fs_base;
++ union vmcs_arbytes fs_arbytes;
++ uint32_t gs_sel; /* gs selector */
++ uint32_t gs_limit;
++ uint32_t gs_base;
++ union vmcs_arbytes gs_arbytes;
++ uint32_t tr_sel; /* task selector */
++ uint32_t tr_limit;
++ uint32_t tr_base;
++ union vmcs_arbytes tr_arbytes;
++ uint32_t ldtr_sel; /* ldtr selector */
++ uint32_t ldtr_limit;
++ uint32_t ldtr_base;
++ union vmcs_arbytes ldtr_arbytes;
++};
++typedef struct vmx_assist_context vmx_assist_context_t;
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _VMX_ASSIST_H_ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/io/blkif.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,128 @@
++/******************************************************************************
++ * blkif.h
++ *
++ * Unified block-device I/O interface for Xen guest OSes.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2003-2004, Keir Fraser
++ */
++
++#ifndef __XEN_PUBLIC_IO_BLKIF_H__
++#define __XEN_PUBLIC_IO_BLKIF_H__
++
++#include "ring.h"
++#include "../grant_table.h"
++
++/*
++ * Front->back notifications: When enqueuing a new request, sending a
++ * notification can be made conditional on req_event (i.e., the generic
++ * hold-off mechanism provided by the ring macros). Backends must set
++ * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
++ *
++ * Back->front notifications: When enqueuing a new response, sending a
++ * notification can be made conditional on rsp_event (i.e., the generic
++ * hold-off mechanism provided by the ring macros). Frontends must set
++ * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
++ */
++
++#ifndef blkif_vdev_t
++#define blkif_vdev_t uint16_t
++#endif
++#define blkif_sector_t uint64_t
++
++/*
++ * REQUEST CODES.
++ */
++#define BLKIF_OP_READ 0
++#define BLKIF_OP_WRITE 1
++/*
++ * Recognised only if "feature-barrier" is present in backend xenbus info.
++ * The "feature_barrier" node contains a boolean indicating whether barrier
++ * requests are likely to succeed or fail. Either way, a barrier request
++ * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
++ * the underlying block-device hardware. The boolean simply indicates whether
++ * or not it is worthwhile for the frontend to attempt barrier requests.
++ * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not*
++ * create the "feature-barrier" node!
++ */
++#define BLKIF_OP_WRITE_BARRIER 2
++
++/*
++ * Maximum scatter/gather segments per request.
++ * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
++ * NB. This could be 12 if the ring indexes weren't stored in the same page.
++ */
++#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
++
++struct blkif_request_segment {
++ grant_ref_t gref; /* reference to I/O buffer frame */
++ /* @first_sect: first sector in frame to transfer (inclusive). */
++ /* @last_sect: last sector in frame to transfer (inclusive). */
++ uint8_t first_sect, last_sect;
++};
++
++struct blkif_request {
++ uint8_t operation; /* BLKIF_OP_??? */
++ uint8_t nr_segments; /* number of segments */
++ blkif_vdev_t handle; /* only for read/write requests */
++ uint64_t id; /* private guest value, echoed in resp */
++ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
++ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++};
++typedef struct blkif_request blkif_request_t;
++
++struct blkif_response {
++ uint64_t id; /* copied from request */
++ uint8_t operation; /* copied from request */
++ int16_t status; /* BLKIF_RSP_??? */
++};
++typedef struct blkif_response blkif_response_t;
++
++/*
++ * STATUS RETURN CODES.
++ */
++ /* Operation not supported (only happens on barrier writes). */
++#define BLKIF_RSP_EOPNOTSUPP -2
++ /* Operation failed for some unspecified reason (-EIO). */
++#define BLKIF_RSP_ERROR -1
++ /* Operation completed successfully. */
++#define BLKIF_RSP_OKAY 0
++
++/*
++ * Generate blkif ring structures and types.
++ */
++
++DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
++
++#define VDISK_CDROM 0x1
++#define VDISK_REMOVABLE 0x2
++#define VDISK_READONLY 0x4
++
++#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
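[Editorial illustration, not part of this patch: a sketch of populating a single-segment BLKIF_OP_READ request as laid out above. Setting up the grant reference and enqueueing the request with the ring.h macros are assumed to happen elsewhere.]

/* Illustrative only: filling a one-segment read request matching the
 * blkif.h layout above.  Grant setup and the ring enqueue are omitted. */
#include <stdint.h>

typedef uint32_t grant_ref_t;
typedef uint16_t blkif_vdev_t;
typedef uint64_t blkif_sector_t;

#define BLKIF_OP_READ 0
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

struct blkif_request_segment {
    grant_ref_t gref;
    uint8_t first_sect, last_sect;   /* inclusive, in 512-byte sectors */
};

struct blkif_request {
    uint8_t operation;
    uint8_t nr_segments;
    blkif_vdev_t handle;
    uint64_t id;
    blkif_sector_t sector_number;
    struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

static void fill_read_req(struct blkif_request *req, blkif_vdev_t handle,
                          uint64_t id, blkif_sector_t start, grant_ref_t gref)
{
    req->operation     = BLKIF_OP_READ;
    req->nr_segments   = 1;
    req->handle        = handle;
    req->id            = id;             /* echoed back in the response */
    req->sector_number = start;
    req->seg[0].gref       = gref;
    req->seg[0].first_sect = 0;          /* whole 4K frame: sectors 0..7 */
    req->seg[0].last_sect  = 7;
}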
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/io/console.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,51 @@
++/******************************************************************************
++ * console.h
++ *
++ * Console I/O interface for Xen guest OSes.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, Keir Fraser
++ */
++
++#ifndef __XEN_PUBLIC_IO_CONSOLE_H__
++#define __XEN_PUBLIC_IO_CONSOLE_H__
++
++typedef uint32_t XENCONS_RING_IDX;
++
++#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))
++
++struct xencons_interface {
++ char in[1024];
++ char out[2048];
++ XENCONS_RING_IDX in_cons, in_prod;
++ XENCONS_RING_IDX out_cons, out_prod;
++};
++
++#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
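[Editorial illustration, not part of this patch: the console ring indices are free-running and MASK_XENCONS_IDX reduces them modulo the buffer size. A short sketch of a producer writing into the 'out' ring; mb() is a placeholder barrier and notifying the backend over the event channel is left out.]

/* Illustrative only: writing bytes into the console 'out' ring defined
 * above.  mb() stands in for the guest's real memory barrier. */
#include <stddef.h>
#include <stdint.h>

typedef uint32_t XENCONS_RING_IDX;
#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))

struct xencons_interface {
    char in[1024];
    char out[2048];
    XENCONS_RING_IDX in_cons, in_prod;
    XENCONS_RING_IDX out_cons, out_prod;
};

#define mb() __sync_synchronize()

static size_t cons_write(struct xencons_interface *intf,
                         const char *data, size_t len)
{
    XENCONS_RING_IDX cons = intf->out_cons, prod = intf->out_prod;
    size_t sent = 0;

    mb();   /* read indices before touching the buffer */
    while (sent < len && (prod - cons) < sizeof(intf->out))
        intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
    mb();   /* data must be visible before the new producer index */
    intf->out_prod = prod;
    return sent;    /* bytes actually queued */
}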
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/io/fbif.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,138 @@
++/*
++ * fbif.h -- Xen virtual frame buffer device
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
++ */
++
++#ifndef __XEN_PUBLIC_IO_FBIF_H__
++#define __XEN_PUBLIC_IO_FBIF_H__
++
++/* Out events (frontend -> backend) */
++
++/*
++ * Out events may be sent only when requested by backend, and receipt
++ * of an unknown out event is an error.
++ */
++
++/* Event type 1 currently not used */
++/*
++ * Framebuffer update notification event
++ * Capable frontend sets feature-update in xenstore.
++ * Backend requests it by setting request-update in xenstore.
++ */
++#define XENFB_TYPE_UPDATE 2
++
++struct xenfb_update
++{
++ uint8_t type; /* XENFB_TYPE_UPDATE */
++ int32_t x; /* source x */
++ int32_t y; /* source y */
++ int32_t width; /* rect width */
++ int32_t height; /* rect height */
++};
++
++#define XENFB_OUT_EVENT_SIZE 40
++
++union xenfb_out_event
++{
++ uint8_t type;
++ struct xenfb_update update;
++ char pad[XENFB_OUT_EVENT_SIZE];
++};
++
++/* In events (backend -> frontend) */
++
++/*
++ * Frontends should ignore unknown in events.
++ * No in events currently defined.
++ */
++
++#define XENFB_IN_EVENT_SIZE 40
++
++union xenfb_in_event
++{
++ uint8_t type;
++ char pad[XENFB_IN_EVENT_SIZE];
++};
++
++/* shared page */
++
++#define XENFB_IN_RING_SIZE 1024
++#define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE)
++#define XENFB_IN_RING_OFFS 1024
++#define XENFB_IN_RING(page) \
++ ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
++#define XENFB_IN_RING_REF(page, idx) \
++ (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
++
++#define XENFB_OUT_RING_SIZE 2048
++#define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE)
++#define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE)
++#define XENFB_OUT_RING(page) \
++ ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
++#define XENFB_OUT_RING_REF(page, idx) \
++ (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
++
++struct xenfb_page
++{
++ uint32_t in_cons, in_prod;
++ uint32_t out_cons, out_prod;
++
++ int32_t width; /* the width of the framebuffer (in pixels) */
++ int32_t height; /* the height of the framebuffer (in pixels) */
++ uint32_t line_length; /* the length of a row of pixels (in bytes) */
++ uint32_t mem_length; /* the length of the framebuffer (in bytes) */
++ uint8_t depth; /* the depth of a pixel (in bits) */
++
++ /*
++ * Framebuffer page directory
++ *
++ * Each directory page holds PAGE_SIZE / sizeof(*pd)
++ * framebuffer pages, and can thus map up to PAGE_SIZE *
++ * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and
++ * sizeof(unsigned long) == 4, that's 4 Megs. Two directory
++ * pages should be enough for a while.
++ */
++ unsigned long pd[2];
++};
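++
++/*
++ * Illustrative sketch only (not part of the protocol above): one way a
++ * frontend could queue an update event with the ring macros defined in
++ * this header.  The helper name and the use of the kernel's wmb()
++ * barrier are assumptions for the example, not requirements of the
++ * interface; the block is compiled out.
++ */
++#if 0
++static void example_send_update(struct xenfb_page *page,
++                                int x, int y, int w, int h)
++{
++    union xenfb_out_event *event;
++    uint32_t prod = page->out_prod;
++
++    if (prod - page->out_cons == XENFB_OUT_RING_LEN)
++        return;                         /* ring full, drop the update */
++
++    event = &XENFB_OUT_RING_REF(page, prod);
++    memset(event, 0, XENFB_OUT_EVENT_SIZE);
++    event->update.type = XENFB_TYPE_UPDATE;
++    event->update.x = x;
++    event->update.y = y;
++    event->update.width = w;
++    event->update.height = h;
++
++    wmb();                              /* event visible before prod */
++    page->out_prod = prod + 1;
++}
++#endif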
++
++/*
++ * Wart: xenkbd needs to know resolution. Put it here until a better
++ * solution is found, but don't leak it to the backend.
++ */
++#ifdef __KERNEL__
++#define XENFB_WIDTH 800
++#define XENFB_HEIGHT 600
++#define XENFB_DEPTH 32
++#endif
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/io/kbdif.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,130 @@
++/*
++ * kbdif.h -- Xen virtual keyboard/mouse
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
++ */
++
++#ifndef __XEN_PUBLIC_IO_KBDIF_H__
++#define __XEN_PUBLIC_IO_KBDIF_H__
++
++/* In events (backend -> frontend) */
++
++/*
++ * Frontends should ignore unknown in events.
++ */
++
++/* Pointer movement event */
++#define XENKBD_TYPE_MOTION 1
++/* Event type 2 currently not used */
++/* Key event (includes pointer buttons) */
++#define XENKBD_TYPE_KEY 3
++/*
++ * Pointer position event
++ * Capable backend sets feature-abs-pointer in xenstore.
++ * Frontend requests it instead of XENKBD_TYPE_MOTION by setting
++ * request-abs-update in xenstore.
++ */
++#define XENKBD_TYPE_POS 4
++
++struct xenkbd_motion
++{
++ uint8_t type; /* XENKBD_TYPE_MOTION */
++ int32_t rel_x; /* relative X motion */
++ int32_t rel_y; /* relative Y motion */
++};
++
++struct xenkbd_key
++{
++ uint8_t type; /* XENKBD_TYPE_KEY */
++ uint8_t pressed; /* 1 if pressed; 0 otherwise */
++ uint32_t keycode; /* KEY_* from linux/input.h */
++};
++
++struct xenkbd_position
++{
++ uint8_t type; /* XENKBD_TYPE_POS */
++ int32_t abs_x; /* absolute X position (in FB pixels) */
++ int32_t abs_y; /* absolute Y position (in FB pixels) */
++};
++
++#define XENKBD_IN_EVENT_SIZE 40
++
++union xenkbd_in_event
++{
++ uint8_t type;
++ struct xenkbd_motion motion;
++ struct xenkbd_key key;
++ struct xenkbd_position pos;
++ char pad[XENKBD_IN_EVENT_SIZE];
++};
++
++/* Out events (frontend -> backend) */
++
++/*
++ * Out events may be sent only when requested by backend, and receipt
++ * of an unknown out event is an error.
++ * No out events currently defined.
++ */
++
++#define XENKBD_OUT_EVENT_SIZE 40
++
++union xenkbd_out_event
++{
++ uint8_t type;
++ char pad[XENKBD_OUT_EVENT_SIZE];
++};
++
++/* shared page */
++
++#define XENKBD_IN_RING_SIZE 2048
++#define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
++#define XENKBD_IN_RING_OFFS 1024
++#define XENKBD_IN_RING(page) \
++ ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
++#define XENKBD_IN_RING_REF(page, idx) \
++ (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
++
++#define XENKBD_OUT_RING_SIZE 1024
++#define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE)
++#define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE)
++#define XENKBD_OUT_RING(page) \
++ ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
++#define XENKBD_OUT_RING_REF(page, idx) \
++ (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
++
++struct xenkbd_page
++{
++ uint32_t in_cons, in_prod;
++ uint32_t out_cons, out_prod;
++};
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/io/netif.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,184 @@
++/******************************************************************************
++ * netif.h
++ *
++ * Unified network-device I/O interface for Xen guest OSes.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2003-2004, Keir Fraser
++ */
++
++#ifndef __XEN_PUBLIC_IO_NETIF_H__
++#define __XEN_PUBLIC_IO_NETIF_H__
++
++#include "ring.h"
++#include "../grant_table.h"
++
++/*
++ * Notifications after enqueuing any type of message should be conditional on
++ * the appropriate req_event or rsp_event field in the shared ring.
++ * If the client sends notification for rx requests then it should specify
++ * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume
++ * that it cannot safely queue packets (as it may not be kicked to send them).
++ */
++
++/*
++ * This is the 'wire' format for packets:
++ * Request 1: netif_tx_request -- NETTXF_* (any flags)
++ * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info)
++ * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE)
++ * Request 4: netif_tx_request -- NETTXF_more_data
++ * Request 5: netif_tx_request -- NETTXF_more_data
++ * ...
++ * Request N: netif_tx_request -- 0
++ */
++
++/* Protocol checksum field is blank in the packet (hardware offload)? */
++#define _NETTXF_csum_blank (0)
++#define NETTXF_csum_blank (1U<<_NETTXF_csum_blank)
++
++/* Packet data has been validated against protocol checksum. */
++#define _NETTXF_data_validated (1)
++#define NETTXF_data_validated (1U<<_NETTXF_data_validated)
++
++/* Packet continues in the next request descriptor. */
++#define _NETTXF_more_data (2)
++#define NETTXF_more_data (1U<<_NETTXF_more_data)
++
++/* Packet to be followed by extra descriptor(s). */
++#define _NETTXF_extra_info (3)
++#define NETTXF_extra_info (1U<<_NETTXF_extra_info)
++
++struct netif_tx_request {
++ grant_ref_t gref; /* Reference to buffer page */
++ uint16_t offset; /* Offset within buffer page */
++ uint16_t flags; /* NETTXF_* */
++ uint16_t id; /* Echoed in response message. */
++ uint16_t size; /* Packet size in bytes. */
++};
++typedef struct netif_tx_request netif_tx_request_t;
++
++/* Types of netif_extra_info descriptors. */
++#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
++#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
++#define XEN_NETIF_EXTRA_TYPE_MAX (2)
++
++/* netif_extra_info flags. */
++#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
++#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
++
++/* GSO types - only TCPv4 currently supported. */
++#define XEN_NETIF_GSO_TYPE_TCPV4 (1)
++
++/*
++ * This structure needs to fit within both netif_tx_request and
++ * netif_rx_response for compatibility.
++ */
++struct netif_extra_info {
++ uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */
++ uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
++
++ union {
++ struct {
++ /*
++ * Maximum payload size of each segment. For example, for TCP this
++ * is just the path MSS.
++ */
++ uint16_t size;
++
++ /*
++ * GSO type. This determines the protocol of the packet and any
++ * extra features required to segment the packet properly.
++ */
++ uint8_t type; /* XEN_NETIF_GSO_TYPE_* */
++
++ /* Future expansion. */
++ uint8_t pad;
++
++ /*
++ * GSO features. This specifies any extra GSO features required
++ * to process this packet, such as ECN support for TCPv4.
++ */
++ uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
++ } gso;
++
++ uint16_t pad[3];
++ } u;
++};
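++
++/*
++ * Illustrative sketch only: filling in the first two slots of the wire
++ * format described above for a TCPv4 GSO packet.  Ring-index handling
++ * and the remaining NETTXF_more_data requests are omitted, and the
++ * function/parameter names are assumptions; the block is compiled out.
++ */
++#if 0
++static void example_fill_gso_tx(struct netif_tx_request *txreq,
++                                struct netif_extra_info *extra,
++                                grant_ref_t gref, uint16_t id,
++                                uint16_t offset, uint16_t total_size,
++                                uint16_t mss)
++{
++    /* Request 1: describes the whole packet and announces the extra. */
++    txreq->gref   = gref;
++    txreq->offset = offset;
++    txreq->flags  = NETTXF_csum_blank | NETTXF_extra_info;
++    txreq->id     = id;
++    txreq->size   = total_size;
++
++    /* Request 2: the netif_extra_info descriptor occupying the next slot. */
++    extra->type           = XEN_NETIF_EXTRA_TYPE_GSO;
++    extra->flags          = 0;          /* no XEN_NETIF_EXTRA_FLAG_MORE */
++    extra->u.gso.size     = mss;        /* path MSS */
++    extra->u.gso.type     = XEN_NETIF_GSO_TYPE_TCPV4;
++    extra->u.gso.pad      = 0;
++    extra->u.gso.features = 0;
++}
++#endif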
++
++struct netif_tx_response {
++ uint16_t id;
++ int16_t status; /* NETIF_RSP_* */
++};
++typedef struct netif_tx_response netif_tx_response_t;
++
++struct netif_rx_request {
++ uint16_t id; /* Echoed in response message. */
++ grant_ref_t gref; /* Reference to incoming granted frame */
++};
++typedef struct netif_rx_request netif_rx_request_t;
++
++/* Packet data has been validated against protocol checksum. */
++#define _NETRXF_data_validated (0)
++#define NETRXF_data_validated (1U<<_NETRXF_data_validated)
++
++/* Protocol checksum field is blank in the packet (hardware offload)? */
++#define _NETRXF_csum_blank (1)
++#define NETRXF_csum_blank (1U<<_NETRXF_csum_blank)
++
++/* Packet continues in the next request descriptor. */
++#define _NETRXF_more_data (2)
++#define NETRXF_more_data (1U<<_NETRXF_more_data)
++
++/* Packet to be followed by extra descriptor(s). */
++#define _NETRXF_extra_info (3)
++#define NETRXF_extra_info (1U<<_NETRXF_extra_info)
++
++struct netif_rx_response {
++ uint16_t id;
++ uint16_t offset; /* Offset in page of start of received packet */
++ uint16_t flags; /* NETRXF_* */
++ int16_t status; /* -ve: NETIF_RSP_* ; +ve: Rx'ed pkt size. */
++};
++typedef struct netif_rx_response netif_rx_response_t;
++
++/*
++ * Generate netif ring structures and types.
++ */
++
++DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response);
++DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response);
++
++#define NETIF_RSP_DROPPED -2
++#define NETIF_RSP_ERROR -1
++#define NETIF_RSP_OKAY 0
++/* No response: used for auxiliary requests (e.g., netif_tx_extra). */
++#define NETIF_RSP_NULL 1
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/io/pciif.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,83 @@
++/*
++ * PCI Backend/Frontend Common Data Structures & Macros
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#ifndef __XEN_PCI_COMMON_H__
++#define __XEN_PCI_COMMON_H__
++
++/* Be sure to bump this number if you change this file */
++#define XEN_PCI_MAGIC "7"
++
++/* xen_pci_sharedinfo flags */
++#define _XEN_PCIF_active (0)
++#define XEN_PCIF_active (1<<_XEN_PCIF_active)
++
++/* xen_pci_op commands */
++#define XEN_PCI_OP_conf_read (0)
++#define XEN_PCI_OP_conf_write (1)
++
++/* xen_pci_op error numbers */
++#define XEN_PCI_ERR_success (0)
++#define XEN_PCI_ERR_dev_not_found (-1)
++#define XEN_PCI_ERR_invalid_offset (-2)
++#define XEN_PCI_ERR_access_denied (-3)
++#define XEN_PCI_ERR_not_implemented (-4)
++/* XEN_PCI_ERR_op_failed - backend failed to complete the operation */
++#define XEN_PCI_ERR_op_failed (-5)
++
++struct xen_pci_op {
++ /* IN: what action to perform: XEN_PCI_OP_* */
++ uint32_t cmd;
++
++ /* OUT: will contain an error number (if any) from errno.h */
++ int32_t err;
++
++ /* IN: which device to touch */
++ uint32_t domain; /* PCI Domain/Segment */
++ uint32_t bus;
++ uint32_t devfn;
++
++ /* IN: which configuration registers to touch */
++ int32_t offset;
++ int32_t size;
++
++ /* IN/OUT: Contains the result after a READ or the value to WRITE */
++ uint32_t value;
++};
++
++struct xen_pci_sharedinfo {
++ /* flags - XEN_PCIF_* */
++ uint32_t flags;
++ struct xen_pci_op op;
++};
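++
++/*
++ * Illustrative sketch only: how a frontend might fill in a configuration
++ * space read before raising the XEN_PCIF_active flag and notifying the
++ * backend (both steps omitted here).  The function name is an assumption;
++ * the block is compiled out.
++ */
++#if 0
++static void example_fill_conf_read(struct xen_pci_op *op, uint32_t domain,
++                                   uint32_t bus, uint32_t devfn,
++                                   int32_t offset, int32_t size)
++{
++    op->cmd    = XEN_PCI_OP_conf_read;
++    op->err    = 0;
++    op->domain = domain;
++    op->bus    = bus;
++    op->devfn  = devfn;
++    op->offset = offset;
++    op->size   = size;                  /* 1, 2 or 4 bytes */
++    op->value  = 0;                     /* backend fills in the result */
++}
++#endif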
++
++#endif /* __XEN_PCI_COMMON_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/io/protocols.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,21 @@
++#ifndef __XEN_PROTOCOLS_H__
++#define __XEN_PROTOCOLS_H__
++
++#define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
++#define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
++#define XEN_IO_PROTO_ABI_IA64 "ia64-abi"
++#define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi"
++
++#if defined(__i386__)
++# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
++#elif defined(__x86_64__)
++# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
++#elif defined(__ia64__)
++# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
++#elif defined(__powerpc64__)
++# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64
++#else
++# error arch fixup needed here
++#endif
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/io/ring.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,299 @@
++/******************************************************************************
++ * ring.h
++ *
++ * Shared producer-consumer ring macros.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Tim Deegan and Andrew Warfield November 2004.
++ */
++
++#ifndef __XEN_PUBLIC_IO_RING_H__
++#define __XEN_PUBLIC_IO_RING_H__
++
++typedef unsigned int RING_IDX;
++
++/* Round a 32-bit unsigned constant down to the nearest power of two. */
++#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
++#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
++#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
++#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
++#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
++
++/*
++ * Calculate size of a shared ring, given the total available space for the
++ * ring and indexes (_sz), and the name tag of the request/response structure.
++ * A ring contains as many entries as will fit, rounded down to the nearest
++ * power of two (so we can mask with (size-1) to loop around).
++ */
++#define __RING_SIZE(_s, _sz) \
++ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
++
++/*
++ * Macros to make the correct C datatypes for a new kind of ring.
++ *
++ * To make a new ring datatype, you need to have two message structures,
++ * let's say request_t, and response_t already defined.
++ *
++ * In a header where you want the ring datatype declared, you then do:
++ *
++ * DEFINE_RING_TYPES(mytag, request_t, response_t);
++ *
++ * These expand out to give you a set of types, as you can see below.
++ * The most important of these are:
++ *
++ * mytag_sring_t - The shared ring.
++ * mytag_front_ring_t - The 'front' half of the ring.
++ * mytag_back_ring_t - The 'back' half of the ring.
++ *
++ * To initialize a ring in your code you need to know the location and size
++ * of the shared memory area (PAGE_SIZE, for instance). To initialise
++ * the front half:
++ *
++ * mytag_front_ring_t front_ring;
++ * SHARED_RING_INIT((mytag_sring_t *)shared_page);
++ * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
++ *
++ * Initializing the back follows similarly (note that only the front
++ * initializes the shared ring):
++ *
++ * mytag_back_ring_t back_ring;
++ * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
++ */
++
++#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
++ \
++/* Shared ring entry */ \
++union __name##_sring_entry { \
++ __req_t req; \
++ __rsp_t rsp; \
++}; \
++ \
++/* Shared ring page */ \
++struct __name##_sring { \
++ RING_IDX req_prod, req_event; \
++ RING_IDX rsp_prod, rsp_event; \
++ uint8_t pad[48]; \
++ union __name##_sring_entry ring[1]; /* variable-length */ \
++}; \
++ \
++/* "Front" end's private variables */ \
++struct __name##_front_ring { \
++ RING_IDX req_prod_pvt; \
++ RING_IDX rsp_cons; \
++ unsigned int nr_ents; \
++ struct __name##_sring *sring; \
++}; \
++ \
++/* "Back" end's private variables */ \
++struct __name##_back_ring { \
++ RING_IDX rsp_prod_pvt; \
++ RING_IDX req_cons; \
++ unsigned int nr_ents; \
++ struct __name##_sring *sring; \
++}; \
++ \
++/* Syntactic sugar */ \
++typedef struct __name##_sring __name##_sring_t; \
++typedef struct __name##_front_ring __name##_front_ring_t; \
++typedef struct __name##_back_ring __name##_back_ring_t
++
++/*
++ * Macros for manipulating rings.
++ *
++ * FRONT_RING_whatever works on the "front end" of a ring: here
++ * requests are pushed on to the ring and responses taken off it.
++ *
++ * BACK_RING_whatever works on the "back end" of a ring: here
++ * requests are taken off the ring and responses put on.
++ *
++ * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
++ * This is OK in 1-for-1 request-response situations where the
++ * requestor (front end) never has more than RING_SIZE()-1
++ * outstanding requests.
++ */
++
++/* Initialising empty rings */
++#define SHARED_RING_INIT(_s) do { \
++ (_s)->req_prod = (_s)->rsp_prod = 0; \
++ (_s)->req_event = (_s)->rsp_event = 1; \
++ memset((_s)->pad, 0, sizeof((_s)->pad)); \
++} while(0)
++
++#define FRONT_RING_INIT(_r, _s, __size) do { \
++ (_r)->req_prod_pvt = 0; \
++ (_r)->rsp_cons = 0; \
++ (_r)->nr_ents = __RING_SIZE(_s, __size); \
++ (_r)->sring = (_s); \
++} while (0)
++
++#define BACK_RING_INIT(_r, _s, __size) do { \
++ (_r)->rsp_prod_pvt = 0; \
++ (_r)->req_cons = 0; \
++ (_r)->nr_ents = __RING_SIZE(_s, __size); \
++ (_r)->sring = (_s); \
++} while (0)
++
++/* Initialize to existing shared indexes -- for recovery */
++#define FRONT_RING_ATTACH(_r, _s, __size) do { \
++ (_r)->sring = (_s); \
++ (_r)->req_prod_pvt = (_s)->req_prod; \
++ (_r)->rsp_cons = (_s)->rsp_prod; \
++ (_r)->nr_ents = __RING_SIZE(_s, __size); \
++} while (0)
++
++#define BACK_RING_ATTACH(_r, _s, __size) do { \
++ (_r)->sring = (_s); \
++ (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
++ (_r)->req_cons = (_s)->req_prod; \
++ (_r)->nr_ents = __RING_SIZE(_s, __size); \
++} while (0)
++
++/* How big is this ring? */
++#define RING_SIZE(_r) \
++ ((_r)->nr_ents)
++
++/* Number of free requests (for use on front side only). */
++#define RING_FREE_REQUESTS(_r) \
++ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
++
++/* Test if there is an empty slot available on the front ring.
++ * (This is only meaningful from the front.)
++ */
++#define RING_FULL(_r) \
++ (RING_FREE_REQUESTS(_r) == 0)
++
++/* Test if there are outstanding messages to be processed on a ring. */
++#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
++ ((_r)->sring->rsp_prod - (_r)->rsp_cons)
++
++#ifdef __GNUC__
++#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
++ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
++ unsigned int rsp = RING_SIZE(_r) - \
++ ((_r)->req_cons - (_r)->rsp_prod_pvt); \
++ req < rsp ? req : rsp; \
++})
++#else
++/* Same as above, but without the nice GCC ({ ... }) syntax. */
++#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
++ ((((_r)->sring->req_prod - (_r)->req_cons) < \
++ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \
++ ((_r)->sring->req_prod - (_r)->req_cons) : \
++ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
++#endif
++
++/* Direct access to individual ring elements, by index. */
++#define RING_GET_REQUEST(_r, _idx) \
++ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
++
++#define RING_GET_RESPONSE(_r, _idx) \
++ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
++
++/* Loop termination condition: Would the specified index overflow the ring? */
++#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
++ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
++
++#define RING_PUSH_REQUESTS(_r) do { \
++ wmb(); /* back sees requests /before/ updated producer index */ \
++ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
++} while (0)
++
++#define RING_PUSH_RESPONSES(_r) do { \
++ wmb(); /* front sees responses /before/ updated producer index */ \
++ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
++} while (0)
++
++/*
++ * Notification hold-off (req_event and rsp_event):
++ *
++ * When queueing requests or responses on a shared ring, it may not always be
++ * necessary to notify the remote end. For example, if requests are in flight
++ * in a backend, the front may be able to queue further requests without
++ * notifying the back (if the back checks for new requests when it queues
++ * responses).
++ *
++ * When enqueuing requests or responses:
++ *
++ * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
++ * is a boolean return value. True indicates that the receiver requires an
++ * asynchronous notification.
++ *
++ * After dequeuing requests or responses (before sleeping the connection):
++ *
++ * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
++ * The second argument is a boolean return value. True indicates that there
++ * are pending messages on the ring (i.e., the connection should not be put
++ * to sleep).
++ *
++ * These macros will set the req_event/rsp_event field to trigger a
++ * notification on the very next message that is enqueued. If you want to
++ * create batches of work (i.e., only receive a notification after several
++ * messages have been enqueued) then you will need to create a customised
++ * version of the FINAL_CHECK macro in your own code, which sets the event
++ * field appropriately.
++ */
++
++#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
++ RING_IDX __old = (_r)->sring->req_prod; \
++ RING_IDX __new = (_r)->req_prod_pvt; \
++ wmb(); /* back sees requests /before/ updated producer index */ \
++ (_r)->sring->req_prod = __new; \
++ mb(); /* back sees new requests /before/ we check req_event */ \
++ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
++ (RING_IDX)(__new - __old)); \
++} while (0)
++
++#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
++ RING_IDX __old = (_r)->sring->rsp_prod; \
++ RING_IDX __new = (_r)->rsp_prod_pvt; \
++ wmb(); /* front sees responses /before/ updated producer index */ \
++ (_r)->sring->rsp_prod = __new; \
++ mb(); /* front sees new responses /before/ we check rsp_event */ \
++ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
++ (RING_IDX)(__new - __old)); \
++} while (0)
++
++#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
++ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
++ if (_work_to_do) break; \
++ (_r)->sring->req_event = (_r)->req_cons + 1; \
++ mb(); \
++ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
++} while (0)
++
++#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
++ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
++ if (_work_to_do) break; \
++ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
++ mb(); \
++ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
++} while (0)
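++
++/*
++ * Illustrative sketch only: the shape of a backend service loop built from
++ * the macros above.  "mytag", request_t and response_t refer to the
++ * DEFINE_RING_TYPES example earlier in this header; process_request(),
++ * send_notification() and the kernel rmb() barrier are placeholders.
++ * The block is compiled out.
++ */
++#if 0
++static void example_backend_work(struct mytag_back_ring *ring)
++{
++    int more_to_do, notify;
++
++    do {
++        RING_IDX rc = ring->req_cons;
++        RING_IDX rp = ring->sring->req_prod;
++        rmb();                  /* see requests up to rp before reading them */
++
++        while (rc != rp && !RING_REQUEST_CONS_OVERFLOW(ring, rc)) {
++            request_t  *req = RING_GET_REQUEST(ring, rc);
++            response_t *rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
++
++            process_request(req, rsp);  /* 1-for-1 request/response */
++            ring->req_cons = ++rc;
++            ring->rsp_prod_pvt++;
++        }
++
++        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
++        if (notify)
++            send_notification();
++
++        RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do);
++    } while (more_to_do);
++}
++#endif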
++
++#endif /* __XEN_PUBLIC_IO_RING_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/io/tpmif.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,77 @@
++/******************************************************************************
++ * tpmif.h
++ *
++ * TPM I/O interface for Xen guest OSes.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb@us.ibm.com
++ * Grant table support: Mahadevan Gomathisankaran
++ *
++ * This code has been derived from tools/libxc/xen/io/netif.h
++ *
++ * Copyright (c) 2003-2004, Keir Fraser
++ */
++
++#ifndef __XEN_PUBLIC_IO_TPMIF_H__
++#define __XEN_PUBLIC_IO_TPMIF_H__
++
++#include "../grant_table.h"
++
++struct tpmif_tx_request {
++ unsigned long addr; /* Machine address of packet. */
++ grant_ref_t ref; /* grant table access reference */
++ uint16_t unused;
++ uint16_t size; /* Packet size in bytes. */
++};
++typedef struct tpmif_tx_request tpmif_tx_request_t;
++
++/*
++ * The TPMIF_TX_RING_SIZE defines the number of pages the
++ * front-end and backend can exchange (= size of array).
++ */
++typedef uint32_t TPMIF_RING_IDX;
++
++#define TPMIF_TX_RING_SIZE 1
++
++/* This structure must fit in a memory page. */
++
++struct tpmif_ring {
++ struct tpmif_tx_request req;
++};
++typedef struct tpmif_ring tpmif_ring_t;
++
++struct tpmif_tx_interface {
++ struct tpmif_ring ring[TPMIF_TX_RING_SIZE];
++};
++typedef struct tpmif_tx_interface tpmif_tx_interface_t;
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/io/xenbus.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,73 @@
++/*****************************************************************************
++ * xenbus.h
++ *
++ * Xenbus protocol details.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) 2005 XenSource Ltd.
++ */
++
++#ifndef _XEN_PUBLIC_IO_XENBUS_H
++#define _XEN_PUBLIC_IO_XENBUS_H
++
++/*
++ * The state of either end of the Xenbus, i.e. the current communication
++ * status of initialisation across the bus. States here imply nothing about
++ * the state of the connection between the driver and the kernel's device
++ * layers.
++ */
++enum xenbus_state {
++ XenbusStateUnknown = 0,
++
++ XenbusStateInitialising = 1,
++
++ /*
++ * InitWait: Finished early initialisation but waiting for information
++ * from the peer or hotplug scripts.
++ */
++ XenbusStateInitWait = 2,
++
++ /*
++ * Initialised: Waiting for a connection from the peer.
++ */
++ XenbusStateInitialised = 3,
++
++ XenbusStateConnected = 4,
++
++ /*
++ * Closing: The device is being closed due to an error or an unplug event.
++ */
++ XenbusStateClosing = 5,
++
++ XenbusStateClosed = 6
++};
++typedef enum xenbus_state XenbusState;
++
++#endif /* _XEN_PUBLIC_IO_XENBUS_H */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/io/xs_wire.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,117 @@
++/*
++ * Details of the "wire" protocol between Xen Store Daemon and client
++ * library or guest kernel.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) 2005 Rusty Russell IBM Corporation
++ */
++
++#ifndef _XS_WIRE_H
++#define _XS_WIRE_H
++
++enum xsd_sockmsg_type
++{
++ XS_DEBUG,
++ XS_DIRECTORY,
++ XS_READ,
++ XS_GET_PERMS,
++ XS_WATCH,
++ XS_UNWATCH,
++ XS_TRANSACTION_START,
++ XS_TRANSACTION_END,
++ XS_INTRODUCE,
++ XS_RELEASE,
++ XS_GET_DOMAIN_PATH,
++ XS_WRITE,
++ XS_MKDIR,
++ XS_RM,
++ XS_SET_PERMS,
++ XS_WATCH_EVENT,
++ XS_ERROR,
++ XS_IS_DOMAIN_INTRODUCED,
++ XS_RESUME
++};
++
++#define XS_WRITE_NONE "NONE"
++#define XS_WRITE_CREATE "CREATE"
++#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
++
++/* We hand errors back as strings, for portability. */
++struct xsd_errors
++{
++ int errnum;
++ const char *errstring;
++};
++#define XSD_ERROR(x) { x, #x }
++static struct xsd_errors xsd_errors[] __attribute__((unused)) = {
++ XSD_ERROR(EINVAL),
++ XSD_ERROR(EACCES),
++ XSD_ERROR(EEXIST),
++ XSD_ERROR(EISDIR),
++ XSD_ERROR(ENOENT),
++ XSD_ERROR(ENOMEM),
++ XSD_ERROR(ENOSPC),
++ XSD_ERROR(EIO),
++ XSD_ERROR(ENOTEMPTY),
++ XSD_ERROR(ENOSYS),
++ XSD_ERROR(EROFS),
++ XSD_ERROR(EBUSY),
++ XSD_ERROR(EAGAIN),
++ XSD_ERROR(EISCONN)
++};
++
++struct xsd_sockmsg
++{
++ uint32_t type; /* XS_??? */
++ uint32_t req_id;/* Request identifier, echoed in daemon's response. */
++ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */
++ uint32_t len; /* Length of data following this. */
++
++ /* Generally followed by nul-terminated string(s). */
++};
++
++enum xs_watch_type
++{
++ XS_WATCH_PATH = 0,
++ XS_WATCH_TOKEN
++};
++
++/* Inter-domain shared memory communications. */
++#define XENSTORE_RING_SIZE 1024
++typedef uint32_t XENSTORE_RING_IDX;
++#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
++struct xenstore_domain_interface {
++ char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
++ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
++ XENSTORE_RING_IDX req_cons, req_prod;
++ XENSTORE_RING_IDX rsp_cons, rsp_prod;
++};
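++
++/*
++ * Illustrative sketch only: copying bytes into the request ring defined
++ * above.  A real client prepends a struct xsd_sockmsg header, waits for
++ * space, and notifies the peer over the event channel; only the ring
++ * arithmetic is shown.  wmb() is the kernel's write barrier.  The block
++ * is compiled out.
++ */
++#if 0
++static int example_write_request(struct xenstore_domain_interface *intf,
++                                 const char *data, uint32_t len)
++{
++    XENSTORE_RING_IDX cons = intf->req_cons;
++    XENSTORE_RING_IDX prod = intf->req_prod;
++    uint32_t i;
++
++    if (len > XENSTORE_RING_SIZE - (prod - cons))
++        return -1;              /* not enough free space yet */
++
++    for (i = 0; i < len; i++)
++        intf->req[MASK_XENSTORE_IDX(prod + i)] = data[i];
++
++    wmb();                      /* data visible before producer update */
++    intf->req_prod = prod + len;
++    return 0;
++}
++#endif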
++
++#endif /* _XS_WIRE_H */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/kexec.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,137 @@
++/******************************************************************************
++ * kexec.h - Public portion
++ *
++ * Xen port written by:
++ * - Simon 'Horms' Horman <horms@verge.net.au>
++ * - Magnus Damm <magnus@valinux.co.jp>
++ */
++
++#ifndef _XEN_PUBLIC_KEXEC_H
++#define _XEN_PUBLIC_KEXEC_H
++
++
++/* This file describes the Kexec / Kdump hypercall interface for Xen.
++ *
++ * Kexec under vanilla Linux allows a user to reboot the physical machine
++ * into a new user-specified kernel. The Xen port extends this idea
++ * to allow rebooting of the machine from dom0. When kexec for dom0
++ * is used to reboot, both the hypervisor and the domains get replaced
++ * with some other kernel. It is possible to kexec between vanilla
++ * Linux and Xen and back again. Xen to Xen works well too.
++ *
++ * The hypercall interface for kexec can be divided into three main
++ * types of hypercall operations:
++ *
++ * 1) Range information:
++ * This is used by the dom0 kernel to ask the hypervisor about various
++ * address information. This information is needed to allow kexec-tools
++ * to fill in the ELF headers for /proc/vmcore properly.
++ *
++ * 2) Load and unload of images:
++ * There are no big surprises here, the kexec binary from kexec-tools
++ * runs in userspace in dom0. The tool loads/unloads data into the
++ * dom0 kernel such as new kernel, initramfs and hypervisor. When
++ * loaded the dom0 kernel performs a load hypercall operation, and
++ * before releasing all page references the dom0 kernel calls unload.
++ *
++ * 3) Kexec operation:
++ * This is used to start a previously loaded kernel.
++ */
++
++#include "xen.h"
++
++#if defined(__i386__) || defined(__x86_64__)
++#define KEXEC_XEN_NO_PAGES 17
++#endif
++
++/*
++ * Prototype for this hypercall is:
++ * int kexec_op(int cmd, void *args)
++ * @cmd == KEXEC_CMD_...
++ * KEXEC operation to perform
++ * @args == Operation-specific extra arguments (NULL if none).
++ */
++
++/*
++ * Kexec supports two types of operation:
++ * - kexec into a regular kernel, very similar to a standard reboot
++ * - KEXEC_TYPE_DEFAULT is used to specify this type
++ * - kexec into a special "crash kernel", aka kexec-on-panic
++ * - KEXEC_TYPE_CRASH is used to specify this type
++ * - parts of our system may be broken at kexec-on-panic time
++ * - the code should be kept as simple and self-contained as possible
++ */
++
++#define KEXEC_TYPE_DEFAULT 0
++#define KEXEC_TYPE_CRASH 1
++
++
++/* The kexec implementation for Xen allows the user to load two
++ * types of kernels, KEXEC_TYPE_DEFAULT and KEXEC_TYPE_CRASH.
++ * All data needed for a kexec reboot is kept in one xen_kexec_image_t
++ * per "instance". The data mainly consists of machine address lists to pages
++ * together with destination addresses. The data in xen_kexec_image_t
++ * is passed to the "code page" which is one page of code that performs
++ * the final relocations before jumping to the new kernel.
++ */
++
++typedef struct xen_kexec_image {
++#if defined(__i386__) || defined(__x86_64__)
++ unsigned long page_list[KEXEC_XEN_NO_PAGES];
++#endif
++ unsigned long indirection_page;
++ unsigned long start_address;
++} xen_kexec_image_t;
++
++/*
++ * Perform kexec having previously loaded a kexec or kdump kernel
++ * as appropriate.
++ * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
++ */
++#define KEXEC_CMD_kexec 0
++typedef struct xen_kexec_exec {
++ int type;
++} xen_kexec_exec_t;
++
++/*
++ * Load/Unload kernel image for kexec or kdump.
++ * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
++ * image == relocation information for kexec (ignored for unload) [in]
++ */
++#define KEXEC_CMD_kexec_load 1
++#define KEXEC_CMD_kexec_unload 2
++typedef struct xen_kexec_load {
++ int type;
++ xen_kexec_image_t image;
++} xen_kexec_load_t;
++
++#define KEXEC_RANGE_MA_CRASH 0 /* machine address and size of crash area */
++#define KEXEC_RANGE_MA_XEN 1 /* machine address and size of Xen itself */
++#define KEXEC_RANGE_MA_CPU 2 /* machine address and size of a CPU note */
++
++/*
++ * Find the address and size of certain memory areas
++ * range == KEXEC_RANGE_... [in]
++ * nr == physical CPU number (starting from 0) if KEXEC_RANGE_MA_CPU [in]
++ * size == number of bytes reserved in window [out]
++ * start == address of the first byte in the window [out]
++ */
++#define KEXEC_CMD_kexec_get_range 3
++typedef struct xen_kexec_range {
++ int range;
++ int nr;
++ unsigned long size;
++ unsigned long start;
++} xen_kexec_range_t;
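++
++/*
++ * Illustrative sketch only: querying the crash area from dom0 with the
++ * range operation described above.  HYPERVISOR_kexec_op() is assumed to
++ * be the guest-side wrapper for the kexec_op hypercall; the block is
++ * compiled out.
++ */
++#if 0
++static int example_get_crash_area(unsigned long *start, unsigned long *size)
++{
++    xen_kexec_range_t range = {
++        .range = KEXEC_RANGE_MA_CRASH,
++        .nr    = 0,
++    };
++    int rc = HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range);
++
++    if (rc == 0) {
++        *start = range.start;
++        *size  = range.size;
++    }
++    return rc;
++}
++#endif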
++
++#endif /* _XEN_PUBLIC_KEXEC_H */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/libelf.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,241 @@
++#ifndef __XC_LIBELF__
++#define __XC_LIBELF__ 1
++
++#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
++#define XEN_ELF_LITTLE_ENDIAN
++#elif defined(__powerpc__)
++#define XEN_ELF_BIG_ENDIAN
++#else
++#error define architectural endianness
++#endif
++
++#undef ELFSIZE
++#include "elfnote.h"
++#include "elfstructs.h"
++#include "features.h"
++
++/* ------------------------------------------------------------------------ */
++
++typedef union {
++ Elf32_Ehdr e32;
++ Elf64_Ehdr e64;
++} elf_ehdr;
++
++typedef union {
++ Elf32_Phdr e32;
++ Elf64_Phdr e64;
++} elf_phdr;
++
++typedef union {
++ Elf32_Shdr e32;
++ Elf64_Shdr e64;
++} elf_shdr;
++
++typedef union {
++ Elf32_Sym e32;
++ Elf64_Sym e64;
++} elf_sym;
++
++typedef union {
++ Elf32_Rel e32;
++ Elf64_Rel e64;
++} elf_rel;
++
++typedef union {
++ Elf32_Rela e32;
++ Elf64_Rela e64;
++} elf_rela;
++
++typedef union {
++ Elf32_Note e32;
++ Elf64_Note e64;
++} elf_note;
++
++struct elf_binary {
++ /* elf binary */
++ const char *image;
++ size_t size;
++ char class;
++ char data;
++
++ const elf_ehdr *ehdr;
++ const char *sec_strtab;
++ const elf_shdr *sym_tab;
++ const char *sym_strtab;
++
++ /* loaded to */
++ char *dest;
++ uint64_t pstart;
++ uint64_t pend;
++ uint64_t reloc_offset;
++
++#ifndef __XEN__
++ /* misc */
++ FILE *log;
++#endif
++ int verbose;
++};
++
++/* ------------------------------------------------------------------------ */
++/* accessing elf header fields */
++
++#ifdef XEN_ELF_BIG_ENDIAN
++# define NATIVE_ELFDATA ELFDATA2MSB
++#else
++# define NATIVE_ELFDATA ELFDATA2LSB
++#endif
++
++#define elf_32bit(elf) (ELFCLASS32 == (elf)->class)
++#define elf_64bit(elf) (ELFCLASS64 == (elf)->class)
++#define elf_msb(elf) (ELFDATA2MSB == (elf)->data)
++#define elf_lsb(elf) (ELFDATA2LSB == (elf)->data)
++#define elf_swap(elf) (NATIVE_ELFDATA != (elf)->data)
++
++#define elf_uval(elf, str, elem) \
++ ((ELFCLASS64 == (elf)->class) \
++ ? elf_access_unsigned((elf), (str), \
++ offsetof(typeof(*(str)),e64.elem), \
++ sizeof((str)->e64.elem)) \
++ : elf_access_unsigned((elf), (str), \
++ offsetof(typeof(*(str)),e32.elem), \
++ sizeof((str)->e32.elem)))
++
++#define elf_sval(elf, str, elem) \
++ ((ELFCLASS64 == (elf)->class) \
++ ? elf_access_signed((elf), (str), \
++ offsetof(typeof(*(str)),e64.elem), \
++ sizeof((str)->e64.elem)) \
++ : elf_access_signed((elf), (str), \
++ offsetof(typeof(*(str)),e32.elem), \
++ sizeof((str)->e32.elem)))
++
++#define elf_size(elf, str) \
++ ((ELFCLASS64 == (elf)->class) \
++ ? sizeof((str)->e64) \
++ : sizeof((str)->e32))
++
++uint64_t elf_access_unsigned(struct elf_binary *elf, const void *ptr,
++ uint64_t offset, size_t size);
++int64_t elf_access_signed(struct elf_binary *elf, const void *ptr,
++ uint64_t offset, size_t size);
++
++uint64_t elf_round_up(struct elf_binary *elf, uint64_t addr);
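++
++/*
++ * Illustrative sketch only: elf_uval() above selects the 32- or 64-bit
++ * field and byte-swaps as needed, so header fields can be read without
++ * caring about the binary's class or endianness.  The block is compiled
++ * out.
++ */
++#if 0
++static uint64_t example_entry_point(struct elf_binary *elf)
++{
++    return elf_uval(elf, elf->ehdr, e_entry);   /* Elf{32,64}_Ehdr.e_entry */
++}
++#endif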
++
++/* ------------------------------------------------------------------------ */
++/* xc_libelf_tools.c */
++
++int elf_shdr_count(struct elf_binary *elf);
++int elf_phdr_count(struct elf_binary *elf);
++
++const elf_shdr *elf_shdr_by_name(struct elf_binary *elf, const char *name);
++const elf_shdr *elf_shdr_by_index(struct elf_binary *elf, int index);
++const elf_phdr *elf_phdr_by_index(struct elf_binary *elf, int index);
++
++const char *elf_section_name(struct elf_binary *elf, const elf_shdr * shdr);
++const void *elf_section_start(struct elf_binary *elf, const elf_shdr * shdr);
++const void *elf_section_end(struct elf_binary *elf, const elf_shdr * shdr);
++
++const void *elf_segment_start(struct elf_binary *elf, const elf_phdr * phdr);
++const void *elf_segment_end(struct elf_binary *elf, const elf_phdr * phdr);
++
++const elf_sym *elf_sym_by_name(struct elf_binary *elf, const char *symbol);
++const elf_sym *elf_sym_by_index(struct elf_binary *elf, int index);
++
++const char *elf_note_name(struct elf_binary *elf, const elf_note * note);
++const void *elf_note_desc(struct elf_binary *elf, const elf_note * note);
++uint64_t elf_note_numeric(struct elf_binary *elf, const elf_note * note);
++const elf_note *elf_note_next(struct elf_binary *elf, const elf_note * note);
++
++int elf_is_elfbinary(const void *image);
++int elf_phdr_is_loadable(struct elf_binary *elf, const elf_phdr * phdr);
++
++/* ------------------------------------------------------------------------ */
++/* xc_libelf_loader.c */
++
++int elf_init(struct elf_binary *elf, const char *image, size_t size);
++#ifdef __XEN__
++void elf_set_verbose(struct elf_binary *elf);
++#else
++void elf_set_logfile(struct elf_binary *elf, FILE * log, int verbose);
++#endif
++
++void elf_parse_binary(struct elf_binary *elf);
++void elf_load_binary(struct elf_binary *elf);
++
++void *elf_get_ptr(struct elf_binary *elf, unsigned long addr);
++uint64_t elf_lookup_addr(struct elf_binary *elf, const char *symbol);
++
++/* ------------------------------------------------------------------------ */
++/* xc_libelf_relocate.c */
++
++int elf_reloc(struct elf_binary *elf);
++
++/* ------------------------------------------------------------------------ */
++/* xc_libelf_dominfo.c */
++
++#define UNSET_ADDR ((uint64_t)-1)
++
++enum xen_elfnote_type {
++ XEN_ENT_NONE = 0,
++ XEN_ENT_LONG = 1,
++ XEN_ENT_STR = 2
++};
++
++struct xen_elfnote {
++ enum xen_elfnote_type type;
++ const char *name;
++ union {
++ const char *str;
++ uint64_t num;
++ } data;
++};
++
++struct elf_dom_parms {
++ /* raw */
++ const char *guest_info;
++ const void *elf_note_start;
++ const void *elf_note_end;
++ struct xen_elfnote elf_notes[XEN_ELFNOTE_MAX + 1];
++
++ /* parsed */
++ char guest_os[16];
++ char guest_ver[16];
++ char xen_ver[16];
++ char loader[16];
++ int pae;
++ int bsd_symtab;
++ uint64_t virt_base;
++ uint64_t virt_entry;
++ uint64_t virt_hypercall;
++ uint64_t virt_hv_start_low;
++ uint64_t elf_paddr_offset;
++ uint32_t f_supported[XENFEAT_NR_SUBMAPS];
++ uint32_t f_required[XENFEAT_NR_SUBMAPS];
++
++ /* calculated */
++ uint64_t virt_offset;
++ uint64_t virt_kstart;
++ uint64_t virt_kend;
++};
++
++static inline void elf_xen_feature_set(int nr, uint32_t * addr)
++{
++ addr[nr >> 5] |= 1 << (nr & 31);
++}
++static inline int elf_xen_feature_get(int nr, uint32_t * addr)
++{
++ return !!(addr[nr >> 5] & (1 << (nr & 31)));
++}
++
++int elf_xen_parse_features(const char *features,
++ uint32_t *supported,
++ uint32_t *required);
++int elf_xen_parse_note(struct elf_binary *elf,
++ struct elf_dom_parms *parms,
++ const elf_note *note);
++int elf_xen_parse_guest_info(struct elf_binary *elf,
++ struct elf_dom_parms *parms);
++int elf_xen_parse(struct elf_binary *elf,
++ struct elf_dom_parms *parms);
++
++#endif /* __XC_LIBELF__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/memory.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,281 @@
++/******************************************************************************
++ * memory.h
++ *
++ * Memory reservation and information.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_MEMORY_H__
++#define __XEN_PUBLIC_MEMORY_H__
++
++/*
++ * Increase or decrease the specified domain's memory reservation. Returns the
++ * number of extents successfully allocated or freed.
++ * arg == addr of struct xen_memory_reservation.
++ */
++#define XENMEM_increase_reservation 0
++#define XENMEM_decrease_reservation 1
++#define XENMEM_populate_physmap 6
++struct xen_memory_reservation {
++
++ /*
++ * XENMEM_increase_reservation:
++ * OUT: MFN (*not* GMFN) bases of extents that were allocated
++ * XENMEM_decrease_reservation:
++ * IN: GMFN bases of extents to free
++ * XENMEM_populate_physmap:
++ * IN: GPFN bases of extents to populate with memory
++ * OUT: GMFN bases of extents that were allocated
++ * (NB. This command also updates the mach_to_phys translation table)
++ */
++ XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
++
++ /* Number of extents, and size/alignment of each (2^extent_order pages). */
++ xen_ulong_t nr_extents;
++ unsigned int extent_order;
++
++ /*
++ * Maximum # bits addressable by the user of the allocated region (e.g.,
++ * I/O devices often have a 32-bit limitation even in 64-bit systems). If
++ * zero then the user has no addressing restriction.
++ * This field is not used by XENMEM_decrease_reservation.
++ */
++ unsigned int address_bits;
++
++ /*
++ * Domain whose reservation is being changed.
++ * Unprivileged domains can specify only DOMID_SELF.
++ */
++ domid_t domid;
++};
++typedef struct xen_memory_reservation xen_memory_reservation_t;
++DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
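++
++/*
++ * Illustrative sketch only: returning a single page to Xen with
++ * XENMEM_decrease_reservation.  set_xen_guest_handle() and
++ * HYPERVISOR_memory_op() are assumed to be the usual guest-side helpers;
++ * the block is compiled out.
++ */
++#if 0
++static int example_release_one_page(xen_pfn_t gmfn)
++{
++    struct xen_memory_reservation reservation = {
++        .nr_extents   = 1,
++        .extent_order = 0,      /* one single-page extent */
++        .address_bits = 0,      /* ignored by decrease_reservation */
++        .domid        = DOMID_SELF,
++    };
++
++    set_xen_guest_handle(reservation.extent_start, &gmfn);
++
++    /* Returns the number of extents freed; 1 on success here. */
++    return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
++}
++#endif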
++
++/*
++ * An atomic exchange of memory pages. If return code is zero then
++ * @out.extent_list provides GMFNs of the newly-allocated memory.
++ * Returns zero on complete success, otherwise a negative error code.
++ * On complete success then always @nr_exchanged == @in.nr_extents.
++ * On partial success @nr_exchanged indicates how much work was done.
++ */
++#define XENMEM_exchange 11
++struct xen_memory_exchange {
++ /*
++ * [IN] Details of memory extents to be exchanged (GMFN bases).
++ * Note that @in.address_bits is ignored and unused.
++ */
++ struct xen_memory_reservation in;
++
++ /*
++ * [IN/OUT] Details of new memory extents.
++ * We require that:
++ * 1. @in.domid == @out.domid
++ * 2. @in.nr_extents << @in.extent_order ==
++ * @out.nr_extents << @out.extent_order
++ * 3. @in.extent_start and @out.extent_start lists must not overlap
++ * 4. @out.extent_start lists GPFN bases to be populated
++ * 5. @out.extent_start is overwritten with allocated GMFN bases
++ */
++ struct xen_memory_reservation out;
++
++ /*
++ * [OUT] Number of input extents that were successfully exchanged:
++ * 1. The first @nr_exchanged input extents were successfully
++ * deallocated.
++ * 2. The corresponding first entries in the output extent list correctly
++ * indicate the GMFNs that were successfully exchanged.
++ * 3. All other input and output extents are untouched.
++ * 4. If not all input extents are exchanged then the return code of this
++ * command will be non-zero.
++ * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
++ */
++ xen_ulong_t nr_exchanged;
++};
++typedef struct xen_memory_exchange xen_memory_exchange_t;
++DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
++
++/*
++ * Returns the maximum machine frame number of mapped RAM in this system.
++ * This command always succeeds (it never returns an error code).
++ * arg == NULL.
++ */
++#define XENMEM_maximum_ram_page 2
++
++/*
++ * Returns the current or maximum memory reservation, in pages, of the
++ * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
++ * arg == addr of domid_t.
++ */
++#define XENMEM_current_reservation 3
++#define XENMEM_maximum_reservation 4
++
++/*
++ * Returns the maximum GPFN in use by the guest, or -ve errcode on failure.
++ */
++#define XENMEM_maximum_gpfn 14
++
++/*
++ * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
++ * mapping table. Architectures which do not have a m2p table do not implement
++ * this command.
++ * arg == addr of xen_machphys_mfn_list_t.
++ */
++#define XENMEM_machphys_mfn_list 5
++struct xen_machphys_mfn_list {
++ /*
++ * Size of the 'extent_start' array. Fewer entries will be filled if the
++ * machphys table is smaller than max_extents * 2MB.
++ */
++ unsigned int max_extents;
++
++ /*
++ * Pointer to buffer to fill with list of extent starts. If there are
++ * any large discontiguities in the machine address space, 2MB gaps in
++ * the machphys table will be represented by an MFN base of zero.
++ */
++ XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
++
++ /*
++ * Number of extents written to the above array. This will be smaller
++ * than 'max_extents' if the machphys table is smaller than max_extents * 2MB.
++ */
++ unsigned int nr_extents;
++};
++typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
++DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
++
++/*
++ * Returns the location in virtual address space of the machine_to_phys
++ * mapping table. Architectures which do not have a m2p table, or which do not
++ * map it by default into guest address space, do not implement this command.
++ * arg == addr of xen_machphys_mapping_t.
++ */
++#define XENMEM_machphys_mapping 12
++struct xen_machphys_mapping {
++ xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */
++ xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */
++};
++typedef struct xen_machphys_mapping xen_machphys_mapping_t;
++DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
++
++/*
++ * Sets the GPFN at which a particular page appears in the specified guest's
++ * pseudophysical address space.
++ * arg == addr of xen_add_to_physmap_t.
++ */
++#define XENMEM_add_to_physmap 7
++struct xen_add_to_physmap {
++ /* Which domain to change the mapping for. */
++ domid_t domid;
++
++ /* Source mapping space. */
++#define XENMAPSPACE_shared_info 0 /* shared info page */
++#define XENMAPSPACE_grant_table 1 /* grant table page */
++ unsigned int space;
++
++ /* Index into source mapping space. */
++ xen_ulong_t idx;
++
++ /* GPFN where the source mapping page should appear. */
++ xen_pfn_t gpfn;
++};
++typedef struct xen_add_to_physmap xen_add_to_physmap_t;
++DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
++
++/*
++ * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
++ * code on failure. This call only works for auto-translated guests.
++ */
++#define XENMEM_translate_gpfn_list 8
++struct xen_translate_gpfn_list {
++ /* Which domain to translate for? */
++ domid_t domid;
++
++ /* Length of list. */
++ xen_ulong_t nr_gpfns;
++
++ /* List of GPFNs to translate. */
++ XEN_GUEST_HANDLE(xen_pfn_t) gpfn_list;
++
++ /*
++ * Output list to contain MFN translations. May be the same as the input
++ * list (in which case each input GPFN is overwritten with the output MFN).
++ */
++ XEN_GUEST_HANDLE(xen_pfn_t) mfn_list;
++};
++typedef struct xen_translate_gpfn_list xen_translate_gpfn_list_t;
++DEFINE_XEN_GUEST_HANDLE(xen_translate_gpfn_list_t);
++
++/*
++ * Returns the pseudo-physical memory map as it was when the domain
++ * was started (specified by XENMEM_set_memory_map).
++ * arg == addr of xen_memory_map_t.
++ */
++#define XENMEM_memory_map 9
++struct xen_memory_map {
++ /*
++ * On call the number of entries which can be stored in buffer. On
++ * return the number of entries which have been stored in
++ * buffer.
++ */
++ unsigned int nr_entries;
++
++ /*
++ * Entries in the buffer are in the same format as returned by the
++ * BIOS INT 0x15 EAX=0xE820 call.
++ */
++ XEN_GUEST_HANDLE(void) buffer;
++};
++typedef struct xen_memory_map xen_memory_map_t;
++DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
++
++/*
++ * Returns the real physical memory map. Passes the same structure as
++ * XENMEM_memory_map.
++ * arg == addr of xen_memory_map_t.
++ */
++#define XENMEM_machine_memory_map 10
++
++/*
++ * Set the pseudo-physical memory map of a domain, as returned by
++ * XENMEM_memory_map.
++ * arg == addr of xen_foreign_memory_map_t.
++ */
++#define XENMEM_set_memory_map 13
++struct xen_foreign_memory_map {
++ domid_t domid;
++ struct xen_memory_map map;
++};
++typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
++DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
++
++#endif /* __XEN_PUBLIC_MEMORY_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/nmi.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,78 @@
++/******************************************************************************
++ * nmi.h
++ *
++ * NMI callback registration and reason codes.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_NMI_H__
++#define __XEN_PUBLIC_NMI_H__
++
++/*
++ * NMI reason codes:
++ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
++ */
++ /* I/O-check error reported via ISA port 0x61, bit 6. */
++#define _XEN_NMIREASON_io_error 0
++#define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error)
++ /* Parity error reported via ISA port 0x61, bit 7. */
++#define _XEN_NMIREASON_parity_error 1
++#define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error)
++ /* Unknown hardware-generated NMI. */
++#define _XEN_NMIREASON_unknown 2
++#define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown)
++
++/*
++ * long nmi_op(unsigned int cmd, void *arg)
++ * NB. All ops return zero on success, else a negative error code.
++ */
++
++/*
++ * Register NMI callback for this (calling) VCPU. Currently this only makes
++ * sense for domain 0, vcpu 0. All other callers receive EINVAL.
++ * arg == pointer to xennmi_callback structure.
++ */
++#define XENNMI_register_callback 0
++struct xennmi_callback {
++ unsigned long handler_address;
++ unsigned long pad;
++};
++typedef struct xennmi_callback xennmi_callback_t;
++DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t);
++
++/*
++ * Deregister NMI callback for this (calling) VCPU.
++ * arg == NULL.
++ */
++#define XENNMI_unregister_callback 1
++
++#endif /* __XEN_PUBLIC_NMI_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
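
A hedged sketch of the registration path this header describes, assuming a HYPERVISOR_nmi_op() wrapper in the hypercall layer; nmi_entry is a purely illustrative name for the kernel's low-level NMI entry stub.

#include <xen/interface/nmi.h>

extern void nmi_entry(void);    /* illustrative low-level NMI entry stub */

static int register_xen_nmi_callback(void)
{
    struct xennmi_callback cb = {
        .handler_address = (unsigned long)nmi_entry,
        .pad             = 0,
    };

    /* Only dom0/VCPU0 may register; any other caller gets EINVAL. */
    return HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
}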
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/physdev.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,169 @@
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_PHYSDEV_H__
++#define __XEN_PUBLIC_PHYSDEV_H__
++
++/*
++ * Prototype for this hypercall is:
++ * int physdev_op(int cmd, void *args)
++ * @cmd == PHYSDEVOP_??? (physdev operation).
++ * @args == Operation-specific extra arguments (NULL if none).
++ */
++
++/*
++ * Notify end-of-interrupt (EOI) for the specified IRQ.
++ * @arg == pointer to physdev_eoi structure.
++ */
++#define PHYSDEVOP_eoi 12
++struct physdev_eoi {
++ /* IN */
++ uint32_t irq;
++};
++typedef struct physdev_eoi physdev_eoi_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
++
++/*
++ * Query the status of an IRQ line.
++ * @arg == pointer to physdev_irq_status_query structure.
++ */
++#define PHYSDEVOP_irq_status_query 5
++struct physdev_irq_status_query {
++ /* IN */
++ uint32_t irq;
++ /* OUT */
++ uint32_t flags; /* XENIRQSTAT_* */
++};
++typedef struct physdev_irq_status_query physdev_irq_status_query_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t);
++
++/* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */
++#define _XENIRQSTAT_needs_eoi (0)
++#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi)
++
++/* IRQ shared by multiple guests? */
++#define _XENIRQSTAT_shared (1)
++#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared)
++
++/*
++ * Set the current VCPU's I/O privilege level.
++ * @arg == pointer to physdev_set_iopl structure.
++ */
++#define PHYSDEVOP_set_iopl 6
++struct physdev_set_iopl {
++ /* IN */
++ uint32_t iopl;
++};
++typedef struct physdev_set_iopl physdev_set_iopl_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t);
++
++/*
++ * Set the current VCPU's I/O-port permissions bitmap.
++ * @arg == pointer to physdev_set_iobitmap structure.
++ */
++#define PHYSDEVOP_set_iobitmap 7
++struct physdev_set_iobitmap {
++ /* IN */
++ XEN_GUEST_HANDLE_00030205(uint8_t) bitmap;
++ uint32_t nr_ports;
++};
++typedef struct physdev_set_iobitmap physdev_set_iobitmap_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t);
++
++/*
++ * Read or write an IO-APIC register.
++ * @arg == pointer to physdev_apic structure.
++ */
++#define PHYSDEVOP_apic_read 8
++#define PHYSDEVOP_apic_write 9
++struct physdev_apic {
++ /* IN */
++ unsigned long apic_physbase;
++ uint32_t reg;
++ /* IN or OUT */
++ uint32_t value;
++};
++typedef struct physdev_apic physdev_apic_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_apic_t);
++
++/*
++ * Allocate or free a physical upcall vector for the specified IRQ line.
++ * @arg == pointer to physdev_irq structure.
++ */
++#define PHYSDEVOP_alloc_irq_vector 10
++#define PHYSDEVOP_free_irq_vector 11
++struct physdev_irq {
++ /* IN */
++ uint32_t irq;
++ /* IN or OUT */
++ uint32_t vector;
++};
++typedef struct physdev_irq physdev_irq_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_irq_t);
++
++/*
++ * Argument to physdev_op_compat() hypercall. Superseded by the new physdev_op()
++ * hypercall since 0x00030202.
++ */
++struct physdev_op {
++ uint32_t cmd;
++ union {
++ struct physdev_irq_status_query irq_status_query;
++ struct physdev_set_iopl set_iopl;
++ struct physdev_set_iobitmap set_iobitmap;
++ struct physdev_apic apic_op;
++ struct physdev_irq irq_op;
++ } u;
++};
++typedef struct physdev_op physdev_op_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_op_t);
++
++/*
++ * Notify that some PIRQ-bound event channels have been unmasked.
++ * ** This command is obsolete since interface version 0x00030202 and is **
++ * ** unsupported by newer versions of Xen. **
++ */
++#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
++
++/*
++ * These all-capitals physdev operation names are superseded by the new names
++ * (defined above) since interface version 0x00030202.
++ */
++#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
++#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
++#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap
++#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read
++#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write
++#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector
++#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
++#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
++#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
++
++#endif /* __XEN_PUBLIC_PHYSDEV_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
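
As a sketch of the EOI protocol described above (assuming the HYPERVISOR_physdev_op() wrapper): a guest first asks whether the IRQ needs an explicit EOI, then issues PHYSDEVOP_eoi once the handler has run.

#include <xen/interface/physdev.h>

/* Does this IRQ require an explicit PHYSDEVOP_eoi after servicing? */
static int irq_needs_eoi(uint32_t irq)
{
    struct physdev_irq_status_query query = { .irq = irq };

    if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &query) != 0)
        return 0;
    return !!(query.flags & XENIRQSTAT_needs_eoi);
}

/* Acknowledge the IRQ once it has been serviced. */
static void pirq_eoi(uint32_t irq)
{
    struct physdev_eoi eoi = { .irq = irq };

    (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
}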
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/platform.h 2007-08-27 14:02:09.000000000 -0400
+@@ -0,0 +1,143 @@
++/******************************************************************************
++ * platform.h
++ *
++ * Hardware platform operations. Intended for use by domain-0 kernel.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2002-2006, K Fraser
++ */
++
++#ifndef __XEN_PUBLIC_PLATFORM_H__
++#define __XEN_PUBLIC_PLATFORM_H__
++
++#include "xen.h"
++
++#define XENPF_INTERFACE_VERSION 0x03000001
++
++/*
++ * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
++ * 1 January, 1970 if the current system time was <system_time>.
++ */
++#define XENPF_settime 17
++struct xenpf_settime {
++ /* IN variables. */
++ uint32_t secs;
++ uint32_t nsecs;
++ uint64_t system_time;
++};
++typedef struct xenpf_settime xenpf_settime_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t);
++
++/*
++ * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type.
++ * On x86, @type is an architecture-defined MTRR memory type.
++ * On success, returns the MTRR that was used (@reg) and a handle that can
++ * be passed to XENPF_del_memtype to accurately tear down the new setting.
++ * (x86-specific).
++ */
++#define XENPF_add_memtype 31
++struct xenpf_add_memtype {
++ /* IN variables. */
++ xen_pfn_t mfn;
++ uint64_t nr_mfns;
++ uint32_t type;
++ /* OUT variables. */
++ uint32_t handle;
++ uint32_t reg;
++};
++typedef struct xenpf_add_memtype xenpf_add_memtype_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t);
++
++/*
++ * Tear down an existing memory-range type. If @handle is remembered then it
++ * should be passed in to accurately tear down the correct setting (in case
++ * of overlapping memory regions with differing types). If it is not known
++ * then @handle should be set to zero. In all cases @reg must be set.
++ * (x86-specific).
++ */
++#define XENPF_del_memtype 32
++struct xenpf_del_memtype {
++ /* IN variables. */
++ uint32_t handle;
++ uint32_t reg;
++};
++typedef struct xenpf_del_memtype xenpf_del_memtype_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t);
++
++/* Read current type of an MTRR (x86-specific). */
++#define XENPF_read_memtype 33
++struct xenpf_read_memtype {
++ /* IN variables. */
++ uint32_t reg;
++ /* OUT variables. */
++ xen_pfn_t mfn;
++ uint64_t nr_mfns;
++ uint32_t type;
++};
++typedef struct xenpf_read_memtype xenpf_read_memtype_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t);
++
++#define XENPF_microcode_update 35
++struct xenpf_microcode_update {
++ /* IN variables. */
++ XEN_GUEST_HANDLE(void) data; /* Pointer to microcode data */
++ uint32_t length; /* Length of microcode data. */
++};
++typedef struct xenpf_microcode_update xenpf_microcode_update_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t);
++
++#define XENPF_platform_quirk 39
++#define QUIRK_NOIRQBALANCING 1 /* Do not restrict IO-APIC RTE targets */
++#define QUIRK_IOAPIC_BAD_REGSEL 2 /* IO-APIC REGSEL forgets its value */
++#define QUIRK_IOAPIC_GOOD_REGSEL 3 /* IO-APIC REGSEL behaves properly */
++struct xenpf_platform_quirk {
++ /* IN variables. */
++ uint32_t quirk_id;
++};
++typedef struct xenpf_platform_quirk xenpf_platform_quirk_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t);
++
++struct xen_platform_op {
++ uint32_t cmd;
++ uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
++ union {
++ struct xenpf_settime settime;
++ struct xenpf_add_memtype add_memtype;
++ struct xenpf_del_memtype del_memtype;
++ struct xenpf_read_memtype read_memtype;
++ struct xenpf_microcode_update microcode;
++ struct xenpf_platform_quirk platform_quirk;
++ uint8_t pad[128];
++ } u;
++};
++typedef struct xen_platform_op xen_platform_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t);
++
++#endif /* __XEN_PUBLIC_PLATFORM_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
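
A sketch of a dom0 wallclock update through XENPF_settime; the HYPERVISOR_platform_op() wrapper name is an assumption (some trees expose the same hypercall as dom0_op), and interface_version must match XENPF_INTERFACE_VERSION or the call is rejected.

#include <xen/interface/platform.h>

static int xen_set_wallclock(uint32_t secs, uint32_t nsecs, uint64_t now_ns)
{
    struct xen_platform_op op = {
        .cmd               = XENPF_settime,
        .interface_version = XENPF_INTERFACE_VERSION,
    };

    op.u.settime.secs        = secs;
    op.u.settime.nsecs       = nsecs;
    op.u.settime.system_time = now_ns;   /* current Xen system time, ns */

    /* Wrapper name assumed; dom0 only. */
    return HYPERVISOR_platform_op(&op);
}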
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/sched.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,121 @@
++/******************************************************************************
++ * sched.h
++ *
++ * Scheduler state interactions
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_SCHED_H__
++#define __XEN_PUBLIC_SCHED_H__
++
++#include "event_channel.h"
++
++/*
++ * The prototype for this hypercall is:
++ * long sched_op(int cmd, void *arg)
++ * @cmd == SCHEDOP_??? (scheduler operation).
++ * @arg == Operation-specific extra argument(s), as described below.
++ *
++ * Versions of Xen prior to 3.0.2 provided only the following legacy version
++ * of this hypercall, supporting only the commands yield, block and shutdown:
++ * long sched_op(int cmd, unsigned long arg)
++ * @cmd == SCHEDOP_??? (scheduler operation).
++ * @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
++ * == SHUTDOWN_* code (SCHEDOP_shutdown)
++ * This legacy version is available to new guests as sched_op_compat().
++ */
++
++/*
++ * Voluntarily yield the CPU.
++ * @arg == NULL.
++ */
++#define SCHEDOP_yield 0
++
++/*
++ * Block execution of this VCPU until an event is received for processing.
++ * If called with event upcalls masked, this operation will atomically
++ * reenable event delivery and check for pending events before blocking the
++ * VCPU. This avoids a "wakeup waiting" race.
++ * @arg == NULL.
++ */
++#define SCHEDOP_block 1
++
++/*
++ * Halt execution of this domain (all VCPUs) and notify the system controller.
++ * @arg == pointer to sched_shutdown structure.
++ */
++#define SCHEDOP_shutdown 2
++struct sched_shutdown {
++ unsigned int reason; /* SHUTDOWN_* */
++};
++typedef struct sched_shutdown sched_shutdown_t;
++DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
++
++/*
++ * Poll a set of event-channel ports. Return when one or more are pending. An
++ * optional timeout may be specified.
++ * @arg == pointer to sched_poll structure.
++ */
++#define SCHEDOP_poll 3
++struct sched_poll {
++ XEN_GUEST_HANDLE(evtchn_port_t) ports;
++ unsigned int nr_ports;
++ uint64_t timeout;
++};
++typedef struct sched_poll sched_poll_t;
++DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
++
++/*
++ * Declare a shutdown for another domain. The main use of this function is
++ * in interpreting shutdown requests and reasons for fully-virtualized
++ * domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
++ * @arg == pointer to sched_remote_shutdown structure.
++ */
++#define SCHEDOP_remote_shutdown 4
++struct sched_remote_shutdown {
++ domid_t domain_id; /* Remote domain ID */
++ unsigned int reason; /* SHUTDOWN_xxx reason */
++};
++typedef struct sched_remote_shutdown sched_remote_shutdown_t;
++DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
++
++/*
++ * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
++ * software to determine the appropriate action. For the most part, Xen does
++ * not care about the shutdown code.
++ */
++#define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */
++#define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
++#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
++#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
++
++#endif /* __XEN_PUBLIC_SCHED_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
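
Two small sketches of the operations above, assuming the HYPERVISOR_sched_op() wrapper and the set_xen_guest_handle() helper from the arch headers: a clean poweroff, and a poll that blocks until one of a set of event-channel ports becomes pending.

#include <xen/interface/sched.h>
#include <xen/interface/event_channel.h>

/* Shut the whole domain down cleanly. */
static void xen_poweroff(void)
{
    struct sched_shutdown shutdown = { .reason = SHUTDOWN_poweroff };

    HYPERVISOR_sched_op(SCHEDOP_shutdown, &shutdown);
}

/* Block until one of 'nr' ports is pending or the absolute system time
 * 'timeout' (ns) passes; 0 requests no timeout.                         */
static int xen_poll_ports(evtchn_port_t *ports, unsigned int nr,
                          uint64_t timeout)
{
    struct sched_poll poll = { .nr_ports = nr, .timeout = timeout };

    set_xen_guest_handle(poll.ports, ports);
    return HYPERVISOR_sched_op(SCHEDOP_poll, &poll);
}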
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/sysctl.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,182 @@
++/******************************************************************************
++ * sysctl.h
++ *
++ * System management operations. For use by node control stack.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2002-2006, K Fraser
++ */
++
++#ifndef __XEN_PUBLIC_SYSCTL_H__
++#define __XEN_PUBLIC_SYSCTL_H__
++
++#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
++#error "sysctl operations are intended for use by node control tools only"
++#endif
++
++#include "xen.h"
++#include "domctl.h"
++
++#define XEN_SYSCTL_INTERFACE_VERSION 0x00000003
++
++/*
++ * Read console content from Xen buffer ring.
++ */
++#define XEN_SYSCTL_readconsole 1
++struct xen_sysctl_readconsole {
++ /* IN variables. */
++ uint32_t clear; /* Non-zero -> clear after reading. */
++ XEN_GUEST_HANDLE_64(char) buffer; /* Buffer start */
++ /* IN/OUT variables. */
++ uint32_t count; /* In: Buffer size; Out: Used buffer size */
++};
++typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t);
++
++/* Trace buffer operations: get info, set CPU/event masks, set size, enable/disable. */
++#define XEN_SYSCTL_tbuf_op 2
++struct xen_sysctl_tbuf_op {
++ /* IN variables */
++#define XEN_SYSCTL_TBUFOP_get_info 0
++#define XEN_SYSCTL_TBUFOP_set_cpu_mask 1
++#define XEN_SYSCTL_TBUFOP_set_evt_mask 2
++#define XEN_SYSCTL_TBUFOP_set_size 3
++#define XEN_SYSCTL_TBUFOP_enable 4
++#define XEN_SYSCTL_TBUFOP_disable 5
++ uint32_t cmd;
++ /* IN/OUT variables */
++ struct xenctl_cpumap cpu_mask;
++ uint32_t evt_mask;
++ /* OUT variables */
++ uint64_aligned_t buffer_mfn;
++ uint32_t size;
++};
++typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
++
++/*
++ * Get physical information about the host machine
++ */
++#define XEN_SYSCTL_physinfo 3
++struct xen_sysctl_physinfo {
++ uint32_t threads_per_core;
++ uint32_t cores_per_socket;
++ uint32_t sockets_per_node;
++ uint32_t nr_nodes;
++ uint32_t cpu_khz;
++ uint64_aligned_t total_pages;
++ uint64_aligned_t free_pages;
++ uint64_aligned_t scrub_pages;
++ uint32_t hw_cap[8];
++};
++typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
++
++/*
++ * Get the ID of the current scheduler.
++ */
++#define XEN_SYSCTL_sched_id 4
++struct xen_sysctl_sched_id {
++ /* OUT variable */
++ uint32_t sched_id;
++};
++typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t);
++
++/* Interface for controlling Xen software performance counters. */
++#define XEN_SYSCTL_perfc_op 5
++/* Sub-operations: */
++#define XEN_SYSCTL_PERFCOP_reset 1 /* Reset all counters to zero. */
++#define XEN_SYSCTL_PERFCOP_query 2 /* Get perfctr information. */
++struct xen_sysctl_perfc_desc {
++ char name[80]; /* name of perf counter */
++ uint32_t nr_vals; /* number of values for this counter */
++};
++typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t);
++typedef uint32_t xen_sysctl_perfc_val_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t);
++
++struct xen_sysctl_perfc_op {
++ /* IN variables. */
++ uint32_t cmd; /* XEN_SYSCTL_PERFCOP_??? */
++ /* OUT variables. */
++ uint32_t nr_counters; /* number of counter descriptions */
++ uint32_t nr_vals; /* number of values */
++ /* counter information (or NULL) */
++ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc;
++ /* counter values (or NULL) */
++ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val;
++};
++typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
++
++#define XEN_SYSCTL_getdomaininfolist 6
++struct xen_sysctl_getdomaininfolist {
++ /* IN variables. */
++ domid_t first_domain;
++ uint32_t max_domains;
++ XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer;
++ /* OUT variables. */
++ uint32_t num_domains;
++};
++typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
++
++/*
++ * Inject debug keys into Xen.
++ */
++#define XEN_SYSCTL_debug_keys 7
++struct xen_sysctl_debug_keys {
++ /* IN variables. */
++ XEN_GUEST_HANDLE_64(char) keys;
++ uint32_t nr_keys;
++};
++typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t);
++
++struct xen_sysctl {
++ uint32_t cmd;
++ uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
++ union {
++ struct xen_sysctl_readconsole readconsole;
++ struct xen_sysctl_tbuf_op tbuf_op;
++ struct xen_sysctl_physinfo physinfo;
++ struct xen_sysctl_sched_id sched_id;
++ struct xen_sysctl_perfc_op perfc_op;
++ struct xen_sysctl_getdomaininfolist getdomaininfolist;
++ struct xen_sysctl_debug_keys debug_keys;
++ uint8_t pad[128];
++ } u;
++};
++typedef struct xen_sysctl xen_sysctl_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t);
++
++#endif /* __XEN_PUBLIC_SYSCTL_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
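
For orientation, a sketch of how a node control tool fills the sysctl envelope for XEN_SYSCTL_physinfo; do_sysctl() is a hypothetical helper standing in for the tool's privcmd-based hypercall path, and interface_version must match XEN_SYSCTL_INTERFACE_VERSION.

#include <xen/interface/sysctl.h>

extern int do_sysctl(struct xen_sysctl *op);   /* hypothetical issue helper */

static int get_physinfo(struct xen_sysctl_physinfo *info)
{
    struct xen_sysctl op = {
        .cmd               = XEN_SYSCTL_physinfo,
        .interface_version = XEN_SYSCTL_INTERFACE_VERSION,
    };
    int rc = do_sysctl(&op);

    if (rc == 0)
        *info = op.u.physinfo;   /* OUT fields filled in by Xen */
    return rc;
}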
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/trace.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,119 @@
++/******************************************************************************
++ * include/public/trace.h
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Mark Williamson, (C) 2004 Intel Research Cambridge
++ * Copyright (C) 2005 Bin Ren
++ */
++
++#ifndef __XEN_PUBLIC_TRACE_H__
++#define __XEN_PUBLIC_TRACE_H__
++
++/* Trace classes */
++#define TRC_CLS_SHIFT 16
++#define TRC_GEN 0x0001f000 /* General trace */
++#define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */
++#define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */
++#define TRC_HVM 0x0008f000 /* Xen HVM trace */
++#define TRC_MEM 0x0010f000 /* Xen memory trace */
++#define TRC_ALL 0xfffff000
++
++/* Trace subclasses */
++#define TRC_SUBCLS_SHIFT 12
++
++/* trace subclasses for SVM */
++#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */
++#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
++
++/* Trace events per class */
++#define TRC_LOST_RECORDS (TRC_GEN + 1)
++
++#define TRC_SCHED_DOM_ADD (TRC_SCHED + 1)
++#define TRC_SCHED_DOM_REM (TRC_SCHED + 2)
++#define TRC_SCHED_SLEEP (TRC_SCHED + 3)
++#define TRC_SCHED_WAKE (TRC_SCHED + 4)
++#define TRC_SCHED_YIELD (TRC_SCHED + 5)
++#define TRC_SCHED_BLOCK (TRC_SCHED + 6)
++#define TRC_SCHED_SHUTDOWN (TRC_SCHED + 7)
++#define TRC_SCHED_CTL (TRC_SCHED + 8)
++#define TRC_SCHED_ADJDOM (TRC_SCHED + 9)
++#define TRC_SCHED_SWITCH (TRC_SCHED + 10)
++#define TRC_SCHED_S_TIMER_FN (TRC_SCHED + 11)
++#define TRC_SCHED_T_TIMER_FN (TRC_SCHED + 12)
++#define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED + 13)
++#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED + 14)
++#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED + 15)
++
++#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1)
++#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2)
++#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
++
++/* trace events per subclass */
++#define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01)
++#define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02)
++#define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01)
++#define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02)
++#define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03)
++#define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04)
++#define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05)
++#define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06)
++#define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07)
++#define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08)
++#define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09)
++#define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A)
++#define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B)
++#define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C)
++#define TRC_HVM_MSR_WRITE (TRC_HVM_HANDLER + 0x0D)
++#define TRC_HVM_CPUID (TRC_HVM_HANDLER + 0x0E)
++#define TRC_HVM_INTR (TRC_HVM_HANDLER + 0x0F)
++#define TRC_HVM_NMI (TRC_HVM_HANDLER + 0x10)
++#define TRC_HVM_SMI (TRC_HVM_HANDLER + 0x11)
++#define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12)
++#define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13)
++#define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14)
++
++/* This structure represents a single trace buffer record. */
++struct t_rec {
++ uint64_t cycles; /* cycle counter timestamp */
++ uint32_t event; /* event ID */
++ unsigned long data[5]; /* event data items */
++};
++
++/*
++ * This structure contains the metadata for a single trace buffer. The cons
++ * and prod fields index into the array of struct t_rec's that follows it.
++ */
++struct t_buf {
++ uint32_t cons; /* Next item to be consumed by control tools. */
++ uint32_t prod; /* Next item to be produced by Xen. */
++ /* 'nr_recs' records follow immediately after the meta-data header. */
++};
++
++#endif /* __XEN_PUBLIC_TRACE_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
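
A sketch of the consumer side of the ring described by struct t_buf: Xen advances prod as it writes records, the tool consumes from cons up to prod and writes cons back. The ring size (nr_recs) is fixed when the trace buffers are set up via XEN_SYSCTL_tbuf_op and is simply passed in here; memory barriers are omitted for brevity.

#include <xen/interface/trace.h>

static void drain_tbuf(struct t_buf *buf, struct t_rec *recs,
                       uint32_t nr_recs, void (*handle)(struct t_rec *))
{
    uint32_t cons = buf->cons;
    uint32_t prod = buf->prod;      /* snapshot; only Xen advances prod */

    while (cons != prod) {
        handle(&recs[cons % nr_recs]);
        cons++;
    }
    buf->cons = cons;               /* tell Xen how far we consumed */
}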
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/vcpu.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,192 @@
++/******************************************************************************
++ * vcpu.h
++ *
++ * VCPU initialisation, query, and hotplug.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_VCPU_H__
++#define __XEN_PUBLIC_VCPU_H__
++
++/*
++ * Prototype for this hypercall is:
++ * int vcpu_op(int cmd, int vcpuid, void *extra_args)
++ * @cmd == VCPUOP_??? (VCPU operation).
++ * @vcpuid == VCPU to operate on.
++ * @extra_args == Operation-specific extra arguments (NULL if none).
++ */
++
++/*
++ * Initialise a VCPU. Each VCPU can be initialised only once. A
++ * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
++ *
++ * @extra_arg == pointer to vcpu_guest_context structure containing initial
++ * state for the VCPU.
++ */
++#define VCPUOP_initialise 0
++
++/*
++ * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
++ * if the VCPU has not been initialised (VCPUOP_initialise).
++ */
++#define VCPUOP_up 1
++
++/*
++ * Bring down a VCPU (i.e., make it non-runnable).
++ * There are a few caveats that callers should observe:
++ * 1. This operation may return, and VCPUOP_is_up may return false, before the
++ * VCPU stops running (i.e., the command is asynchronous). It is a good
++ * idea to ensure that the VCPU has entered a non-critical loop before
++ * bringing it down. Alternatively, this operation is guaranteed
++ * synchronous if invoked by the VCPU itself.
++ * 2. After a VCPU is initialised, there is currently no way to drop all its
++ * references to domain memory. Even a VCPU that is down still holds
++ * memory references via its pagetable base pointer and GDT. It is good
++ * practice to move a VCPU onto an 'idle' or default page table, LDT and
++ * GDT before bringing it down.
++ */
++#define VCPUOP_down 2
++
++/* Returns 1 if the given VCPU is up. */
++#define VCPUOP_is_up 3
++
++/*
++ * Return information about the state and running time of a VCPU.
++ * @extra_arg == pointer to vcpu_runstate_info structure.
++ */
++#define VCPUOP_get_runstate_info 4
++struct vcpu_runstate_info {
++ /* VCPU's current state (RUNSTATE_*). */
++ int state;
++ /* When was current state entered (system time, ns)? */
++ uint64_t state_entry_time;
++ /*
++ * Time spent in each RUNSTATE_* (ns). The sum of these times is
++ * guaranteed not to drift from system time.
++ */
++ uint64_t time[4];
++};
++typedef struct vcpu_runstate_info vcpu_runstate_info_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t);
++
++/* VCPU is currently running on a physical CPU. */
++#define RUNSTATE_running 0
++
++/* VCPU is runnable, but not currently scheduled on any physical CPU. */
++#define RUNSTATE_runnable 1
++
++/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
++#define RUNSTATE_blocked 2
++
++/*
++ * VCPU is not runnable, but it is not blocked.
++ * This is a 'catch all' state for things like hotplug and pauses by the
++ * system administrator (or for critical sections in the hypervisor).
++ * RUNSTATE_blocked dominates this state (it is the preferred state).
++ */
++#define RUNSTATE_offline 3
++
++/*
++ * Register a shared memory area from which the guest may obtain its own
++ * runstate information without needing to execute a hypercall.
++ * Notes:
++ * 1. The registered address may be a virtual address, a physical address or
++ * a guest handle, depending on the platform. On x86 systems a virtual
++ * address or guest handle should be registered.
++ * 2. Only one shared area may be registered per VCPU. The shared area is
++ * updated by the hypervisor each time the VCPU is scheduled. Thus
++ * runstate.state will always be RUNSTATE_running and
++ * runstate.state_entry_time will indicate the system time at which the
++ * VCPU was last scheduled to run.
++ * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
++ */
++#define VCPUOP_register_runstate_memory_area 5
++struct vcpu_register_runstate_memory_area {
++ union {
++ XEN_GUEST_HANDLE(vcpu_runstate_info_t) h;
++ struct vcpu_runstate_info *v;
++ uint64_t p;
++ } addr;
++};
++typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t);
++
++/*
++ * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
++ * which can be set via these commands. Periods smaller than one millisecond
++ * may not be supported.
++ */
++#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */
++#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */
++struct vcpu_set_periodic_timer {
++ uint64_t period_ns;
++};
++typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t);
++
++/*
++ * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
++ * timer which can be set via these commands.
++ */
++#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */
++#define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
++struct vcpu_set_singleshot_timer {
++ uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */
++ uint32_t flags; /* VCPU_SSHOTTMR_??? */
++};
++typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t);
++
++/* Flags to VCPUOP_set_singleshot_timer. */
++ /* Require the timeout to be in the future (return -ETIME if it's passed). */
++#define _VCPU_SSHOTTMR_future (0)
++#define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future)
++
++/*
++ * Register a memory location in the guest address space for the
++ * vcpu_info structure. This allows the guest to place the vcpu_info
++ * structure in a convenient place, such as in a per-cpu data area.
++ * The pointer need not be page aligned, but the structure must not
++ * cross a page boundary.
++ *
++ * If the specified mfn is INVALID_MFN, then it reverts to using the
++ * vcpu_info structure in the shared_info page.
++ */
++#define VCPUOP_register_vcpu_info 10 /* arg == struct vcpu_register_vcpu_info */
++struct vcpu_register_vcpu_info {
++ xen_pfn_t mfn; /* mfn of page to place vcpu_info */
++ uint32_t offset; /* offset within page */
++};
++typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);
++
++#endif /* __XEN_PUBLIC_VCPU_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
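
A sketch of arming the single-shot timer described above, assuming the HYPERVISOR_vcpu_op() wrapper; VCPU_SSHOTTMR_future makes the hypervisor reject deadlines that have already passed instead of firing immediately.

#include <xen/interface/vcpu.h>

/* Arm this VCPU's single-shot timer for an absolute system time (ns). */
static int set_oneshot_timer(int vcpu, uint64_t abs_ns)
{
    struct vcpu_set_singleshot_timer single = {
        .timeout_abs_ns = abs_ns,
        .flags          = VCPU_SSHOTTMR_future,   /* -ETIME if in the past */
    };

    return HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, vcpu, &single);
}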
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/version.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,91 @@
++/******************************************************************************
++ * version.h
++ *
++ * Xen version, type, and compile information.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, Nguyen Anh Quynh <aquynh@gmail.com>
++ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_VERSION_H__
++#define __XEN_PUBLIC_VERSION_H__
++
++/* NB. All ops return zero on success, except XENVER_{version,pagesize} */
++
++/* arg == NULL; returns major:minor (16:16). */
++#define XENVER_version 0
++
++/* arg == xen_extraversion_t. */
++#define XENVER_extraversion 1
++typedef char xen_extraversion_t[16];
++#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t))
++
++/* arg == xen_compile_info_t. */
++#define XENVER_compile_info 2
++struct xen_compile_info {
++ char compiler[64];
++ char compile_by[16];
++ char compile_domain[32];
++ char compile_date[32];
++};
++typedef struct xen_compile_info xen_compile_info_t;
++
++#define XENVER_capabilities 3
++typedef char xen_capabilities_info_t[1024];
++#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t))
++
++#define XENVER_changeset 4
++typedef char xen_changeset_info_t[64];
++#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t))
++
++#define XENVER_platform_parameters 5
++struct xen_platform_parameters {
++ unsigned long virt_start;
++};
++typedef struct xen_platform_parameters xen_platform_parameters_t;
++
++#define XENVER_get_features 6
++struct xen_feature_info {
++ unsigned int submap_idx; /* IN: which 32-bit submap to return */
++ uint32_t submap; /* OUT: 32-bit submap */
++};
++typedef struct xen_feature_info xen_feature_info_t;
++
++/* Declares the features reported by XENVER_get_features. */
++#include "features.h"
++
++/* arg == NULL; returns host memory page size. */
++#define XENVER_pagesize 7
++
++/* arg == xen_domain_handle_t. */
++#define XENVER_guest_handle 8
++
++#endif /* __XEN_PUBLIC_VERSION_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
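
A sketch of the two most common queries, assuming the HYPERVISOR_xen_version() wrapper: XENVER_version packs major:minor into the upper:lower 16 bits of the return value, and XENVER_extraversion fills a fixed-size string.

#include <linux/kernel.h>
#include <xen/interface/version.h>

static void print_xen_version(void)
{
    xen_extraversion_t extra;
    int version = HYPERVISOR_xen_version(XENVER_version, NULL);

    HYPERVISOR_xen_version(XENVER_extraversion, extra);
    printk(KERN_INFO "Running on Xen %d.%d%s\n",
           version >> 16, version & 0xffff, extra);
}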
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/xen-compat.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,51 @@
++/******************************************************************************
++ * xen-compat.h
++ *
++ * Guest OS interface to Xen. Compatibility layer.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2006, Christian Limpach
++ */
++
++#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
++#define __XEN_PUBLIC_XEN_COMPAT_H__
++
++#define __XEN_LATEST_INTERFACE_VERSION__ 0x00030205
++
++#if defined(__XEN__) || defined(__XEN_TOOLS__)
++/* Xen is built with matching headers and implements the latest interface. */
++#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__
++#elif !defined(__XEN_INTERFACE_VERSION__)
++/* Guests which do not specify a version get the legacy interface. */
++#define __XEN_INTERFACE_VERSION__ 0x00000000
++#endif
++
++#if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__
++#error "These header files do not support the requested interface version."
++#endif
++
++/* Fields defined as a Xen guest handle since 0x00030205. */
++#if __XEN_INTERFACE_VERSION__ >= 0x00030205
++#define XEN_GUEST_HANDLE_00030205(type) XEN_GUEST_HANDLE(type)
++#else
++#define XEN_GUEST_HANDLE_00030205(type) type *
++#endif
++
++#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/xen.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,610 @@
++/******************************************************************************
++ * xen.h
++ *
++ * Guest OS interface to Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004, K A Fraser
++ */
++
++#ifndef __XEN_PUBLIC_XEN_H__
++#define __XEN_PUBLIC_XEN_H__
++
++#include "xen-compat.h"
++
++#if defined(__i386__) || defined(__x86_64__)
++#include "arch-x86/xen.h"
++#elif defined(__ia64__)
++#include "arch-ia64.h"
++#elif defined(__powerpc__)
++#include "arch-powerpc.h"
++#else
++#error "Unsupported architecture"
++#endif
++
++/*
++ * HYPERCALLS
++ */
++
++#define __HYPERVISOR_set_trap_table 0
++#define __HYPERVISOR_mmu_update 1
++#define __HYPERVISOR_set_gdt 2
++#define __HYPERVISOR_stack_switch 3
++#define __HYPERVISOR_set_callbacks 4
++#define __HYPERVISOR_fpu_taskswitch 5
++#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */
++#define __HYPERVISOR_platform_op 7
++#define __HYPERVISOR_set_debugreg 8
++#define __HYPERVISOR_get_debugreg 9
++#define __HYPERVISOR_update_descriptor 10
++#define __HYPERVISOR_memory_op 12
++#define __HYPERVISOR_multicall 13
++#define __HYPERVISOR_update_va_mapping 14
++#define __HYPERVISOR_set_timer_op 15
++#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */
++#define __HYPERVISOR_xen_version 17
++#define __HYPERVISOR_console_io 18
++#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */
++#define __HYPERVISOR_grant_table_op 20
++#define __HYPERVISOR_vm_assist 21
++#define __HYPERVISOR_update_va_mapping_otherdomain 22
++#define __HYPERVISOR_iret 23 /* x86 only */
++#define __HYPERVISOR_vcpu_op 24
++#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
++#define __HYPERVISOR_mmuext_op 26
++#define __HYPERVISOR_acm_op 27
++#define __HYPERVISOR_nmi_op 28
++#define __HYPERVISOR_sched_op 29
++#define __HYPERVISOR_callback_op 30
++#define __HYPERVISOR_xenoprof_op 31
++#define __HYPERVISOR_event_channel_op 32
++#define __HYPERVISOR_physdev_op 33
++#define __HYPERVISOR_hvm_op 34
++#define __HYPERVISOR_sysctl 35
++#define __HYPERVISOR_domctl 36
++#define __HYPERVISOR_kexec_op 37
++
++/* Architecture-specific hypercall definitions. */
++#define __HYPERVISOR_arch_0 48
++#define __HYPERVISOR_arch_1 49
++#define __HYPERVISOR_arch_2 50
++#define __HYPERVISOR_arch_3 51
++#define __HYPERVISOR_arch_4 52
++#define __HYPERVISOR_arch_5 53
++#define __HYPERVISOR_arch_6 54
++#define __HYPERVISOR_arch_7 55
++
++/*
++ * HYPERCALL COMPATIBILITY.
++ */
++
++/* New sched_op hypercall introduced in 0x00030101. */
++#if __XEN_INTERFACE_VERSION__ < 0x00030101
++#undef __HYPERVISOR_sched_op
++#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat
++#endif
++
++/* New event-channel and physdev hypercalls introduced in 0x00030202. */
++#if __XEN_INTERFACE_VERSION__ < 0x00030202
++#undef __HYPERVISOR_event_channel_op
++#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat
++#undef __HYPERVISOR_physdev_op
++#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
++#endif
++
++/* New platform_op hypercall introduced in 0x00030204. */
++#if __XEN_INTERFACE_VERSION__ < 0x00030204
++#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
++#endif
++
++/*
++ * VIRTUAL INTERRUPTS
++ *
++ * Virtual interrupts that a guest OS may receive from Xen.
++ *
++ * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
++ * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
++ * The latter can be allocated only once per guest: they must initially be
++ * allocated to VCPU0 but can subsequently be re-bound.
++ */
++#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
++#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
++#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
++#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */
++#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */
++#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
++#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
++#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
++
++/* Architecture-specific VIRQ definitions. */
++#define VIRQ_ARCH_0 16
++#define VIRQ_ARCH_1 17
++#define VIRQ_ARCH_2 18
++#define VIRQ_ARCH_3 19
++#define VIRQ_ARCH_4 20
++#define VIRQ_ARCH_5 21
++#define VIRQ_ARCH_6 22
++#define VIRQ_ARCH_7 23
++
++#define NR_VIRQS 24
++
++/*
++ * MMU-UPDATE REQUESTS
++ *
++ * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
++ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
++ * Where the FD has some effect, it is described below.
++ * ptr[1:0] specifies the appropriate MMU_* command.
++ *
++ * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
++ * Updates an entry in a page table. If updating an L1 table, and the new
++ * table entry is valid/present, the mapped frame must belong to the FD, if
++ * an FD has been specified. If attempting to map an I/O page then the
++ * caller assumes the privilege of the FD.
++ * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
++ * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
++ * ptr[:2] -- Machine address of the page-table entry to modify.
++ * val -- Value to write.
++ *
++ * ptr[1:0] == MMU_MACHPHYS_UPDATE:
++ * Updates an entry in the machine->pseudo-physical mapping table.
++ * ptr[:2] -- Machine address within the frame whose mapping to modify.
++ * The frame must belong to the FD, if one is specified.
++ * val -- Value to write into the mapping entry.
++ */
++#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
++#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
++
++/*
++ * MMU EXTENDED OPERATIONS
++ *
++ * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
++ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
++ * Where the FD has some effect, it is described below.
++ *
++ * cmd: MMUEXT_(UN)PIN_*_TABLE
++ * mfn: Machine frame number to be (un)pinned as a p.t. page.
++ * The frame must belong to the FD, if one is specified.
++ *
++ * cmd: MMUEXT_NEW_BASEPTR
++ * mfn: Machine frame number of new page-table base to install in MMU.
++ *
++ * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
++ * mfn: Machine frame number of new page-table base to install in MMU
++ * when in user space.
++ *
++ * cmd: MMUEXT_TLB_FLUSH_LOCAL
++ * No additional arguments. Flushes local TLB.
++ *
++ * cmd: MMUEXT_INVLPG_LOCAL
++ * linear_addr: Linear address to be flushed from the local TLB.
++ *
++ * cmd: MMUEXT_TLB_FLUSH_MULTI
++ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
++ *
++ * cmd: MMUEXT_INVLPG_MULTI
++ * linear_addr: Linear address to be flushed.
++ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
++ *
++ * cmd: MMUEXT_TLB_FLUSH_ALL
++ * No additional arguments. Flushes all VCPUs' TLBs.
++ *
++ * cmd: MMUEXT_INVLPG_ALL
++ * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
++ *
++ * cmd: MMUEXT_FLUSH_CACHE
++ * No additional arguments. Writes back and flushes cache contents.
++ *
++ * cmd: MMUEXT_SET_LDT
++ * linear_addr: Linear address of LDT base (NB. must be page-aligned).
++ * nr_ents: Number of entries in LDT.
++ */
++#define MMUEXT_PIN_L1_TABLE 0
++#define MMUEXT_PIN_L2_TABLE 1
++#define MMUEXT_PIN_L3_TABLE 2
++#define MMUEXT_PIN_L4_TABLE 3
++#define MMUEXT_UNPIN_TABLE 4
++#define MMUEXT_NEW_BASEPTR 5
++#define MMUEXT_TLB_FLUSH_LOCAL 6
++#define MMUEXT_INVLPG_LOCAL 7
++#define MMUEXT_TLB_FLUSH_MULTI 8
++#define MMUEXT_INVLPG_MULTI 9
++#define MMUEXT_TLB_FLUSH_ALL 10
++#define MMUEXT_INVLPG_ALL 11
++#define MMUEXT_FLUSH_CACHE 12
++#define MMUEXT_SET_LDT 13
++#define MMUEXT_NEW_USER_BASEPTR 15
++
++#ifndef __ASSEMBLY__
++struct mmuext_op {
++ unsigned int cmd;
++ union {
++ /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
++ xen_pfn_t mfn;
++ /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
++ unsigned long linear_addr;
++ } arg1;
++ union {
++ /* SET_LDT */
++ unsigned int nr_ents;
++ /* TLB_FLUSH_MULTI, INVLPG_MULTI */
++ XEN_GUEST_HANDLE_00030205(void) vcpumask;
++ } arg2;
++};
++typedef struct mmuext_op mmuext_op_t;
++DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
++#endif
++
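
A sketch of a single pin request through HYPERVISOR_mmuext_op() (wrapper signature as in the PV hypercall layer: ops, count, success count, foreign domain); pinning tells Xen to validate the frame as a page table of the given level and keep that type pinned.

#include <xen/interface/xen.h>

/* Pin the page at machine frame 'mfn' as an L1 page table. */
static int pin_l1_table(xen_pfn_t mfn)
{
    struct mmuext_op op = {
        .cmd      = MMUEXT_PIN_L1_TABLE,
        .arg1.mfn = mfn,
    };

    return HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF);
}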
++/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
++/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
++/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
++#define UVMF_NONE (0UL<<0) /* No flushing at all. */
++#define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */
++#define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */
++#define UVMF_FLUSHTYPE_MASK (3UL<<0)
++#define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */
++#define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */
++#define UVMF_ALL (1UL<<2) /* Flush all TLBs. */
++
++/*
++ * Commands to HYPERVISOR_console_io().
++ */
++#define CONSOLEIO_write 0
++#define CONSOLEIO_read 1
++
++/*
++ * Commands to HYPERVISOR_vm_assist().
++ */
++#define VMASST_CMD_enable 0
++#define VMASST_CMD_disable 1
++
++/* x86/32 guests: simulate full 4GB segment limits. */
++#define VMASST_TYPE_4gb_segments 0
++
++/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
++#define VMASST_TYPE_4gb_segments_notify 1
++
++/*
++ * x86 guests: support writes to bottom-level PTEs.
++ * NB1. Page-directory entries cannot be written.
++ * NB2. Guest must continue to remove all writable mappings of PTEs.
++ */
++#define VMASST_TYPE_writable_pagetables 2
++
++/* x86/PAE guests: support PDPTs above 4GB. */
++#define VMASST_TYPE_pae_extended_cr3 3
++
++#define MAX_VMASST_TYPE 3
++
++#ifndef __ASSEMBLY__
++
++typedef uint16_t domid_t;
++
++/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
++#define DOMID_FIRST_RESERVED (0x7FF0U)
++
++/* DOMID_SELF is used in certain contexts to refer to oneself. */
++#define DOMID_SELF (0x7FF0U)
++
++/*
++ * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
++ * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
++ * is useful to ensure that no mappings to the OS's own heap are accidentally
++ * installed. (e.g., in Linux this could cause havoc as reference counts
++ * aren't adjusted on the I/O-mapping code path).
++ * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
++ * be specified by any calling domain.
++ */
++#define DOMID_IO (0x7FF1U)
++
++/*
++ * DOMID_XEN is used to allow privileged domains to map restricted parts of
++ * Xen's heap space (e.g., the machine_to_phys table).
++ * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
++ * the caller is privileged.
++ */
++#define DOMID_XEN (0x7FF2U)
++
++/*
++ * Send an array of these to HYPERVISOR_mmu_update().
++ * NB. The fields are natural pointer/address size for this architecture.
++ */
++struct mmu_update {
++ uint64_t ptr; /* Machine address of PTE. */
++ uint64_t val; /* New contents of PTE. */
++};
++typedef struct mmu_update mmu_update_t;
++DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
++
++/*
++ * Send an array of these to HYPERVISOR_multicall().
++ * NB. The fields are natural register size for this architecture.
++ */
++struct multicall_entry {
++ unsigned long op, result;
++ unsigned long args[6];
++};
++typedef struct multicall_entry multicall_entry_t;
++DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);
++
++/*
++ * Event channel endpoints per domain:
++ * 1024 if a long is 32 bits; 4096 if a long is 64 bits.
++ */
++#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
++
++struct vcpu_time_info {
++ /*
++ * Updates to the following values are preceded and followed by an
++ * increment of 'version'. The guest can therefore detect updates by
++ * looking for changes to 'version'. If the least-significant bit of
++ * the version number is set then an update is in progress and the guest
++ * must wait to read a consistent set of values.
++ * The correct way to interact with the version number is similar to
++ * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
++ */
++ uint32_t version;
++ uint32_t pad0;
++ uint64_t tsc_timestamp; /* TSC at last update of time vals. */
++ uint64_t system_time; /* Time, in nanosecs, since boot. */
++ /*
++ * Current system time:
++ * system_time +
++ * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
++ * CPU frequency (Hz):
++ * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
++ */
++ uint32_t tsc_to_system_mul;
++ int8_t tsc_shift;
++ int8_t pad1[3];
++}; /* 32 bytes */
++typedef struct vcpu_time_info vcpu_time_info_t;
++
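
The scaling formula in the comment above, spelled out as a sketch: snapshot the record under the version/seqlock protocol, apply tsc_shift to the TSC delta, then multiply by tsc_to_system_mul keeping bits [95:32] of the 64x32 product. read_tsc() is an illustrative stand-in for the architecture's raw TSC read; rmb() is the usual read barrier.

extern uint64_t read_tsc(void);   /* illustrative raw TSC read */

static uint64_t xen_system_time_ns(const volatile struct vcpu_time_info *t)
{
    uint32_t version, mul;
    uint64_t tsc, stamp, base, delta;
    int8_t shift;

    do {
        version = t->version;
        rmb();                       /* pairs with Xen's update protocol */
        tsc   = read_tsc();
        stamp = t->tsc_timestamp;
        base  = t->system_time;
        mul   = t->tsc_to_system_mul;
        shift = t->tsc_shift;
        rmb();
    } while ((version & 1) || version != t->version);

    delta = tsc - stamp;
    if (shift >= 0)
        delta <<= shift;
    else
        delta >>= -shift;

    /* (delta * mul) >> 32, done as a 64x32 multiply to avoid overflow. */
    return base + ((delta >> 32) * mul)
                + (((delta & 0xffffffffULL) * mul) >> 32);
}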
++struct vcpu_info {
++ /*
++ * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
++ * a pending notification for a particular VCPU. It is then cleared
++ * by the guest OS /before/ checking for pending work, thus avoiding
++ * a set-and-check race. Note that the mask is only accessed by Xen
++ * on the CPU that is currently hosting the VCPU. This means that the
++ * pending and mask flags can be updated by the guest without special
++ * synchronisation (i.e., no need for the x86 LOCK prefix).
++ * This may seem suboptimal because if the pending flag is set by
++ * a different CPU then an IPI may be scheduled even when the mask
++ * is set. However, note:
++ * 1. The task of 'interrupt holdoff' is covered by the per-event-
++ * channel mask bits. A 'noisy' event that is continually being
++ * triggered can be masked at source at this very precise
++ * granularity.
++ * 2. The main purpose of the per-VCPU mask is therefore to restrict
++ * reentrant execution: whether for concurrency control, or to
++ * prevent unbounded stack usage. Whatever the purpose, we expect
++ * that the mask will be asserted only for short periods at a time,
++ * and so the likelihood of a 'spurious' IPI is suitably small.
++ * The mask is read before making an event upcall to the guest: a
++ * non-zero mask therefore guarantees that the VCPU will not receive
++ * an upcall activation. The mask is cleared when the VCPU requests
++ * to block: this avoids wakeup-waiting races.
++ */
++ uint8_t evtchn_upcall_pending;
++ uint8_t evtchn_upcall_mask;
++ unsigned long evtchn_pending_sel;
++ struct arch_vcpu_info arch;
++ struct vcpu_time_info time;
++}; /* 64 bytes (x86) */
++#ifndef __XEN__
++typedef struct vcpu_info vcpu_info_t;
++#endif
++
++/*
++ * Xen/kernel shared data -- pointer provided in start_info.
++ *
++ * This structure is defined to be both smaller than a page, and the
++ * only data on the shared page, but may vary in actual size even within
++ * compatible Xen versions; guests should not rely on the size
++ * of this structure remaining constant.
++ */
++struct shared_info {
++ struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
++
++ /*
++ * A domain can create "event channels" on which it can send and receive
++ * asynchronous event notifications. There are three classes of event that
++ * are delivered by this mechanism:
++ * 1. Bi-directional inter- and intra-domain connections. Domains must
++ * arrange out-of-band to set up a connection (usually by allocating
++ *    an unbound 'listener' port and advertising that via a storage service
++ * such as xenstore).
++ * 2. Physical interrupts. A domain with suitable hardware-access
++ * privileges can bind an event-channel port to a physical interrupt
++ * source.
++ * 3. Virtual interrupts ('events'). A domain can bind an event-channel
++ * port to a virtual interrupt source, such as the virtual-timer
++ * device or the emergency console.
++ *
++ * Event channels are addressed by a "port index". Each channel is
++ * associated with two bits of information:
++ * 1. PENDING -- notifies the domain that there is a pending notification
++ * to be processed. This bit is cleared by the guest.
++ * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
++ * will cause an asynchronous upcall to be scheduled. This bit is only
++ * updated by the guest. It is read-only within Xen. If a channel
++ * becomes pending while the channel is masked then the 'edge' is lost
++ * (i.e., when the channel is unmasked, the guest must manually handle
++ * pending notifications as no upcall will be scheduled by Xen).
++ *
++ * To expedite scanning of pending notifications, any 0->1 pending
++ * transition on an unmasked channel causes a corresponding bit in a
++ * per-vcpu selector word to be set. Each bit in the selector covers a
++ * 'C long' in the PENDING bitfield array.
++ */
++ unsigned long evtchn_pending[sizeof(unsigned long) * 8];
++ unsigned long evtchn_mask[sizeof(unsigned long) * 8];
++
++ /*
++ * Wallclock time: updated only by control software. Guests should base
++ * their gettimeofday() syscall on this wallclock-base value.
++ */
++ uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
++ uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
++ uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
++
++ struct arch_shared_info arch;
++
++};
++#ifndef __XEN__
++typedef struct shared_info shared_info_t;
++#endif
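A sketch of the two-level pending scan described in the comment above (not part of this patch). do_event() is a placeholder for the guest's per-port dispatch; xchg(), __ffs() and clear_bit() are the usual Linux bit helpers:

    static void scan_pending_events(struct shared_info *shared,
                                    struct vcpu_info *vcpu)
    {
        unsigned long sel, pending;
        unsigned int l1, l2, port;

        /* Atomically take the per-VCPU selector word. */
        sel = xchg(&vcpu->evtchn_pending_sel, 0UL);

        while (sel) {
            l1 = __ffs(sel);
            sel &= ~(1UL << l1);

            /* Each selector bit covers one 'C long' of the pending array. */
            pending = shared->evtchn_pending[l1] & ~shared->evtchn_mask[l1];
            while (pending) {
                l2 = __ffs(pending);
                pending &= ~(1UL << l2);

                port = l1 * BITS_PER_LONG + l2;
                clear_bit(l2, &shared->evtchn_pending[l1]);
                do_event(port);          /* dispatch to the bound handler */
            }
        }
    }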
++
++/*
++ * Start-of-day memory layout:
++ *  1. The domain is started within a contiguous virtual-memory region.
++ *  2. The contiguous region ends on an aligned 4MB boundary.
++ *  3. This is the order of bootstrap elements in the initial virtual region:
++ * a. relocated kernel image
++ * b. initial ram disk [mod_start, mod_len]
++ * c. list of allocated page frames [mfn_list, nr_pages]
++ * d. start_info_t structure [register ESI (x86)]
++ * e. bootstrap page tables [pt_base, CR3 (x86)]
++ * f. bootstrap stack [register ESP (x86)]
++ * 4. Bootstrap elements are packed together, but each is 4kB-aligned.
++ * 5. The initial ram disk may be omitted.
++ * 6. The list of page frames forms a contiguous 'pseudo-physical' memory
++ * layout for the domain. In particular, the bootstrap virtual-memory
++ * region is a 1:1 mapping to the first section of the pseudo-physical map.
++ * 7. All bootstrap elements are mapped read-writable for the guest OS. The
++ * only exception is the bootstrap page table, which is mapped read-only.
++ * 8. There is guaranteed to be at least 512kB padding after the final
++ * bootstrap element. If necessary, the bootstrap virtual region is
++ * extended by an extra 4MB to ensure this.
++ */
++
++#define MAX_GUEST_CMDLINE 1024
++struct start_info {
++ /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
++ char magic[32]; /* "xen-<version>-<platform>". */
++ unsigned long nr_pages; /* Total pages allocated to this domain. */
++ unsigned long shared_info; /* MACHINE address of shared info struct. */
++ uint32_t flags; /* SIF_xxx flags. */
++ xen_pfn_t store_mfn; /* MACHINE page number of shared page. */
++ uint32_t store_evtchn; /* Event channel for store communication. */
++ union {
++ struct {
++ xen_pfn_t mfn; /* MACHINE page number of console page. */
++ uint32_t evtchn; /* Event channel for console page. */
++ } domU;
++ struct {
++ uint32_t info_off; /* Offset of console_info struct. */
++ uint32_t info_size; /* Size of console_info struct from start.*/
++ } dom0;
++ } console;
++ /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */
++ unsigned long pt_base; /* VIRTUAL address of page directory. */
++ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */
++ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */
++ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
++ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
++ int8_t cmd_line[MAX_GUEST_CMDLINE];
++};
++typedef struct start_info start_info_t;
++
++/* New console union for dom0 introduced in 0x00030203. */
++#if __XEN_INTERFACE_VERSION__ < 0x00030203
++#define console_mfn console.domU.mfn
++#define console_evtchn console.domU.evtchn
++#endif
++
++/* These flags are passed in the 'flags' field of start_info_t. */
++#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
++#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
++
++typedef struct dom0_vga_console_info {
++ uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */
++#define XEN_VGATYPE_TEXT_MODE_3 0x03
++#define XEN_VGATYPE_VESA_LFB 0x23
++
++ union {
++ struct {
++ /* Font height, in pixels. */
++ uint16_t font_height;
++ /* Cursor location (column, row). */
++ uint16_t cursor_x, cursor_y;
++ /* Number of rows and columns (dimensions in characters). */
++ uint16_t rows, columns;
++ } text_mode_3;
++
++ struct {
++ /* Width and height, in pixels. */
++ uint16_t width, height;
++ /* Bytes per scan line. */
++ uint16_t bytes_per_line;
++ /* Bits per pixel. */
++ uint16_t bits_per_pixel;
++ /* LFB physical address, and size (in units of 64kB). */
++ uint32_t lfb_base;
++ uint32_t lfb_size;
++ /* RGB mask offsets and sizes, as defined by VBE 1.2+ */
++ uint8_t red_pos, red_size;
++ uint8_t green_pos, green_size;
++ uint8_t blue_pos, blue_size;
++ uint8_t rsvd_pos, rsvd_size;
++ } vesa_lfb;
++ } u;
++} dom0_vga_console_info_t;
++
++typedef uint8_t xen_domain_handle_t[16];
++
++/* Turn a plain number into a C unsigned long constant. */
++#define __mk_unsigned_long(x) x ## UL
++#define mk_unsigned_long(x) __mk_unsigned_long(x)
++
++DEFINE_XEN_GUEST_HANDLE(uint8_t);
++DEFINE_XEN_GUEST_HANDLE(uint16_t);
++DEFINE_XEN_GUEST_HANDLE(uint32_t);
++DEFINE_XEN_GUEST_HANDLE(uint64_t);
++
++#else /* __ASSEMBLY__ */
++
++/* In assembly code we cannot use C numeric constant suffixes. */
++#define mk_unsigned_long(x) x
++
++#endif /* !__ASSEMBLY__ */
++
++/* Default definitions for macros used by domctl/sysctl. */
++#if defined(__XEN__) || defined(__XEN_TOOLS__)
++#ifndef uint64_aligned_t
++#define uint64_aligned_t uint64_t
++#endif
++#ifndef XEN_GUEST_HANDLE_64
++#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name)
++#endif
++#endif
++
++#endif /* __XEN_PUBLIC_XEN_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
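For illustration only (not part of this patch), early PV boot code consumes start_info roughly as follows; the function name and printouts are hypothetical, and the domU/dom0 split mirrors the console union introduced in interface version 0x00030203:

    static void __init parse_start_info(const start_info_t *si)
    {
        if (strncmp(si->magic, "xen-", 4) != 0)
            panic("not started by Xen (magic: %.32s)", si->magic);

        printk(KERN_INFO "Xen: %lu pages, shared_info at machine address %lx\n",
               si->nr_pages, si->shared_info);

        if (si->flags & SIF_INITDOMAIN) {
            /* dom0: console details live in the console_info structure. */
            printk(KERN_INFO "dom0 console info at offset %u, size %u\n",
                   si->console.dom0.info_off, si->console.dom0.info_size);
        } else {
            /* domU: a console page plus an event channel. */
            printk(KERN_INFO "console mfn %lx, evtchn %u\n",
                   (unsigned long)si->console.domU.mfn, si->console.domU.evtchn);
        }
    }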
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/xencomm.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,41 @@
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) IBM Corp. 2006
++ */
++
++#ifndef _XEN_XENCOMM_H_
++#define _XEN_XENCOMM_H_
++
++/* A xencomm descriptor is a scatter/gather list containing physical
++ * addresses corresponding to a virtually contiguous memory area. The
++ * hypervisor translates these physical addresses to machine addresses to copy
++ * to and from the virtually contiguous area.
++ */
++
++#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
++#define XENCOMM_INVALID (~0UL)
++
++struct xencomm_desc {
++ uint32_t magic;
++ uint32_t nr_addrs; /* the number of entries in address[] */
++ uint64_t address[0];
++};
++
++#endif /* _XEN_XENCOMM_H_ */
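An illustrative construction of a descriptor for a virtually contiguous buffer (not part of this patch); the real xencomm code handles highmem and the placement of the descriptor itself more carefully than this sketch, which leans on virt_to_phys() for simplicity:

    static struct xencomm_desc *xencomm_build(void *buf, size_t len, int max_addrs)
    {
        struct xencomm_desc *desc;
        unsigned long addr = (unsigned long)buf;
        unsigned long end = addr + len;
        int i = 0;

        desc = kmalloc(sizeof(*desc) + max_addrs * sizeof(uint64_t), GFP_KERNEL);
        if (!desc)
            return NULL;

        desc->magic = XENCOMM_MAGIC;
        while (addr < end && i < max_addrs) {
            /* One physical address per page covered by the buffer. */
            desc->address[i++] = virt_to_phys((void *)addr);
            addr = (addr & PAGE_MASK) + PAGE_SIZE;
        }
        desc->nr_addrs = i;
        return desc;
    }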
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/interface/xenoprof.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,132 @@
++/******************************************************************************
++ * xenoprof.h
++ *
++ * Interface for enabling system-wide profiling based on hardware performance
++ * counters
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ * Written by Aravind Menon & Jose Renato Santos
++ */
++
++#ifndef __XEN_PUBLIC_XENOPROF_H__
++#define __XEN_PUBLIC_XENOPROF_H__
++
++#include "xen.h"
++
++/*
++ * Commands to HYPERVISOR_xenoprof_op().
++ */
++#define XENOPROF_init 0
++#define XENOPROF_reset_active_list 1
++#define XENOPROF_reset_passive_list 2
++#define XENOPROF_set_active 3
++#define XENOPROF_set_passive 4
++#define XENOPROF_reserve_counters 5
++#define XENOPROF_counter 6
++#define XENOPROF_setup_events 7
++#define XENOPROF_enable_virq 8
++#define XENOPROF_start 9
++#define XENOPROF_stop 10
++#define XENOPROF_disable_virq 11
++#define XENOPROF_release_counters 12
++#define XENOPROF_shutdown 13
++#define XENOPROF_get_buffer 14
++#define XENOPROF_last_op 14
++
++#define MAX_OPROF_EVENTS 32
++#define MAX_OPROF_DOMAINS 25
++#define XENOPROF_CPU_TYPE_SIZE 64
++
++/* Xenoprof performance events (not Xen events) */
++struct event_log {
++ uint64_t eip;
++ uint8_t mode;
++ uint8_t event;
++};
++
++/* Xenoprof buffer shared between Xen and domain - 1 per VCPU */
++struct xenoprof_buf {
++ uint32_t event_head;
++ uint32_t event_tail;
++ uint32_t event_size;
++ uint32_t vcpu_id;
++ uint64_t xen_samples;
++ uint64_t kernel_samples;
++ uint64_t user_samples;
++ uint64_t lost_samples;
++ struct event_log event_log[1];
++};
++#ifndef __XEN__
++typedef struct xenoprof_buf xenoprof_buf_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t);
++#endif
++
++struct xenoprof_init {
++ int32_t num_events;
++ int32_t is_primary;
++ char cpu_type[XENOPROF_CPU_TYPE_SIZE];
++};
++typedef struct xenoprof_init xenoprof_init_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_init_t);
++
++struct xenoprof_get_buffer {
++ int32_t max_samples;
++ int32_t nbuf;
++ int32_t bufsize;
++ uint64_t buf_gmaddr;
++};
++typedef struct xenoprof_get_buffer xenoprof_get_buffer_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_get_buffer_t);
++
++struct xenoprof_counter {
++ uint32_t ind;
++ uint64_t count;
++ uint32_t enabled;
++ uint32_t event;
++ uint32_t hypervisor;
++ uint32_t kernel;
++ uint32_t user;
++ uint64_t unit_mask;
++};
++typedef struct xenoprof_counter xenoprof_counter_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_counter_t);
++
++typedef struct xenoprof_passive {
++ uint16_t domain_id;
++ int32_t max_samples;
++ int32_t nbuf;
++ int32_t bufsize;
++ uint64_t buf_gmaddr;
++} xenoprof_passive_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t);
++
++
++#endif /* __XEN_PUBLIC_XENOPROF_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
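For illustration (not part of this patch), a profiling consumer drains one per-VCPU buffer like this; handle_sample() stands in for handing the sample to OProfile, and synchronisation with the sampling VIRQ is omitted:

    static void drain_xenoprof_buf(struct xenoprof_buf *buf)
    {
        uint32_t head = buf->event_head;
        uint32_t tail = buf->event_tail;

        while (tail != head) {
            struct event_log *ev = &buf->event_log[tail];

            handle_sample(ev->eip, ev->mode, ev->event);
            tail = (tail + 1) % buf->event_size;   /* ring wraps at event_size */
        }
        buf->event_tail = tail;
    }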
diff --git a/trunk/2.6.22/20014_xen3-auto-xen-kconfig.patch1 b/trunk/2.6.22/20014_xen3-auto-xen-kconfig.patch1
new file mode 100644
index 0000000..98b6d55
--- /dev/null
+++ b/trunk/2.6.22/20014_xen3-auto-xen-kconfig.patch1
@@ -0,0 +1,887 @@
+Subject: xen3 xen-kconfig
+From: http://xenbits.xensource.com/xen-3.1-testing.hg (tip 15042)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+Index: head-2007-09-25/arch/i386/Kconfig
+===================================================================
+--- head-2007-09-25.orig/arch/i386/Kconfig 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/arch/i386/Kconfig 2007-09-25 14:34:51.000000000 +0200
+@@ -16,6 +16,7 @@ config X86_32
+
+ config GENERIC_TIME
+ bool
++ depends on !X86_XEN
+ default y
+
+ config CLOCKSOURCE_WATCHDOG
+@@ -131,6 +132,15 @@ config X86_PC
+ help
+ Choose this option if your computer is a standard PC or compatible.
+
++config X86_XEN
++ bool "Xen-compatible"
++ select X86_UP_APIC if !SMP && XEN_PRIVILEGED_GUEST
++ select X86_UP_IOAPIC if !SMP && XEN_PRIVILEGED_GUEST
++ select SWIOTLB
++ help
++ Choose this option if you plan to run this kernel on top of the
++ Xen Hypervisor.
++
+ config X86_ELAN
+ bool "AMD Elan"
+ help
+@@ -261,6 +271,7 @@ source "arch/i386/Kconfig.cpu"
+
+ config HPET_TIMER
+ bool "HPET Timer Support"
++ depends on !X86_XEN
+ help
+ This enables the use of the HPET for the kernel's internal timer.
+ HPET is the next generation timer replacing legacy 8254s.
+@@ -311,7 +322,7 @@ source "kernel/Kconfig.preempt"
+
+ config X86_UP_APIC
+ bool "Local APIC support on uniprocessors"
+- depends on !SMP && !(X86_VISWS || X86_VOYAGER || X86_GENERICARCH)
++ depends on !SMP && !(X86_VISWS || X86_VOYAGER || X86_GENERICARCH || XEN_UNPRIVILEGED_GUEST)
+ help
+ A local APIC (Advanced Programmable Interrupt Controller) is an
+ integrated interrupt controller in the CPU. If you have a single-CPU
+@@ -336,12 +347,12 @@ config X86_UP_IOAPIC
+
+ config X86_LOCAL_APIC
+ bool
+- depends on X86_UP_APIC || ((X86_VISWS || SMP) && !X86_VOYAGER) || X86_GENERICARCH
++ depends on X86_UP_APIC || ((X86_VISWS || SMP) && !(X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)) || X86_GENERICARCH
+ default y
+
+ config X86_IO_APIC
+ bool
+- depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER)) || X86_GENERICARCH
++ depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)) || X86_GENERICARCH
+ default y
+
+ config X86_VISWS_APIC
+@@ -351,7 +362,7 @@ config X86_VISWS_APIC
+
+ config X86_MCE
+ bool "Machine Check Exception"
+- depends on !X86_VOYAGER
++ depends on !(X86_VOYAGER || X86_XEN)
+ ---help---
+ Machine Check Exception support allows the processor to notify the
+ kernel if it detects a problem (e.g. overheating, component failure).
+@@ -450,6 +461,7 @@ config X86_REBOOTFIXUPS
+
+ config MICROCODE
+ tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
++ depends on !XEN_UNPRIVILEGED_GUEST
+ select FW_LOADER
+ ---help---
+ If you say Y here and also to "/dev file system support" in the
+@@ -473,6 +485,7 @@ config MICROCODE_OLD_INTERFACE
+
+ config X86_MSR
+ tristate "/dev/cpu/*/msr - Model-specific register support"
++ depends on !X86_XEN
+ help
+ This device gives privileged processes access to the x86
+ Model-Specific Registers (MSRs). It is a character device with
+@@ -488,6 +501,10 @@ config X86_CPUID
+ with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
+ /dev/cpu/31/cpuid.
+
++config SWIOTLB
++ bool
++ default n
++
+ source "drivers/firmware/Kconfig"
+
+ choice
+@@ -674,6 +691,7 @@ config HIGHPTE
+
+ config MATH_EMULATION
+ bool "Math emulation"
++ depends on !X86_XEN
+ ---help---
+ Linux can emulate a math coprocessor (used for floating point
+ operations) if you don't have one. 486DX and Pentium processors have
+@@ -699,6 +717,8 @@ config MATH_EMULATION
+
+ config MTRR
+ bool "MTRR (Memory Type Range Register) support"
++ depends on !XEN_UNPRIVILEGED_GUEST
++ default y if X86_XEN
+ ---help---
+ On Intel P6 family processors (Pentium Pro, Pentium II and later)
+ the Memory Type Range Registers (MTRRs) may be used to control
+@@ -733,7 +753,7 @@ config MTRR
+
+ config EFI
+ bool "Boot from EFI support"
+- depends on ACPI
++ depends on ACPI && !X86_XEN
+ default n
+ ---help---
+ This enables the kernel to boot on EFI platforms using
+@@ -751,7 +771,7 @@ config EFI
+
+ config IRQBALANCE
+ bool "Enable kernel irq balancing"
+- depends on SMP && X86_IO_APIC
++ depends on SMP && X86_IO_APIC && !X86_XEN
+ default y
+ help
+ The default yes will allow the kernel to do irq load balancing.
+@@ -785,6 +805,7 @@ source kernel/Kconfig.hz
+
+ config KEXEC
+ bool "kexec system call"
++ depends on !XEN_UNPRIVILEGED_GUEST
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+@@ -900,6 +921,7 @@ config HOTPLUG_CPU
+
+ config COMPAT_VDSO
+ bool "Compat VDSO support"
++ depends on !X86_XEN
+ default y
+ help
+ Map the VDSO to the predictable old-style address too.
+@@ -917,15 +939,17 @@ config ARCH_ENABLE_MEMORY_HOTPLUG
+ depends on HIGHMEM
+
+ menu "Power management options (ACPI, APM)"
+- depends on !X86_VOYAGER
++ depends on !(X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)
+
++if !X86_XEN
+ source kernel/power/Kconfig
++endif
+
+ source "drivers/acpi/Kconfig"
+
+ menuconfig APM
+ tristate "APM (Advanced Power Management) BIOS support"
+- depends on PM && !X86_VISWS
++ depends on PM && !(X86_VISWS || X86_XEN)
+ ---help---
+ APM is a BIOS specification for saving power using several different
+ techniques. This is mostly useful for battery powered laptops with
+@@ -1094,6 +1118,7 @@ choice
+
+ config PCI_GOBIOS
+ bool "BIOS"
++ depends on !X86_XEN
+
+ config PCI_GOMMCONFIG
+ bool "MMConfig"
+@@ -1101,6 +1126,13 @@ config PCI_GOMMCONFIG
+ config PCI_GODIRECT
+ bool "Direct"
+
++config PCI_GOXEN_FE
++ bool "Xen PCI Frontend"
++ depends on X86_XEN
++ help
++ The PCI device frontend driver allows the kernel to import arbitrary
++ PCI devices from a PCI backend to support PCI driver domains.
++
+ config PCI_GOANY
+ bool "Any"
+
+@@ -1108,7 +1140,7 @@ endchoice
+
+ config PCI_BIOS
+ bool
+- depends on !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
++ depends on !(X86_VISWS || X86_XEN) && PCI && (PCI_GOBIOS || PCI_GOANY)
+ default y
+
+ config PCI_DIRECT
+@@ -1121,6 +1153,18 @@ config PCI_MMCONFIG
+ depends on PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
+ default y
+
++config XEN_PCIDEV_FRONTEND
++ bool
++ depends on PCI && X86_XEN && (PCI_GOXEN_FE || PCI_GOANY)
++ default y
++
++config XEN_PCIDEV_FE_DEBUG
++ bool "Xen PCI Frontend Debugging"
++ depends on XEN_PCIDEV_FRONTEND
++ default n
++ help
++ Enables some debug statements within the PCI Frontend.
++
+ source "drivers/pci/pcie/Kconfig"
+
+ source "drivers/pci/Kconfig"
+@@ -1131,7 +1175,7 @@ config ISA_DMA_API
+
+ config ISA
+ bool "ISA support"
+- depends on !(X86_VOYAGER || X86_VISWS)
++ depends on !(X86_VOYAGER || X86_VISWS || X86_XEN)
+ help
+ Find out whether you have ISA slots on your motherboard. ISA is the
+ name of a bus system, i.e. the way the CPU talks to the other stuff
+@@ -1158,7 +1202,7 @@ config EISA
+ source "drivers/eisa/Kconfig"
+
+ config MCA
+- bool "MCA support" if !(X86_VISWS || X86_VOYAGER)
++ bool "MCA support" if !(X86_VISWS || X86_VOYAGER || X86_XEN)
+ default y if X86_VOYAGER
+ help
+ MicroChannel Architecture is found in some IBM PS/2 machines and
+@@ -1234,6 +1278,8 @@ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+
++source "drivers/xen/Kconfig"
++
+ source "lib/Kconfig"
+
+ #
+@@ -1259,7 +1305,7 @@ config X86_SMP
+
+ config X86_HT
+ bool
+- depends on SMP && !(X86_VISWS || X86_VOYAGER)
++ depends on SMP && !(X86_VISWS || X86_VOYAGER || X86_XEN)
+ default y
+
+ config X86_BIOS_REBOOT
+@@ -1272,6 +1318,16 @@ config X86_TRAMPOLINE
+ depends on X86_SMP || (X86_VOYAGER && SMP)
+ default y
+
++config X86_NO_TSS
++ bool
++ depends on X86_XEN
++ default y
++
++config X86_NO_IDT
++ bool
++ depends on X86_XEN
++ default y
++
+ config KTIME_SCALAR
+ bool
+ default y
+Index: head-2007-09-25/arch/i386/Kconfig.cpu
+===================================================================
+--- head-2007-09-25.orig/arch/i386/Kconfig.cpu 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/arch/i386/Kconfig.cpu 2007-09-25 14:34:51.000000000 +0200
+@@ -274,7 +274,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ bool
+- depends on M586MMX || M586TSC || M586 || M486 || M386
++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !X86_NO_IDT
+ default y
+
+ config X86_WP_WORKS_OK
+@@ -334,7 +334,7 @@ config X86_OOSTORE
+
+ config X86_TSC
+ bool
+- depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ
++ depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ && !X86_XEN
+ default y
+
+ # this should be set for all -march=.. options where the compiler
+Index: head-2007-09-25/arch/i386/Kconfig.debug
+===================================================================
+--- head-2007-09-25.orig/arch/i386/Kconfig.debug 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/arch/i386/Kconfig.debug 2007-09-25 14:34:51.000000000 +0200
+@@ -79,6 +79,7 @@ config X86_MPPARSE
+ config DOUBLEFAULT
+ default y
+ bool "Enable doublefault exception handler" if EMBEDDED
++ depends on !X86_NO_TSS
+ help
+ This option allows trapping of rare doublefault exceptions that
+ would otherwise cause a system to silently reboot. Disabling this
+Index: head-2007-09-25/arch/x86_64/Kconfig
+===================================================================
+--- head-2007-09-25.orig/arch/x86_64/Kconfig 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/arch/x86_64/Kconfig 2007-09-25 14:34:51.000000000 +0200
+@@ -183,6 +183,22 @@ config GENERIC_CPU
+
+ endchoice
+
++config X86_64_XEN
++ bool "Enable Xen compatible kernel"
++ select SWIOTLB
++ help
++	  This option will compile a kernel compatible with the Xen hypervisor.
++
++config X86_NO_TSS
++ bool
++ depends on X86_64_XEN
++ default y
++
++config X86_NO_IDT
++ bool
++ depends on X86_64_XEN
++ default y
++
+ #
+ # Define implied options from the CPU selection here
+ #
+@@ -203,6 +219,7 @@ config X86_INTERNODE_CACHE_BYTES
+
+ config X86_TSC
+ bool
++ depends on !X86_64_XEN
+ default y
+
+ config X86_GOOD_APIC
+@@ -251,7 +268,7 @@ config X86_CPUID
+
+ config X86_HT
+ bool
+- depends on SMP && !MK8
++ depends on SMP && !MK8 && !X86_64_XEN
+ default y
+
+ config MATH_EMULATION
+@@ -265,14 +282,22 @@ config EISA
+
+ config X86_IO_APIC
+ bool
++ depends !XEN_UNPRIVILEGED_GUEST
+ default y
+
++config X86_XEN_GENAPIC
++ bool
++ depends X86_64_XEN
++ default XEN_PRIVILEGED_GUEST || SMP
++
+ config X86_LOCAL_APIC
+ bool
++ depends !XEN_UNPRIVILEGED_GUEST
+ default y
+
+ config MTRR
+ bool "MTRR (Memory Type Range Register) support"
++ depends on !XEN_UNPRIVILEGED_GUEST
+ ---help---
+ On Intel P6 family processors (Pentium Pro, Pentium II and later)
+ the Memory Type Range Registers (MTRRs) may be used to control
+@@ -313,7 +338,7 @@ config SMP
+
+ config SCHED_SMT
+ bool "SMT (Hyperthreading) scheduler support"
+- depends on SMP
++ depends on SMP && !X86_64_XEN
+ default n
+ help
+ SMT scheduler support improves the CPU scheduler's decision making
+@@ -323,7 +348,7 @@ config SCHED_SMT
+
+ config SCHED_MC
+ bool "Multi-core scheduler support"
+- depends on SMP
++ depends on SMP && !X86_64_XEN
+ default y
+ help
+ Multi-core scheduler support improves the CPU scheduler's decision
+@@ -334,7 +359,7 @@ source "kernel/Kconfig.preempt"
+
+ config NUMA
+ bool "Non Uniform Memory Access (NUMA) Support"
+- depends on SMP
++ depends on SMP && !X86_64_XEN
+ help
+ Enable NUMA (Non Uniform Memory Access) support. The kernel
+ will try to allocate memory used by a CPU on the local memory
+@@ -390,7 +415,7 @@ config ARCH_DISCONTIGMEM_DEFAULT
+
+ config ARCH_SPARSEMEM_ENABLE
+ def_bool y
+- depends on (NUMA || EXPERIMENTAL)
++ depends on (NUMA || EXPERIMENTAL) && !X86_64_XEN
+
+ config ARCH_MEMORY_PROBE
+ def_bool y
+@@ -418,6 +443,7 @@ config NR_CPUS
+ int "Maximum number of CPUs (2-255)"
+ range 2 255
+ depends on SMP
++ default "16" if X86_64_XEN
+ default "8"
+ help
+ This allows you to specify the maximum number of CPUs which this
+@@ -443,6 +469,7 @@ config ARCH_ENABLE_MEMORY_HOTPLUG
+
+ config HPET_TIMER
+ bool
++ depends on !X86_64_XEN
+ default y
+ help
+ Use the IA-PC HPET (High Precision Event Timer) to manage
+@@ -463,7 +490,7 @@ config IOMMU
+ default y
+ select SWIOTLB
+ select AGP
+- depends on PCI
++ depends on PCI && !X86_64_XEN
+ help
+ Support for full DMA access of devices with 32bit memory access only
+ on systems with more than 3GB. This is usually needed for USB,
+@@ -478,7 +505,7 @@ config IOMMU
+ config CALGARY_IOMMU
+ bool "IBM Calgary IOMMU support"
+ select SWIOTLB
+- depends on PCI && EXPERIMENTAL
++ depends on PCI && !X86_64_XEN && EXPERIMENTAL
+ help
+ Support for hardware IOMMUs in IBM's xSeries x366 and x460
+ systems. Needed to run systems with more than 3GB of memory
+@@ -516,6 +543,7 @@ config SWIOTLB
+
+ config X86_MCE
+ bool "Machine check support" if EMBEDDED
++ depends on !X86_64_XEN
+ default y
+ help
+ Include a machine check error handler to report hardware errors.
+@@ -541,6 +569,7 @@ config X86_MCE_AMD
+
+ config KEXEC
+ bool "kexec system call"
++ depends on !XEN_UNPRIVILEGED_GUEST
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+@@ -691,8 +720,11 @@ config GENERIC_PENDING_IRQ
+ default y
+
+ menu "Power management options"
++ depends on !XEN_UNPRIVILEGED_GUEST
+
++if !X86_64_XEN
+ source kernel/power/Kconfig
++endif
+
+ source "drivers/acpi/Kconfig"
+
+@@ -716,6 +748,21 @@ config PCI_MMCONFIG
+ bool "Support mmconfig PCI config space access"
+ depends on PCI && ACPI
+
++config XEN_PCIDEV_FRONTEND
++ bool "Xen PCI Frontend"
++ depends on PCI && X86_64_XEN
++ default y
++ help
++ The PCI device frontend driver allows the kernel to import arbitrary
++ PCI devices from a PCI backend to support PCI driver domains.
++
++config XEN_PCIDEV_FE_DEBUG
++ bool "Xen PCI Frontend Debugging"
++ depends on XEN_PCIDEV_FRONTEND
++ default n
++ help
++ Enables some debug statements within the PCI Frontend.
++
+ source "drivers/pci/pcie/Kconfig"
+
+ source "drivers/pci/Kconfig"
+@@ -786,4 +833,6 @@ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+
++source "drivers/xen/Kconfig"
++
+ source "lib/Kconfig"
+Index: head-2007-09-25/drivers/acpi/Kconfig
+===================================================================
+--- head-2007-09-25.orig/drivers/acpi/Kconfig 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/drivers/acpi/Kconfig 2007-09-25 14:34:51.000000000 +0200
+@@ -48,7 +48,7 @@ if ACPI
+
+ config ACPI_SLEEP
+ bool "Sleep States"
+- depends on X86 && (!SMP || SUSPEND_SMP)
++ depends on X86 && (!SMP || SUSPEND_SMP) && !XEN
+ depends on PM
+ default y
+ ---help---
+@@ -327,6 +327,7 @@ config ACPI_SYSTEM
+ config X86_PM_TIMER
+ bool "Power Management Timer Support" if EMBEDDED
+ depends on X86
++ depends on !XEN
+ default y
+ help
+ The Power Management Timer is available on all ACPI-capable,
+Index: head-2007-09-25/drivers/char/tpm/Kconfig
+===================================================================
+--- head-2007-09-25.orig/drivers/char/tpm/Kconfig 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/drivers/char/tpm/Kconfig 2007-09-25 14:34:51.000000000 +0200
+@@ -59,5 +59,13 @@ config TCG_INFINEON
+ Further information on this driver and the supported hardware
+ can be found at http://www.prosec.rub.de/tpm
+
+-endmenu
++config TCG_XEN
++ tristate "XEN TPM Interface"
++ depends on TCG_TPM && XEN
++ ---help---
++ If you want to make TPM support available to a Xen user domain,
++ say Yes and it will be accessible from within Linux.
++ To compile this driver as a module, choose M here; the module
++ will be called tpm_xenu.
+
++endmenu
+Index: head-2007-09-25/drivers/firmware/Kconfig
+===================================================================
+--- head-2007-09-25.orig/drivers/firmware/Kconfig 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/drivers/firmware/Kconfig 2007-09-25 14:34:51.000000000 +0200
+@@ -8,6 +8,7 @@ menu "Firmware Drivers"
+ config EDD
+ tristate "BIOS Enhanced Disk Drive calls determine boot disk"
+ depends on !IA64
++ depends on !XEN
+ help
+ Say Y or M here if you want to enable BIOS Enhanced Disk Drive
+ Services real mode BIOS calls to determine which disk
+Index: head-2007-09-25/drivers/serial/Kconfig
+===================================================================
+--- head-2007-09-25.orig/drivers/serial/Kconfig 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/drivers/serial/Kconfig 2007-09-25 14:34:51.000000000 +0200
+@@ -12,6 +12,7 @@ menu "Serial drivers"
+ config SERIAL_8250
+ tristate "8250/16550 and compatible serial support"
+ depends on (BROKEN || !SPARC)
++ depends on !XEN_DISABLE_SERIAL
+ select SERIAL_CORE
+ ---help---
+ This selects whether you want to include the driver for the standard
+Index: head-2007-09-25/drivers/video/console/Kconfig
+===================================================================
+--- head-2007-09-25.orig/drivers/video/console/Kconfig 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/drivers/video/console/Kconfig 2007-09-25 14:34:51.000000000 +0200
+@@ -46,6 +46,7 @@ config VGACON_SOFT_SCROLLBACK_SIZE
+ config VIDEO_SELECT
+ bool "Video mode selection support"
+ depends on X86 && VGA_CONSOLE
++ depends on !XEN
+ ---help---
+ This enables support for text mode selection on kernel startup. If
+ you want to take advantage of some high-resolution text mode your
+Index: head-2007-09-25/drivers/xen/Kconfig
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2007-09-25/drivers/xen/Kconfig 2007-09-25 14:34:51.000000000 +0200
+@@ -0,0 +1,260 @@
++#
++# This Kconfig describe xen options
++#
++
++mainmenu "Xen Configuration"
++
++config XEN
++ bool
++ default y if X86_XEN || X86_64_XEN
++ help
++ This is the Linux Xen port.
++
++if XEN
++config XEN_INTERFACE_VERSION
++ hex
++ default 0x00030205
++
++menu "XEN"
++
++config XEN_PRIVILEGED_GUEST
++ bool "Privileged Guest (domain 0)"
++ depends XEN
++ default n
++ help
++ Support for privileged operation (domain 0)
++
++config XEN_UNPRIVILEGED_GUEST
++ bool
++ default !XEN_PRIVILEGED_GUEST
++
++config XEN_PRIVCMD
++ bool
++ depends on PROC_FS
++ default y
++
++config XEN_XENBUS_DEV
++ bool
++ depends on PROC_FS
++ default y
++
++config XEN_BACKEND
++ tristate "Backend driver support"
++ default y
++ help
++ Support for backend device drivers that provide I/O services
++ to other virtual machines.
++
++config XEN_BLKDEV_BACKEND
++ tristate "Block-device backend driver"
++ depends on XEN_BACKEND
++ default y
++ help
++ The block-device backend driver allows the kernel to export its
++ block devices to other guests via a high-performance shared-memory
++ interface.
++
++config XEN_BLKDEV_TAP
++ tristate "Block-device tap backend driver"
++ depends on XEN_BACKEND
++ default XEN_PRIVILEGED_GUEST
++ help
++ The block tap driver is an alternative to the block back driver
++ and allows VM block requests to be redirected to userspace through
++ a device interface. The tap allows user-space development of
++ high-performance block backends, where disk images may be implemented
++ as files, in memory, or on other hosts across the network. This
++ driver can safely coexist with the existing blockback driver.
++
++config XEN_NETDEV_BACKEND
++ tristate "Network-device backend driver"
++ depends on XEN_BACKEND && NET
++ default y
++ help
++ The network-device backend driver allows the kernel to export its
++ network devices to other guests via a high-performance shared-memory
++ interface.
++
++config XEN_NETDEV_PIPELINED_TRANSMITTER
++ bool "Pipelined transmitter (DANGEROUS)"
++ depends on XEN_NETDEV_BACKEND
++ default n
++ help
++ If the net backend is a dumb domain, such as a transparent Ethernet
++ bridge with no local IP interface, it is safe to say Y here to get
++ slightly lower network overhead.
++ If the backend has a local IP interface; or may be doing smart things
++ like reassembling packets to perform firewall filtering; or if you
++ are unsure; or if you experience network hangs when this option is
++ enabled; then you must say N here.
++
++config XEN_NETDEV_LOOPBACK
++ tristate "Network-device loopback driver"
++ depends on XEN_NETDEV_BACKEND
++ default y
++ help
++ A two-interface loopback device to emulate a local netfront-netback
++ connection.
++
++config XEN_PCIDEV_BACKEND
++ tristate "PCI-device backend driver"
++ depends on PCI && XEN_BACKEND
++ default XEN_PRIVILEGED_GUEST
++ help
++ The PCI device backend driver allows the kernel to export arbitrary
++ PCI devices to other guests. If you select this to be a module, you
++ will need to make sure no other driver has bound to the device(s)
++ you want to make visible to other guests.
++
++choice
++ prompt "PCI Backend Mode"
++ depends on XEN_PCIDEV_BACKEND
++ default XEN_PCIDEV_BACKEND_VPCI
++
++config XEN_PCIDEV_BACKEND_VPCI
++ bool "Virtual PCI"
++ ---help---
++ This PCI Backend hides the true PCI topology and makes the frontend
++ think there is a single PCI bus with only the exported devices on it.
++ For example, a device at 03:05.0 will be re-assigned to 00:00.0. A
++ second device at 02:1a.1 will be re-assigned to 00:01.1.
++
++config XEN_PCIDEV_BACKEND_PASS
++ bool "Passthrough"
++ ---help---
++ This PCI Backend provides a real view of the PCI topology to the
++ frontend (for example, a device at 06:01.b will still appear at
++ 06:01.b to the frontend). This is similar to how Xen 2.0.x exposed
++ PCI devices to its driver domains. This may be required for drivers
++	  which depend on finding their hardware in certain bus/slot
++ locations.
++
++config XEN_PCIDEV_BACKEND_SLOT
++ bool "Slot"
++ ---help---
++ This PCI Backend hides the true PCI topology and makes the frontend
++ think there is a single PCI bus with only the exported devices on it.
++ Contrary to the virtual PCI backend, a function becomes a new slot.
++ For example, a device at 03:05.2 will be re-assigned to 00:00.0. A
++ second device at 02:1a.1 will be re-assigned to 00:01.0.
++
++endchoice
++
++config XEN_PCIDEV_BE_DEBUG
++ bool "PCI Backend Debugging"
++ depends on XEN_PCIDEV_BACKEND
++ default n
++
++config XEN_TPMDEV_BACKEND
++ tristate "TPM-device backend driver"
++ depends on XEN_BACKEND
++ default n
++ help
++ The TPM-device backend driver
++
++config XEN_BLKDEV_FRONTEND
++ tristate "Block-device frontend driver"
++ depends on XEN
++ default y
++ help
++ The block-device frontend driver allows the kernel to access block
++	  devices mounted within another guest OS. Unless you are building a
++	  dedicated device-driver domain or your master control domain
++	  (domain 0), you almost certainly want to say Y here.
++
++config XEN_NETDEV_FRONTEND
++ tristate "Network-device frontend driver"
++ depends on XEN && NET
++ default y
++ help
++ The network-device frontend driver allows the kernel to access
++	  network interfaces within another guest OS. Unless you are building a
++	  dedicated device-driver domain or your master control domain
++	  (domain 0), you almost certainly want to say Y here.
++
++config XEN_FRAMEBUFFER
++ tristate "Framebuffer-device frontend driver"
++ depends on XEN && FB
++ select FB_CFB_FILLRECT
++ select FB_CFB_COPYAREA
++ select FB_CFB_IMAGEBLIT
++ default y
++ help
++ The framebuffer-device frontend drivers allows the kernel to create a
++ virtual framebuffer. This framebuffer can be viewed in another
++ domain. Unless this domain has access to a real video card, you
++ probably want to say Y here.
++
++config XEN_KEYBOARD
++ tristate "Keyboard-device frontend driver"
++ depends on XEN && XEN_FRAMEBUFFER && INPUT
++ default y
++ help
++ The keyboard-device frontend driver allows the kernel to create a
++ virtual keyboard. This keyboard can then be driven by another
++ domain. If you've said Y to CONFIG_XEN_FRAMEBUFFER, you probably
++ want to say Y here.
++
++config XEN_SCRUB_PAGES
++ bool "Scrub memory before freeing it to Xen"
++ default y
++ help
++ Erase memory contents before freeing it back to Xen's global
++ pool. This ensures that any secrets contained within that
++ memory (e.g., private keys) cannot be found by other guests that
++ may be running on the machine. Most people will want to say Y here.
++ If security is not a concern then you may increase performance by
++ saying N.
++
++config XEN_DISABLE_SERIAL
++ bool "Disable serial port drivers"
++ default y
++ help
++ Disable serial port drivers, allowing the Xen console driver
++ to provide a serial console at ttyS0.
++
++config XEN_SYSFS
++ tristate "Export Xen attributes in sysfs"
++ depends on SYSFS
++ default y
++ help
++ Xen hypervisor attributes will show up under /sys/hypervisor/.
++
++choice
++ prompt "Xen version compatibility"
++ default XEN_COMPAT_030002_AND_LATER
++
++ config XEN_COMPAT_030002_AND_LATER
++ bool "3.0.2 and later"
++
++ config XEN_COMPAT_030004_AND_LATER
++ bool "3.0.4 and later"
++
++ config XEN_COMPAT_LATEST_ONLY
++ bool "no compatibility code"
++
++endchoice
++
++config XEN_COMPAT
++ hex
++ default 0xffffff if XEN_COMPAT_LATEST_ONLY
++ default 0x030004 if XEN_COMPAT_030004_AND_LATER
++ default 0x030002 if XEN_COMPAT_030002_AND_LATER
++ default 0
++
++endmenu
++
++config HAVE_IRQ_IGNORE_UNHANDLED
++ bool
++ default y
++
++config NO_IDLE_HZ
++ bool
++ default y
++
++config XEN_SMPBOOT
++ bool
++ default y
++ depends on SMP
++
++endif
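The XEN_COMPAT value chosen above is consumed at compile time via autoconf.h; as a hedged illustration (the cutoff and the fallback macro below are made up, not taken from this patch), compatibility shims are gated like this:

    /* Sketch only: CONFIG_XEN_COMPAT comes from the Kconfig choice above. */
    #if CONFIG_XEN_COMPAT < 0x030004
    /* Host may predate the 3.0.4 interface: keep the older fallback path. */
    #define NEED_PRE_030004_FALLBACK 1
    #else
    #define NEED_PRE_030004_FALLBACK 0
    #endif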
+Index: head-2007-09-25/fs/Kconfig
+===================================================================
+--- head-2007-09-25.orig/fs/Kconfig 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/fs/Kconfig 2007-09-25 14:34:51.000000000 +0200
+@@ -1014,6 +1014,7 @@ config TMPFS_POSIX_ACL
+ config HUGETLBFS
+ bool "HugeTLB file system support"
+ depends on X86 || IA64 || PPC64 || SPARC64 || SUPERH || BROKEN
++ depends on !XEN
+ help
+ hugetlbfs is a filesystem backing for HugeTLB pages, based on
+ ramfs. For architectures that support it, say Y here and read
+Index: head-2007-09-25/kernel/Kconfig.preempt
+===================================================================
+--- head-2007-09-25.orig/kernel/Kconfig.preempt 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/kernel/Kconfig.preempt 2007-09-25 14:34:51.000000000 +0200
+@@ -35,6 +35,7 @@ config PREEMPT_VOLUNTARY
+
+ config PREEMPT
+ bool "Preemptible Kernel (Low-Latency Desktop)"
++ depends on !XEN
+ help
+ This option reduces the latency of the kernel by making
+ all kernel code (that is not executing in a critical section)
+Index: head-2007-09-25/mm/Kconfig
+===================================================================
+--- head-2007-09-25.orig/mm/Kconfig 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/mm/Kconfig 2007-09-25 14:34:51.000000000 +0200
+@@ -132,11 +132,14 @@ config MEMORY_HOTPLUG_SPARSE
+ # Default to 4 for wider testing, though 8 might be more appropriate.
+ # ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
+ # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
++# XEN on x86 architecture uses the mapping field on pagetable pages to store a
++# pointer to the destructor. This conflicts with pte_lock_deinit().
+ #
+ config SPLIT_PTLOCK_CPUS
+ int
+ default "4096" if ARM && !CPU_CACHE_VIPT
+ default "4096" if PARISC && !PA20
++ default "4096" if X86_XEN || X86_64_XEN
+ default "4"
+
+ #
diff --git a/trunk/2.6.22/20015_xen3-auto-common.patch1 b/trunk/2.6.22/20015_xen3-auto-common.patch1
new file mode 100644
index 0000000..94a9b99
--- /dev/null
+++ b/trunk/2.6.22/20015_xen3-auto-common.patch1
@@ -0,0 +1,2101 @@
+Subject: xen3 common
+From: http://xenbits.xensource.com/xen-3.1-testing.hg (tip 15042)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+Index: head-2007-09-25/drivers/char/mem.c
+===================================================================
+--- head-2007-09-25.orig/drivers/char/mem.c 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/drivers/char/mem.c 2007-09-25 14:35:02.000000000 +0200
+@@ -101,6 +101,7 @@ static inline int valid_mmap_phys_addr_r
+ }
+ #endif
+
++#ifndef ARCH_HAS_DEV_MEM
+ /*
+ * This funcion reads the *physical* memory. The f_pos points directly to the
+ * memory location.
+@@ -223,6 +224,7 @@ static ssize_t write_mem(struct file * f
+ *ppos += written;
+ return written;
+ }
++#endif
+
+ #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
+ static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+@@ -809,6 +811,7 @@ static int open_port(struct inode * inod
+ #define open_kmem open_mem
+ #define open_oldmem open_mem
+
++#ifndef ARCH_HAS_DEV_MEM
+ static const struct file_operations mem_fops = {
+ .llseek = memory_lseek,
+ .read = read_mem,
+@@ -817,6 +820,9 @@ static const struct file_operations mem_
+ .open = open_mem,
+ .get_unmapped_area = get_unmapped_area_mem,
+ };
++#else
++extern const struct file_operations mem_fops;
++#endif
+
+ static const struct file_operations kmem_fops = {
+ .llseek = memory_lseek,
+Index: head-2007-09-25/drivers/char/tpm/Makefile
+===================================================================
+--- head-2007-09-25.orig/drivers/char/tpm/Makefile 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/drivers/char/tpm/Makefile 2007-09-25 14:35:02.000000000 +0200
+@@ -9,3 +9,5 @@ obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+ obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
+ obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
+ obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
++obj-$(CONFIG_TCG_XEN) += tpm_xenu.o
++tpm_xenu-y = tpm_xen.o tpm_vtpm.o
+Index: head-2007-09-25/drivers/char/tpm/tpm.h
+===================================================================
+--- head-2007-09-25.orig/drivers/char/tpm/tpm.h 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/drivers/char/tpm/tpm.h 2007-09-25 14:35:02.000000000 +0200
+@@ -106,6 +106,9 @@ struct tpm_chip {
+ struct dentry **bios_dir;
+
+ struct list_head list;
++#ifdef CONFIG_XEN
++ void *priv;
++#endif
+ };
+
+ #define to_tpm_chip(n) container_of(n, struct tpm_chip, vendor)
+@@ -122,6 +125,18 @@ static inline void tpm_write_index(int b
+ outb(value & 0xFF, base+1);
+ }
+
++#ifdef CONFIG_XEN
++static inline void *chip_get_private(const struct tpm_chip *chip)
++{
++ return chip->priv;
++}
++
++static inline void chip_set_private(struct tpm_chip *chip, void *priv)
++{
++ chip->priv = priv;
++}
++#endif
++
+ extern void tpm_get_timeouts(struct tpm_chip *);
+ extern void tpm_gen_interrupt(struct tpm_chip *);
+ extern void tpm_continue_selftest(struct tpm_chip *);
+Index: head-2007-09-25/drivers/char/tpm/tpm_vtpm.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2007-09-25/drivers/char/tpm/tpm_vtpm.c 2007-09-25 14:35:02.000000000 +0200
+@@ -0,0 +1,542 @@
++/*
++ * Copyright (C) 2006 IBM Corporation
++ *
++ * Authors:
++ * Stefan Berger <stefanb@us.ibm.com>
++ *
++ * Generic device driver part for device drivers in a virtualized
++ * environment.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ *
++ */
++
++#include <asm/uaccess.h>
++#include <linux/list.h>
++#include <linux/device.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include "tpm.h"
++#include "tpm_vtpm.h"
++
++/* read status bits */
++enum {
++ STATUS_BUSY = 0x01,
++ STATUS_DATA_AVAIL = 0x02,
++ STATUS_READY = 0x04
++};
++
++struct transmission {
++ struct list_head next;
++
++ unsigned char *request;
++ size_t request_len;
++ size_t request_buflen;
++
++ unsigned char *response;
++ size_t response_len;
++ size_t response_buflen;
++
++ unsigned int flags;
++};
++
++enum {
++ TRANSMISSION_FLAG_WAS_QUEUED = 0x1
++};
++
++
++enum {
++ DATAEX_FLAG_QUEUED_ONLY = 0x1
++};
++
++
++/* local variables */
++
++/* local function prototypes */
++static int _vtpm_send_queued(struct tpm_chip *chip);
++
++
++/* =============================================================
++ * Some utility functions
++ * =============================================================
++ */
++static void vtpm_state_init(struct vtpm_state *vtpms)
++{
++ vtpms->current_request = NULL;
++ spin_lock_init(&vtpms->req_list_lock);
++ init_waitqueue_head(&vtpms->req_wait_queue);
++ INIT_LIST_HEAD(&vtpms->queued_requests);
++
++ vtpms->current_response = NULL;
++ spin_lock_init(&vtpms->resp_list_lock);
++ init_waitqueue_head(&vtpms->resp_wait_queue);
++
++ vtpms->disconnect_time = jiffies;
++}
++
++
++static inline struct transmission *transmission_alloc(void)
++{
++ return kzalloc(sizeof(struct transmission), GFP_ATOMIC);
++}
++
++static unsigned char *
++transmission_set_req_buffer(struct transmission *t,
++ unsigned char *buffer, size_t len)
++{
++ if (t->request_buflen < len) {
++ kfree(t->request);
++ t->request = kmalloc(len, GFP_KERNEL);
++ if (!t->request) {
++ t->request_buflen = 0;
++ return NULL;
++ }
++ t->request_buflen = len;
++ }
++
++ memcpy(t->request, buffer, len);
++ t->request_len = len;
++
++ return t->request;
++}
++
++static unsigned char *
++transmission_set_res_buffer(struct transmission *t,
++ const unsigned char *buffer, size_t len)
++{
++ if (t->response_buflen < len) {
++ kfree(t->response);
++ t->response = kmalloc(len, GFP_ATOMIC);
++ if (!t->response) {
++ t->response_buflen = 0;
++ return NULL;
++ }
++ t->response_buflen = len;
++ }
++
++ memcpy(t->response, buffer, len);
++ t->response_len = len;
++
++ return t->response;
++}
++
++static inline void transmission_free(struct transmission *t)
++{
++ kfree(t->request);
++ kfree(t->response);
++ kfree(t);
++}
++
++/* =============================================================
++ * Interface with the lower layer driver
++ * =============================================================
++ */
++/*
++ * Lower layer uses this function to make a response available.
++ */
++int vtpm_vd_recv(const struct tpm_chip *chip,
++ const unsigned char *buffer, size_t count,
++ void *ptr)
++{
++ unsigned long flags;
++ int ret_size = 0;
++ struct transmission *t;
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ /*
++ * The list with requests must contain one request
++ * only and the element there must be the one that
++ * was passed to me from the front-end.
++ */
++ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++ if (vtpms->current_request != ptr) {
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ return 0;
++ }
++
++ if ((t = vtpms->current_request)) {
++ transmission_free(t);
++ vtpms->current_request = NULL;
++ }
++
++ t = transmission_alloc();
++ if (t) {
++ if (!transmission_set_res_buffer(t, buffer, count)) {
++ transmission_free(t);
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ return -ENOMEM;
++ }
++ ret_size = count;
++ vtpms->current_response = t;
++ wake_up_interruptible(&vtpms->resp_wait_queue);
++ }
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++
++ return ret_size;
++}
++
++
++/*
++ * Lower layer indicates its status (connected/disconnected)
++ */
++void vtpm_vd_status(const struct tpm_chip *chip, u8 vd_status)
++{
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ vtpms->vd_status = vd_status;
++ if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
++ vtpms->disconnect_time = jiffies;
++ }
++}
++
++/* =============================================================
++ * Interface with the generic TPM driver
++ * =============================================================
++ */
++static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
++{
++ int rc = 0;
++ unsigned long flags;
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ /*
++	 * Check if the previous operation only queued the command.
++ * In this case there won't be a response, so I just
++ * return from here and reset that flag. In any other
++ * case I should receive a response from the back-end.
++ */
++ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++ if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
++ vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY;
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ /*
++ * The first few commands (measurements) must be
++ * queued since it might not be possible to talk to the
++ * TPM, yet.
++ * Return a response of up to 30 '0's.
++ */
++
++ count = min_t(size_t, count, 30);
++ memset(buf, 0x0, count);
++ return count;
++ }
++ /*
++ * Check whether something is in the responselist and if
++ * there's nothing in the list wait for something to appear.
++ */
++
++ if (!vtpms->current_response) {
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ interruptible_sleep_on_timeout(&vtpms->resp_wait_queue,
++ 1000);
++ spin_lock_irqsave(&vtpms->resp_list_lock ,flags);
++ }
++
++ if (vtpms->current_response) {
++ struct transmission *t = vtpms->current_response;
++ vtpms->current_response = NULL;
++ rc = min(count, t->response_len);
++ memcpy(buf, t->response, rc);
++ transmission_free(t);
++ }
++
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ return rc;
++}
++
++static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
++{
++ int rc = 0;
++ unsigned long flags;
++ struct transmission *t = transmission_alloc();
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ if (!t)
++ return -ENOMEM;
++ /*
++ * If there's a current request, it must be the
++ * previous request that has timed out.
++ */
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++ if (vtpms->current_request != NULL) {
++ printk("WARNING: Sending although there is a request outstanding.\n"
++ " Previous request must have timed out.\n");
++ transmission_free(vtpms->current_request);
++ vtpms->current_request = NULL;
++ }
++ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++
++ /*
++ * Queue the packet if the driver below is not
++ * ready, yet, or there is any packet already
++ * in the queue.
++ * If the driver below is ready, unqueue all
++ * packets first before sending our current
++ * packet.
++ * For each unqueued packet, except for the
++ * last (=current) packet, call the function
++ * tpm_xen_recv to wait for the response to come
++ * back.
++ */
++ if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
++ if (time_after(jiffies,
++ vtpms->disconnect_time + HZ * 10)) {
++ rc = -ENOENT;
++ } else {
++ goto queue_it;
++ }
++ } else {
++ /*
++ * Send all queued packets.
++ */
++ if (_vtpm_send_queued(chip) == 0) {
++
++ vtpms->current_request = t;
++
++ rc = vtpm_vd_send(vtpms->tpm_private,
++ buf,
++ count,
++ t);
++ /*
++ * The generic TPM driver will call
++ * the function to receive the response.
++ */
++ if (rc < 0) {
++ vtpms->current_request = NULL;
++ goto queue_it;
++ }
++ } else {
++queue_it:
++ if (!transmission_set_req_buffer(t, buf, count)) {
++ transmission_free(t);
++ rc = -ENOMEM;
++ goto exit;
++ }
++ /*
++			 * An error occurred. Don't even try
++ * to send the current request. Just
++ * queue it.
++ */
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++ vtpms->flags |= DATAEX_FLAG_QUEUED_ONLY;
++ list_add_tail(&t->next, &vtpms->queued_requests);
++ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++ }
++ }
++
++exit:
++ return rc;
++}
++
++
++/*
++ * Send all queued requests.
++ */
++static int _vtpm_send_queued(struct tpm_chip *chip)
++{
++ int rc;
++ int error = 0;
++ long flags;
++ unsigned char buffer[1];
++ struct vtpm_state *vtpms;
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++
++ while (!list_empty(&vtpms->queued_requests)) {
++ /*
++ * Need to dequeue them.
++ * Read the result into a dummy buffer.
++ */
++ struct transmission *qt = (struct transmission *)
++ vtpms->queued_requests.next;
++ list_del(&qt->next);
++ vtpms->current_request = qt;
++ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++
++ rc = vtpm_vd_send(vtpms->tpm_private,
++ qt->request,
++ qt->request_len,
++ qt);
++
++ if (rc < 0) {
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++ if ((qt = vtpms->current_request) != NULL) {
++ /*
++ * requeue it at the beginning
++ * of the list
++ */
++ list_add(&qt->next,
++ &vtpms->queued_requests);
++ }
++ vtpms->current_request = NULL;
++ error = 1;
++ break;
++ }
++ /*
++ * After this point qt is not valid anymore!
++ * It is freed when the front-end is delivering
++ * the data by calling tpm_recv
++ */
++ /*
++ * Receive response into provided dummy buffer
++ */
++ rc = vtpm_recv(chip, buffer, sizeof(buffer));
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++ }
++
++ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++
++ return error;
++}
++
++static void vtpm_cancel(struct tpm_chip *chip)
++{
++ unsigned long flags;
++ struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ spin_lock_irqsave(&vtpms->resp_list_lock,flags);
++
++ if (!vtpms->current_response && vtpms->current_request) {
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ interruptible_sleep_on(&vtpms->resp_wait_queue);
++ spin_lock_irqsave(&vtpms->resp_list_lock,flags);
++ }
++
++ if (vtpms->current_response) {
++ struct transmission *t = vtpms->current_response;
++ vtpms->current_response = NULL;
++ transmission_free(t);
++ }
++
++ spin_unlock_irqrestore(&vtpms->resp_list_lock,flags);
++}
++
++static u8 vtpm_status(struct tpm_chip *chip)
++{
++ u8 rc = 0;
++ unsigned long flags;
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++ /*
++ * Data are available if:
++ * - there's a current response
++ * - the last packet was queued only (this is fake, but necessary to
++	 *   get the generic TPM layer to call the receive function).
++ */
++ if (vtpms->current_response ||
++ 0 != (vtpms->flags & DATAEX_FLAG_QUEUED_ONLY)) {
++ rc = STATUS_DATA_AVAIL;
++ } else if (!vtpms->current_response && !vtpms->current_request) {
++ rc = STATUS_READY;
++ }
++
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ return rc;
++}
++
++static struct file_operations vtpm_ops = {
++ .owner = THIS_MODULE,
++ .llseek = no_llseek,
++ .open = tpm_open,
++ .read = tpm_read,
++ .write = tpm_write,
++ .release = tpm_release,
++};
++
++static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
++static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
++static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
++static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
++static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
++static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
++ NULL);
++static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
++static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
++
++static struct attribute *vtpm_attrs[] = {
++ &dev_attr_pubek.attr,
++ &dev_attr_pcrs.attr,
++ &dev_attr_enabled.attr,
++ &dev_attr_active.attr,
++ &dev_attr_owned.attr,
++ &dev_attr_temp_deactivated.attr,
++ &dev_attr_caps.attr,
++ &dev_attr_cancel.attr,
++ NULL,
++};
++
++static struct attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs };
++
++#define TPM_LONG_TIMEOUT (10 * 60 * HZ)
++
++static struct tpm_vendor_specific tpm_vtpm = {
++ .recv = vtpm_recv,
++ .send = vtpm_send,
++ .cancel = vtpm_cancel,
++ .status = vtpm_status,
++ .req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
++ .req_complete_val = STATUS_DATA_AVAIL,
++ .req_canceled = STATUS_READY,
++ .attr_group = &vtpm_attr_grp,
++ .miscdev = {
++ .fops = &vtpm_ops,
++ },
++ .duration = {
++ TPM_LONG_TIMEOUT,
++ TPM_LONG_TIMEOUT,
++ TPM_LONG_TIMEOUT,
++ },
++};
++
++struct tpm_chip *init_vtpm(struct device *dev,
++ struct tpm_private *tp)
++{
++ long rc;
++ struct tpm_chip *chip;
++ struct vtpm_state *vtpms;
++
++ vtpms = kzalloc(sizeof(struct vtpm_state), GFP_KERNEL);
++ if (!vtpms)
++ return ERR_PTR(-ENOMEM);
++
++ vtpm_state_init(vtpms);
++ vtpms->tpm_private = tp;
++
++ chip = tpm_register_hardware(dev, &tpm_vtpm);
++ if (!chip) {
++ rc = -ENODEV;
++ goto err_free_mem;
++ }
++
++ chip_set_private(chip, vtpms);
++
++ return chip;
++
++err_free_mem:
++ kfree(vtpms);
++
++ return ERR_PTR(rc);
++}
++
++void cleanup_vtpm(struct device *dev)
++{
++ struct tpm_chip *chip = dev_get_drvdata(dev);
++ struct vtpm_state *vtpms = (struct vtpm_state*)chip_get_private(chip);
++ tpm_remove_hardware(dev);
++ kfree(vtpms);
++}
+Index: head-2007-09-25/drivers/char/tpm/tpm_vtpm.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2007-09-25/drivers/char/tpm/tpm_vtpm.h 2007-09-25 14:35:02.000000000 +0200
+@@ -0,0 +1,55 @@
++#ifndef TPM_VTPM_H
++#define TPM_VTPM_H
++
++struct tpm_chip;
++struct tpm_private;
++
++struct vtpm_state {
++ struct transmission *current_request;
++ spinlock_t req_list_lock;
++ wait_queue_head_t req_wait_queue;
++
++ struct list_head queued_requests;
++
++ struct transmission *current_response;
++ spinlock_t resp_list_lock;
++ wait_queue_head_t resp_wait_queue; // processes waiting for responses
++
++ u8 vd_status;
++ u8 flags;
++
++ unsigned long disconnect_time;
++
++ /*
++ * The following is a private structure of the underlying
++ * driver. It is passed as parameter in the send function.
++ */
++ struct tpm_private *tpm_private;
++};
++
++
++enum vdev_status {
++ TPM_VD_STATUS_DISCONNECTED = 0x0,
++ TPM_VD_STATUS_CONNECTED = 0x1
++};
++
++/* this function is called from tpm_vtpm.c */
++int vtpm_vd_send(struct tpm_private * tp,
++ const u8 * buf, size_t count, void *ptr);
++
++/* these functions are offered by tpm_vtpm.c */
++struct tpm_chip *init_vtpm(struct device *,
++ struct tpm_private *);
++void cleanup_vtpm(struct device *);
++int vtpm_vd_recv(const struct tpm_chip* chip,
++ const unsigned char *buffer, size_t count, void *ptr);
++void vtpm_vd_status(const struct tpm_chip *, u8 status);
++
++static inline struct tpm_private *tpm_private_from_dev(struct device *dev)
++{
++ struct tpm_chip *chip = dev_get_drvdata(dev);
++ struct vtpm_state *vtpms = chip_get_private(chip);
++ return vtpms->tpm_private;
++}
++
++#endif
+Index: head-2007-09-25/drivers/char/tpm/tpm_xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2007-09-25/drivers/char/tpm/tpm_xen.c 2007-09-25 14:35:02.000000000 +0200
+@@ -0,0 +1,720 @@
++/*
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb@us.ibm.com
++ * Grant table support: Mahadevan Gomathisankaran
++ *
++ * This code has been derived from drivers/xen/netfront/netfront.c
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/errno.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
++#include <linux/mutex.h>
++#include <asm/uaccess.h>
++#include <xen/evtchn.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/tpmif.h>
++#include <xen/gnttab.h>
++#include <xen/xenbus.h>
++#include "tpm.h"
++#include "tpm_vtpm.h"
++
++#undef DEBUG
++
++/* local structures */
++struct tpm_private {
++ struct tpm_chip *chip;
++
++ tpmif_tx_interface_t *tx;
++ atomic_t refcnt;
++ unsigned int irq;
++ u8 is_connected;
++ u8 is_suspended;
++
++ spinlock_t tx_lock;
++
++ struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
++
++ atomic_t tx_busy;
++ void *tx_remember;
++
++ domid_t backend_id;
++ wait_queue_head_t wait_q;
++
++ struct xenbus_device *dev;
++ int ring_ref;
++};
++
++struct tx_buffer {
++ unsigned int size; // available space in data
++ unsigned int len; // used space in data
++ unsigned char *data; // pointer to a page
++};
++
++
++/* locally visible variables */
++static grant_ref_t gref_head;
++static struct tpm_private *my_priv;
++
++/* local function prototypes */
++static irqreturn_t tpmif_int(int irq,
++ void *tpm_priv,
++ struct pt_regs *ptregs);
++static void tpmif_rx_action(unsigned long unused);
++static int tpmif_connect(struct xenbus_device *dev,
++ struct tpm_private *tp,
++ domid_t domid);
++static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
++static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
++static void tpmif_free_tx_buffers(struct tpm_private *tp);
++static void tpmif_set_connected_state(struct tpm_private *tp,
++ u8 newstate);
++static int tpm_xmit(struct tpm_private *tp,
++ const u8 * buf, size_t count, int userbuffer,
++ void *remember);
++static void destroy_tpmring(struct tpm_private *tp);
++void __exit tpmif_exit(void);
++
++#define DPRINTK(fmt, args...) \
++ pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
++
++#define GRANT_INVALID_REF 0
++
++
++static inline int
++tx_buffer_copy(struct tx_buffer *txb, const u8 *src, int len,
++ int isuserbuffer)
++{
++ int copied = len;
++
++ if (len > txb->size)
++ copied = txb->size;
++ if (isuserbuffer) {
++ if (copy_from_user(txb->data, src, copied))
++ return -EFAULT;
++ } else {
++ memcpy(txb->data, src, copied);
++ }
++ txb->len = len;
++ return copied;
++}
++
++static inline struct tx_buffer *tx_buffer_alloc(void)
++{
++ struct tx_buffer *txb;
++
++ txb = kzalloc(sizeof(struct tx_buffer), GFP_KERNEL);
++ if (!txb)
++ return NULL;
++
++ txb->len = 0;
++ txb->size = PAGE_SIZE;
++ txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
++ if (txb->data == NULL) {
++ kfree(txb);
++ txb = NULL;
++ }
++
++ return txb;
++}
++
++
++static inline void tx_buffer_free(struct tx_buffer *txb)
++{
++ if (txb) {
++ free_page((long)txb->data);
++ kfree(txb);
++ }
++}
++
++/**************************************************************
++ Utility function for the tpm_private structure
++**************************************************************/
++static void tpm_private_init(struct tpm_private *tp)
++{
++ spin_lock_init(&tp->tx_lock);
++ init_waitqueue_head(&tp->wait_q);
++ atomic_set(&tp->refcnt, 1);
++}
++
++static void tpm_private_put(void)
++{
++ if (!atomic_dec_and_test(&my_priv->refcnt))
++ return;
++
++ tpmif_free_tx_buffers(my_priv);
++ kfree(my_priv);
++ my_priv = NULL;
++}
++
++static struct tpm_private *tpm_private_get(void)
++{
++ int err;
++
++ if (my_priv) {
++ atomic_inc(&my_priv->refcnt);
++ return my_priv;
++ }
++
++ my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
++ if (!my_priv)
++ return NULL;
++
++ tpm_private_init(my_priv);
++ err = tpmif_allocate_tx_buffers(my_priv);
++ if (err < 0)
++ tpm_private_put();
++
++ return my_priv;
++}
++
++/**************************************************************
++
++ The interface to let the tpm plugin register its callback
++ function and send data to another partition using this module
++
++**************************************************************/
++
++static DEFINE_MUTEX(suspend_lock);
++/*
++ * Send data via this module by calling this function
++ */
++int vtpm_vd_send(struct tpm_private *tp,
++ const u8 * buf, size_t count, void *ptr)
++{
++ int sent;
++
++ mutex_lock(&suspend_lock);
++ sent = tpm_xmit(tp, buf, count, 0, ptr);
++ mutex_unlock(&suspend_lock);
++
++ return sent;
++}
++
++/**************************************************************
++ XENBUS support code
++**************************************************************/
++
++static int setup_tpmring(struct xenbus_device *dev,
++ struct tpm_private *tp)
++{
++ tpmif_tx_interface_t *sring;
++ int err;
++
++ tp->ring_ref = GRANT_INVALID_REF;
++
++ sring = (void *)__get_free_page(GFP_KERNEL);
++ if (!sring) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
++ return -ENOMEM;
++ }
++ tp->tx = sring;
++
++ err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
++ if (err < 0) {
++ free_page((unsigned long)sring);
++ tp->tx = NULL;
++ xenbus_dev_fatal(dev, err, "allocating grant reference");
++ goto fail;
++ }
++ tp->ring_ref = err;
++
++ err = tpmif_connect(dev, tp, dev->otherend_id);
++ if (err)
++ goto fail;
++
++ return 0;
++fail:
++ destroy_tpmring(tp);
++ return err;
++}
++
++
++static void destroy_tpmring(struct tpm_private *tp)
++{
++ tpmif_set_connected_state(tp, 0);
++
++ if (tp->ring_ref != GRANT_INVALID_REF) {
++ gnttab_end_foreign_access(tp->ring_ref, 0,
++ (unsigned long)tp->tx);
++ tp->ring_ref = GRANT_INVALID_REF;
++ tp->tx = NULL;
++ }
++
++ if (tp->irq)
++ unbind_from_irqhandler(tp->irq, tp);
++
++ tp->irq = 0;
++}
++
++
++static int talk_to_backend(struct xenbus_device *dev,
++ struct tpm_private *tp)
++{
++ const char *message = NULL;
++ int err;
++ struct xenbus_transaction xbt;
++
++ err = setup_tpmring(dev, tp);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "setting up ring");
++ goto out;
++ }
++
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ goto destroy_tpmring;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename,
++ "ring-ref","%u", tp->ring_ref);
++ if (err) {
++ message = "writing ring-ref";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++ irq_to_evtchn_port(tp->irq));
++ if (err) {
++ message = "writing event-channel";
++ goto abort_transaction;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN)
++ goto again;
++ if (err) {
++ xenbus_dev_fatal(dev, err, "completing transaction");
++ goto destroy_tpmring;
++ }
++
++ xenbus_switch_state(dev, XenbusStateConnected);
++
++ return 0;
++
++abort_transaction:
++ xenbus_transaction_end(xbt, 1);
++ if (message)
++ xenbus_dev_error(dev, err, "%s", message);
++destroy_tpmring:
++ destroy_tpmring(tp);
++out:
++ return err;
++}
++
++/**
++ * Callback received when the backend's state changes.
++ */
++static void backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ DPRINTK("\n");
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitWait:
++ case XenbusStateInitialised:
++ case XenbusStateUnknown:
++ break;
++
++ case XenbusStateConnected:
++ tpmif_set_connected_state(tp, 1);
++ break;
++
++ case XenbusStateClosing:
++ tpmif_set_connected_state(tp, 0);
++ xenbus_frontend_closed(dev);
++ break;
++
++ case XenbusStateClosed:
++ tpmif_set_connected_state(tp, 0);
++ if (tp->is_suspended == 0)
++ device_unregister(&dev->dev);
++ xenbus_frontend_closed(dev);
++ break;
++ }
++}
++
++static int tpmfront_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++ int handle;
++ struct tpm_private *tp = tpm_private_get();
++
++ if (!tp)
++ return -ENOMEM;
++
++ tp->chip = init_vtpm(&dev->dev, tp);
++ if (IS_ERR(tp->chip))
++ return PTR_ERR(tp->chip);
++
++ err = xenbus_scanf(XBT_NIL, dev->nodename,
++ "handle", "%i", &handle);
++ if (XENBUS_EXIST_ERR(err))
++ return err;
++
++ if (err < 0) {
++ xenbus_dev_fatal(dev,err,"reading virtual-device");
++ return err;
++ }
++
++ tp->dev = dev;
++
++ err = talk_to_backend(dev, tp);
++ if (err) {
++ tpm_private_put();
++ return err;
++ }
++
++ return 0;
++}
++
++
++static int tpmfront_remove(struct xenbus_device *dev)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ destroy_tpmring(tp);
++ cleanup_vtpm(&dev->dev);
++ return 0;
++}
++
++static int tpmfront_suspend(struct xenbus_device *dev)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ u32 ctr;
++
++ /* Take the lock, preventing any application from sending. */
++ mutex_lock(&suspend_lock);
++ tp->is_suspended = 1;
++
++ for (ctr = 0; atomic_read(&tp->tx_busy); ctr++) {
++ if ((ctr % 10) == 0)
++ printk("TPM-FE [INFO]: Waiting for outstanding "
++ "request.\n");
++ /* Wait for a request to be responded to. */
++ interruptible_sleep_on_timeout(&tp->wait_q, 100);
++ }
++
++ return 0;
++}
++
++static int tpmfront_suspend_finish(struct tpm_private *tp)
++{
++ tp->is_suspended = 0;
++ /* Allow applications to send again. */
++ mutex_unlock(&suspend_lock);
++ return 0;
++}
++
++static int tpmfront_suspend_cancel(struct xenbus_device *dev)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ return tpmfront_suspend_finish(tp);
++}
++
++static int tpmfront_resume(struct xenbus_device *dev)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ destroy_tpmring(tp);
++ return talk_to_backend(dev, tp);
++}
++
++static int tpmif_connect(struct xenbus_device *dev,
++ struct tpm_private *tp,
++ domid_t domid)
++{
++ int err;
++
++ tp->backend_id = domid;
++
++ err = bind_listening_port_to_irqhandler(
++ domid, tpmif_int, SA_SAMPLE_RANDOM, "tpmif", tp);
++ if (err <= 0) {
++ WPRINTK("bind_listening_port_to_irqhandler failed "
++ "(err=%d)\n", err);
++ return err;
++ }
++ tp->irq = err;
++
++ return 0;
++}
++
++static struct xenbus_device_id tpmfront_ids[] = {
++ { "vtpm" },
++ { "" }
++};
++
++static struct xenbus_driver tpmfront = {
++ .name = "vtpm",
++ .owner = THIS_MODULE,
++ .ids = tpmfront_ids,
++ .probe = tpmfront_probe,
++ .remove = tpmfront_remove,
++ .resume = tpmfront_resume,
++ .otherend_changed = backend_changed,
++ .suspend = tpmfront_suspend,
++ .suspend_cancel = tpmfront_suspend_cancel,
++};
++
++static void __init init_tpm_xenbus(void)
++{
++ xenbus_register_frontend(&tpmfront);
++}
++
++static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
++{
++ unsigned int i;
++
++ for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
++ tp->tx_buffers[i] = tx_buffer_alloc();
++ if (!tp->tx_buffers[i]) {
++ tpmif_free_tx_buffers(tp);
++ return -ENOMEM;
++ }
++ }
++ return 0;
++}
++
++static void tpmif_free_tx_buffers(struct tpm_private *tp)
++{
++ unsigned int i;
++
++ for (i = 0; i < TPMIF_TX_RING_SIZE; i++)
++ tx_buffer_free(tp->tx_buffers[i]);
++}
++
++static void tpmif_rx_action(unsigned long priv)
++{
++ struct tpm_private *tp = (struct tpm_private *)priv;
++ int i = 0;
++ unsigned int received;
++ unsigned int offset = 0;
++ u8 *buffer;
++ tpmif_tx_request_t *tx = &tp->tx->ring[i].req;
++
++ atomic_set(&tp->tx_busy, 0);
++ wake_up_interruptible(&tp->wait_q);
++
++ received = tx->size;
++
++ buffer = kmalloc(received, GFP_ATOMIC);
++ if (!buffer)
++ return;
++
++ for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
++ struct tx_buffer *txb = tp->tx_buffers[i];
++ tpmif_tx_request_t *tx;
++ unsigned int tocopy;
++
++ tx = &tp->tx->ring[i].req;
++ tocopy = tx->size;
++ if (tocopy > PAGE_SIZE)
++ tocopy = PAGE_SIZE;
++
++ memcpy(&buffer[offset], txb->data, tocopy);
++
++ gnttab_release_grant_reference(&gref_head, tx->ref);
++
++ offset += tocopy;
++ }
++
++ vtpm_vd_recv(tp->chip, buffer, received, tp->tx_remember);
++ kfree(buffer);
++}
++
++
++static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
++{
++ struct tpm_private *tp = tpm_priv;
++ unsigned long flags;
++
++ spin_lock_irqsave(&tp->tx_lock, flags);
++ tpmif_rx_tasklet.data = (unsigned long)tp;
++ tasklet_schedule(&tpmif_rx_tasklet);
++ spin_unlock_irqrestore(&tp->tx_lock, flags);
++
++ return IRQ_HANDLED;
++}
++
++
++static int tpm_xmit(struct tpm_private *tp,
++ const u8 * buf, size_t count, int isuserbuffer,
++ void *remember)
++{
++ tpmif_tx_request_t *tx;
++ TPMIF_RING_IDX i;
++ unsigned int offset = 0;
++
++ spin_lock_irq(&tp->tx_lock);
++
++ if (unlikely(atomic_read(&tp->tx_busy))) {
++ printk("tpm_xmit: There's an outstanding request/response "
++ "on the way!\n");
++ spin_unlock_irq(&tp->tx_lock);
++ return -EBUSY;
++ }
++
++ if (tp->is_connected != 1) {
++ spin_unlock_irq(&tp->tx_lock);
++ return -EIO;
++ }
++
++ for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
++ struct tx_buffer *txb = tp->tx_buffers[i];
++ int copied;
++
++ if (!txb) {
++			DPRINTK("txb (i=%d) is NULL. buffers initialized?\n"
++ "Not transmitting anything!\n", i);
++ spin_unlock_irq(&tp->tx_lock);
++ return -EFAULT;
++ }
++
++ copied = tx_buffer_copy(txb, &buf[offset], count,
++ isuserbuffer);
++ if (copied < 0) {
++ /* An error occurred */
++ spin_unlock_irq(&tp->tx_lock);
++ return copied;
++ }
++ count -= copied;
++ offset += copied;
++
++ tx = &tp->tx->ring[i].req;
++ tx->addr = virt_to_machine(txb->data);
++ tx->size = txb->len;
++
++ DPRINTK("First 4 characters sent by TPM-FE are "
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
++
++ /* Get the granttable reference for this page. */
++ tx->ref = gnttab_claim_grant_reference(&gref_head);
++ if (tx->ref == -ENOSPC) {
++ spin_unlock_irq(&tp->tx_lock);
++ DPRINTK("Grant table claim reference failed in "
++ "func:%s line:%d file:%s\n",
++ __FUNCTION__, __LINE__, __FILE__);
++ return -ENOSPC;
++ }
++ gnttab_grant_foreign_access_ref(tx->ref,
++ tp->backend_id,
++ virt_to_mfn(txb->data),
++ 0 /*RW*/);
++ wmb();
++ }
++
++ atomic_set(&tp->tx_busy, 1);
++ tp->tx_remember = remember;
++
++ mb();
++
++ notify_remote_via_irq(tp->irq);
++
++ spin_unlock_irq(&tp->tx_lock);
++ return offset;
++}
++
++
++static void tpmif_notify_upperlayer(struct tpm_private *tp)
++{
++ /* Notify upper layer about the state of the connection to the BE. */
++ vtpm_vd_status(tp->chip, (tp->is_connected
++ ? TPM_VD_STATUS_CONNECTED
++ : TPM_VD_STATUS_DISCONNECTED));
++}
++
++
++static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
++{
++ /*
++ * Don't notify upper layer if we are in suspend mode and
++	 * should disconnect - assumption is that we will resume.
++ * The mutex keeps apps from sending.
++ */
++ if (is_connected == 0 && tp->is_suspended == 1)
++ return;
++
++ /*
++ * Unlock the mutex if we are connected again
++ * after being suspended - now resuming.
++ * This also removes the suspend state.
++ */
++ if (is_connected == 1 && tp->is_suspended == 1)
++ tpmfront_suspend_finish(tp);
++
++ if (is_connected != tp->is_connected) {
++ tp->is_connected = is_connected;
++ tpmif_notify_upperlayer(tp);
++ }
++}
++
++
++
++/* =================================================================
++ * Initialization function.
++ * =================================================================
++ */
++
++
++static int __init tpmif_init(void)
++{
++ struct tpm_private *tp;
++
++ if (is_initial_xendomain())
++ return -EPERM;
++
++ tp = tpm_private_get();
++ if (!tp)
++ return -ENOMEM;
++
++ IPRINTK("Initialising the vTPM driver.\n");
++ if (gnttab_alloc_grant_references(TPMIF_TX_RING_SIZE,
++ &gref_head) < 0) {
++ tpm_private_put();
++ return -EFAULT;
++ }
++
++ init_tpm_xenbus();
++ return 0;
++}
++
++
++module_init(tpmif_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
+Index: head-2007-09-25/include/linux/elfnote.h
+===================================================================
+--- head-2007-09-25.orig/include/linux/elfnote.h 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/include/linux/elfnote.h 2007-09-25 14:35:02.000000000 +0200
+@@ -38,7 +38,7 @@
+ * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two")
+ * ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
+ */
+-#define ELFNOTE(name, type, desctype, descdata) \
++#define ELFNOTE(name, type, desctype, descdata...) \
+ .pushsection .note.name, "",@note ; \
+ .align 4 ; \
+ .long 2f - 1f /* namesz */ ; \
+Index: head-2007-09-25/include/linux/gfp.h
+===================================================================
+--- head-2007-09-25.orig/include/linux/gfp.h 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/include/linux/gfp.h 2007-09-25 14:35:02.000000000 +0200
+@@ -115,7 +115,11 @@ static inline enum zone_type gfp_zone(gf
+ */
+
+ #ifndef HAVE_ARCH_FREE_PAGE
+-static inline void arch_free_page(struct page *page, int order) { }
++/*
++ * If arch_free_page returns non-zero then the generic free_page code can
++ * immediately bail: the arch-specific function has done all the work.
++ */
++static inline int arch_free_page(struct page *page, int order) { return 0; }
+ #endif
+ #ifndef HAVE_ARCH_ALLOC_PAGE
+ static inline void arch_alloc_page(struct page *page, int order) { }
+Index: head-2007-09-25/include/linux/interrupt.h
+===================================================================
+--- head-2007-09-25.orig/include/linux/interrupt.h 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/include/linux/interrupt.h 2007-09-25 14:35:02.000000000 +0200
+@@ -207,6 +207,12 @@ static inline int disable_irq_wake(unsig
+
+ #endif /* CONFIG_GENERIC_HARDIRQS */
+
++#ifdef CONFIG_HAVE_IRQ_IGNORE_UNHANDLED
++int irq_ignore_unhandled(unsigned int irq);
++#else
++#define irq_ignore_unhandled(irq) 0
++#endif
++
+ #ifndef __ARCH_SET_SOFTIRQ_PENDING
+ #define set_softirq_pending(x) (local_softirq_pending() = (x))
+ #define or_softirq_pending(x) (local_softirq_pending() |= (x))
+Index: head-2007-09-25/include/linux/kexec.h
+===================================================================
+--- head-2007-09-25.orig/include/linux/kexec.h 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/include/linux/kexec.h 2007-09-25 14:35:02.000000000 +0200
+@@ -46,6 +46,13 @@
+ KEXEC_CORE_NOTE_NAME_BYTES + \
+ KEXEC_CORE_NOTE_DESC_BYTES )
+
++#ifndef KEXEC_ARCH_HAS_PAGE_MACROS
++#define kexec_page_to_pfn(page) page_to_pfn(page)
++#define kexec_pfn_to_page(pfn) pfn_to_page(pfn)
++#define kexec_virt_to_phys(addr) virt_to_phys(addr)
++#define kexec_phys_to_virt(addr) phys_to_virt(addr)
++#endif
++
+ /*
+ * This structure is used to hold the arguments that are used when loading
+ * kernel binaries.
+@@ -106,6 +113,12 @@ struct kimage {
+ extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET;
+ extern int machine_kexec_prepare(struct kimage *image);
+ extern void machine_kexec_cleanup(struct kimage *image);
++#ifdef CONFIG_XEN
++extern int xen_machine_kexec_load(struct kimage *image);
++extern void xen_machine_kexec_unload(struct kimage *image);
++extern void xen_machine_kexec_setup_resources(void);
++extern void xen_machine_kexec_register_resources(struct resource *res);
++#endif
+ extern asmlinkage long sys_kexec_load(unsigned long entry,
+ unsigned long nr_segments,
+ struct kexec_segment __user *segments,
+Index: head-2007-09-25/include/linux/mm.h
+===================================================================
+--- head-2007-09-25.orig/include/linux/mm.h 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/include/linux/mm.h 2007-09-25 14:35:02.000000000 +0200
+@@ -169,6 +169,9 @@ extern unsigned int kobjsize(const void
+ #define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */
+ #define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */
+ #define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */
++#ifdef CONFIG_XEN
++#define VM_FOREIGN 0x08000000 /* Has pages belonging to another VM */
++#endif
+
+ #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
+ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
+@@ -208,6 +211,10 @@ struct vm_operations_struct {
+ /* notification that a previously read-only page is about to become
+ * writable, if an error is returned it will cause a SIGBUS */
+ int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
++ /* Area-specific function for clearing the PTE at @ptep. Returns the
++ * original value of @ptep. */
++ pte_t (*zap_pte)(struct vm_area_struct *vma,
++ unsigned long addr, pte_t *ptep, int is_fullmm);
+ #ifdef CONFIG_NUMA
+ int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
+ struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
+Index: head-2007-09-25/include/linux/page-flags.h
+===================================================================
+--- head-2007-09-25.orig/include/linux/page-flags.h 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/include/linux/page-flags.h 2007-09-25 14:35:02.000000000 +0200
+@@ -104,6 +104,8 @@
+ #define PG_uncached 31 /* Page has been mapped as uncached */
+ #endif
+
++#define PG_foreign 20 /* Page is owned by foreign allocator. */
++
+ /*
+ * Manipulation of page state flags
+ */
+@@ -270,6 +272,18 @@ static inline void __ClearPageTail(struc
+ #define SetPageUncached(page) set_bit(PG_uncached, &(page)->flags)
+ #define ClearPageUncached(page) clear_bit(PG_uncached, &(page)->flags)
+
++#define PageForeign(page) test_bit(PG_foreign, &(page)->flags)
++#define SetPageForeign(page, dtor) do { \
++ set_bit(PG_foreign, &(page)->flags); \
++ (page)->index = (long)(dtor); \
++} while (0)
++#define ClearPageForeign(page) do { \
++ clear_bit(PG_foreign, &(page)->flags); \
++ (page)->index = 0; \
++} while (0)
++#define PageForeignDestructor(page) \
++ ( (void (*) (struct page *)) (page)->index )(page)
++
+ struct page; /* forward declaration */
+
+ extern void cancel_dirty_page(struct page *page, unsigned int account_size);
+Index: head-2007-09-25/include/linux/skbuff.h
+===================================================================
+--- head-2007-09-25.orig/include/linux/skbuff.h 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/include/linux/skbuff.h 2007-09-25 14:35:02.000000000 +0200
+@@ -212,6 +212,8 @@ typedef unsigned char *sk_buff_data_t;
+ * @local_df: allow local fragmentation
+ * @cloned: Head may be cloned (check refcnt to be sure)
+ * @nohdr: Payload reference only, must not modify header
++ * @proto_data_valid: Protocol data validated since arriving at localhost
++ * @proto_csum_blank: Protocol csum must be added before leaving localhost
+ * @pkt_type: Packet class
+ * @fclone: skbuff clone status
+ * @ip_summed: Driver fed us an IP checksum
+@@ -277,7 +279,13 @@ struct sk_buff {
+ nfctinfo:3;
+ __u8 pkt_type:3,
+ fclone:2,
++#ifndef CONFIG_XEN
+ ipvs_property:1;
++#else
++ ipvs_property:1,
++ proto_data_valid:1,
++ proto_csum_blank:1;
++#endif
+ __be16 protocol;
+
+ void (*destructor)(struct sk_buff *skb);
+Index: head-2007-09-25/kernel/irq/spurious.c
+===================================================================
+--- head-2007-09-25.orig/kernel/irq/spurious.c 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/kernel/irq/spurious.c 2007-09-25 14:35:02.000000000 +0200
+@@ -172,7 +172,8 @@ void note_interrupt(unsigned int irq, st
+ irqreturn_t action_ret)
+ {
+ if (unlikely(action_ret != IRQ_HANDLED)) {
+- desc->irqs_unhandled++;
++ if (!irq_ignore_unhandled(irq))
++ desc->irqs_unhandled++;
+ if (unlikely(action_ret != IRQ_NONE))
+ report_bad_irq(irq, desc, action_ret);
+ }
+Index: head-2007-09-25/kernel/kexec.c
+===================================================================
+--- head-2007-09-25.orig/kernel/kexec.c 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/kernel/kexec.c 2007-09-25 14:35:02.000000000 +0200
+@@ -331,13 +331,27 @@ static int kimage_is_destination_range(s
+ return 0;
+ }
+
+-static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
++static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order, unsigned long limit)
+ {
+ struct page *pages;
+
+ pages = alloc_pages(gfp_mask, order);
+ if (pages) {
+ unsigned int count, i;
++#ifdef CONFIG_XEN
++ int address_bits;
++
++ if (limit == ~0UL)
++ address_bits = BITS_PER_LONG;
++ else
++ address_bits = long_log2(limit);
++
++ if (xen_create_contiguous_region((unsigned long)page_address(pages),
++ order, address_bits) < 0) {
++ __free_pages(pages, order);
++ return NULL;
++ }
++#endif
+ pages->mapping = NULL;
+ set_page_private(pages, order);
+ count = 1 << order;
+@@ -356,6 +370,9 @@ static void kimage_free_pages(struct pag
+ count = 1 << order;
+ for (i = 0; i < count; i++)
+ ClearPageReserved(page + i);
++#ifdef CONFIG_XEN
++ xen_destroy_contiguous_region((unsigned long)page_address(page), order);
++#endif
+ __free_pages(page, order);
+ }
+
+@@ -401,10 +418,10 @@ static struct page *kimage_alloc_normal_
+ do {
+ unsigned long pfn, epfn, addr, eaddr;
+
+- pages = kimage_alloc_pages(GFP_KERNEL, order);
++ pages = kimage_alloc_pages(GFP_KERNEL, order, KEXEC_CONTROL_MEMORY_LIMIT);
+ if (!pages)
+ break;
+- pfn = page_to_pfn(pages);
++ pfn = kexec_page_to_pfn(pages);
+ epfn = pfn + count;
+ addr = pfn << PAGE_SHIFT;
+ eaddr = epfn << PAGE_SHIFT;
+@@ -438,6 +455,7 @@ static struct page *kimage_alloc_normal_
+ return pages;
+ }
+
++#ifndef CONFIG_XEN
+ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
+ unsigned int order)
+ {
+@@ -491,7 +509,7 @@ static struct page *kimage_alloc_crash_c
+ }
+ /* If I don't overlap any segments I have found my hole! */
+ if (i == image->nr_segments) {
+- pages = pfn_to_page(hole_start >> PAGE_SHIFT);
++ pages = kexec_pfn_to_page(hole_start >> PAGE_SHIFT);
+ break;
+ }
+ }
+@@ -518,6 +536,13 @@ struct page *kimage_alloc_control_pages(
+
+ return pages;
+ }
++#else /* !CONFIG_XEN */
++struct page *kimage_alloc_control_pages(struct kimage *image,
++ unsigned int order)
++{
++ return kimage_alloc_normal_control_pages(image, order);
++}
++#endif
+
+ static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
+ {
+@@ -533,7 +558,7 @@ static int kimage_add_entry(struct kimag
+ return -ENOMEM;
+
+ ind_page = page_address(page);
+- *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
++ *image->entry = kexec_virt_to_phys(ind_page) | IND_INDIRECTION;
+ image->entry = ind_page;
+ image->last_entry = ind_page +
+ ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
+@@ -594,13 +619,13 @@ static int kimage_terminate(struct kimag
+ #define for_each_kimage_entry(image, ptr, entry) \
+ for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
+ ptr = (entry & IND_INDIRECTION)? \
+- phys_to_virt((entry & PAGE_MASK)): ptr +1)
++ kexec_phys_to_virt((entry & PAGE_MASK)): ptr +1)
+
+ static void kimage_free_entry(kimage_entry_t entry)
+ {
+ struct page *page;
+
+- page = pfn_to_page(entry >> PAGE_SHIFT);
++ page = kexec_pfn_to_page(entry >> PAGE_SHIFT);
+ kimage_free_pages(page);
+ }
+
+@@ -612,6 +637,10 @@ static void kimage_free(struct kimage *i
+ if (!image)
+ return;
+
++#ifdef CONFIG_XEN
++ xen_machine_kexec_unload(image);
++#endif
++
+ kimage_free_extra_pages(image);
+ for_each_kimage_entry(image, ptr, entry) {
+ if (entry & IND_INDIRECTION) {
+@@ -687,7 +716,7 @@ static struct page *kimage_alloc_page(st
+ * have a match.
+ */
+ list_for_each_entry(page, &image->dest_pages, lru) {
+- addr = page_to_pfn(page) << PAGE_SHIFT;
++ addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
+ if (addr == destination) {
+ list_del(&page->lru);
+ return page;
+@@ -698,16 +727,16 @@ static struct page *kimage_alloc_page(st
+ kimage_entry_t *old;
+
+ /* Allocate a page, if we run out of memory give up */
+- page = kimage_alloc_pages(gfp_mask, 0);
++ page = kimage_alloc_pages(gfp_mask, 0, KEXEC_SOURCE_MEMORY_LIMIT);
+ if (!page)
+ return NULL;
+ /* If the page cannot be used file it away */
+- if (page_to_pfn(page) >
++ if (kexec_page_to_pfn(page) >
+ (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
+ list_add(&page->lru, &image->unuseable_pages);
+ continue;
+ }
+- addr = page_to_pfn(page) << PAGE_SHIFT;
++ addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
+
+ /* If it is the destination page we want use it */
+ if (addr == destination)
+@@ -730,7 +759,7 @@ static struct page *kimage_alloc_page(st
+ struct page *old_page;
+
+ old_addr = *old & PAGE_MASK;
+- old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
++ old_page = kexec_pfn_to_page(old_addr >> PAGE_SHIFT);
+ copy_highpage(page, old_page);
+ *old = addr | (*old & ~PAGE_MASK);
+
+@@ -780,7 +809,7 @@ static int kimage_load_normal_segment(st
+ result = -ENOMEM;
+ goto out;
+ }
+- result = kimage_add_page(image, page_to_pfn(page)
++ result = kimage_add_page(image, kexec_page_to_pfn(page)
+ << PAGE_SHIFT);
+ if (result < 0)
+ goto out;
+@@ -812,6 +841,7 @@ out:
+ return result;
+ }
+
++#ifndef CONFIG_XEN
+ static int kimage_load_crash_segment(struct kimage *image,
+ struct kexec_segment *segment)
+ {
+@@ -834,7 +864,7 @@ static int kimage_load_crash_segment(str
+ char *ptr;
+ size_t uchunk, mchunk;
+
+- page = pfn_to_page(maddr >> PAGE_SHIFT);
++ page = kexec_pfn_to_page(maddr >> PAGE_SHIFT);
+ if (page == 0) {
+ result = -ENOMEM;
+ goto out;
+@@ -883,6 +913,13 @@ static int kimage_load_segment(struct ki
+
+ return result;
+ }
++#else /* CONFIG_XEN */
++static int kimage_load_segment(struct kimage *image,
++ struct kexec_segment *segment)
++{
++ return kimage_load_normal_segment(image, segment);
++}
++#endif
+
+ /*
+ * Exec Kernel system call: for obvious reasons only root may call it.
+@@ -993,6 +1030,13 @@ asmlinkage long sys_kexec_load(unsigned
+ if (result)
+ goto out;
+ }
++#ifdef CONFIG_XEN
++ if (image) {
++ result = xen_machine_kexec_load(image);
++ if (result)
++ goto out;
++ }
++#endif
+ /* Install the new kernel, and Uninstall the old */
+ image = xchg(dest_image, image);
+
+@@ -1047,7 +1091,6 @@ void crash_kexec(struct pt_regs *regs)
+ {
+ int locked;
+
+-
+ /* Take the kexec_lock here to prevent sys_kexec_load
+ * running on one cpu from replacing the crash kernel
+ * we are using after a panic on a different cpu.
+Index: head-2007-09-25/lib/Makefile
+===================================================================
+--- head-2007-09-25.orig/lib/Makefile 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/lib/Makefile 2007-09-25 14:35:02.000000000 +0200
+@@ -58,6 +58,7 @@ obj-$(CONFIG_SMP) += percpu_counter.o
+ obj-$(CONFIG_AUDIT_GENERIC) += audit.o
+
+ obj-$(CONFIG_SWIOTLB) += swiotlb.o
++swiotlb-$(CONFIG_XEN) := ../arch/i386/kernel/swiotlb.o
+ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
+
+ lib-$(CONFIG_GENERIC_BUG) += bug.o
+Index: head-2007-09-25/mm/highmem.c
+===================================================================
+--- head-2007-09-25.orig/mm/highmem.c 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/mm/highmem.c 2007-09-25 14:35:02.000000000 +0200
+@@ -158,6 +158,17 @@ start:
+ return vaddr;
+ }
+
++#ifdef CONFIG_XEN
++void kmap_flush_unused(void)
++{
++ spin_lock(&kmap_lock);
++ flush_all_zero_pkmaps();
++ spin_unlock(&kmap_lock);
++}
++
++EXPORT_SYMBOL(kmap_flush_unused);
++#endif
++
+ void fastcall *kmap_high(struct page *page)
+ {
+ unsigned long vaddr;
+Index: head-2007-09-25/mm/memory.c
+===================================================================
+--- head-2007-09-25.orig/mm/memory.c 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/mm/memory.c 2007-09-25 14:35:02.000000000 +0200
+@@ -404,7 +404,8 @@ struct page *vm_normal_page(struct vm_ar
+ * and that the resulting page looks ok.
+ */
+ if (unlikely(!pfn_valid(pfn))) {
+- print_bad_pte(vma, pte, addr);
++ if (!(vma->vm_flags & VM_RESERVED))
++ print_bad_pte(vma, pte, addr);
+ return NULL;
+ }
+
+@@ -662,8 +663,12 @@ static unsigned long zap_pte_range(struc
+ page->index > details->last_index))
+ continue;
+ }
+- ptent = ptep_get_and_clear_full(mm, addr, pte,
+- tlb->fullmm);
++ if (unlikely(vma->vm_ops && vma->vm_ops->zap_pte))
++ ptent = vma->vm_ops->zap_pte(vma, addr, pte,
++ tlb->fullmm);
++ else
++ ptent = ptep_get_and_clear_full(mm, addr, pte,
++ tlb->fullmm);
+ tlb_remove_tlb_entry(tlb, pte, addr);
+ if (unlikely(!page))
+ continue;
+@@ -896,6 +901,7 @@ unsigned long zap_page_range(struct vm_a
+ tlb_finish_mmu(tlb, address, end);
+ return end;
+ }
++EXPORT_SYMBOL(zap_page_range);
+
+ /*
+ * Do a quick page-table lookup for a single page.
+@@ -1035,6 +1041,26 @@ int get_user_pages(struct task_struct *t
+ continue;
+ }
+
++#ifdef CONFIG_XEN
++ if (vma && (vma->vm_flags & VM_FOREIGN)) {
++ struct page **map = vma->vm_private_data;
++ int offset = (start - vma->vm_start) >> PAGE_SHIFT;
++ if (map[offset] != NULL) {
++ if (pages) {
++ struct page *page = map[offset];
++
++ pages[i] = page;
++ get_page(page);
++ }
++ if (vmas)
++ vmas[i] = vma;
++ i++;
++ start += PAGE_SIZE;
++ len--;
++ continue;
++ }
++ }
++#endif
+ if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
+ || !(vm_flags & vma->vm_flags))
+ return i ? : -EFAULT;
+Index: head-2007-09-25/mm/page_alloc.c
+===================================================================
+--- head-2007-09-25.orig/mm/page_alloc.c 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/mm/page_alloc.c 2007-09-25 14:35:02.000000000 +0200
+@@ -206,7 +206,11 @@ static void bad_page(struct page *page)
+ 1 << PG_slab |
+ 1 << PG_swapcache |
+ 1 << PG_writeback |
+- 1 << PG_buddy );
++ 1 << PG_buddy |
++#ifdef CONFIG_X86_XEN
++ 1 << PG_pinned |
++#endif
++ 1 << PG_foreign );
+ set_page_count(page, 0);
+ reset_page_mapcount(page);
+ page->mapping = NULL;
+@@ -442,7 +446,11 @@ static inline int free_pages_check(struc
+ 1 << PG_swapcache |
+ 1 << PG_writeback |
+ 1 << PG_reserved |
+- 1 << PG_buddy ))))
++ 1 << PG_buddy |
++#ifdef CONFIG_X86_XEN
++ 1 << PG_pinned |
++#endif
++ 1 << PG_foreign ))))
+ bad_page(page);
+ /*
+ * PageReclaim == PageTail. It is only an error
+@@ -504,6 +512,12 @@ static void __free_pages_ok(struct page
+ int i;
+ int reserved = 0;
+
++#ifdef CONFIG_XEN
++ if (PageForeign(page)) {
++ PageForeignDestructor(page);
++ return;
++ }
++#endif
+ for (i = 0 ; i < (1 << order) ; ++i)
+ reserved += free_pages_check(page + i);
+ if (reserved)
+@@ -598,7 +612,11 @@ static int prep_new_page(struct page *pa
+ 1 << PG_swapcache |
+ 1 << PG_writeback |
+ 1 << PG_reserved |
+- 1 << PG_buddy ))))
++ 1 << PG_buddy |
++#ifdef CONFIG_X86_XEN
++ 1 << PG_pinned |
++#endif
++ 1 << PG_foreign ))))
+ bad_page(page);
+
+ /*
+@@ -781,6 +799,12 @@ static void fastcall free_hot_cold_page(
+ struct per_cpu_pages *pcp;
+ unsigned long flags;
+
++#ifdef CONFIG_XEN
++ if (PageForeign(page)) {
++ PageForeignDestructor(page);
++ return;
++ }
++#endif
+ if (PageAnon(page))
+ page->mapping = NULL;
+ if (free_pages_check(page))
+Index: head-2007-09-25/net/core/dev.c
+===================================================================
+--- head-2007-09-25.orig/net/core/dev.c 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/net/core/dev.c 2007-09-25 14:35:02.000000000 +0200
+@@ -118,6 +118,12 @@
+ #include <linux/ctype.h>
+ #include <linux/if_arp.h>
+
++#ifdef CONFIG_XEN
++#include <net/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#endif
++
+ /*
+ * The list of packet types we will receive (as opposed to discard)
+ * and the routines to invoke.
+@@ -1456,6 +1462,43 @@ out_kfree_skb:
+ } \
+ }
+
++#ifdef CONFIG_XEN
++inline int skb_checksum_setup(struct sk_buff *skb)
++{
++ if (skb->proto_csum_blank) {
++ if (skb->protocol != htons(ETH_P_IP))
++ goto out;
++ skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
++ if (skb->h.raw >= skb->tail)
++ goto out;
++ switch (skb->nh.iph->protocol) {
++ case IPPROTO_TCP:
++ skb->csum = offsetof(struct tcphdr, check);
++ break;
++ case IPPROTO_UDP:
++ skb->csum = offsetof(struct udphdr, check);
++ break;
++ default:
++ if (net_ratelimit())
++ printk(KERN_ERR "Attempting to checksum a non-"
++ "TCP/UDP packet, dropping a protocol"
++ " %d packet", skb->nh.iph->protocol);
++ goto out;
++ }
++ if ((skb->h.raw + skb->csum + 2) > skb->tail)
++ goto out;
++ skb->ip_summed = CHECKSUM_HW;
++ skb->proto_csum_blank = 0;
++ }
++ return 0;
++out:
++ return -EPROTO;
++}
++#else
++inline int skb_checksum_setup(struct sk_buff *skb) { return 0; }
++#endif
++
++
+ /**
+ * dev_queue_xmit - transmit a buffer
+ * @skb: buffer to transmit
+@@ -1488,6 +1531,12 @@ int dev_queue_xmit(struct sk_buff *skb)
+ struct Qdisc *q;
+ int rc = -ENOMEM;
+
++ /* If a checksum-deferred packet is forwarded to a device that needs a
++ * checksum, correct the pointers and force checksumming.
++ */
++ if (skb_checksum_setup(skb))
++ goto out_kfree_skb;
++
+ /* GSO will handle the following emulations directly. */
+ if (netif_needs_gso(dev, skb))
+ goto gso;
+@@ -1874,6 +1923,19 @@ int netif_receive_skb(struct sk_buff *sk
+ }
+ #endif
+
++#ifdef CONFIG_XEN
++ switch (skb->ip_summed) {
++ case CHECKSUM_UNNECESSARY:
++ skb->proto_data_valid = 1;
++ break;
++ case CHECKSUM_HW:
++ /* XXX Implement me. */
++ default:
++ skb->proto_data_valid = 0;
++ break;
++ }
++#endif
++
+ list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ if (!ptype->dev || ptype->dev == skb->dev) {
+ if (pt_prev)
+@@ -3778,6 +3840,7 @@ EXPORT_SYMBOL(unregister_netdevice_notif
+ EXPORT_SYMBOL(net_enable_timestamp);
+ EXPORT_SYMBOL(net_disable_timestamp);
+ EXPORT_SYMBOL(dev_get_flags);
++EXPORT_SYMBOL(skb_checksum_setup);
+
+ #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+ EXPORT_SYMBOL(br_handle_frame_hook);
+Index: head-2007-09-25/net/core/skbuff.c
+===================================================================
+--- head-2007-09-25.orig/net/core/skbuff.c 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/net/core/skbuff.c 2007-09-25 14:35:02.000000000 +0200
+@@ -416,6 +416,10 @@ struct sk_buff *skb_clone(struct sk_buff
+ C(local_df);
+ n->cloned = 1;
+ n->nohdr = 0;
++#ifdef CONFIG_XEN
++ C(proto_data_valid);
++ C(proto_csum_blank);
++#endif
+ C(pkt_type);
+ C(ip_summed);
+ C(priority);
diff --git a/trunk/2.6.22/20016_xen3-auto-arch-i386.patch1 b/trunk/2.6.22/20016_xen3-auto-arch-i386.patch1
new file mode 100644
index 0000000..159d584
--- /dev/null
+++ b/trunk/2.6.22/20016_xen3-auto-arch-i386.patch1
@@ -0,0 +1,483 @@
+Subject: xen3 arch-i386
+From: http://xenbits.xensource.com/xen-3.1-testing.hg (tip 15042)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+---
+ arch/i386/Makefile | 24 ++++++++++++++++++++++
+ arch/i386/kernel/Makefile | 22 ++++++++++++++++++--
+ arch/i386/kernel/acpi/Makefile | 4 +++
+ arch/i386/kernel/asm-offsets.c | 7 +++++-
+ arch/i386/kernel/cpu/Makefile | 5 ++++
+ arch/i386/kernel/cpu/mtrr/Makefile | 7 ++++++
+ arch/i386/kernel/crash.c | 4 +++
+ arch/i386/kernel/machine_kexec.c | 40 +++++++++++++++++++++++++++++++++++++
+ arch/i386/kernel/sysenter.c | 18 ++++++++++++++++
+ arch/i386/kernel/vm86.c | 12 +++++++++++
+ arch/i386/mm/Makefile | 8 +++++++
+ arch/i386/oprofile/Makefile | 7 ++++++
+ arch/i386/pci/Makefile | 9 ++++++++
+ arch/i386/power/Makefile | 4 ++-
+ include/asm-i386/apic.h | 2 +
+ include/asm-i386/kexec.h | 14 ++++++++++++
+ 16 files changed, 183 insertions(+), 4 deletions(-)
+
+--- linux-2.6.22.orig/arch/i386/kernel/acpi/Makefile
++++ linux-2.6.22/arch/i386/kernel/acpi/Makefile
+@@ -5,3 +5,7 @@ ifneq ($(CONFIG_ACPI_PROCESSOR),)
+ obj-y += cstate.o processor.o
+ endif
+
++ifdef CONFIG_XEN
++include $(srctree)/scripts/Makefile.xen
++obj-y := $(call cherrypickxen, $(obj-y), $(src))
++endif
+--- linux-2.6.22.orig/arch/i386/kernel/asm-offsets.c
++++ linux-2.6.22/arch/i386/kernel/asm-offsets.c
+@@ -92,9 +92,14 @@ void foo(void)
+ OFFSET(pbe_orig_address, pbe, orig_address);
+ OFFSET(pbe_next, pbe, next);
+
++#ifndef CONFIG_X86_NO_TSS
+ /* Offset from the sysenter stack to tss.esp0 */
+- DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, x86_tss.esp0) -
++ DEFINE(SYSENTER_stack_esp0, offsetof(struct tss_struct, x86_tss.esp0) -
+ sizeof(struct tss_struct));
++#else
++ /* sysenter stack points directly to esp0 */
++ DEFINE(SYSENTER_stack_esp0, 0);
++#endif
+
+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
+--- linux-2.6.22.orig/arch/i386/kernel/cpu/Makefile
++++ linux-2.6.22/arch/i386/kernel/cpu/Makefile
+@@ -19,3 +19,8 @@ obj-$(CONFIG_MTRR) += mtrr/
+ obj-$(CONFIG_CPU_FREQ) += cpufreq/
+
+ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
++
++ifdef CONFIG_XEN
++include $(srctree)/scripts/Makefile.xen
++obj-y := $(call cherrypickxen, $(obj-y), $(src))
++endif
+--- linux-2.6.22.orig/arch/i386/kernel/cpu/mtrr/Makefile
++++ linux-2.6.22/arch/i386/kernel/cpu/mtrr/Makefile
+@@ -1,3 +1,10 @@
+ obj-y := main.o if.o generic.o state.o
+ obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o
+
++ifdef CONFIG_XEN
++include $(srctree)/scripts/Makefile.xen
++n-obj-xen := generic.o state.o amd.o cyrix.o centaur.o
++
++obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
++obj-y := $(call cherrypickxen, $(obj-y))
++endif
+--- linux-2.6.22.orig/arch/i386/kernel/crash.c
++++ linux-2.6.22/arch/i386/kernel/crash.c
+@@ -31,6 +31,7 @@
+ /* This keeps a track of which one is crashing cpu. */
+ static int crashing_cpu;
+
++#ifndef CONFIG_XEN
+ #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
+ static atomic_t waiting_for_crash_ipi;
+
+@@ -112,6 +113,7 @@ static void nmi_shootdown_cpus(void)
+ /* There are no cpus to shootdown */
+ }
+ #endif
++#endif /* CONFIG_XEN */
+
+ void machine_crash_shutdown(struct pt_regs *regs)
+ {
+@@ -128,10 +130,12 @@ void machine_crash_shutdown(struct pt_re
+
+ /* Make a note of crashing cpu. Will be used in NMI callback.*/
+ crashing_cpu = safe_smp_processor_id();
++#ifndef CONFIG_XEN
+ nmi_shootdown_cpus();
+ lapic_shutdown();
+ #if defined(CONFIG_X86_IO_APIC)
+ disable_IO_APIC();
+ #endif
++#endif /* CONFIG_XEN */
+ crash_save_cpu(regs, safe_smp_processor_id());
+ }
+--- linux-2.6.22.orig/arch/i386/kernel/machine_kexec.c
++++ linux-2.6.22/arch/i386/kernel/machine_kexec.c
+@@ -20,6 +20,10 @@
+ #include <asm/desc.h>
+ #include <asm/system.h>
+
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#endif
++
+ #define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
+ static u32 kexec_pgd[1024] PAGE_ALIGNED;
+ #ifdef CONFIG_X86_PAE
+@@ -71,6 +75,40 @@ static void load_segments(void)
+ #undef __STR
+ }
+
++#ifdef CONFIG_XEN
++
++#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
++
++#if PAGES_NR > KEXEC_XEN_NO_PAGES
++#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
++#endif
++
++#if PA_CONTROL_PAGE != 0
++#error PA_CONTROL_PAGE is non zero - Xen support will break
++#endif
++
++void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
++{
++ void *control_page;
++
++ memset(xki->page_list, 0, sizeof(xki->page_list));
++
++ control_page = page_address(image->control_code_page);
++ memcpy(control_page, relocate_kernel, PAGE_SIZE);
++
++ xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
++ xki->page_list[PA_PGD] = __ma(kexec_pgd);
++#ifdef CONFIG_X86_PAE
++ xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
++ xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
++#endif
++ xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
++ xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
++
++}
++
++#endif /* CONFIG_XEN */
++
+ /*
+ * A architecture hook called to validate the
+ * proposed image and prepare the control pages
+@@ -97,6 +135,7 @@ void machine_kexec_cleanup(struct kimage
+ {
+ }
+
++#ifndef CONFIG_XEN
+ /*
+ * Do not allocate memory (or fail in any way) in machine_kexec().
+ * We are past the point of no return, committed to rebooting now.
+@@ -147,6 +186,7 @@ NORET_TYPE void machine_kexec(struct kim
+ relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
+ image->start, cpu_has_pae);
+ }
++#endif
+
+ /* crashkernel=size@addr specifies the location to reserve for
+ * a crash kernel. By reserving this memory we guarantee
+--- linux-2.6.22.orig/arch/i386/kernel/Makefile
++++ linux-2.6.22/arch/i386/kernel/Makefile
+@@ -47,6 +47,12 @@ obj-y += pcspeaker.o
+
+ obj-$(CONFIG_SCx200) += scx200.o
+
++ifdef CONFIG_XEN
++vsyscall_note := vsyscall-note-xen.o
++else
++vsyscall_note := vsyscall-note.o
++endif
++
+ # vsyscall.o contains the vsyscall DSO images as __initdata.
+ # We must build both images before we can assemble it.
+ # Note: kbuild does not track this dependency due to usage of .incbin
+@@ -68,7 +74,7 @@ SYSCFLAGS_vsyscall-int80.so = $(vsyscall
+
+ $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
+ $(obj)/vsyscall-%.so: $(src)/vsyscall.lds \
+- $(obj)/vsyscall-%.o $(obj)/vsyscall-note.o FORCE
++ $(obj)/vsyscall-%.o $(obj)/$(vsyscall_note) FORCE
+ $(call if_changed,syscall)
+
+ # We also create a special relocatable object that should mirror the symbol
+@@ -80,10 +86,22 @@ $(obj)/built-in.o: ld_flags += -R $(obj)
+
+ SYSCFLAGS_vsyscall-syms.o = -r
+ $(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
+- $(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
++ $(obj)/vsyscall-sysenter.o $(obj)/$(vsyscall_note) FORCE
+ $(call if_changed,syscall)
+
+ k8-y += ../../x86_64/kernel/k8.o
+ stacktrace-y += ../../x86_64/kernel/stacktrace.o
+ early-quirks-y += ../../x86_64/kernel/early-quirks.o
+
++ifdef CONFIG_XEN
++include $(srctree)/scripts/Makefile.xen
++
++obj-y += fixup.o
++microcode-$(subst m,y,$(CONFIG_MICROCODE)) := microcode-xen.o
++n-obj-xen := i8259.o timers/ reboot.o smpboot.o trampoline.o
++
++obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
++obj-y := $(call cherrypickxen, $(obj-y))
++extra-y := $(call cherrypickxen, $(extra-y))
++%/head-xen.o %/head-xen.s: EXTRA_AFLAGS :=
++endif
+--- linux-2.6.22.orig/arch/i386/kernel/sysenter.c
++++ linux-2.6.22/arch/i386/kernel/sysenter.c
+@@ -37,6 +37,10 @@ enum {
+ #define VDSO_DEFAULT VDSO_ENABLED
+ #endif
+
++#ifdef CONFIG_XEN
++#include <xen/interface/callback.h>
++#endif
++
+ /*
+ * Should the kernel map a VDSO page into processes and pass its
+ * address down to glibc upon exec()?
+@@ -175,6 +179,7 @@ static __init void relocate_vdso(Elf32_E
+
+ void enable_sep_cpu(void)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ int cpu = get_cpu();
+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
+
+@@ -189,6 +194,7 @@ void enable_sep_cpu(void)
+ wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.esp1, 0);
+ wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
+ put_cpu();
++#endif
+ }
+
+ static struct vm_area_struct gate_vma;
+@@ -242,6 +248,18 @@ int __init sysenter_setup(void)
+
+ syscall_pages[0] = virt_to_page(syscall_page);
+
++#ifdef CONFIG_XEN
++ if (boot_cpu_has(X86_FEATURE_SEP)) {
++ static struct callback_register __initdata sysenter = {
++ .type = CALLBACKTYPE_sysenter,
++ .address = { __KERNEL_CS, (unsigned long)sysenter_entry },
++ };
++
++ if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) < 0)
++ clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
++ }
++#endif
++
+ gate_vma_init();
+
+ printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
+--- linux-2.6.22.orig/arch/i386/kernel/vm86.c
++++ linux-2.6.22/arch/i386/kernel/vm86.c
+@@ -125,7 +125,9 @@ static int copy_vm86_regs_from_user(stru
+ struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
+ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ struct tss_struct *tss;
++#endif
+ struct pt_regs *ret;
+ unsigned long tmp;
+
+@@ -148,12 +150,16 @@ struct pt_regs * fastcall save_v86_state
+ do_exit(SIGSEGV);
+ }
+
++#ifndef CONFIG_X86_NO_TSS
+ tss = &per_cpu(init_tss, get_cpu());
++#endif
+ current->thread.esp0 = current->thread.saved_esp0;
+ current->thread.sysenter_cs = __KERNEL_CS;
+ load_esp0(tss, &current->thread);
+ current->thread.saved_esp0 = 0;
++#ifndef CONFIG_X86_NO_TSS
+ put_cpu();
++#endif
+
+ ret = KVM86->regs32;
+
+@@ -279,7 +285,9 @@ out:
+
+ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ struct tss_struct *tss;
++#endif
+ /*
+ * make sure the vm86() system call doesn't try to do anything silly
+ */
+@@ -324,12 +332,16 @@ static void do_sys_vm86(struct kernel_vm
+ tsk->thread.saved_fs = info->regs32->xfs;
+ savesegment(gs, tsk->thread.saved_gs);
+
++#ifndef CONFIG_X86_NO_TSS
+ tss = &per_cpu(init_tss, get_cpu());
++#endif
+ tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ if (cpu_has_sep)
+ tsk->thread.sysenter_cs = 0;
+ load_esp0(tss, &tsk->thread);
++#ifndef CONFIG_X86_NO_TSS
+ put_cpu();
++#endif
+
+ tsk->thread.screen_bitmap = info->screen_bitmap;
+ if (info->flags & VM86_SCREEN_BITMAP)
+--- linux-2.6.22.orig/arch/i386/Makefile
++++ linux-2.6.22/arch/i386/Makefile
+@@ -60,6 +60,11 @@ AFLAGS += $(call as-instr,.cfi_startproc
+
+ CFLAGS += $(cflags-y)
+
++cppflags-$(CONFIG_XEN) += \
++ -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION)
++
++CPPFLAGS += $(cppflags-y)
++
+ # Default subarch .c files
+ mcore-y := mach-default
+
+@@ -83,6 +88,10 @@ mcore-$(CONFIG_X86_BIGSMP) := mach-defau
+ mflags-$(CONFIG_X86_SUMMIT) := -Iinclude/asm-i386/mach-summit
+ mcore-$(CONFIG_X86_SUMMIT) := mach-default
+
++# Xen subarch support
++mflags-$(CONFIG_X86_XEN) := -Iinclude/asm-i386/mach-xen
++mcore-$(CONFIG_X86_XEN) := mach-xen
++
+ # generic subarchitecture
+ mflags-$(CONFIG_X86_GENERICARCH) := -Iinclude/asm-i386/mach-generic
+ mcore-$(CONFIG_X86_GENERICARCH) := mach-default
+@@ -118,6 +127,19 @@ boot := arch/i386/boot
+ PHONY += zImage bzImage compressed zlilo bzlilo \
+ zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
+
++ifdef CONFIG_XEN
++CPPFLAGS := -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
++head-y := arch/i386/kernel/head-xen.o arch/i386/kernel/init_task-xen.o
++boot := arch/i386/boot-xen
++.PHONY: vmlinuz
++all: vmlinuz
++
++vmlinuz: vmlinux
++ $(Q)$(MAKE) $(build)=$(boot) $@
++
++install:
++ $(Q)$(MAKE) $(build)=$(boot) XENGUEST=$(XENGUEST) $@
++else
+ all: bzImage
+
+ # KBUILD_IMAGE specify target image being built
+@@ -140,6 +162,7 @@ fdimage fdimage144 fdimage288 isoimage:
+
+ install:
+ $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
++endif
+
+ archclean:
+ $(Q)$(MAKE) $(clean)=arch/i386/boot
+@@ -158,3 +181,4 @@ endef
+ CLEAN_FILES += arch/$(ARCH)/boot/fdimage \
+ arch/$(ARCH)/boot/image.iso \
+ arch/$(ARCH)/boot/mtools.conf
++CLEAN_FILES += vmlinuz vmlinux-stripped
+--- linux-2.6.22.orig/arch/i386/mm/Makefile
++++ linux-2.6.22/arch/i386/mm/Makefile
+@@ -8,3 +8,11 @@ obj-$(CONFIG_NUMA) += discontig.o
+ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+ obj-$(CONFIG_HIGHMEM) += highmem.o
+ obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
++
++ifdef CONFIG_XEN
++include $(srctree)/scripts/Makefile.xen
++
++obj-y += hypervisor.o
++
++obj-y := $(call cherrypickxen, $(obj-y))
++endif
+--- linux-2.6.22.orig/arch/i386/oprofile/Makefile
++++ linux-2.6.22/arch/i386/oprofile/Makefile
+@@ -6,7 +6,14 @@ DRIVER_OBJS = $(addprefix ../../../drive
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
++ifdef CONFIG_XEN
++XENOPROF_COMMON_OBJS = $(addprefix ../../../drivers/xen/xenoprof/, \
++ xenoprofile.o)
++oprofile-y := $(DRIVER_OBJS) \
++ $(XENOPROF_COMMON_OBJS) xenoprof.o
++else
+ oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
+ oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_athlon.o \
+ op_model_ppro.o op_model_p4.o
+ oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o
++endif
+--- linux-2.6.22.orig/arch/i386/pci/Makefile
++++ linux-2.6.22/arch/i386/pci/Makefile
+@@ -4,6 +4,10 @@ obj-$(CONFIG_PCI_BIOS) += pcbios.o
+ obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o direct.o mmconfig-shared.o
+ obj-$(CONFIG_PCI_DIRECT) += direct.o
+
++# pcifront should be after pcbios.o, mmconfig.o, and direct.o as it should only
++# take over if direct access to the PCI bus is unavailable
++obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront.o
++
+ pci-y := fixup.o
+ pci-$(CONFIG_ACPI) += acpi.o
+ pci-y += legacy.o irq.o
+@@ -12,3 +16,8 @@ pci-$(CONFIG_X86_VISWS) := visws.o fixu
+ pci-$(CONFIG_X86_NUMAQ) := numa.o irq.o
+
+ obj-y += $(pci-y) common.o early.o
++
++ifdef CONFIG_XEN
++include $(srctree)/scripts/Makefile.xen
++obj-y := $(call cherrypickxen, $(obj-y))
++endif
+--- linux-2.6.22.orig/arch/i386/power/Makefile
++++ linux-2.6.22/arch/i386/power/Makefile
+@@ -1,2 +1,4 @@
+-obj-$(CONFIG_PM) += cpu.o
++obj-$(CONFIG_PM_LEGACY) += cpu.o
++obj-$(CONFIG_SOFTWARE_SUSPEND) += cpu.o
++obj-$(CONFIG_ACPI_SLEEP) += cpu.o
+ obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o suspend.o
+--- linux-2.6.22.orig/include/asm-i386/apic.h
++++ linux-2.6.22/include/asm-i386/apic.h
+@@ -111,7 +111,9 @@ extern int APIC_init_uniprocessor (void)
+
+ extern void enable_NMI_through_LVT0 (void * dummy);
+
++#ifndef CONFIG_XEN
+ #define ARCH_APICTIMER_STOPS_ON_C3 1
++#endif
+
+ extern int timer_over_8254;
+ extern int local_apic_timer_c2_ok;
+--- linux-2.6.22.orig/include/asm-i386/kexec.h
++++ linux-2.6.22/include/asm-i386/kexec.h
+@@ -94,6 +94,20 @@ relocate_kernel(unsigned long indirectio
+ unsigned long start_address,
+ unsigned int has_pae) ATTRIB_NORET;
+
++
++/* Under Xen we need to work with machine addresses. These macros give the
++ * machine address of a certain page to the generic kexec code instead of
++ * the pseudo physical address which would be given by the default macros.
++ */
++
++#ifdef CONFIG_XEN
++#define KEXEC_ARCH_HAS_PAGE_MACROS
++#define kexec_page_to_pfn(page) pfn_to_mfn(page_to_pfn(page))
++#define kexec_pfn_to_page(pfn) pfn_to_page(mfn_to_pfn(pfn))
++#define kexec_virt_to_phys(addr) virt_to_machine(addr)
++#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr))
++#endif
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* _I386_KEXEC_H */
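The comment added to asm-i386/kexec.h above rests on the pseudo-physical vs. machine address distinction. As a rough illustration only — a standalone userspace sketch with hypothetical table contents, not the kernel implementation — the kexec_* macros resolve a guest pfn to the real machine frame through the guest's phys-to-machine table, conceptually like this:

#include <stdio.h>

#define NR_GUEST_PFNS 4

/* hypothetical phys-to-machine table: guest pfn -> machine frame number */
static const unsigned long phys_to_machine[NR_GUEST_PFNS] = {
        0x1a2, 0x077, 0x3f0, 0x009
};

/* simplified stand-in for the real pfn_to_mfn() used by the macros above */
static unsigned long example_pfn_to_mfn(unsigned long pfn)
{
        return phys_to_machine[pfn];
}

int main(void)
{
        unsigned long pfn;

        /* guest-contiguous pfns map to scattered machine frames */
        for (pfn = 0; pfn < NR_GUEST_PFNS; pfn++)
                printf("guest pfn %lu -> machine frame %#lx\n",
                       pfn, example_pfn_to_mfn(pfn));
        return 0;
}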
diff --git a/trunk/2.6.22/20017_xen3-auto-arch-x86_64.patch1 b/trunk/2.6.22/20017_xen3-auto-arch-x86_64.patch1
new file mode 100644
index 0000000..c393a86
--- /dev/null
+++ b/trunk/2.6.22/20017_xen3-auto-arch-x86_64.patch1
@@ -0,0 +1,502 @@
+Subject: xen3 arch-x86_64
+From: http://xenbits.xensource.com/xen-3.1-testing.hg (tip 15042)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+Index: head-2007-09-03/arch/x86_64/ia32/Makefile
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/ia32/Makefile 2007-09-03 09:42:56.000000000 +0200
++++ head-2007-09-03/arch/x86_64/ia32/Makefile 2007-09-03 09:44:29.000000000 +0200
+@@ -27,9 +27,25 @@ quiet_cmd_syscall = SYSCALL $@
+ -Wl,-soname=linux-gate.so.1 -o $@ \
+ -Wl,-T,$(filter-out FORCE,$^)
+
++$(obj)/vsyscall-int80.so \
+ $(obj)/vsyscall-sysenter.so $(obj)/vsyscall-syscall.so: \
+ $(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
+ $(call if_changed,syscall)
+
+-AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32
+-AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32
++AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32 -Iarch/i386/kernel
++AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32 -Iarch/i386/kernel
++
++ifdef CONFIG_XEN
++AFLAGS_vsyscall-int80.o = -m32 -Wa,-32 -Iarch/i386/kernel
++CFLAGS_syscall32-xen.o += -DUSE_INT80
++AFLAGS_syscall32_syscall-xen.o += -DUSE_INT80
++
++$(obj)/syscall32_syscall-xen.o: \
++ $(foreach F,int80 sysenter syscall,$(obj)/vsyscall-$F.so)
++
++targets := $(foreach F,int80 sysenter syscall,vsyscall-$F.o vsyscall-$F.so)
++
++include $(srctree)/scripts/Makefile.xen
++
++obj-y := $(call cherrypickxen, $(obj-y))
++endif
+Index: head-2007-09-03/arch/x86_64/ia32/vsyscall-sigreturn.S
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/ia32/vsyscall-sigreturn.S 2007-09-03 09:42:56.000000000 +0200
++++ head-2007-09-03/arch/x86_64/ia32/vsyscall-sigreturn.S 2007-09-03 09:44:29.000000000 +0200
+@@ -139,5 +139,5 @@ __kernel_rt_sigreturn:
+ .align 4
+ .LENDFDE3:
+
+-#include "../../i386/kernel/vsyscall-note.S"
++#include <vsyscall-note.S>
+
+Index: head-2007-09-03/arch/x86_64/kernel/acpi/Makefile
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/kernel/acpi/Makefile 2007-09-03 09:42:56.000000000 +0200
++++ head-2007-09-03/arch/x86_64/kernel/acpi/Makefile 2007-09-03 09:44:29.000000000 +0200
+@@ -7,3 +7,4 @@ obj-y += processor.o
+ processor-y := ../../../i386/kernel/acpi/processor.o ../../../i386/kernel/acpi/cstate.o
+ endif
+
++boot-$(CONFIG_XEN) := ../../../i386/kernel/acpi/boot-xen.o
+Index: head-2007-09-03/arch/x86_64/kernel/asm-offsets.c
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/kernel/asm-offsets.c 2007-09-03 09:42:57.000000000 +0200
++++ head-2007-09-03/arch/x86_64/kernel/asm-offsets.c 2007-09-03 09:44:29.000000000 +0200
+@@ -75,8 +75,10 @@ int main(void)
+ DEFINE(pbe_address, offsetof(struct pbe, address));
+ DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
+ DEFINE(pbe_next, offsetof(struct pbe, next));
++#ifndef CONFIG_X86_NO_TSS
+ BLANK();
+ DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
++#endif
+ BLANK();
+ DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
+ BLANK();
+Index: head-2007-09-03/arch/x86_64/kernel/crash.c
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/kernel/crash.c 2007-09-03 09:42:57.000000000 +0200
++++ head-2007-09-03/arch/x86_64/kernel/crash.c 2007-09-03 09:44:29.000000000 +0200
+@@ -28,6 +28,7 @@
+ /* This keeps a track of which one is crashing cpu. */
+ static int crashing_cpu;
+
++#ifndef CONFIG_XEN
+ #ifdef CONFIG_SMP
+ static atomic_t waiting_for_crash_ipi;
+
+@@ -107,6 +108,7 @@ static void nmi_shootdown_cpus(void)
+ /* There are no cpus to shootdown */
+ }
+ #endif
++#endif /* CONFIG_XEN */
+
+ void machine_crash_shutdown(struct pt_regs *regs)
+ {
+@@ -124,12 +126,14 @@ void machine_crash_shutdown(struct pt_re
+
+ /* Make a note of crashing cpu. Will be used in NMI callback.*/
+ crashing_cpu = smp_processor_id();
++
++#ifndef CONFIG_XEN
+ nmi_shootdown_cpus();
+
+ if(cpu_has_apic)
+ disable_local_APIC();
+
+ disable_IO_APIC();
+-
++#endif /* CONFIG_XEN */
+ crash_save_cpu(regs, smp_processor_id());
+ }
+Index: head-2007-09-03/arch/x86_64/kernel/init_task.c
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/kernel/init_task.c 2007-09-03 09:42:57.000000000 +0200
++++ head-2007-09-03/arch/x86_64/kernel/init_task.c 2007-09-03 09:44:29.000000000 +0200
+@@ -37,6 +37,8 @@ union thread_union init_thread_union
+ struct task_struct init_task = INIT_TASK(init_task);
+
+ EXPORT_SYMBOL(init_task);
++
++#ifndef CONFIG_X86_NO_TSS
+ /*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+ * no more per-task TSS's. The TSS size is kept cacheline-aligned
+@@ -50,5 +52,6 @@ DEFINE_PER_CPU(struct tss_struct, init_t
+ * debugging, no special alignment required.
+ */
+ DEFINE_PER_CPU(struct orig_ist, orig_ist);
++#endif
+
+ #define ALIGN_TO_4K __attribute__((section(".data.init_task")))
+Index: head-2007-09-03/arch/x86_64/kernel/machine_kexec.c
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/kernel/machine_kexec.c 2007-09-03 09:42:57.000000000 +0200
++++ head-2007-09-03/arch/x86_64/kernel/machine_kexec.c 2007-09-03 09:44:29.000000000 +0200
+@@ -24,6 +24,104 @@ static u64 kexec_pud1[512] PAGE_ALIGNED;
+ static u64 kexec_pmd1[512] PAGE_ALIGNED;
+ static u64 kexec_pte1[512] PAGE_ALIGNED;
+
++#ifdef CONFIG_XEN
++
++/* In the case of Xen, override hypervisor functions to be able to create
++ * a regular identity mapping page table...
++ */
++
++#include <xen/interface/kexec.h>
++#include <xen/interface/memory.h>
++
++#define x__pmd(x) ((pmd_t) { (x) } )
++#define x__pud(x) ((pud_t) { (x) } )
++#define x__pgd(x) ((pgd_t) { (x) } )
++
++#define x_pmd_val(x) ((x).pmd)
++#define x_pud_val(x) ((x).pud)
++#define x_pgd_val(x) ((x).pgd)
++
++static inline void x_set_pmd(pmd_t *dst, pmd_t val)
++{
++ x_pmd_val(*dst) = x_pmd_val(val);
++}
++
++static inline void x_set_pud(pud_t *dst, pud_t val)
++{
++ x_pud_val(*dst) = phys_to_machine(x_pud_val(val));
++}
++
++static inline void x_pud_clear (pud_t *pud)
++{
++ x_pud_val(*pud) = 0;
++}
++
++static inline void x_set_pgd(pgd_t *dst, pgd_t val)
++{
++ x_pgd_val(*dst) = phys_to_machine(x_pgd_val(val));
++}
++
++static inline void x_pgd_clear (pgd_t * pgd)
++{
++ x_pgd_val(*pgd) = 0;
++}
++
++#define X__PAGE_KERNEL_LARGE_EXEC \
++ _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_PSE
++#define X_KERNPG_TABLE _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY
++
++#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
++
++#if PAGES_NR > KEXEC_XEN_NO_PAGES
++#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
++#endif
++
++#if PA_CONTROL_PAGE != 0
++#error PA_CONTROL_PAGE is non zero - Xen support will break
++#endif
++
++void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
++{
++ void *control_page;
++ void *table_page;
++
++ memset(xki->page_list, 0, sizeof(xki->page_list));
++
++ control_page = page_address(image->control_code_page) + PAGE_SIZE;
++ memcpy(control_page, relocate_kernel, PAGE_SIZE);
++
++ table_page = page_address(image->control_code_page);
++
++ xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
++ xki->page_list[PA_TABLE_PAGE] = __ma(table_page);
++
++ xki->page_list[PA_PGD] = __ma(kexec_pgd);
++ xki->page_list[PA_PUD_0] = __ma(kexec_pud0);
++ xki->page_list[PA_PUD_1] = __ma(kexec_pud1);
++ xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
++ xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
++ xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
++ xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
++}
++
++#else /* CONFIG_XEN */
++
++#define x__pmd(x) __pmd(x)
++#define x__pud(x) __pud(x)
++#define x__pgd(x) __pgd(x)
++
++#define x_set_pmd(x, y) set_pmd(x, y)
++#define x_set_pud(x, y) set_pud(x, y)
++#define x_set_pgd(x, y) set_pgd(x, y)
++
++#define x_pud_clear(x) pud_clear(x)
++#define x_pgd_clear(x) pgd_clear(x)
++
++#define X__PAGE_KERNEL_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
++#define X_KERNPG_TABLE _KERNPG_TABLE
++
++#endif /* CONFIG_XEN */
++
+ static void init_level2_page(pmd_t *level2p, unsigned long addr)
+ {
+ unsigned long end_addr;
+@@ -31,7 +129,7 @@ static void init_level2_page(pmd_t *leve
+ addr &= PAGE_MASK;
+ end_addr = addr + PUD_SIZE;
+ while (addr < end_addr) {
+- set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
++ x_set_pmd(level2p++, x__pmd(addr | X__PAGE_KERNEL_LARGE_EXEC));
+ addr += PMD_SIZE;
+ }
+ }
+@@ -56,12 +154,12 @@ static int init_level3_page(struct kimag
+ }
+ level2p = (pmd_t *)page_address(page);
+ init_level2_page(level2p, addr);
+- set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
++ x_set_pud(level3p++, x__pud(__pa(level2p) | X_KERNPG_TABLE));
+ addr += PUD_SIZE;
+ }
+ /* clear the unused entries */
+ while (addr < end_addr) {
+- pud_clear(level3p++);
++ x_pud_clear(level3p++);
+ addr += PUD_SIZE;
+ }
+ out:
+@@ -92,12 +190,12 @@ static int init_level4_page(struct kimag
+ if (result) {
+ goto out;
+ }
+- set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
++ x_set_pgd(level4p++, x__pgd(__pa(level3p) | X_KERNPG_TABLE));
+ addr += PGDIR_SIZE;
+ }
+ /* clear the unused entries */
+ while (addr < end_addr) {
+- pgd_clear(level4p++);
++ x_pgd_clear(level4p++);
+ addr += PGDIR_SIZE;
+ }
+ out:
+@@ -108,8 +206,14 @@ out:
+ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
+ {
+ pgd_t *level4p;
++ unsigned long x_end_pfn = end_pfn;
++
++#ifdef CONFIG_XEN
++ x_end_pfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
++#endif
++
+ level4p = (pgd_t *)__va(start_pgtable);
+- return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
++ return init_level4_page(image, level4p, 0, x_end_pfn << PAGE_SHIFT);
+ }
+
+ int machine_kexec_prepare(struct kimage *image)
+@@ -133,6 +237,7 @@ void machine_kexec_cleanup(struct kimage
+ return;
+ }
+
++#ifndef CONFIG_XEN
+ /*
+ * Do not allocate memory (or fail in any way) in machine_kexec().
+ * We are past the point of no return, committed to rebooting now.
+@@ -171,6 +276,7 @@ NORET_TYPE void machine_kexec(struct kim
+ relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
+ image->start);
+ }
++#endif
+
+ /* crashkernel=size@addr specifies the location to reserve for
+ * a crash kernel. By reserving this memory we guarantee
+Index: head-2007-09-03/arch/x86_64/kernel/Makefile
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/kernel/Makefile 2007-09-03 09:42:56.000000000 +0200
++++ head-2007-09-03/arch/x86_64/kernel/Makefile 2007-09-03 09:44:29.000000000 +0200
+@@ -22,10 +22,12 @@ obj-$(CONFIG_MICROCODE) += microcode.o
+ obj-$(CONFIG_X86_CPUID) += cpuid.o
+ obj-$(CONFIG_SMP) += smp.o smpboot.o trampoline.o tsc_sync.o
+ obj-y += apic.o nmi.o
++obj-$(CONFIG_X86_XEN_GENAPIC) += genapic.o genapic_xen.o
+ obj-y += io_apic.o mpparse.o genapic.o genapic_flat.o
+ obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
+ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+-obj-$(CONFIG_PM) += suspend.o
++obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend.o
++obj-$(CONFIG_ACPI_SLEEP) += suspend.o
+ obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
+ obj-$(CONFIG_CPU_FREQ) += cpufreq/
+ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+@@ -59,3 +61,19 @@ msr-$(subst m,y,$(CONFIG_X86_MSR)) += .
+ alternative-y += ../../i386/kernel/alternative.o
+ pcspeaker-y += ../../i386/kernel/pcspeaker.o
+ perfctr-watchdog-y += ../../i386/kernel/cpu/perfctr-watchdog.o
++
++ifdef CONFIG_XEN
++time-y += ../../i386/kernel/time-xen.o
++pci-dma-y += ../../i386/kernel/pci-dma-xen.o
++microcode-$(subst m,y,$(CONFIG_MICROCODE)) := ../../i386/kernel/microcode-xen.o
++quirks-y := ../../i386/kernel/quirks-xen.o
++
++n-obj-xen := i8259.o reboot.o i8237.o smpboot.o trampoline.o
++
++include $(srctree)/scripts/Makefile.xen
++
++obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
++obj-y := $(call cherrypickxen, $(obj-y))
++extra-y := $(call cherrypickxen, $(extra-y))
++%/head-xen.o %/head-xen.s: EXTRA_AFLAGS :=
++endif
+Index: head-2007-09-03/arch/x86_64/Makefile
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/Makefile 2007-09-03 09:42:56.000000000 +0200
++++ head-2007-09-03/arch/x86_64/Makefile 2007-09-03 09:44:29.000000000 +0200
+@@ -36,6 +36,10 @@ cflags-$(CONFIG_MCORE2) += \
+ $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
+ cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+
++cppflags-$(CONFIG_XEN) += \
++ -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION)
++CPPFLAGS += $(cppflags-y)
++
+ cflags-y += -m64
+ cflags-y += -mno-red-zone
+ cflags-y += -mcmodel=kernel
+@@ -87,6 +91,21 @@ boot := arch/x86_64/boot
+ PHONY += bzImage bzlilo install archmrproper \
+ fdimage fdimage144 fdimage288 isoimage archclean
+
++ifdef CONFIG_XEN
++CPPFLAGS := -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
++head-y := arch/x86_64/kernel/head-xen.o arch/x86_64/kernel/head64-xen.o arch/x86_64/kernel/init_task.o
++LDFLAGS_vmlinux := -e _start
++boot := arch/i386/boot-xen
++.PHONY: vmlinuz
++#Default target when executing "make"
++all: vmlinuz
++
++vmlinuz: vmlinux
++ $(Q)$(MAKE) $(build)=$(boot) $@
++
++install:
++ $(Q)$(MAKE) $(build)=$(boot) XENGUEST=$(XENGUEST) $@
++else
+ #Default target when executing "make"
+ all: bzImage
+
+@@ -107,6 +126,7 @@ fdimage fdimage144 fdimage288 isoimage:
+
+ install:
+ $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
++endif
+
+ archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
+Index: head-2007-09-03/arch/x86_64/mm/Makefile
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/mm/Makefile 2007-09-03 09:42:57.000000000 +0200
++++ head-2007-09-03/arch/x86_64/mm/Makefile 2007-09-03 09:44:29.000000000 +0200
+@@ -9,3 +9,13 @@ obj-$(CONFIG_K8_NUMA) += k8topology.o
+ obj-$(CONFIG_ACPI_NUMA) += srat.o
+
+ hugetlbpage-y = ../../i386/mm/hugetlbpage.o
++
++ifdef CONFIG_XEN
++include $(srctree)/scripts/Makefile.xen
++
++ioremap-y += ../../i386/mm/ioremap-xen.o
++hypervisor-y += ../../i386/mm/hypervisor.o
++obj-y += hypervisor.o
++
++obj-y := $(call cherrypickxen, $(obj-y))
++endif
+Index: head-2007-09-03/arch/x86_64/oprofile/Makefile
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/oprofile/Makefile 2007-09-03 09:42:57.000000000 +0200
++++ head-2007-09-03/arch/x86_64/oprofile/Makefile 2007-09-03 09:44:29.000000000 +0200
+@@ -11,9 +11,15 @@ DRIVER_OBJS = $(addprefix ../../../drive
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
++ifdef CONFIG_XEN
++XENOPROF_COMMON_OBJS = $(addprefix ../../../drivers/xen/xenoprof/, \
++ xenoprofile.o)
++OPROFILE-y := xenoprof.o
++else
+ OPROFILE-y := init.o backtrace.o
+ OPROFILE-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_athlon.o op_model_p4.o \
+ op_model_ppro.o
+ OPROFILE-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o
+-
+-oprofile-y = $(DRIVER_OBJS) $(addprefix ../../i386/oprofile/, $(OPROFILE-y))
++endif
++oprofile-y = $(DRIVER_OBJS) $(XENOPROF_COMMON_OBJS) \
++ $(addprefix ../../i386/oprofile/, $(OPROFILE-y))
+Index: head-2007-09-03/arch/x86_64/pci/Makefile
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/pci/Makefile 2007-09-03 09:42:57.000000000 +0200
++++ head-2007-09-03/arch/x86_64/pci/Makefile 2007-09-03 09:44:29.000000000 +0200
+@@ -15,8 +15,13 @@ obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
+
+ obj-$(CONFIG_NUMA) += k8-bus.o
+
++# pcifront should be after mmconfig.o and direct.o as it should only
++# take over if direct access to the PCI bus is unavailable
++obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront.o
++
+ direct-y += ../../i386/pci/direct.o
+ acpi-y += ../../i386/pci/acpi.o
++pcifront-y += ../../i386/pci/pcifront.o
+ legacy-y += ../../i386/pci/legacy.o
+ irq-y += ../../i386/pci/irq.o
+ common-y += ../../i386/pci/common.o
+@@ -25,3 +30,10 @@ i386-y += ../../i386/pci/i386.o
+ init-y += ../../i386/pci/init.o
+ early-y += ../../i386/pci/early.o
+ mmconfig-shared-y += ../../i386/pci/mmconfig-shared.o
++
++ifdef CONFIG_XEN
++irq-y := ../../i386/pci/irq-xen.o
++include $(srctree)/scripts/Makefile.xen
++
++obj-y := $(call cherrypickxen, $(obj-y))
++endif
+Index: head-2007-09-03/include/asm-x86_64/apic.h
+===================================================================
+--- head-2007-09-03.orig/include/asm-x86_64/apic.h 2007-09-03 09:42:57.000000000 +0200
++++ head-2007-09-03/include/asm-x86_64/apic.h 2007-09-03 09:44:29.000000000 +0200
+@@ -93,11 +93,13 @@ extern void setup_APIC_extened_lvt(unsig
+ #define K8_APIC_EXT_INT_MSG_EXT 0x7
+ #define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0
+
++#ifndef CONFIG_XEN
+ void smp_send_timer_broadcast_ipi(void);
+ void switch_APIC_timer_to_ipi(void *cpumask);
+ void switch_ipi_to_APIC_timer(void *cpumask);
+
+ #define ARCH_APICTIMER_STOPS_ON_C3 1
++#endif
+
+ extern unsigned boot_cpu_id;
+ extern int local_apic_timer_c2_ok;
+Index: head-2007-09-03/include/asm-x86_64/kexec.h
+===================================================================
+--- head-2007-09-03.orig/include/asm-x86_64/kexec.h 2007-09-03 09:42:57.000000000 +0200
++++ head-2007-09-03/include/asm-x86_64/kexec.h 2007-09-03 09:44:29.000000000 +0200
+@@ -89,6 +89,19 @@ relocate_kernel(unsigned long indirectio
+ unsigned long page_list,
+ unsigned long start_address) ATTRIB_NORET;
+
++/* Under Xen we need to work with machine addresses. These macros give the
++ * machine address of a certain page to the generic kexec code instead of
++ * the pseudo physical address which would be given by the default macros.
++ */
++
++#ifdef CONFIG_XEN
++#define KEXEC_ARCH_HAS_PAGE_MACROS
++#define kexec_page_to_pfn(page) pfn_to_mfn(page_to_pfn(page))
++#define kexec_pfn_to_page(pfn) pfn_to_page(mfn_to_pfn(pfn))
++#define kexec_virt_to_phys(addr) virt_to_machine(addr)
++#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr))
++#endif
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* _X86_64_KEXEC_H */
diff --git a/trunk/2.6.22/20018_15130-x86_64-vsyscall-user.patch1 b/trunk/2.6.22/20018_15130-x86_64-vsyscall-user.patch1
new file mode 100644
index 0000000..b02ced9
--- /dev/null
+++ b/trunk/2.6.22/20018_15130-x86_64-vsyscall-user.patch1
@@ -0,0 +1,51 @@
+# HG changesets 15130+15170 patch
+# User Ian Campbell <ian.campbell@xensource.com>
+# Node ID a40967e39652fee1edebfddec4b533bdded923a1
+# Parent f6928d6369999cd063edd361d592579c2483196b
+Subject: LINUX/x86_64: Ensure that the initial page tables allow userspace
+mappings of the vsyscall page. This matches native behaviour by
+setting the U bit on the L2-L4 page table entries and controlling
+access using the L1 entries.
+
+Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
+
+LINUX/x86_64: Cleanup 15129:a40967e39652 by using __pgd rather than
+abusing mk_kernel_pgd.
+
+Also set the user bit on the vsyscall entry in the user pgd.
+
+Both changes suggested by Jan Beulich.
+
+Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
+
+Acked-by: jbeulich@novell.com
+
+---
+ arch/x86_64/mm/init-xen.c | 2 +-
+ include/asm-x86_64/mach-xen/asm/pgalloc.h | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86_64/mm/init-xen.c 2007-08-27 14:01:25.000000000 -0400
++++ b/arch/x86_64/mm/init-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -580,7 +580,7 @@ void __init xen_init_pt(void)
+
+ /* Construct mapping of initial pte page in our own directories. */
+ init_level4_pgt[pgd_index(__START_KERNEL_map)] =
+- mk_kernel_pgd(__pa_symbol(level3_kernel_pgt));
++ __pgd(__pa_symbol(level3_kernel_pgt) | _PAGE_TABLE);
+ level3_kernel_pgt[pud_index(__START_KERNEL_map)] =
+ __pud(__pa_symbol(level2_kernel_pgt) |
+ _KERNPG_TABLE);
+--- a/include/asm-x86_64/mach-xen/asm/pgalloc.h 2007-08-27 14:01:25.000000000 -0400
++++ b/include/asm-x86_64/mach-xen/asm/pgalloc.h 2007-08-27 14:01:58.000000000 -0400
+@@ -146,8 +146,8 @@ static inline pgd_t *pgd_alloc(struct mm
+ /*
+ * Set level3_user_pgt for vsyscall area
+ */
+- set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START),
+- mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
++ set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START),
++ __pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE));
+ return pgd;
+ }
+
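The cleanup above swaps mk_kernel_pgd() for an explicit __pgd(... | _PAGE_TABLE) precisely so the user bit ends up set on the upper-level entries. As a minimal sketch — not part of the patch; flag values as defined in the 2.6.22 x86 headers, with _PAGE_TABLE written here as _KERNPG_TABLE plus the user bit — the difference between the two flag sets is just _PAGE_USER:

#include <stdio.h>

#define _PAGE_PRESENT   0x001
#define _PAGE_RW        0x002
#define _PAGE_USER      0x004
#define _PAGE_ACCESSED  0x020
#define _PAGE_DIRTY     0x040

#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_TABLE     (_KERNPG_TABLE | _PAGE_USER)

int main(void)
{
        /* entries built with _KERNPG_TABLE keep userspace out... */
        printf("_KERNPG_TABLE has the user bit: %s\n",
               (_KERNPG_TABLE & _PAGE_USER) ? "yes" : "no");
        /* ...while _PAGE_TABLE lets userspace reach the vsyscall page */
        printf("_PAGE_TABLE   has the user bit: %s\n",
               (_PAGE_TABLE & _PAGE_USER) ? "yes" : "no");
        return 0;
}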
diff --git a/trunk/2.6.22/20019_15181-dma-tracking.patch1 b/trunk/2.6.22/20019_15181-dma-tracking.patch1
new file mode 100644
index 0000000..2bf8906
--- /dev/null
+++ b/trunk/2.6.22/20019_15181-dma-tracking.patch1
@@ -0,0 +1,551 @@
+# HG changeset 15181+33+41 patch
+# User kfraser@localhost.localdomain
+# Date 1180518373 -3600
+# Node ID 45f939d0c72493d237783419996bbca0132551df
+# Parent 1f7a6456c330272a3cec13b31fc1ba9b4db898ec
+Subject: gnttab: Add basic DMA tracking
+
+This patch adds basic tracking of outstanding DMA requests on
+grant table entries marked as PageForeign.
+
+When a PageForeign struct page is about to be mapped for DMA,
+we set its map count to 1 (or zero in actual value). This is
+then checked for when we need to free a grant table entry early
+to ensure that we don't free an entry that's currently used for
+DMA.
+
+So any entry that has been marked for DMA will not be freed early.
+
+If the unmapping API had a struct page (which exists for the sg
+case) then we could do this properly.
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+
+(added the interfacing bits from 15180)
+
+Subject: gnttab: Fix copy_grant_page race with seqlock
+
+Previously gnttab_copy_grant_page would always unmap the grant table
+entry, even if DMA operations were outstanding. This would allow a
+hostile guest to free a page still used by DMA to the hypervisor.
+
+This patch fixes this by making sure that we don't free the grant
+table entry if a DMA operation has taken place. To achieve this a
+seqlock is used to synchronise the DMA operations and
+copy_grant_page.
+
+The DMA operations use the read side of the seqlock so performance
+should be largely unaffected.
+
+Thanks to Isaku Yamahata for noticing the race condition.
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+
+Subject: Make dma address conversion logic of gnttab dma arch specific.
+
+gnttab_dma_map_page() and gnttab_dma_unmap_page() use machine addresses
+and dma addresses interchangeably. However, it doesn't work with auto
+translated mode enabled (i.e. on ia64) because
+
+- bus address space(dma_addr_t) is different from machine address
+ space(maddr_t).
+ With the terminology in xen/include/public/mm.h,
+ dma_addr_t is maddr and maddr_t is gmaddr.
+ So they should be handled differently with auto translated physmap
+ mode enabled.
+
+- dma address conversion depends on dma api implementation and
+ its paravirtualization.
+ "pfn_valid(mfn_to_local_pfn(maddr >> PAGE_SHIFT)" check in
+ gnttab_dma_map_page() doesn't make sense with auto translate physmap
+ mode enabled.
+
+To address those issues, split those logic from gnttab_dma_map_page()
+and gnttab_dma_unmap_page(), and put it into arch specific files.
+This patch doesn't change the already existing x86 logic.
+
+Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
+
+Acked-by: jbeulich@novell.com
+
+---
+ arch/i386/kernel/pci-dma-xen.c | 19 +++-
+ arch/i386/kernel/swiotlb.c | 27 ++++-
+ drivers/xen/core/gnttab.c | 124 +++++++++++++++++++++++++++
+ include/asm-i386/mach-xen/asm/gnttab_dma.h | 41 ++++++++
+ include/asm-x86_64/mach-xen/asm/gnttab_dma.h | 1
+ include/xen/gnttab.h | 28 ++++++
+ include/xen/interface/grant_table.h | 23 +++++
+ 7 files changed, 252 insertions(+), 11 deletions(-)
+
+--- a/arch/i386/kernel/pci-dma-xen.c 2007-08-27 14:01:24.000000000 -0400
++++ b/arch/i386/kernel/pci-dma-xen.c 2007-08-27 14:02:07.000000000 -0400
+@@ -15,9 +15,11 @@
+ #include <linux/version.h>
+ #include <asm/io.h>
+ #include <xen/balloon.h>
++#include <xen/gnttab.h>
+ #include <asm/swiotlb.h>
+ #include <asm/tlbflush.h>
+ #include <asm-i386/mach-xen/asm/swiotlb.h>
++#include <asm-i386/mach-xen/asm/gnttab_dma.h>
+ #include <asm/bug.h>
+
+ #ifdef __x86_64__
+@@ -90,7 +92,7 @@ dma_map_sg(struct device *hwdev, struct
+ } else {
+ for (i = 0; i < nents; i++ ) {
+ sg[i].dma_address =
+- page_to_bus(sg[i].page) + sg[i].offset;
++ gnttab_dma_map_page(sg[i].page) + sg[i].offset;
+ sg[i].dma_length = sg[i].length;
+ BUG_ON(!sg[i].page);
+ IOMMU_BUG_ON(address_needs_mapping(
+@@ -108,9 +110,15 @@ void
+ dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+ {
++ int i;
++
+ BUG_ON(direction == DMA_NONE);
+ if (swiotlb)
+ swiotlb_unmap_sg(hwdev, sg, nents, direction);
++ else {
++ for (i = 0; i < nents; i++ )
++ gnttab_dma_unmap_page(sg[i].dma_address);
++ }
+ }
+ EXPORT_SYMBOL(dma_unmap_sg);
+
+@@ -127,7 +135,7 @@ dma_map_page(struct device *dev, struct
+ dma_addr = swiotlb_map_page(
+ dev, page, offset, size, direction);
+ } else {
+- dma_addr = page_to_bus(page) + offset;
++ dma_addr = gnttab_dma_map_page(page) + offset;
+ IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
+ }
+
+@@ -142,6 +150,8 @@ dma_unmap_page(struct device *dev, dma_a
+ BUG_ON(direction == DMA_NONE);
+ if (swiotlb)
+ swiotlb_unmap_page(dev, dma_address, size, direction);
++ else
++ gnttab_dma_unmap_page(dma_address);
+ }
+ EXPORT_SYMBOL(dma_unmap_page);
+ #endif /* CONFIG_HIGHMEM */
+@@ -326,7 +336,8 @@ dma_map_single(struct device *dev, void
+ if (swiotlb) {
+ dma = swiotlb_map_single(dev, ptr, size, direction);
+ } else {
+- dma = virt_to_bus(ptr);
++ dma = gnttab_dma_map_page(virt_to_page(ptr)) +
++ offset_in_page(ptr);
+ IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
+ IOMMU_BUG_ON(address_needs_mapping(dev, dma));
+ }
+@@ -344,6 +355,8 @@ dma_unmap_single(struct device *dev, dma
+ BUG();
+ if (swiotlb)
+ swiotlb_unmap_single(dev, dma_addr, size, direction);
++ else
++ gnttab_dma_unmap_page(dma_addr);
+ }
+ EXPORT_SYMBOL(dma_unmap_single);
+
+--- a/arch/i386/kernel/swiotlb.c 2007-08-27 14:01:25.000000000 -0400
++++ b/arch/i386/kernel/swiotlb.c 2007-08-27 14:02:07.000000000 -0400
+@@ -25,15 +25,15 @@
+ #include <asm/pci.h>
+ #include <asm/dma.h>
+ #include <asm/uaccess.h>
++#include <xen/gnttab.h>
+ #include <xen/interface/memory.h>
++#include <asm-i386/mach-xen/asm/gnttab_dma.h>
+
+ int swiotlb;
+ EXPORT_SYMBOL(swiotlb);
+
+ #define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))
+
+-#define SG_ENT_PHYS_ADDRESS(sg) (page_to_bus((sg)->page) + (sg)->offset)
+-
+ /*
+ * Maximum allowable number of contiguous slabs to map,
+ * must be a power of 2. What is the appropriate value ?
+@@ -468,7 +468,8 @@ swiotlb_full(struct device *dev, size_t
+ dma_addr_t
+ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+ {
+- dma_addr_t dev_addr = virt_to_bus(ptr);
++ dma_addr_t dev_addr = gnttab_dma_map_page(virt_to_page(ptr)) +
++ offset_in_page(ptr);
+ void *map;
+ struct phys_addr buffer;
+
+@@ -486,6 +487,7 @@ swiotlb_map_single(struct device *hwdev,
+ /*
+ * Oh well, have to allocate and map a bounce buffer.
+ */
++ gnttab_dma_unmap_page(dev_addr);
+ buffer.page = virt_to_page(ptr);
+ buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
+ map = map_single(hwdev, buffer, size, dir);
+@@ -513,6 +515,8 @@ swiotlb_unmap_single(struct device *hwde
+ BUG_ON(dir == DMA_NONE);
+ if (in_swiotlb_aperture(dev_addr))
+ unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
++ else
++ gnttab_dma_unmap_page(dev_addr);
+ }
+
+ /*
+@@ -571,8 +575,10 @@ swiotlb_map_sg(struct device *hwdev, str
+ BUG_ON(dir == DMA_NONE);
+
+ for (i = 0; i < nelems; i++, sg++) {
+- dev_addr = SG_ENT_PHYS_ADDRESS(sg);
++ dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
++
+ if (address_needs_mapping(hwdev, dev_addr)) {
++ gnttab_dma_unmap_page(dev_addr);
+ buffer.page = sg->page;
+ buffer.offset = sg->offset;
+ map = map_single(hwdev, buffer, sg->length, dir);
+@@ -605,10 +611,12 @@ swiotlb_unmap_sg(struct device *hwdev, s
+ BUG_ON(dir == DMA_NONE);
+
+ for (i = 0; i < nelems; i++, sg++)
+- if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++ if (in_swiotlb_aperture(sg->dma_address))
+ unmap_single(hwdev,
+ (void *)bus_to_virt(sg->dma_address),
+ sg->dma_length, dir);
++ else
++ gnttab_dma_unmap_page(sg->dma_address);
+ }
+
+ /*
+@@ -627,7 +635,7 @@ swiotlb_sync_sg_for_cpu(struct device *h
+ BUG_ON(dir == DMA_NONE);
+
+ for (i = 0; i < nelems; i++, sg++)
+- if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++ if (in_swiotlb_aperture(sg->dma_address))
+ sync_single(hwdev,
+ (void *)bus_to_virt(sg->dma_address),
+ sg->dma_length, dir);
+@@ -642,7 +650,7 @@ swiotlb_sync_sg_for_device(struct device
+ BUG_ON(dir == DMA_NONE);
+
+ for (i = 0; i < nelems; i++, sg++)
+- if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++ if (in_swiotlb_aperture(sg->dma_address))
+ sync_single(hwdev,
+ (void *)bus_to_virt(sg->dma_address),
+ sg->dma_length, dir);
+@@ -659,8 +667,9 @@ swiotlb_map_page(struct device *hwdev, s
+ dma_addr_t dev_addr;
+ char *map;
+
+- dev_addr = page_to_bus(page) + offset;
++ dev_addr = gnttab_dma_map_page(page) + offset;
+ if (address_needs_mapping(hwdev, dev_addr)) {
++ gnttab_dma_unmap_page(dev_addr);
+ buffer.page = page;
+ buffer.offset = offset;
+ map = map_single(hwdev, buffer, size, direction);
+@@ -681,6 +690,8 @@ swiotlb_unmap_page(struct device *hwdev,
+ BUG_ON(direction == DMA_NONE);
+ if (in_swiotlb_aperture(dma_address))
+ unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
++ else
++ gnttab_dma_unmap_page(dma_address);
+ }
+
+ #endif
+--- a/drivers/xen/core/gnttab.c 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/core/gnttab.c 2007-08-27 14:01:25.000000000 -0400
+@@ -34,6 +34,7 @@
+ #include <linux/module.h>
+ #include <linux/sched.h>
+ #include <linux/mm.h>
++#include <linux/seqlock.h>
+ #include <xen/interface/xen.h>
+ #include <xen/gnttab.h>
+ #include <asm/pgtable.h>
+@@ -42,6 +43,7 @@
+ #include <asm/io.h>
+ #include <xen/interface/memory.h>
+ #include <xen/driver_util.h>
++#include <asm/gnttab_dma.h>
+
+ #ifdef HAVE_XEN_PLATFORM_COMPAT_H
+ #include <xen/platform-compat.h>
+@@ -63,6 +65,8 @@ static struct grant_entry *shared;
+
+ static struct gnttab_free_callback *gnttab_free_callback_list;
+
++static DEFINE_SEQLOCK(gnttab_dma_lock);
++
+ static int gnttab_expand(unsigned int req_entries);
+
+ #define RPP (PAGE_SIZE / sizeof(grant_ref_t))
+@@ -490,6 +494,126 @@ static int gnttab_map(unsigned int start
+ return 0;
+ }
+
++static void gnttab_page_free(struct page *page)
++{
++ ClearPageForeign(page);
++ gnttab_reset_grant_page(page);
++ put_page(page);
++}
++
++/*
++ * Must not be called with IRQs off. This should only be used on the
++ * slow path.
++ *
++ * Copy a foreign granted page to local memory.
++ */
++int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep)
++{
++ struct gnttab_unmap_and_replace unmap;
++ mmu_update_t mmu;
++ struct page *page;
++ struct page *new_page;
++ void *new_addr;
++ void *addr;
++ paddr_t pfn;
++ maddr_t mfn;
++ maddr_t new_mfn;
++ int err;
++
++ page = *pagep;
++ if (!get_page_unless_zero(page))
++ return -ENOENT;
++
++ err = -ENOMEM;
++ new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
++ if (!new_page)
++ goto out;
++
++ new_addr = page_address(new_page);
++ addr = page_address(page);
++ memcpy(new_addr, addr, PAGE_SIZE);
++
++ pfn = page_to_pfn(page);
++ mfn = pfn_to_mfn(pfn);
++ new_mfn = virt_to_mfn(new_addr);
++
++ write_seqlock(&gnttab_dma_lock);
++
++ /* Make seq visible before checking page_mapped. */
++ smp_mb();
++
++ /* Has the page been DMA-mapped? */
++ if (unlikely(page_mapped(page))) {
++ write_sequnlock(&gnttab_dma_lock);
++ put_page(new_page);
++ err = -EBUSY;
++ goto out;
++ }
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap))
++ set_phys_to_machine(pfn, new_mfn);
++
++ gnttab_set_replace_op(&unmap, (unsigned long)addr,
++ (unsigned long)new_addr, ref);
++
++ err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
++ &unmap, 1);
++ BUG_ON(err);
++ BUG_ON(unmap.status);
++
++ write_sequnlock(&gnttab_dma_lock);
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);
++
++ mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
++ mmu.val = pfn;
++ err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF);
++ BUG_ON(err);
++ }
++
++ new_page->mapping = page->mapping;
++ new_page->index = page->index;
++ set_bit(PG_foreign, &new_page->flags);
++ *pagep = new_page;
++
++ SetPageForeign(page, gnttab_page_free);
++ page->mapping = NULL;
++
++out:
++ put_page(page);
++ return err;
++}
++EXPORT_SYMBOL(gnttab_copy_grant_page);
++
++/*
++ * Keep track of foreign pages marked as PageForeign so that we don't
++ * return them to the remote domain prematurely.
++ *
++ * PageForeign pages are pinned down by increasing their mapcount.
++ *
++ * All other pages are simply returned as is.
++ */
++void __gnttab_dma_map_page(struct page *page)
++{
++ unsigned int seq;
++
++ if (!is_running_on_xen() || !PageForeign(page))
++ return;
++
++ do {
++ seq = read_seqbegin(&gnttab_dma_lock);
++
++ if (gnttab_dma_local_pfn(page))
++ break;
++
++ atomic_set(&page->_mapcount, 0);
++
++ /* Make _mapcount visible before read_seqretry. */
++ smp_mb();
++ } while (unlikely(read_seqretry(&gnttab_dma_lock, seq)));
++}
++
+ int gnttab_resume(void)
+ {
+ if (max_nr_grant_frames() < nr_grant_frames)
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-i386/mach-xen/asm/gnttab_dma.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1,41 @@
++/*
++ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
++ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef _ASM_I386_GNTTAB_DMA_H
++#define _ASM_I386_GNTTAB_DMA_H
++
++static inline int gnttab_dma_local_pfn(struct page *page)
++{
++ /* Has it become a local MFN? */
++ return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page))));
++}
++
++static inline maddr_t gnttab_dma_map_page(struct page *page)
++{
++ __gnttab_dma_map_page(page);
++ return page_to_bus(page);
++}
++
++static inline void gnttab_dma_unmap_page(maddr_t maddr)
++{
++ __gnttab_dma_unmap_page(virt_to_page(bus_to_virt(maddr)));
++}
++
++#endif /* _ASM_I386_GNTTAB_DMA_H */
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/asm-x86_64/mach-xen/asm/gnttab_dma.h 2007-08-27 14:01:25.000000000 -0400
+@@ -0,0 +1 @@
++#include <asm-i386/mach-xen/asm/gnttab_dma.h>
+--- a/include/xen/gnttab.h 2007-08-27 14:01:25.000000000 -0400
++++ b/include/xen/gnttab.h 2007-08-27 14:01:25.000000000 -0400
+@@ -39,6 +39,7 @@
+
+ #include <asm/hypervisor.h>
+ #include <asm/maddr.h> /* maddr_t */
++#include <linux/mm.h>
+ #include <xen/interface/grant_table.h>
+ #include <xen/features.h>
+
+@@ -101,6 +102,18 @@ void gnttab_grant_foreign_access_ref(gra
+ void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
+ unsigned long pfn);
+
++int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep);
++void __gnttab_dma_map_page(struct page *page);
++static inline void __gnttab_dma_unmap_page(struct page *page)
++{
++}
++
++static inline void gnttab_reset_grant_page(struct page *page)
++{
++ init_page_count(page);
++ reset_page_mapcount(page);
++}
++
+ int gnttab_suspend(void);
+ int gnttab_resume(void);
+
+@@ -135,4 +148,19 @@ gnttab_set_unmap_op(struct gnttab_unmap_
+ unmap->dev_bus_addr = 0;
+ }
+
++static inline void
++gnttab_set_replace_op(struct gnttab_unmap_and_replace *unmap, maddr_t addr,
++ maddr_t new_addr, grant_handle_t handle)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ unmap->host_addr = __pa(addr);
++ unmap->new_addr = __pa(new_addr);
++ } else {
++ unmap->host_addr = addr;
++ unmap->new_addr = new_addr;
++ }
++
++ unmap->handle = handle;
++}
++
+ #endif /* __ASM_GNTTAB_H__ */
+--- a/include/xen/interface/grant_table.h 2007-08-27 14:01:25.000000000 -0400
++++ b/include/xen/interface/grant_table.h 2007-08-27 14:01:25.000000000 -0400
+@@ -328,6 +328,29 @@ struct gnttab_query_size {
+ typedef struct gnttab_query_size gnttab_query_size_t;
+ DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
+
++/*
++ * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
++ * tracked by <handle> but atomically replace the page table entry with one
++ * pointing to the machine address under <new_addr>. <new_addr> will be
++ * redirected to the null entry.
++ * NOTES:
++ * 1. The call may fail in an undefined manner if either mapping is not
++ * tracked by <handle>.
++ * 2. After executing a batch of unmaps, it is guaranteed that no stale
++ * mappings will remain in the device or host TLBs.
++ */
++#define GNTTABOP_unmap_and_replace 7
++struct gnttab_unmap_and_replace {
++ /* IN parameters. */
++ uint64_t host_addr;
++ uint64_t new_addr;
++ grant_handle_t handle;
++ /* OUT parameters. */
++ int16_t status; /* GNTST_* */
++};
++typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
++
+
+ /*
+ * Bitfield values for update_pin_status.flags.
diff --git a/trunk/2.6.22/20020_30-bit-field-booleans.patch1 b/trunk/2.6.22/20020_30-bit-field-booleans.patch1
new file mode 100644
index 0000000..2d752a4
--- /dev/null
+++ b/trunk/2.6.22/20020_30-bit-field-booleans.patch1
@@ -0,0 +1,38 @@
+# HG changeset 30 patch
+# User kfraser@localhost.localdomain
+# Date 1180964151 -3600
+# Node ID 45dfe4cfc5ef81f158cbf301a10939ed66dcc483
+# Parent 2bd50dc2ffbcae334c8d36f64f1ff09330242394
+Subject: netback: Bit-field booleans must be unsigned.
+Signed-off-by: Keir Fraser <keir@xensource.com>
+Acked-by: carnold@novell.com
+References: 306896
+
+Index: linux-2.6.22/drivers/xen/netback/common.h
+===================================================================
+--- linux-2.6.22.orig/drivers/xen/netback/common.h
++++ linux-2.6.22/drivers/xen/netback/common.h
+@@ -78,8 +78,8 @@ typedef struct netif_st {
+ int features;
+
+ /* Internal feature information. */
+- int can_queue:1; /* can queue packets for receiver? */
+- int copying_receiver:1; /* copy packets to receiver? */
++ u8 can_queue:1; /* can queue packets for receiver? */
++ u8 copying_receiver:1; /* copy packets to receiver? */
+
+ /* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
+ RING_IDX rx_req_cons_peek;
+Index: linux-2.6.22/drivers/xen/netback/netback.c
+===================================================================
+--- linux-2.6.22.orig/drivers/xen/netback/netback.c
++++ linux-2.6.22/drivers/xen/netback/netback.c
+@@ -46,7 +46,7 @@
+ struct netbk_rx_meta {
+ skb_frag_t frag;
+ int id;
+- int copy:1;
++ u8 copy:1;
+ };
+
+ static void netif_idx_release(u16 pending_idx);
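The switch from plain int to u8 for these 1-bit flags is more than cosmetic. As an illustrative sketch — not part of the patchset; the signed case is implementation-defined, and the values noted are what GCC on x86 typically produces — a signed 1-bit bit-field can only hold 0 and -1, so tests like "flag == 1" silently fail, while an unsigned bit-field behaves as a proper boolean:

#include <stdio.h>

struct signed_flags   { int flag:1; };          /* like the old "int copy:1" */
struct unsigned_flags { unsigned int flag:1; }; /* like the new "u8 copy:1"  */

int main(void)
{
        int one = 1;
        struct signed_flags   s;
        struct unsigned_flags u;

        s.flag = one;   /* reads back as -1 on typical two's-complement targets */
        u.flag = one;   /* reads back as 1 */

        printf("signed   1-bit field reads back %d; (flag == 1) is %s\n",
               s.flag, s.flag == 1 ? "true" : "false");
        printf("unsigned 1-bit field reads back %d; (flag == 1) is %s\n",
               u.flag, u.flag == 1 ? "true" : "false");
        return 0;
}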
diff --git a/trunk/2.6.22/20021_42-freeze.patch1 b/trunk/2.6.22/20021_42-freeze.patch1
new file mode 100644
index 0000000..4f2fde8
--- /dev/null
+++ b/trunk/2.6.22/20021_42-freeze.patch1
@@ -0,0 +1,67 @@
+# HG changeset 42+74 patch
+# User kfraser@localhost.localdomain
+# Date 1181573766 -3600
+# Node ID c09686d2bbffa5ec2152f685df0eaa090ddddd83
+# Parent 07a5f92187ac57ffbd107571b2019be2f53bda98
+Subject: Kernel threads need to be aware of freeze request,
+by checking in their main loop.
+
+xenbus/xenwatch threads are special with PF_NOFREEZE, since we're sure
+that they do nothing to block other native device drivers.
+
+Signed-off-by: Ke Yu <ke.yu@intel.com>
+Signed-off-by: Kevin Tian <kevin.tian@intel.com>
+
+blktap: blktap kthread must respond to freeze requests.
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+Acked-by: jbeulich@novell.com
+
+---
+ drivers/xen/blkback/blkback.c | 3 +++
+ drivers/xen/blktap/blktap.c | 3 +++
+ drivers/xen/xenbus/xenbus_xs.c | 2 ++
+ 3 files changed, 8 insertions(+)
+
+--- a/drivers/xen/blkback/blkback.c 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/blkback/blkback.c 2007-08-27 14:02:03.000000000 -0400
+@@ -208,6 +208,9 @@ int blkif_schedule(void *arg)
+ printk(KERN_DEBUG "%s: started\n", current->comm);
+
+ while (!kthread_should_stop()) {
++ if (try_to_freeze())
++ continue;
++
+ wait_event_interruptible(
+ blkif->wq,
+ blkif->waiting_reqs || kthread_should_stop());
+--- a/drivers/xen/blktap/blktap.c 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/blktap/blktap.c 2007-08-27 14:02:03.000000000 -0400
+@@ -942,6 +942,9 @@ int tap_blkif_schedule(void *arg)
+ printk(KERN_DEBUG "%s: started\n", current->comm);
+
+ while (!kthread_should_stop()) {
++ if (try_to_freeze())
++ continue;
++
+ wait_event_interruptible(
+ blkif->wq,
+ blkif->waiting_reqs || kthread_should_stop());
+--- a/drivers/xen/xenbus/xenbus_xs.c 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/xenbus/xenbus_xs.c 2007-08-27 14:01:25.000000000 -0400
+@@ -718,6 +718,7 @@ static int xenwatch_thread(void *unused)
+ struct list_head *ent;
+ struct xs_stored_msg *msg;
+
++ current->flags |= PF_NOFREEZE;
+ for (;;) {
+ wait_event_interruptible(watch_events_waitq,
+ !list_empty(&watch_events));
+@@ -836,6 +837,7 @@ static int xenbus_thread(void *unused)
+ {
+ int err;
+
++ current->flags |= PF_NOFREEZE;
+ for (;;) {
+ err = process_msg();
+ if (err)
diff --git a/trunk/2.6.22/20022_67-edd.patch1 b/trunk/2.6.22/20022_67-edd.patch1
new file mode 100644
index 0000000..a957b0e
--- /dev/null
+++ b/trunk/2.6.22/20022_67-edd.patch1
@@ -0,0 +1,209 @@
+# HG changeset 67+71+76 patch
+# User kfraser@localhost.localdomain
+# Date 1182362965 -3600
+# Node ID 706976fe8333a88c4d3482253927faa3b4714b15
+# Parent 496e3157a35c32d7a550223914cfb92389a80874
+Subject: linux/x86: Obtain EDD info from Xen
+Signed-off-by: Jan Beulich <jbeulich@novell.com>
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+---
+ arch/i386/kernel/setup-xen.c | 3 ++
+ arch/x86_64/kernel/setup-xen.c | 3 ++
+ drivers/firmware/Kconfig | 1
+ drivers/xen/core/Makefile | 2 -
+ drivers/xen/core/firmware.c | 55 +++++++++++++++++++++++++++++++++++++++
+ include/xen/firmware.h | 8 +++++
+ include/xen/interface/platform.h | 32 ++++++++++++++++++++++
+ 7 files changed, 102 insertions(+), 2 deletions(-)
+
+--- a/arch/i386/kernel/setup-xen.c 2007-08-27 14:01:24.000000000 -0400
++++ b/arch/i386/kernel/setup-xen.c 2007-08-27 14:02:09.000000000 -0400
+@@ -66,6 +66,7 @@
+ #include <xen/interface/physdev.h>
+ #include <xen/interface/memory.h>
+ #include <xen/features.h>
++#include <xen/firmware.h>
+ #include <xen/xencons.h>
+ #include <setup_arch.h>
+ #include <bios_ebda.h>
+@@ -740,6 +741,7 @@ struct edd edd;
+ #ifdef CONFIG_EDD_MODULE
+ EXPORT_SYMBOL(edd);
+ #endif
++#ifndef CONFIG_XEN
+ /**
+ * copy_edd() - Copy the BIOS EDD information
+ * from boot_params into a safe place.
+@@ -752,6 +754,7 @@ static inline void copy_edd(void)
+ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
+ edd.edd_info_nr = EDD_NR;
+ }
++#endif
+ #else
+ static inline void copy_edd(void)
+ {
+--- a/arch/x86_64/kernel/setup-xen.c 2007-08-27 14:01:25.000000000 -0400
++++ b/arch/x86_64/kernel/setup-xen.c 2007-08-27 14:02:09.000000000 -0400
+@@ -71,6 +71,7 @@
+ #include <asm/hypervisor.h>
+ #include <xen/interface/nmi.h>
+ #include <xen/features.h>
++#include <xen/firmware.h>
+ #include <xen/xencons.h>
+ #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+ #define PFN_PHYS(x) ((x) << PAGE_SHIFT)
+@@ -534,6 +535,7 @@ struct edd edd;
+ #ifdef CONFIG_EDD_MODULE
+ EXPORT_SYMBOL(edd);
+ #endif
++#ifndef CONFIG_XEN
+ /**
+ * copy_edd() - Copy the BIOS EDD information
+ * from boot_params into a safe place.
+@@ -546,6 +548,7 @@ static inline void copy_edd(void)
+ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
+ edd.edd_info_nr = EDD_NR;
+ }
++#endif
+ #else
+ static inline void copy_edd(void)
+ {
+--- a/drivers/firmware/Kconfig 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/firmware/Kconfig 2007-08-27 14:01:25.000000000 -0400
+@@ -8,7 +8,6 @@ menu "Firmware Drivers"
+ config EDD
+ tristate "BIOS Enhanced Disk Drive calls determine boot disk"
+ depends on !IA64
+- depends on !XEN
+ help
+ Say Y or M here if you want to enable BIOS Enhanced Disk Drive
+ Services real mode BIOS calls to determine which disk
+--- a/drivers/xen/core/Makefile 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/core/Makefile 2007-08-27 14:02:04.000000000 -0400
+@@ -2,7 +2,7 @@
+ # Makefile for the linux kernel.
+ #
+
+-obj-y := evtchn.o gnttab.o features.o reboot.o machine_reboot.o
++obj-y := evtchn.o gnttab.o features.o reboot.o machine_reboot.o firmware.o
+
+ obj-$(CONFIG_PROC_FS) += xen_proc.o
+ obj-$(CONFIG_SYSFS) += hypervisor_sysfs.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/core/firmware.c 2007-08-27 14:02:09.000000000 -0400
+@@ -0,0 +1,55 @@
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/edd.h>
++#include <xen/interface/platform.h>
++#include <asm/hypervisor.h>
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++void __init copy_edd(void)
++{
++ int ret;
++ struct xen_platform_op op;
++
++ if (!is_initial_xendomain())
++ return;
++
++ op.cmd = XENPF_firmware_info;
++
++ op.u.firmware_info.type = XEN_FW_DISK_INFO;
++ for (op.u.firmware_info.index = 0;
++ edd.edd_info_nr < EDDMAXNR;
++ op.u.firmware_info.index++) {
++ struct edd_info *info = edd.edd_info + edd.edd_info_nr;
++
++ info->params.length = sizeof(info->params);
++ set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
++ &info->params);
++ ret = HYPERVISOR_platform_op(&op);
++ if (ret)
++ break;
++
++#define C(x) info->x = op.u.firmware_info.u.disk_info.x
++ C(device);
++ C(version);
++ C(interface_support);
++ C(legacy_max_cylinder);
++ C(legacy_max_head);
++ C(legacy_sectors_per_track);
++#undef C
++
++ edd.edd_info_nr++;
++ }
++
++ op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
++ for (op.u.firmware_info.index = 0;
++ edd.mbr_signature_nr < EDD_MBR_SIG_MAX;
++ op.u.firmware_info.index++) {
++ ret = HYPERVISOR_platform_op(&op);
++ if (ret)
++ break;
++ edd.mbr_signature[edd.mbr_signature_nr++] =
++ op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
++ }
++}
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/include/xen/firmware.h 2007-08-27 14:02:09.000000000 -0400
+@@ -0,0 +1,8 @@
++#ifndef __XEN_FIRMWARE_H__
++#define __XEN_FIRMWARE_H__
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++void copy_edd(void);
++#endif
++
++#endif /* __XEN_FIRMWARE_H__ */
+--- a/include/xen/interface/platform.h 2007-08-27 14:01:25.000000000 -0400
++++ b/include/xen/interface/platform.h 2007-08-27 14:02:09.000000000 -0400
+@@ -114,6 +114,37 @@ struct xenpf_platform_quirk {
+ typedef struct xenpf_platform_quirk xenpf_platform_quirk_t;
+ DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t);
+
++#define XENPF_firmware_info 50
++#define XEN_FW_DISK_INFO 1
++#define XEN_FW_DISK_MBR_SIGNATURE 2
++struct xenpf_firmware_info {
++ /* IN variables. */
++ uint32_t type;
++ uint32_t index;
++ /* OUT variables. */
++ union {
++ struct {
++ /* Int13, Fn48: Check Extensions Present. */
++ uint8_t device; /* %dl: bios device number */
++ uint8_t version; /* %ah: major version */
++ uint16_t interface_support; /* %cx: support bitmap */
++ /* Int13, Fn08: Legacy Get Device Parameters. */
++ uint16_t legacy_max_cylinder; /* %cl[7:6]:%ch: max cyl # */
++ uint8_t legacy_max_head; /* %dh: max head # */
++ uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector # */
++ /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */
++ /* NB. First uint16_t of buffer must be set to buffer size. */
++ XEN_GUEST_HANDLE(void) edd_params;
++ } disk_info; /* XEN_FW_DISK_INFO */
++ struct {
++ uint8_t device; /* bios device number */
++ uint32_t mbr_signature; /* offset 0x1b8 in mbr */
++ } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */
++ } u;
++};
++typedef struct xenpf_firmware_info xenpf_firmware_info_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t);
++
+ struct xen_platform_op {
+ uint32_t cmd;
+ uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
+@@ -124,6 +155,7 @@ struct xen_platform_op {
+ struct xenpf_read_memtype read_memtype;
+ struct xenpf_microcode_update microcode;
+ struct xenpf_platform_quirk platform_quirk;
++ struct xenpf_firmware_info firmware_info;
+ uint8_t pad[128];
+ } u;
+ };
diff --git a/trunk/2.6.22/20023_70-edid.patch1 b/trunk/2.6.22/20023_70-edid.patch1
new file mode 100644
index 0000000..cdec9d8
--- /dev/null
+++ b/trunk/2.6.22/20023_70-edid.patch1
@@ -0,0 +1,118 @@
+# HG changeset 70 patch
+# User kfraser@localhost.localdomain
+# Date 1182364134 -3600
+# Node ID db1ad4e3caf50efd45726d4b6e483869e20a8e51
+# Parent 9f2badfda534e764a5c14291909595bbb8af2ee5
+Subject: linux/x86: Obtain EDID info from Xen
+Signed-off-by: Jan Beulich <jbeulich@novell.com>
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+---
+ arch/i386/kernel/setup-xen.c | 5 ++++-
+ arch/x86_64/kernel/setup-xen.c | 2 +-
+ drivers/xen/core/firmware.c | 19 +++++++++++++++++++
+ include/xen/firmware.h | 2 ++
+ include/xen/interface/platform.h | 12 ++++++++++--
+ 5 files changed, 36 insertions(+), 4 deletions(-)
+
+--- a/arch/i386/kernel/setup-xen.c 2007-08-27 14:01:25.000000000 -0400
++++ b/arch/i386/kernel/setup-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -156,6 +156,9 @@ struct sys_desc_table_struct {
+ };
+ struct edid_info edid_info;
+ EXPORT_SYMBOL_GPL(edid_info);
++#ifndef CONFIG_XEN
++#define copy_edid() (edid_info = EDID_INFO)
++#endif
+ struct ist_info ist_info;
+ #if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
+ defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
+@@ -1623,7 +1626,7 @@ void __init setup_arch(char **cmdline_p)
+ ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
+ drive_info = DRIVE_INFO;
+ screen_info = SCREEN_INFO;
+- edid_info = EDID_INFO;
++ copy_edid();
+ apm_info.bios = APM_BIOS_INFO;
+ ist_info = IST_INFO;
+ saved_videomode = VIDEO_MODE;
+--- a/arch/x86_64/kernel/setup-xen.c 2007-08-27 14:01:25.000000000 -0400
++++ b/arch/x86_64/kernel/setup-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -616,7 +616,7 @@ void __init setup_arch(char **cmdline_p)
+ } else
+ screen_info.orig_video_isVGA = 0;
+
+- edid_info = EDID_INFO;
++ copy_edid();
+ saved_video_mode = SAVED_VIDEO_MODE;
+ bootloader_type = LOADER_TYPE;
+
+--- a/drivers/xen/core/firmware.c 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/core/firmware.c 2007-08-27 14:01:25.000000000 -0400
+@@ -2,6 +2,7 @@
+ #include <linux/errno.h>
+ #include <linux/init.h>
+ #include <linux/edd.h>
++#include <video/edid.h>
+ #include <xen/interface/platform.h>
+ #include <asm/hypervisor.h>
+
+@@ -53,3 +54,21 @@ void __init copy_edd(void)
+ }
+ }
+ #endif
++
++void __init copy_edid(void)
++{
++#if defined(CONFIG_FIRMWARE_EDID) && defined(CONFIG_X86)
++ struct xen_platform_op op;
++
++ if (!is_initial_xendomain())
++ return;
++
++ op.cmd = XENPF_firmware_info;
++ op.u.firmware_info.index = 0;
++ op.u.firmware_info.type = XEN_FW_VBEDDC_INFO;
++ set_xen_guest_handle(op.u.firmware_info.u.vbeddc_info.edid,
++ edid_info.dummy);
++ if (HYPERVISOR_platform_op(&op) != 0)
++ memset(edid_info.dummy, 0x13, sizeof(edid_info.dummy));
++#endif
++}
+--- a/include/xen/firmware.h 2007-08-27 14:01:25.000000000 -0400
++++ b/include/xen/firmware.h 2007-08-27 14:01:25.000000000 -0400
+@@ -5,4 +5,6 @@
+ void copy_edd(void);
+ #endif
+
++void copy_edid(void);
++
+ #endif /* __XEN_FIRMWARE_H__ */
+--- a/include/xen/interface/platform.h 2007-08-27 14:01:25.000000000 -0400
++++ b/include/xen/interface/platform.h 2007-08-27 14:01:25.000000000 -0400
+@@ -115,8 +115,9 @@ typedef struct xenpf_platform_quirk xenp
+ DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t);
+
+ #define XENPF_firmware_info 50
+-#define XEN_FW_DISK_INFO 1
+-#define XEN_FW_DISK_MBR_SIGNATURE 2
++#define XEN_FW_DISK_INFO 1 /* from int 13 AH=08/41/48 */
++#define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */
++#define XEN_FW_VBEDDC_INFO 3 /* from int 10 AX=4f15 */
+ struct xenpf_firmware_info {
+ /* IN variables. */
+ uint32_t type;
+@@ -140,6 +141,13 @@ struct xenpf_firmware_info {
+ uint8_t device; /* bios device number */
+ uint32_t mbr_signature; /* offset 0x1b8 in mbr */
+ } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */
++ struct {
++ /* Int10, AX=4F15: Get EDID info. */
++ uint8_t capabilities;
++ uint8_t edid_transfer_time;
++ /* must refer to 128-byte buffer */
++ XEN_GUEST_HANDLE(uint8_t) edid;
++ } vbeddc_info; /* XEN_FW_VBEDDC_INFO */
+ } u;
+ };
+ typedef struct xenpf_firmware_info xenpf_firmware_info_t;
diff --git a/trunk/2.6.22/20024_79-balloon-highmem.patch1 b/trunk/2.6.22/20024_79-balloon-highmem.patch1
new file mode 100644
index 0000000..4e15c15
--- /dev/null
+++ b/trunk/2.6.22/20024_79-balloon-highmem.patch1
@@ -0,0 +1,42 @@
+# HG changeset 79 patch
+# User kfraser@localhost.localdomain
+# Date 1183324052 -3600
+# Node ID 57ab8dd47580c2f726556fe1c46b5401f2bddb1e
+# Parent 0be610b725fae4cd6de8f0b111660a186f93b86d
+Subject: Stop low memory from appearing -ve in /proc/meminfo when ballooned.
+Signed-off-by: Mark Williamson <mark.williamson@cl.cam.ac.uk>
+
+---
+ drivers/xen/balloon/balloon.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/xen/balloon/balloon.c 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/balloon/balloon.c 2007-08-27 14:02:03.000000000 -0400
+@@ -83,6 +83,7 @@ static unsigned long frame_list[PAGE_SIZ
+
+ /* VM /proc information for memory */
+ extern unsigned long totalram_pages;
++extern unsigned long totalhigh_pages;
+
+ /* List of ballooned pages, threaded through the mem_map array. */
+ static LIST_HEAD(ballooned_pages);
+@@ -118,6 +119,7 @@ static void balloon_append(struct page *
+ if (PageHighMem(page)) {
+ list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
+ bs.balloon_high++;
++ totalhigh_pages--;
+ } else {
+ list_add(PAGE_TO_LIST(page), &ballooned_pages);
+ bs.balloon_low++;
+@@ -135,8 +137,10 @@ static struct page *balloon_retrieve(voi
+ page = LIST_TO_PAGE(ballooned_pages.next);
+ UNLIST_PAGE(page);
+
+- if (PageHighMem(page))
++ if (PageHighMem(page)) {
+ bs.balloon_high--;
++ totalhigh_pages++;
++ }
+ else
+ bs.balloon_low--;
+
diff --git a/trunk/2.6.22/20025_80-blk-teardown.patch1 b/trunk/2.6.22/20025_80-blk-teardown.patch1
new file mode 100644
index 0000000..7c5899d
--- /dev/null
+++ b/trunk/2.6.22/20025_80-blk-teardown.patch1
@@ -0,0 +1,57 @@
+# HG changeset 80 patch
+# User kfraser@localhost.localdomain
+# Date 1183377939 -3600
+# Node ID 4a284f968015fa4cd50d9d4c7695534c87c7bce6
+# Parent 57ab8dd47580c2f726556fe1c46b5401f2bddb1e
+Subject: blktap/blkback: Tear down sysfs nodes before freeing blkdev structures.
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+Acked-by: jbeulich@novell.com
+
+---
+ drivers/xen/blkback/xenbus.c | 6 +++---
+ drivers/xen/blktap/xenbus.c | 4 ++--
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/xen/blkback/xenbus.c 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/blkback/xenbus.c 2007-08-27 14:01:50.000000000 -0400
+@@ -173,6 +173,9 @@ static int blkback_remove(struct xenbus_
+
+ DPRINTK("");
+
++ if (be->major || be->minor)
++ xenvbd_sysfs_delif(dev);
++
+ if (be->backend_watch.node) {
+ unregister_xenbus_watch(&be->backend_watch);
+ kfree(be->backend_watch.node);
+@@ -186,9 +189,6 @@ static int blkback_remove(struct xenbus_
+ be->blkif = NULL;
+ }
+
+- if (be->major || be->minor)
+- xenvbd_sysfs_delif(dev);
+-
+ kfree(be);
+ dev->dev.driver_data = NULL;
+ return 0;
+--- a/drivers/xen/blktap/xenbus.c 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/blktap/xenbus.c 2007-08-27 14:01:50.000000000 -0400
+@@ -168,6 +168,8 @@ static int blktap_remove(struct xenbus_d
+ {
+ struct backend_info *be = dev->dev.driver_data;
+
++ if (be->group_added)
++ xentap_sysfs_delif(be->dev);
+ if (be->backend_watch.node) {
+ unregister_xenbus_watch(&be->backend_watch);
+ kfree(be->backend_watch.node);
+@@ -180,8 +182,6 @@ static int blktap_remove(struct xenbus_d
+ tap_blkif_free(be->blkif);
+ be->blkif = NULL;
+ }
+- if (be->group_added)
+- xentap_sysfs_delif(be->dev);
+ kfree(be);
+ dev->dev.driver_data = NULL;
+ return 0;
diff --git a/trunk/2.6.22/20026_81-clock-was-set.patch1 b/trunk/2.6.22/20026_81-clock-was-set.patch1
new file mode 100644
index 0000000..bd70c35
--- /dev/null
+++ b/trunk/2.6.22/20026_81-clock-was-set.patch1
@@ -0,0 +1,48 @@
+# HG changeset 81 patch
+# User Ian Campbell <ian.campbell@xensource.com>
+# Date 1183393164 -3600
+# Node ID cb040341e05af32c804afef4216ec5491dcbf9e3
+# Parent 4a284f968015fa4cd50d9d4c7695534c87c7bce6
+Subject: Do not call clock_was_set() from interrupt context.
+
+Currently clock_was_set() is a nop but on newer kernels it is not and
+cannot be called from interrupt context. Prepare for that by deferring
+to a workqueue. Since a timer interrupt can occur before
+init_workqueue() is called we need to protect against the possibility
+that keventd hasn't started yet.
+
+(drop unused variable max_ntp_tick).
+
+Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
+
+Acked-by: jbeulich@novell.com
+
+---
+ arch/i386/kernel/time-xen.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/arch/i386/kernel/time-xen.c 2007-08-27 14:01:25.000000000 -0400
++++ b/arch/i386/kernel/time-xen.c 2007-08-27 14:02:04.000000000 -0400
+@@ -127,6 +127,12 @@ static DEFINE_PER_CPU(struct vcpu_runsta
+ /* Must be signed, as it's compared with s64 quantities which can be -ve. */
+ #define NS_PER_TICK (1000000000LL/HZ)
+
++static void __clock_was_set(void *unused)
++{
++ clock_was_set();
++}
++static DECLARE_WORK(clock_was_set_work, __clock_was_set, NULL);
++
+ static inline void __normalize_time(time_t *sec, s64 *nsec)
+ {
+ while (*nsec >= NSEC_PER_SEC) {
+@@ -667,7 +673,8 @@ irqreturn_t timer_interrupt(int irq, voi
+
+ if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
+ update_wallclock();
+- clock_was_set();
++ if (keventd_up())
++ schedule_work(&clock_was_set_work);
+ }
+
+ write_sequnlock(&xtime_lock);
diff --git a/trunk/2.6.22/20027_82-blkdev-wait.patch1 b/trunk/2.6.22/20027_82-blkdev-wait.patch1
new file mode 100644
index 0000000..8f5fdc2
--- /dev/null
+++ b/trunk/2.6.22/20027_82-blkdev-wait.patch1
@@ -0,0 +1,92 @@
+# HG changeset 82 patch
+# User Christian Limpach <Christian.Limpach@xensource.com>
+# Date 1183716504 -3600
+# Node ID 11483a00c017ea82a4c2948724eb36ba6ec5c0ba
+# Parent cb040341e05af32c804afef4216ec5491dcbf9e3
+Subject: Delay wait for block devices until after the disk is added.
+
+Signed-off-by: Christian Limpach <Christian.Limpach@xensource.com>
+
+Acked-by: jbeulich@novell.com
+
+---
+ drivers/xen/blkfront/blkfront.c | 10 ++++++++++
+ drivers/xen/blkfront/block.h | 1 +
+ drivers/xen/xenbus/xenbus_probe.c | 5 ++++-
+ include/xen/xenbus.h | 1 +
+ 4 files changed, 16 insertions(+), 1 deletion(-)
+
+--- a/drivers/xen/blkfront/blkfront.c 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/blkfront/blkfront.c 2007-08-27 14:02:03.000000000 -0400
+@@ -354,6 +354,8 @@ static void connect(struct blkfront_info
+ spin_unlock_irq(&blkif_io_lock);
+
+ add_disk(info->gd);
++
++ info->is_ready = 1;
+ }
+
+ /**
+@@ -862,6 +864,13 @@ static void blkif_recover(struct blkfron
+ spin_unlock_irq(&blkif_io_lock);
+ }
+
++int blkfront_is_ready(struct xenbus_device *dev)
++{
++ struct blkfront_info *info = dev->dev.driver_data;
++
++ return info->is_ready;
++}
++
+
+ /* ** Driver Registration ** */
+
+@@ -880,6 +889,7 @@ static struct xenbus_driver blkfront = {
+ .remove = blkfront_remove,
+ .resume = blkfront_resume,
+ .otherend_changed = backend_changed,
++ .is_ready = blkfront_is_ready,
+ };
+
+
+--- a/drivers/xen/blkfront/block.h 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/blkfront/block.h 2007-08-27 14:01:25.000000000 -0400
+@@ -111,6 +111,7 @@ struct blkfront_info
+ struct blk_shadow shadow[BLK_RING_SIZE];
+ unsigned long shadow_free;
+ int feature_barrier;
++ int is_ready;
+
+ /**
+ * The number of people holding this device open. We won't allow a
+--- a/drivers/xen/xenbus/xenbus_probe.c 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/xenbus/xenbus_probe.c 2007-08-27 14:02:01.000000000 -0400
+@@ -992,6 +992,7 @@ static int is_disconnected_device(struct
+ {
+ struct xenbus_device *xendev = to_xenbus_device(dev);
+ struct device_driver *drv = data;
++ struct xenbus_driver *xendrv;
+
+ /*
+ * A device with no driver will never connect. We care only about
+@@ -1004,7 +1005,9 @@ static int is_disconnected_device(struct
+ if (drv && (dev->driver != drv))
+ return 0;
+
+- return (xendev->state != XenbusStateConnected);
++ xendrv = to_xenbus_driver(dev->driver);
++ return (xendev->state != XenbusStateConnected ||
++ (xendrv->is_ready && !xendrv->is_ready(xendev)));
+ }
+
+ static int exists_disconnected_device(struct device_driver *drv)
+--- a/include/xen/xenbus.h 2007-08-27 14:01:25.000000000 -0400
++++ b/include/xen/xenbus.h 2007-08-27 14:01:25.000000000 -0400
+@@ -106,6 +106,7 @@ struct xenbus_driver {
+ int (*uevent)(struct xenbus_device *, char **, int, char *, int);
+ struct device_driver driver;
+ int (*read_otherend_details)(struct xenbus_device *dev);
++ int (*is_ready)(struct xenbus_device *dev);
+ };
+
+ static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
diff --git a/trunk/2.6.22/20028_93-swiotlb.patch1 b/trunk/2.6.22/20028_93-swiotlb.patch1
new file mode 100644
index 0000000..4d28651
--- /dev/null
+++ b/trunk/2.6.22/20028_93-swiotlb.patch1
@@ -0,0 +1,146 @@
+# HG changeset 93+99+100+101+105 patch
+# User kfraser@localhost.localdomain
+# Date 1183985110 -3600
+# Node ID 08cf42135056cbc07a6d790d4851e0e4b160f847
+# Parent f833757672a70ee43afd0bfbfaa22cec3b132445
+Subject: x86: dma_map_sg() must handle multi-page segments.
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+Subject: swiotlb: Handle sync invocations on subregions of a mapped region.
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+Subject: swiotlb: Keep offset in a page strictly smaller than PAGE_SIZE.
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+Subject: swiotlb: Allow sync on arbitrary offsets into dma-mapped region.
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+swiotlb: dma_addr_to_phys_addr() should be static.
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+Acked-by: jbeulich@novell.com
+
+Index: head-2007-08-07/arch/i386/kernel/pci-dma-xen.c
+===================================================================
+--- head-2007-08-07.orig/arch/i386/kernel/pci-dma-xen.c 2007-08-07 09:47:30.000000000 +0200
++++ head-2007-08-07/arch/i386/kernel/pci-dma-xen.c 2007-08-07 09:48:10.000000000 +0200
+@@ -97,6 +97,9 @@ dma_map_sg(struct device *hwdev, struct
+ BUG_ON(!sg[i].page);
+ IOMMU_BUG_ON(address_needs_mapping(
+ hwdev, sg[i].dma_address));
++ IOMMU_BUG_ON(range_straddles_page_boundary(
++ page_to_pseudophys(sg[i].page) + sg[i].offset,
++ sg[i].length));
+ }
+ rc = nents;
+ }
+@@ -338,7 +341,7 @@ dma_map_single(struct device *dev, void
+ } else {
+ dma = gnttab_dma_map_page(virt_to_page(ptr)) +
+ offset_in_page(ptr);
+- IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
++ IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
+ IOMMU_BUG_ON(address_needs_mapping(dev, dma));
+ }
+
+Index: head-2007-08-07/arch/i386/kernel/swiotlb.c
+===================================================================
+--- head-2007-08-07.orig/arch/i386/kernel/swiotlb.c 2007-08-07 09:47:30.000000000 +0200
++++ head-2007-08-07/arch/i386/kernel/swiotlb.c 2007-08-07 09:48:50.000000000 +0200
+@@ -304,6 +304,7 @@ map_single(struct device *hwdev, struct
+ unsigned long flags;
+ char *dma_addr;
+ unsigned int nslots, stride, index, wrap;
++ struct phys_addr slot_buf;
+ int i;
+
+ /*
+@@ -375,13 +376,29 @@ map_single(struct device *hwdev, struct
+ * This is needed when we sync the memory. Then we sync the buffer if
+ * needed.
+ */
+- io_tlb_orig_addr[index] = buffer;
++ slot_buf = buffer;
++ for (i = 0; i < nslots; i++) {
++ slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
++ slot_buf.offset &= PAGE_SIZE - 1;
++ io_tlb_orig_addr[index+i] = slot_buf;
++ slot_buf.offset += 1 << IO_TLB_SHIFT;
++ }
+ if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
+ __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+
+ return dma_addr;
+ }
+
++static struct phys_addr dma_addr_to_phys_addr(char *dma_addr)
++{
++ int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
++ struct phys_addr buffer = io_tlb_orig_addr[index];
++ buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
++ buffer.page += buffer.offset >> PAGE_SHIFT;
++ buffer.offset &= PAGE_SIZE - 1;
++ return buffer;
++}
++
+ /*
+ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
+ */
+@@ -391,7 +408,7 @@ unmap_single(struct device *hwdev, char
+ unsigned long flags;
+ int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+ int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
+- struct phys_addr buffer = io_tlb_orig_addr[index];
++ struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
+
+ /*
+ * First, sync the memory before unmapping the entry
+@@ -431,8 +448,7 @@ unmap_single(struct device *hwdev, char
+ static void
+ sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+ {
+- int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
+- struct phys_addr buffer = io_tlb_orig_addr[index];
++ struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
+ BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
+ __sync_single(buffer, dma_addr, size, dir);
+ }
+@@ -480,7 +496,7 @@ swiotlb_map_single(struct device *hwdev,
+ * we can safely return the device addr and not worry about bounce
+ * buffering it.
+ */
+- if (!range_straddles_page_boundary(ptr, size) &&
++ if (!range_straddles_page_boundary(__pa(ptr), size) &&
+ !address_needs_mapping(hwdev, dev_addr))
+ return dev_addr;
+
+@@ -577,7 +593,9 @@ swiotlb_map_sg(struct device *hwdev, str
+ for (i = 0; i < nelems; i++, sg++) {
+ dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
+
+- if (address_needs_mapping(hwdev, dev_addr)) {
++ if (range_straddles_page_boundary(page_to_pseudophys(sg->page)
++ + sg->offset, sg->length)
++ || address_needs_mapping(hwdev, dev_addr)) {
+ gnttab_dma_unmap_page(dev_addr);
+ buffer.page = sg->page;
+ buffer.offset = sg->offset;
+Index: head-2007-08-07/include/asm-i386/mach-xen/asm/dma-mapping.h
+===================================================================
+--- head-2007-08-07.orig/include/asm-i386/mach-xen/asm/dma-mapping.h 2007-08-07 09:47:09.000000000 +0200
++++ head-2007-08-07/include/asm-i386/mach-xen/asm/dma-mapping.h 2007-08-07 09:48:10.000000000 +0200
+@@ -23,11 +23,11 @@ address_needs_mapping(struct device *hwd
+ }
+
+ static inline int
+-range_straddles_page_boundary(void *p, size_t size)
++range_straddles_page_boundary(paddr_t p, size_t size)
+ {
+ extern unsigned long *contiguous_bitmap;
+- return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
+- !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
++ return ((((p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
++ !test_bit(p >> PAGE_SHIFT, contiguous_bitmap));
+ }
+
+ #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
diff --git a/trunk/2.6.22/20029_95-privcmd-wrlock.patch1 b/trunk/2.6.22/20029_95-privcmd-wrlock.patch1
new file mode 100644
index 0000000..61140bb
--- /dev/null
+++ b/trunk/2.6.22/20029_95-privcmd-wrlock.patch1
@@ -0,0 +1,72 @@
+# HG changeset 95 patch
+# User kfraser@localhost.localdomain
+# Date 1184000854 -3600
+# Node ID 21d5238ee2ec892825cc8905cc6ffa08b021fc88
+# Parent d36fd1c5db16e4531f78889a094fe0aeaa139995
+Subject: privcmd: Take write lock on mm semaphore when calling
+*remap_pfn_range(), as these functions mess with fields in the vma
+structure.
+Signed-off-by: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
+
+Acked-by: jbeulich@novell.com
+
+---
+ drivers/xen/privcmd/privcmd.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/xen/privcmd/privcmd.c 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/privcmd/privcmd.c 2007-08-27 14:02:03.000000000 -0400
+@@ -111,7 +111,7 @@ static int privcmd_ioctl(struct inode *i
+ if (copy_from_user(&msg, p, sizeof(msg)))
+ return -EFAULT;
+
+- down_read(&mm->mmap_sem);
++ down_write(&mm->mmap_sem);
+
+ vma = find_vma(mm, msg.va);
+ rc = -EINVAL;
+@@ -153,7 +153,7 @@ static int privcmd_ioctl(struct inode *i
+ rc = 0;
+
+ mmap_out:
+- up_read(&mm->mmap_sem);
++ up_write(&mm->mmap_sem);
+ ret = rc;
+ }
+ break;
+@@ -176,14 +176,14 @@ static int privcmd_ioctl(struct inode *i
+ if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
+ return -EINVAL;
+
+- down_read(&mm->mmap_sem);
++ down_write(&mm->mmap_sem);
+
+ vma = find_vma(mm, m.addr);
+ if (!vma ||
+ (m.addr != vma->vm_start) ||
+ ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
+ !privcmd_enforce_singleshot_mapping(vma)) {
+- up_read(&mm->mmap_sem);
++ up_write(&mm->mmap_sem);
+ return -EINVAL;
+ }
+
+@@ -191,7 +191,7 @@ static int privcmd_ioctl(struct inode *i
+ addr = m.addr;
+ for (i = 0; i < nr_pages; i++, addr += PAGE_SIZE, p++) {
+ if (get_user(mfn, p)) {
+- up_read(&mm->mmap_sem);
++ up_write(&mm->mmap_sem);
+ return -EFAULT;
+ }
+
+@@ -202,7 +202,7 @@ static int privcmd_ioctl(struct inode *i
+ put_user(0xF0000000 | mfn, p);
+ }
+
+- up_read(&mm->mmap_sem);
++ up_write(&mm->mmap_sem);
+ ret = 0;
+ }
+ break;
diff --git a/trunk/2.6.22/20002_fix-pae-vmalloc-sync.patch b/trunk/2.6.22/20030_136-pae-vmalloc-sync-all.patch1
index 913b79c..b75af4b 100644
--- a/trunk/2.6.22/20002_fix-pae-vmalloc-sync.patch
+++ b/trunk/2.6.22/20030_136-pae-vmalloc-sync-all.patch1
@@ -1,12 +1,23 @@
-diff --exclude=debian --exclude='.git*' -Naur ubuntu-xen/arch/i386/mm/fault-xen.c ubuntu-xen-tip-3.1/arch/i386/mm/fault-xen.c
---- ubuntu-xen/arch/i386/mm/fault-xen.c 2007-09-24 13:00:02.000000000 -0400
-+++ ubuntu-xen-tip-3.1/arch/i386/mm/fault-xen.c 2007-09-24 13:16:48.000000000 -0400
-@@ -708,8 +708,11 @@
+# HG changeset 136 patch
+# User Keir Fraser <keir@xensource.com>
+# Date 1184403059 -3600
+# Node ID 34ebf92ad28d53f70ca02966c9f926f7d83bafbb
+# Parent 9debaf36090515b4ce54712c4641781bc263b1a6
+Subject: xen/i386: Fix vmalloc_sync_all() for PAE.
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+Acked-by: jbeulich@novell.com
+
+Index: head-2007-08-07/arch/i386/mm/fault-xen.c
+===================================================================
+--- head-2007-08-07.orig/arch/i386/mm/fault-xen.c 2007-08-07 09:47:09.000000000 +0200
++++ head-2007-08-07/arch/i386/mm/fault-xen.c 2007-08-07 09:57:59.000000000 +0200
+@@ -739,18 +739,31 @@ void vmalloc_sync_all(void)
* problematic: insync can only get set bits added, and updates to
* start are only improving performance (without affecting correctness
* if undone).
+ * XEN: To work on PAE, we need to iterate over PMDs rather than PGDs.
-+ * This change works just fine with 2-level paging too.
++ * This change works just fine with 2-level paging too.
*/
- static DECLARE_BITMAP(insync, PTRS_PER_PGD);
+#define sync_index(a) ((a) >> PMD_SHIFT)
@@ -14,24 +25,21 @@ diff --exclude=debian --exclude='.git*' -Naur ubuntu-xen/arch/i386/mm/fault-xen.
static unsigned long start = TASK_SIZE;
unsigned long address;
-@@ -717,12 +720,22 @@
- return;
-
BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
- for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
- if (!test_bit(pgd_index(address), insync)) {
+ for (address = start;
-+ address >= TASK_SIZE && address < hypervisor_virt_start;
-+ address += 1UL << PMD_SHIFT) {
-+ if (!test_bit(sync_index(address), insync)) {
++ address >= TASK_SIZE && address < hypervisor_virt_start;
++ address += 1UL << PMD_SHIFT) {
++ if (!test_bit(sync_index(address), insync)) {
unsigned long flags;
struct page *page;
spin_lock_irqsave(&pgd_lock, flags);
+ /*
-+ * XEN: vmalloc_sync_one() failure path logic assumes
-+ * pgd_list is not empty.
-+ */
++ * XEN: vmalloc_sync_one() failure path logic assumes
++ * pgd_list is non-empty.
++ */
+ if (unlikely(!pgd_list)) {
+ spin_unlock_irqrestore(&pgd_lock, flags);
+ return;
@@ -39,7 +47,7 @@ diff --exclude=debian --exclude='.git*' -Naur ubuntu-xen/arch/i386/mm/fault-xen.
for (page = pgd_list; page; page =
(struct page *)page->index)
if (!vmalloc_sync_one(page_address(page),
-@@ -732,9 +745,9 @@
+@@ -760,10 +773,10 @@ void vmalloc_sync_all(void)
}
spin_unlock_irqrestore(&pgd_lock, flags);
if (!page)
@@ -49,6 +57,7 @@ diff --exclude=debian --exclude='.git*' -Naur ubuntu-xen/arch/i386/mm/fault-xen.
- if (address == start && test_bit(pgd_index(address), insync))
- start = address + PGDIR_SIZE;
+ if (address == start && test_bit(sync_index(address), insync))
-+ start = address + (1UL << PGDIR_SIZE);
++ start = address + (1UL << PMD_SHIFT);
}
}
+ #endif
diff --git a/trunk/2.6.22/20031_137-netfront-copy-release.patch1 b/trunk/2.6.22/20031_137-netfront-copy-release.patch1
new file mode 100644
index 0000000..1e58158
--- /dev/null
+++ b/trunk/2.6.22/20031_137-netfront-copy-release.patch1
@@ -0,0 +1,128 @@
+# HG changeset 137 patch
+# User Ian Campbell <ian.campbell@xensource.com>
+# Date 1184590655 -3600
+# Node ID 41918416db51d1eeaba7c71259e1c0f0ea3426f6
+# Parent 34ebf92ad28d53f70ca02966c9f926f7d83bafbb
+Subject: [NETFRONT] Implement netif_release_rx_bufs for the copying case.
+
+Also reduce the log level when we are unable to end foreign access to a
+grant.
+
+Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
+
+Acked-by: jbeulich@novell.com
+
+Index: head-2007-08-07/drivers/xen/core/gnttab.c
+===================================================================
+--- head-2007-08-07.orig/drivers/xen/core/gnttab.c 2007-08-07 09:47:30.000000000 +0200
++++ head-2007-08-07/drivers/xen/core/gnttab.c 2007-08-07 09:58:05.000000000 +0200
+@@ -186,7 +186,7 @@ int gnttab_end_foreign_access_ref(grant_
+ nflags = shared[ref].flags;
+ do {
+ if ((flags = nflags) & (GTF_reading|GTF_writing)) {
+- printk(KERN_ALERT "WARNING: g.e. still in use!\n");
++ printk(KERN_DEBUG "WARNING: g.e. still in use!\n");
+ return 0;
+ }
+ } while ((nflags = synch_cmpxchg_subword(&shared[ref].flags, flags, 0)) !=
+@@ -206,7 +206,7 @@ void gnttab_end_foreign_access(grant_ref
+ } else {
+ /* XXX This needs to be fixed so that the ref and page are
+ placed on a list to be freed up later. */
+- printk(KERN_WARNING
++ printk(KERN_DEBUG
+ "WARNING: leaking g.e. and page still in use!\n");
+ }
+ }
+Index: head-2007-08-07/drivers/xen/netfront/netfront.c
+===================================================================
+--- head-2007-08-07.orig/drivers/xen/netfront/netfront.c 2007-08-07 09:47:09.000000000 +0200
++++ head-2007-08-07/drivers/xen/netfront/netfront.c 2007-08-07 09:58:05.000000000 +0200
+@@ -1510,7 +1510,7 @@ static void netif_release_tx_bufs(struct
+ }
+ }
+
+-static void netif_release_rx_bufs(struct netfront_info *np)
++static void netif_release_rx_bufs_flip(struct netfront_info *np)
+ {
+ struct mmu_update *mmu = np->rx_mmu;
+ struct multicall_entry *mcl = np->rx_mcl;
+@@ -1520,11 +1520,6 @@ static void netif_release_rx_bufs(struct
+ int xfer = 0, noxfer = 0, unused = 0;
+ int id, ref, rc;
+
+- if (np->copying_receiver) {
+- WPRINTK("%s: fix me for copying receiver.\n", __FUNCTION__);
+- return;
+- }
+-
+ skb_queue_head_init(&free_list);
+
+ spin_lock_bh(&np->rx_lock);
+@@ -1571,7 +1566,7 @@ static void netif_release_rx_bufs(struct
+ xfer++;
+ }
+
+- IPRINTK("%s: %d xfer, %d noxfer, %d unused\n",
++ DPRINTK("%s: %d xfer, %d noxfer, %d unused\n",
+ __FUNCTION__, xfer, noxfer, unused);
+
+ if (xfer) {
+@@ -1598,6 +1593,45 @@ static void netif_release_rx_bufs(struct
+ spin_unlock_bh(&np->rx_lock);
+ }
+
++static void netif_release_rx_bufs_copy(struct netfront_info *np)
++{
++ struct sk_buff *skb;
++ int i, ref;
++ int busy = 0, inuse = 0;
++
++ spin_lock_bh(&np->rx_lock);
++
++ for (i = 0; i < NET_RX_RING_SIZE; i++) {
++ ref = np->grant_rx_ref[i];
++
++ if (ref == GRANT_INVALID_REF)
++ continue;
++
++ inuse++;
++
++ skb = np->rx_skbs[i];
++
++ if (!gnttab_end_foreign_access_ref(ref, 0))
++ {
++ busy++;
++ continue;
++ }
++
++ gnttab_release_grant_reference(&np->gref_rx_head, ref);
++ np->grant_rx_ref[i] = GRANT_INVALID_REF;
++ add_id_to_freelist(np->rx_skbs, i);
++
++ skb_shinfo(skb)->nr_frags = 0;
++ dev_kfree_skb(skb);
++ }
++
++ if (busy)
++ DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n",
++ __FUNCTION__, busy, inuse, NET_RX_RING_SIZE);
++
++ spin_unlock_bh(&np->rx_lock);
++}
++
+ static int network_close(struct net_device *dev)
+ {
+ struct netfront_info *np = netdev_priv(dev);
+@@ -1778,7 +1812,10 @@ static void netif_uninit(struct net_devi
+ {
+ struct netfront_info *np = netdev_priv(dev);
+ netif_release_tx_bufs(np);
+- netif_release_rx_bufs(np);
++ if (np->copying_receiver)
++ netif_release_rx_bufs_copy(np);
++ else
++ netif_release_rx_bufs_flip(np);
+ gnttab_free_grant_references(np->gref_tx_head);
+ gnttab_free_grant_references(np->gref_rx_head);
+ }
diff --git a/trunk/2.6.22/20032_141-driver-autoload.patch1 b/trunk/2.6.22/20032_141-driver-autoload.patch1
new file mode 100644
index 0000000..740704a
--- /dev/null
+++ b/trunk/2.6.22/20032_141-driver-autoload.patch1
@@ -0,0 +1,120 @@
+# HG changeset 141 patch
+# User kfraser@localhost.localdomain
+# Date 1184678500 -3600
+# Node ID 5e294e29a43ee8a608d454353ee19b83f72d4757
+# Parent 3ce2b9fc4900103af7b4f180ff6622b480d55c84
+Subject: Xen frontend driver module autoloading.
+
+Implements module autoloading for the xen frontend drivers by adding a
+uevent function for the frontend xenbus and some module aliases to the
+individual drivers.
+
+From: Gerd Hoffmann <kraxel@redhat.com>
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+Acked-by: jbeulich@novell.com
+
+Index: head-2007-08-07/drivers/xen/blkfront/blkfront.c
+===================================================================
+--- head-2007-08-07.orig/drivers/xen/blkfront/blkfront.c 2007-08-07 09:48:09.000000000 +0200
++++ head-2007-08-07/drivers/xen/blkfront/blkfront.c 2007-08-07 09:58:14.000000000 +0200
+@@ -879,7 +879,7 @@ static struct xenbus_device_id blkfront_
+ { "vbd" },
+ { "" }
+ };
+-
++MODULE_ALIAS("xen:vbd");
+
+ static struct xenbus_driver blkfront = {
+ .name = "vbd",
+Index: head-2007-08-07/drivers/xen/fbfront/xenfb.c
+===================================================================
+--- head-2007-08-07.orig/drivers/xen/fbfront/xenfb.c 2007-08-07 09:47:09.000000000 +0200
++++ head-2007-08-07/drivers/xen/fbfront/xenfb.c 2007-08-07 09:58:14.000000000 +0200
+@@ -718,6 +718,7 @@ static struct xenbus_device_id xenfb_ids
+ { "vfb" },
+ { "" }
+ };
++MODULE_ALIAS("xen:vfb");
+
+ static struct xenbus_driver xenfb = {
+ .name = "vfb",
+Index: head-2007-08-07/drivers/xen/fbfront/xenkbd.c
+===================================================================
+--- head-2007-08-07.orig/drivers/xen/fbfront/xenkbd.c 2007-08-07 09:47:09.000000000 +0200
++++ head-2007-08-07/drivers/xen/fbfront/xenkbd.c 2007-08-07 09:58:14.000000000 +0200
+@@ -299,6 +299,7 @@ static struct xenbus_device_id xenkbd_id
+ { "vkbd" },
+ { "" }
+ };
++MODULE_ALIAS("xen:vkbd");
+
+ static struct xenbus_driver xenkbd = {
+ .name = "vkbd",
+Index: head-2007-08-07/drivers/xen/netfront/netfront.c
+===================================================================
+--- head-2007-08-07.orig/drivers/xen/netfront/netfront.c 2007-08-07 09:58:05.000000000 +0200
++++ head-2007-08-07/drivers/xen/netfront/netfront.c 2007-08-07 09:58:14.000000000 +0200
+@@ -2110,6 +2110,7 @@ static struct xenbus_device_id netfront_
+ { "vif" },
+ { "" }
+ };
++MODULE_ALIAS("xen:vif");
+
+
+ static struct xenbus_driver netfront = {
+Index: head-2007-08-07/drivers/xen/pcifront/xenbus.c
+===================================================================
+--- head-2007-08-07.orig/drivers/xen/pcifront/xenbus.c 2007-08-07 09:34:51.000000000 +0200
++++ head-2007-08-07/drivers/xen/pcifront/xenbus.c 2007-08-07 09:58:14.000000000 +0200
+@@ -273,6 +273,7 @@ static struct xenbus_device_id xenpci_id
+ {"pci"},
+ {{0}},
+ };
++MODULE_ALIAS("xen:pci");
+
+ static struct xenbus_driver xenbus_pcifront_driver = {
+ .name = "pcifront",
+Index: head-2007-08-07/drivers/xen/xenbus/xenbus_probe.c
+===================================================================
+--- head-2007-08-07.orig/drivers/xen/xenbus/xenbus_probe.c 2007-08-07 09:48:09.000000000 +0200
++++ head-2007-08-07/drivers/xen/xenbus/xenbus_probe.c 2007-08-07 09:58:14.000000000 +0200
+@@ -165,6 +165,30 @@ static int read_backend_details(struct x
+ return read_otherend_details(xendev, "backend-id", "backend");
+ }
+
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++static int xenbus_uevent_frontend(struct device *dev, char **envp,
++ int num_envp, char *buffer, int buffer_size)
++{
++ struct xenbus_device *xdev;
++ int length = 0, i = 0;
++
++ if (dev == NULL)
++ return -ENODEV;
++ xdev = to_xenbus_device(dev);
++ if (xdev == NULL)
++ return -ENODEV;
++
++ /* stuff we want to pass to /sbin/hotplug */
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "XENBUS_TYPE=%s", xdev->devicetype);
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "XENBUS_PATH=%s", xdev->nodename);
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "MODALIAS=xen:%s", xdev->devicetype);
++
++ return 0;
++}
++#endif
+
+ /* Bus type for frontend drivers. */
+ static struct xen_bus_type xenbus_frontend = {
+@@ -179,6 +203,7 @@ static struct xen_bus_type xenbus_fronte
+ .probe = xenbus_dev_probe,
+ .remove = xenbus_dev_remove,
+ .shutdown = xenbus_dev_shutdown,
++ .uevent = xenbus_uevent_frontend,
+ #endif
+ },
+ .dev = {
diff --git a/trunk/2.6.22/20033_144-xenbus-dev-wait.patch1 b/trunk/2.6.22/20033_144-xenbus-dev-wait.patch1
new file mode 100644
index 0000000..0187b6a
--- /dev/null
+++ b/trunk/2.6.22/20033_144-xenbus-dev-wait.patch1
@@ -0,0 +1,104 @@
+# HG changeset 144+146+150 patch
+# User kfraser@localhost.localdomain
+# Date 1185266340 -3600
+# Node ID d88e59a7334ae584900a9f7221d494bcd9ef2a63
+# Parent c68699484a654681a2912e70411286f13119c01f
+Subject: xenbus: Wait for 30s for devices to connect (previously 10s).
+Give a visual update to the user on the console every 5s during this
+period.
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+Subject: xenbus: Improvements to wait_for_devices().
+ 1. When printing a warning about a timed-out device, print the
+ current state of both ends of the device connection (i.e., backend as
+ well as frontend).
+ 2. A device is 'not yet connected' only when the local state is *less
+ than* XenbusStateConnected. If the state is Closing or Closed
+ (usually because of an explicit failure when trying to make the
+ connection) then we should not wait for the connection to occur -- it
+ will never happen!
+
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+Subject: Wait for up to 5 minutes for devices to connect.
+
+Heavy load in domain 0 can cause very long delays setting up the
+backend.
+
+Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
+
+Acked-by: jbeulich@novell.com
+
+Index: head-2007-08-07/drivers/xen/xenbus/xenbus_probe.c
+===================================================================
+--- head-2007-08-07.orig/drivers/xen/xenbus/xenbus_probe.c 2007-08-07 09:59:32.000000000 +0200
++++ head-2007-08-07/drivers/xen/xenbus/xenbus_probe.c 2007-08-07 10:00:01.000000000 +0200
+@@ -1031,7 +1031,7 @@ static int is_disconnected_device(struct
+ return 0;
+
+ xendrv = to_xenbus_driver(dev->driver);
+- return (xendev->state != XenbusStateConnected ||
++ return (xendev->state < XenbusStateConnected ||
+ (xendrv->is_ready && !xendrv->is_ready(xendev)));
+ }
+
+@@ -1056,10 +1056,13 @@ static int print_device_status(struct de
+ /* Information only: is this too noisy? */
+ printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
+ xendev->nodename);
+- } else if (xendev->state != XenbusStateConnected) {
++ } else if (xendev->state < XenbusStateConnected) {
++ enum xenbus_state rstate = XenbusStateUnknown;
++ if (xendev->otherend)
++ rstate = xenbus_read_driver_state(xendev->otherend);
+ printk(KERN_WARNING "XENBUS: Timeout connecting "
+- "to device: %s (state %d)\n",
+- xendev->nodename, xendev->state);
++ "to device: %s (local state %d, remote state %d)\n",
++ xendev->nodename, xendev->state, rstate);
+ }
+
+ return 0;
+@@ -1069,7 +1072,7 @@ static int print_device_status(struct de
+ static int ready_to_wait_for_devices;
+
+ /*
+- * On a 10 second timeout, wait for all devices currently configured. We need
++ * On a 5-minute timeout, wait for all devices currently configured. We need
+ * to do this to guarantee that the filesystems and / or network devices
+ * needed for boot are available, before we can allow the boot to proceed.
+ *
+@@ -1084,18 +1087,30 @@ static int ready_to_wait_for_devices;
+ */
+ static void wait_for_devices(struct xenbus_driver *xendrv)
+ {
+- unsigned long timeout = jiffies + 10*HZ;
++ unsigned long start = jiffies;
+ struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
++ unsigned int seconds_waited = 0;
+
+ if (!ready_to_wait_for_devices || !is_running_on_xen())
+ return;
+
+ while (exists_disconnected_device(drv)) {
+- if (time_after(jiffies, timeout))
+- break;
++ if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
++ if (!seconds_waited)
++ printk(KERN_WARNING "XENBUS: Waiting for "
++ "devices to initialise: ");
++ seconds_waited += 5;
++ printk("%us...", 300 - seconds_waited);
++ if (seconds_waited == 300)
++ break;
++ }
++
+ schedule_timeout_interruptible(HZ/10);
+ }
+
++ if (seconds_waited)
++ printk("\n");
++
+ bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+ print_device_status);
+ }
diff --git a/trunk/2.6.22/20034_145-xenbus-error-path.patch1 b/trunk/2.6.22/20034_145-xenbus-error-path.patch1
new file mode 100644
index 0000000..ffcf2e9
--- /dev/null
+++ b/trunk/2.6.22/20034_145-xenbus-error-path.patch1
@@ -0,0 +1,24 @@
+# HG changeset 145 patch
+# User kfraser@localhost.localdomain
+# Date 1185293349 -3600
+# Node ID 3b0bce92b2f254242c785d2662776e04a0817301
+# Parent d88e59a7334ae584900a9f7221d494bcd9ef2a63
+Subject: xenbus: Fix obvious use-after-free spotted by Coverity checker.
+Signed-off-by: Adrian Bunk <bunk@stusta.de>
+
+Acked-by: jbeulich@novell.com
+
+Index: head-2007-08-07/drivers/xen/xenbus/xenbus_xs.c
+===================================================================
+--- head-2007-08-07.orig/drivers/xen/xenbus/xenbus_xs.c 2007-08-07 09:59:32.000000000 +0200
++++ head-2007-08-07/drivers/xen/xenbus/xenbus_xs.c 2007-08-07 10:00:04.000000000 +0200
+@@ -802,8 +802,8 @@ static int process_msg(void)
+ msg->u.watch.vec = split(body, msg->hdr.len,
+ &msg->u.watch.vec_size);
+ if (IS_ERR(msg->u.watch.vec)) {
+- kfree(msg);
+ err = PTR_ERR(msg->u.watch.vec);
++ kfree(msg);
+ goto out;
+ }
+
diff --git a/trunk/2.6.22/20035_148-blkfront-no-bounce-bufs.patch1 b/trunk/2.6.22/20035_148-blkfront-no-bounce-bufs.patch1
new file mode 100644
index 0000000..98ebe0f
--- /dev/null
+++ b/trunk/2.6.22/20035_148-blkfront-no-bounce-bufs.patch1
@@ -0,0 +1,25 @@
+# HG changeset 148 patch
+# User Ian Campbell <ian.campbell@xensource.com>
+# Date 1185543936 -3600
+# Node ID 667228bf8fc5f1a21719e11c7eb269d0188a2d60
+# Parent 88a17da7f3362126182423100a9d7d4c0d854139
+Subject: BLKFRONT: Make sure we don't use bounce buffers, we don't need them.
+
+Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
+
+Acked-by: jbeulich@novell.com
+
+Index: head-2007-08-07/drivers/xen/blkfront/vbd.c
+===================================================================
+--- head-2007-08-07.orig/drivers/xen/blkfront/vbd.c 2007-08-07 09:34:50.000000000 +0200
++++ head-2007-08-07/drivers/xen/blkfront/vbd.c 2007-08-07 10:00:07.000000000 +0200
+@@ -213,6 +213,9 @@ xlvbd_init_blk_queue(struct gendisk *gd,
+ /* Make sure buffer addresses are sector-aligned. */
+ blk_queue_dma_alignment(rq, 511);
+
++ /* Make sure we don't use bounce buffers. */
++ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
++
+ gd->queue = rq;
+
+ return 0;
diff --git a/trunk/2.6.22/20036_152-netloop-check-cloned-skb.patch1 b/trunk/2.6.22/20036_152-netloop-check-cloned-skb.patch1
new file mode 100644
index 0000000..6036525
--- /dev/null
+++ b/trunk/2.6.22/20036_152-netloop-check-cloned-skb.patch1
@@ -0,0 +1,35 @@
+# HG changeset 152 patch
+# User kfraser@localhost.localdomain
+# Date 1185977806 -3600
+# Node ID 8d5ae51a09a66ff450b46ebb09ff99475604ed91
+# Parent 1372bc676080a527c98cd4de82637edd319794e7
+Subject: [NET] netloop: Do not clobber cloned skb page frags
+
+The netloop driver tries to localise foreign mappings by
+copying them. Unfortunately, it does so by directly modifying
+skb page frags without checking whether the skb is cloned or
+not. In fact, the packet is going to be cloned more often
+than not.
+
+This may result in either data corruption on DMA or a
+page fault in dom0 which kills the whole machine.
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+
+Acked-by: jbeulich@novell.com
+
+Index: head-2007-08-07/drivers/xen/netback/loopback.c
+===================================================================
+--- head-2007-08-07.orig/drivers/xen/netback/loopback.c 2007-08-07 09:47:09.000000000 +0200
++++ head-2007-08-07/drivers/xen/netback/loopback.c 2007-08-07 10:00:10.000000000 +0200
+@@ -99,6 +99,10 @@ static int skb_remove_foreign_references
+
+ BUG_ON(skb_shinfo(skb)->frag_list);
+
++ if (skb_cloned(skb) &&
++ unlikely(pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
++ return 0;
++
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ pfn = page_to_pfn(skb_shinfo(skb)->frags[i].page);
+ if (!is_foreign(pfn))
diff --git a/trunk/2.6.22/20037_157-netfront-skb-deref.patch1 b/trunk/2.6.22/20037_157-netfront-skb-deref.patch1
new file mode 100644
index 0000000..5f6b822
--- /dev/null
+++ b/trunk/2.6.22/20037_157-netfront-skb-deref.patch1
@@ -0,0 +1,35 @@
+# HG changeset 157 patch
+# User kfraser@localhost.localdomain
+# Date 1187004079 -3600
+# Node ID 877c2e42a701d6a32ca30f35da34ade0b935f820
+# Parent d2f9b7e3623114e6a45c916f21b348fda122fa8e
+Subject: net front: Avoid deref'ing skb after it is potentially freed.
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+Acked-by: jbeulich@novell.com
+
+Index: head-2007-08-22/drivers/xen/netfront/netfront.c
+===================================================================
+--- head-2007-08-22.orig/drivers/xen/netfront/netfront.c 2007-08-22 09:52:40.000000000 +0200
++++ head-2007-08-22/drivers/xen/netfront/netfront.c 2007-08-22 09:52:45.000000000 +0200
+@@ -1015,6 +1015,10 @@ static int network_start_xmit(struct sk_
+ if (notify)
+ notify_remote_via_irq(np->irq);
+
++ np->stats.tx_bytes += skb->len;
++ np->stats.tx_packets++;
++
++ /* Note: It is not safe to access skb after network_tx_buf_gc()! */
+ network_tx_buf_gc(dev);
+
+ if (!netfront_tx_slot_available(np))
+@@ -1022,9 +1026,6 @@ static int network_start_xmit(struct sk_
+
+ spin_unlock_irq(&np->tx_lock);
+
+- np->stats.tx_bytes += skb->len;
+- np->stats.tx_packets++;
+-
+ return 0;
+
+ drop:
diff --git a/trunk/2.6.22/40001_i386-fix-xen_l1_entry_update-for-highptes.patch b/trunk/2.6.22/20038_252-l1-entry-update-highpte.patch1
index d4c059c..dcffacf 100644
--- a/trunk/2.6.22/40001_i386-fix-xen_l1_entry_update-for-highptes.patch
+++ b/trunk/2.6.22/20038_252-l1-entry-update-highpte.patch1
@@ -1,15 +1,18 @@
-# HG changeset patch
+# HG changeset 252 patch
# User Keir Fraser <keir@xensource.com>
# Date 1192114936 -3600
# Node ID e797297402885cc19e0799c7bcaf3e1acb427523
# Parent 48a6d8bc31b8717c4218fc5e3c5bc9d848703db4
-i386: Fix xen_l1_entry_update() for highptes.
+Subject: i386: Fix xen_l1_entry_update() for highptes.
Signed-off-by: Keir Fraser <keir@xensource.com>
-diff -r 48a6d8bc31b8 -r e79729740288 arch/i386/mm/hypervisor.c
---- a/arch/i386/mm/hypervisor.c Wed Oct 10 11:32:15 2007 +0100
-+++ b/arch/i386/mm/hypervisor.c Thu Oct 11 16:02:16 2007 +0100
-@@ -47,7 +47,12 @@ void xen_l1_entry_update(pte_t *ptr, pte
+Acked-by: jbeulich@novell.com
+
+Index: 10.3-2007-10-22/arch/i386/mm/hypervisor.c
+===================================================================
+--- 10.3-2007-10-22.orig/arch/i386/mm/hypervisor.c 2007-10-22 13:48:11.000000000 +0200
++++ 10.3-2007-10-22/arch/i386/mm/hypervisor.c 2007-10-22 13:49:28.000000000 +0200
+@@ -57,7 +57,12 @@
void xen_l1_entry_update(pte_t *ptr, pte_t val)
{
mmu_update_t u;
@@ -19,6 +22,6 @@ diff -r 48a6d8bc31b8 -r e79729740288 arch/i386/mm/hypervisor.c
+#else
u.ptr = virt_to_machine(ptr);
+#endif
- u.val = __pte_val(val);
+ u.val = pte_val_ma(val);
BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}
diff --git a/trunk/2.6.22/20039_265-ptep_get_and_clear.patch1 b/trunk/2.6.22/20039_265-ptep_get_and_clear.patch1
new file mode 100644
index 0000000..b7cf4fa
--- /dev/null
+++ b/trunk/2.6.22/20039_265-ptep_get_and_clear.patch1
@@ -0,0 +1,74 @@
+# HG changeset 265+266 patch
+# User Keir Fraser <keir@xensource.com>
+# Date 1192720728 -3600
+# Node ID 7837d0ec57bce4578c17018ca1ef1ded64fe1dd3
+# Parent 3116d92146522ae5989b40e1915d0baf6daa1008
+Subject: Fix ptep_get_and_clear() on init_mm pte that is not mapped into
+current address space.
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+Fix ptep_get_and_clear(): atomic operation required on user ptes to
+get correct snapshot of A/D bits.
+
+Signed-off-by: Keir Fraser <keir@xensource.com>
+
+Acked-by: jbeulich@novell.com
+
+Index: 10.3-2007-10-22/include/asm-i386/mach-xen/asm/pgtable-2level.h
+===================================================================
+--- 10.3-2007-10-22.orig/include/asm-i386/mach-xen/asm/pgtable-2level.h 2007-10-22 13:48:12.000000000 +0200
++++ 10.3-2007-10-22/include/asm-i386/mach-xen/asm/pgtable-2level.h 2007-10-22 13:52:04.000000000 +0200
+@@ -42,10 +42,9 @@ static inline pte_t ptep_get_and_clear(s
+ {
+ pte_t pte = *ptep;
+ if (!pte_none(pte)) {
+- if (mm != &init_mm)
++ if ((mm != &init_mm) ||
++ HYPERVISOR_update_va_mapping(addr, __pte(0), 0))
+ pte = __pte_ma(xchg(&ptep->pte_low, 0));
+- else
+- HYPERVISOR_update_va_mapping(addr, __pte(0), 0);
+ }
+ return pte;
+ }
+Index: 10.3-2007-10-22/include/asm-i386/mach-xen/asm/pgtable-3level.h
+===================================================================
+--- 10.3-2007-10-22.orig/include/asm-i386/mach-xen/asm/pgtable-3level.h 2007-10-22 13:48:12.000000000 +0200
++++ 10.3-2007-10-22/include/asm-i386/mach-xen/asm/pgtable-3level.h 2007-10-22 13:52:04.000000000 +0200
+@@ -125,7 +125,8 @@ static inline pte_t ptep_get_and_clear(s
+ {
+ pte_t pte = *ptep;
+ if (!pte_none(pte)) {
+- if (mm != &init_mm) {
++ if ((mm != &init_mm) ||
++ HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
+ uint64_t val = pte_val_ma(pte);
+ if (__cmpxchg64(ptep, val, 0) != val) {
+ /* xchg acts as a barrier before the setting of the high bits */
+@@ -133,8 +134,7 @@ static inline pte_t ptep_get_and_clear(s
+ pte.pte_high = ptep->pte_high;
+ ptep->pte_high = 0;
+ }
+- } else
+- HYPERVISOR_update_va_mapping(addr, __pte(0), 0);
++ }
+ }
+ return pte;
+ }
+Index: 10.3-2007-10-22/include/asm-x86_64/mach-xen/asm/pgtable.h
+===================================================================
+--- 10.3-2007-10-22.orig/include/asm-x86_64/mach-xen/asm/pgtable.h 2007-10-22 13:48:12.000000000 +0200
++++ 10.3-2007-10-22/include/asm-x86_64/mach-xen/asm/pgtable.h 2007-10-22 13:52:04.000000000 +0200
+@@ -282,10 +282,9 @@ static inline pte_t ptep_get_and_clear(s
+ {
+ pte_t pte = *ptep;
+ if (!pte_none(pte)) {
+- if (mm != &init_mm)
++ if ((mm != &init_mm) ||
++ HYPERVISOR_update_va_mapping(addr, __pte(0), 0))
+ pte = __pte_ma(xchg(&ptep->pte, 0));
+- else
+- HYPERVISOR_update_va_mapping(addr, __pte(0), 0);
+ }
+ return pte;
+ }
diff --git a/trunk/2.6.22/20040_xen3-fixup-common.patch1 b/trunk/2.6.22/20040_xen3-fixup-common.patch1
new file mode 100644
index 0000000..fdcca5b
--- /dev/null
+++ b/trunk/2.6.22/20040_xen3-fixup-common.patch1
@@ -0,0 +1,365 @@
+Subject: Fix xen build.
+From: jbeulich@novell.com
+
+$subject says all.
+
+---
+ drivers/ide/ide-lib.c | 9 +++++++++
+ drivers/oprofile/buffer_sync.c | 35 ++++++++++++++++++++++++-----------
+ drivers/oprofile/cpu_buffer.c | 6 ++++++
+ drivers/oprofile/oprof.c | 2 ++
+ drivers/oprofile/oprofile_files.c | 6 ++++++
+ include/linux/gfp.h | 6 +-----
+ include/linux/mm.h | 2 ++
+ include/linux/oprofile.h | 6 ++++--
+ include/linux/page-flags.h | 3 +--
+ kernel/timer.c | 14 ++++++++++----
+ mm/memory.c | 4 ++++
+ 11 files changed, 69 insertions(+), 24 deletions(-)
+
+--- a/drivers/ide/ide-lib.c 2007-08-27 14:01:24.000000000 -0400
++++ b/drivers/ide/ide-lib.c 2007-08-27 14:01:25.000000000 -0400
+@@ -341,12 +341,21 @@ void ide_toggle_bounce(ide_drive_t *driv
+ {
+ u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */
+
++#ifndef CONFIG_XEN
++ if (!PCI_DMA_BUS_IS_PHYS) {
++ addr = BLK_BOUNCE_ANY;
++ } else if (on && drive->media == ide_disk) {
++ if (HWIF(drive)->pci_dev)
++ addr = HWIF(drive)->pci_dev->dma_mask;
++ }
++#else
+ if (on && drive->media == ide_disk) {
+ if (!PCI_DMA_BUS_IS_PHYS)
+ addr = BLK_BOUNCE_ANY;
+ else if (HWIF(drive)->pci_dev)
+ addr = HWIF(drive)->pci_dev->dma_mask;
+ }
++#endif
+
+ if (drive->queue)
+ blk_queue_bounce_limit(drive->queue, addr);
+--- a/drivers/oprofile/buffer_sync.c 2007-08-27 14:01:24.000000000 -0400
++++ b/drivers/oprofile/buffer_sync.c 2007-08-27 14:01:25.000000000 -0400
+@@ -43,7 +43,9 @@ static cpumask_t marked_cpus = CPU_MASK_
+ static DEFINE_SPINLOCK(task_mortuary);
+ static void process_task_mortuary(void);
+
++#ifdef CONFIG_XEN
+ static int cpu_current_domain[NR_CPUS];
++#endif
+
+ /* Take ownership of the task struct and place it on the
+ * list for processing. Only after two full buffer syncs
+@@ -152,11 +154,13 @@ static void end_sync(void)
+ int sync_start(void)
+ {
+ int err;
++#ifdef CONFIG_XEN
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ cpu_current_domain[i] = COORDINATOR_DOMAIN;
+ }
++#endif
+
+ start_cpu_work();
+
+@@ -304,12 +308,14 @@ static void add_cpu_mode_switch(unsigned
+ }
+ }
+
++#ifdef CONFIG_XEN
+ static void add_domain_switch(unsigned long domain_id)
+ {
+ add_event_entry(ESCAPE_CODE);
+ add_event_entry(DOMAIN_SWITCH_CODE);
+ add_event_entry(domain_id);
+ }
++#endif
+
+ static void
+ add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
+@@ -533,11 +539,14 @@ void sync_buffer(int cpu)
+
+ add_cpu_switch(cpu);
+
++#ifdef CONFIG_XEN
+ /* We need to assign the first samples in this CPU buffer to the
+ same domain that we were processing at the last sync_buffer */
+ if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
+ add_domain_switch(cpu_current_domain[cpu]);
+ }
++#endif
++
+ /* Remember, only we can modify tail_pos */
+
+ available = get_slots(cpu_buf);
+@@ -555,8 +564,10 @@ void sync_buffer(int cpu)
+ } else if (s->event == CPU_TRACE_BEGIN) {
+ state = sb_bt_start;
+ add_trace_begin();
++#ifdef CONFIG_XEN
+ } else if (s->event == CPU_DOMAIN_SWITCH) {
+- domain_switch = 1;
++ domain_switch = 1;
++#endif
+ } else {
+ struct mm_struct * oldmm = mm;
+
+@@ -570,21 +581,21 @@ void sync_buffer(int cpu)
+ add_user_ctx_switch(new, cookie);
+ }
+ } else {
++#ifdef CONFIG_XEN
+ if (domain_switch) {
+ cpu_current_domain[cpu] = s->eip;
+ add_domain_switch(s->eip);
+ domain_switch = 0;
+- } else {
+- if (cpu_current_domain[cpu] !=
++ } else if (cpu_current_domain[cpu] !=
+ COORDINATOR_DOMAIN) {
+- add_sample_entry(s->eip, s->event);
+- }
+- else if (state >= sb_bt_start &&
+- !add_sample(mm, s, cpu_mode)) {
+- if (state == sb_bt_start) {
+- state = sb_bt_ignore;
+- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
+- }
++ add_sample_entry(s->eip, s->event);
++ } else
++#endif
++ if (state >= sb_bt_start &&
++ !add_sample(mm, s, cpu_mode)) {
++ if (state == sb_bt_start) {
++ state = sb_bt_ignore;
++ atomic_inc(&oprofile_stats.bt_lost_no_mapping);
+ }
+ }
+ }
+@@ -593,10 +604,12 @@ void sync_buffer(int cpu)
+ }
+ release_mm(mm);
+
++#ifdef CONFIG_XEN
+ /* We reset domain to COORDINATOR at each CPU switch */
+ if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
+ add_domain_switch(COORDINATOR_DOMAIN);
+ }
++#endif
+
+ mark_done(cpu);
+
+--- a/drivers/oprofile/cpu_buffer.c 2007-08-27 14:01:24.000000000 -0400
++++ b/drivers/oprofile/cpu_buffer.c 2007-08-27 14:01:25.000000000 -0400
+@@ -38,7 +38,11 @@ static void wq_sync_buffer(struct work_s
+ #define DEFAULT_TIMER_EXPIRE (HZ / 10)
+ static int work_enabled;
+
++#ifndef CONFIG_XEN
++#define current_domain COORDINATOR_DOMAIN
++#else
+ static int32_t current_domain = COORDINATOR_DOMAIN;
++#endif
+
+ void free_cpu_buffers(void)
+ {
+@@ -281,6 +285,7 @@ void oprofile_add_trace(unsigned long pc
+ add_sample(cpu_buf, pc, 0);
+ }
+
++#ifdef CONFIG_XEN
+ int oprofile_add_domain_switch(int32_t domain_id)
+ {
+ struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+@@ -299,6 +304,7 @@ int oprofile_add_domain_switch(int32_t d
+
+ return 1;
+ }
++#endif
+
+ /*
+ * This serves to avoid cpu buffer overflow, and makes sure
+--- a/drivers/oprofile/oprof.c 2007-08-27 14:01:24.000000000 -0400
++++ b/drivers/oprofile/oprof.c 2007-08-27 14:01:25.000000000 -0400
+@@ -37,6 +37,7 @@ static DEFINE_MUTEX(start_mutex);
+ */
+ static int timer = 0;
+
++#ifdef CONFIG_XEN
+ int oprofile_set_active(int active_domains[], unsigned int adomains)
+ {
+ int err;
+@@ -62,6 +63,7 @@ int oprofile_set_passive(int passive_dom
+ mutex_unlock(&start_mutex);
+ return err;
+ }
++#endif
+
+ int oprofile_setup(void)
+ {
+--- a/drivers/oprofile/oprofile_files.c 2007-08-27 14:01:24.000000000 -0400
++++ b/drivers/oprofile/oprofile_files.c 2007-08-27 14:01:25.000000000 -0400
+@@ -124,6 +124,8 @@ static const struct file_operations dump
+ .write = dump_write,
+ };
+
++#ifdef CONFIG_XEN
++
+ #define TMPBUFSIZE 512
+
+ static unsigned int adomains = 0;
+@@ -313,12 +315,16 @@ static struct file_operations passive_do
+ .write = pdomain_write,
+ };
+
++#endif /* CONFIG_XEN */
++
+ void oprofile_create_files(struct super_block * sb, struct dentry * root)
+ {
+ oprofilefs_create_file(sb, root, "enable", &enable_fops);
+ oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
++#ifdef CONFIG_XEN
+ oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
+ oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
++#endif
+ oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
+ oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
+ oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
+--- a/include/linux/gfp.h 2007-08-27 14:01:25.000000000 -0400
++++ b/include/linux/gfp.h 2007-08-27 14:01:25.000000000 -0400
+@@ -115,11 +115,7 @@ static inline enum zone_type gfp_zone(gf
+ */
+
+ #ifndef HAVE_ARCH_FREE_PAGE
+-/*
+- * If arch_free_page returns non-zero then the generic free_page code can
+- * immediately bail: the arch-specific function has done all the work.
+- */
+-static inline int arch_free_page(struct page *page, int order) { return 0; }
++static inline void arch_free_page(struct page *page, int order) { }
+ #endif
+ #ifndef HAVE_ARCH_ALLOC_PAGE
+ static inline void arch_alloc_page(struct page *page, int order) { }
+--- a/include/linux/mm.h 2007-08-27 14:01:25.000000000 -0400
++++ b/include/linux/mm.h 2007-08-27 14:01:25.000000000 -0400
+@@ -211,10 +211,12 @@ struct vm_operations_struct {
+ /* notification that a previously read-only page is about to become
+ * writable, if an error is returned it will cause a SIGBUS */
+ int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
++#ifdef CONFIG_XEN
+ /* Area-specific function for clearing the PTE at @ptep. Returns the
+ * original value of @ptep. */
+ pte_t (*zap_pte)(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, int is_fullmm);
++#endif
+ #ifdef CONFIG_NUMA
+ int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
+ struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
+--- a/include/linux/oprofile.h 2007-08-27 14:01:24.000000000 -0400
++++ b/include/linux/oprofile.h 2007-08-27 14:01:25.000000000 -0400
+@@ -16,8 +16,9 @@
+ #include <linux/types.h>
+ #include <linux/spinlock.h>
+ #include <asm/atomic.h>
+-
++#ifdef CONFIG_XEN
+ #include <xen/interface/xenoprof.h>
++#endif
+
+ struct super_block;
+ struct dentry;
+@@ -29,11 +30,12 @@ struct oprofile_operations {
+ /* create any necessary configuration files in the oprofile fs.
+ * Optional. */
+ int (*create_files)(struct super_block * sb, struct dentry * root);
++#ifdef CONFIG_XEN
+ /* setup active domains with Xen */
+ int (*set_active)(int *active_domains, unsigned int adomains);
+ /* setup passive domains with Xen */
+ int (*set_passive)(int *passive_domains, unsigned int pdomains);
+-
++#endif
+ /* Do any necessary interrupt setup. Optional. */
+ int (*setup)(void);
+ /* Do any necessary interrupt shutdown. Optional. */
+--- a/include/linux/page-flags.h 2007-08-27 14:01:25.000000000 -0400
++++ b/include/linux/page-flags.h 2007-08-27 14:01:25.000000000 -0400
+@@ -88,6 +88,7 @@
+
+ #define PG_mappedtodisk 16 /* Has blocks allocated on-disk */
+ #define PG_reclaim 17 /* To be reclaimed asap */
++#define PG_foreign 18 /* Page is owned by foreign allocator. */
+ #define PG_buddy 19 /* Page is free, on buddy lists */
+
+ /* PG_owner_priv_1 users should have descriptive aliases */
+@@ -104,8 +105,6 @@
+ #define PG_uncached 31 /* Page has been mapped as uncached */
+ #endif
+
+-#define PG_foreign 20 /* Page is owned by foreign allocator. */
+-
+ /*
+ * Manipulation of page state flags
+ */
+--- a/kernel/timer.c 2007-08-27 14:01:24.000000000 -0400
++++ b/kernel/timer.c 2007-08-27 14:01:25.000000000 -0400
+@@ -781,7 +781,7 @@ static unsigned long cmp_next_hrtimer_ev
+ unsigned long get_next_timer_interrupt(unsigned long now)
+ {
+ tvec_base_t *base = __get_cpu_var(tvec_bases);
+- unsigned long expires, sl_next;
++ unsigned long expires;
+
+ spin_lock(&base->lock);
+ expires = __next_timer_interrupt(base);
+@@ -790,11 +790,17 @@ unsigned long get_next_timer_interrupt(u
+ if (time_before_eq(expires, now))
+ return now;
+
++#ifndef CONFIG_XEN
++ return cmp_next_hrtimer_event(now, expires);
++#else
+ expires = cmp_next_hrtimer_event(now, expires);
+- sl_next = softlockup_get_next_event();
++ {
++ unsigned long sl_next = softlockup_get_next_event();
+
+- return expires <= now || expires - now < sl_next
+- ? expires : now + sl_next;
++ return expires <= now || expires - now < sl_next
++ ? expires : now + sl_next;
++ }
++#endif
+ }
+
+ #ifdef CONFIG_NO_IDLE_HZ
+--- a/mm/memory.c 2007-08-27 14:01:25.000000000 -0400
++++ b/mm/memory.c 2007-08-27 14:01:25.000000000 -0400
+@@ -404,7 +404,9 @@ struct page *vm_normal_page(struct vm_ar
+ * and that the resulting page looks ok.
+ */
+ if (unlikely(!pfn_valid(pfn))) {
++#ifdef CONFIG_XEN
+ if (!(vma->vm_flags & VM_RESERVED))
++#endif
+ print_bad_pte(vma, pte, addr);
+ return NULL;
+ }
+@@ -663,10 +665,12 @@ static unsigned long zap_pte_range(struc
+ page->index > details->last_index))
+ continue;
+ }
++#ifdef CONFIG_XEN
+ if (unlikely(vma->vm_ops && vma->vm_ops->zap_pte))
+ ptent = vma->vm_ops->zap_pte(vma, addr, pte,
+ tlb->fullmm);
+ else
++#endif
+ ptent = ptep_get_and_clear_full(mm, addr, pte,
+ tlb->fullmm);
+ tlb_remove_tlb_entry(tlb, pte, addr);
diff --git a/trunk/2.6.22/20041_xen3-fixup-arch-i386.patch1 b/trunk/2.6.22/20041_xen3-fixup-arch-i386.patch1
new file mode 100644
index 0000000..89a22a3
--- /dev/null
+++ b/trunk/2.6.22/20041_xen3-fixup-arch-i386.patch1
@@ -0,0 +1,76 @@
+Subject: xen3 i386 build fixes.
+From: jbeulich@novell.com
+Patch-mainline: obsolete
+
+$subject says all.
+
+Index: 10.3-2007-11-26/arch/i386/kernel/Makefile
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/Makefile 2007-11-26 14:16:17.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/Makefile 2007-11-26 14:17:13.000000000 +0100
+@@ -58,7 +58,7 @@ endif
+ # Note: kbuild does not track this dependency due to usage of .incbin
+ $(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so
+ targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so)
+-targets += vsyscall-note.o vsyscall.lds
++targets += $(vsyscall_note) vsyscall.lds
+
+ # The DSO images are built using a special linker script.
+ quiet_cmd_syscall = SYSCALL $@
+Index: 10.3-2007-11-26/arch/i386/kernel/crash.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/crash.c 2007-11-26 14:16:17.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/crash.c 2007-11-26 14:17:13.000000000 +0100
+@@ -27,11 +27,10 @@
+
+ #include <mach_ipi.h>
+
+-
++#ifndef CONFIG_XEN
+ /* This keeps a track of which one is crashing cpu. */
+ static int crashing_cpu;
+
+-#ifndef CONFIG_XEN
+ #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
+ static atomic_t waiting_for_crash_ipi;
+
+@@ -128,9 +127,9 @@ void machine_crash_shutdown(struct pt_re
+ /* The kernel is broken so disable interrupts */
+ local_irq_disable();
+
++#ifndef CONFIG_XEN
+ /* Make a note of crashing cpu. Will be used in NMI callback.*/
+ crashing_cpu = safe_smp_processor_id();
+-#ifndef CONFIG_XEN
+ nmi_shootdown_cpus();
+ lapic_shutdown();
+ #if defined(CONFIG_X86_IO_APIC)
+Index: 10.3-2007-11-26/arch/i386/power/Makefile
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/power/Makefile 2007-08-07 09:34:51.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/power/Makefile 2007-11-26 14:17:13.000000000 +0100
+@@ -1,4 +1,4 @@
+-obj-$(CONFIG_PM_LEGACY) += cpu.o
++obj-$(subst m,y,$(CONFIG_APM)) += cpu.o
+ obj-$(CONFIG_SOFTWARE_SUSPEND) += cpu.o
+ obj-$(CONFIG_ACPI_SLEEP) += cpu.o
+ obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o suspend.o
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/page.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/page.h 2007-11-26 14:16:17.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/page.h 2007-11-26 14:17:13.000000000 +0100
+@@ -37,14 +37,6 @@
+ #include <xen/interface/xen.h>
+ #include <xen/features.h>
+
+-#define arch_free_page(_page,_order) \
+-({ int foreign = PageForeign(_page); \
+- if (foreign) \
+- PageForeignDestructor(_page); \
+- foreign; \
+-})
+-#define HAVE_ARCH_FREE_PAGE
+-
+ #ifdef CONFIG_X86_USE_3DNOW
+
+ #include <asm/mmx.h>
diff --git a/trunk/2.6.22/20042_xen3-fixup-arch-x86_64.patch1 b/trunk/2.6.22/20042_xen3-fixup-arch-x86_64.patch1
new file mode 100644
index 0000000..3262f43
--- /dev/null
+++ b/trunk/2.6.22/20042_xen3-fixup-arch-x86_64.patch1
@@ -0,0 +1,103 @@
+Subject: Fix x86_64 xen build.
+From: jbeulich@novell.com
+Patch-mainline: obsolete
+
+$subject says all.
+
+---
+ arch/x86_64/kernel/crash.c | 4 ++--
+ arch/x86_64/kernel/pci-swiotlb-xen.c | 2 ++
+ arch/x86_64/kernel/process-xen.c | 7 -------
+ include/asm-x86_64/i387.h | 7 ++++++-
+ include/asm-x86_64/mach-xen/asm/page.h | 8 --------
+ 5 files changed, 10 insertions(+), 18 deletions(-)
+
+--- a/arch/x86_64/kernel/crash.c 2007-08-27 14:01:25.000000000 -0400
++++ b/arch/x86_64/kernel/crash.c 2007-08-27 14:01:25.000000000 -0400
+@@ -25,10 +25,10 @@
+ #include <asm/hw_irq.h>
+ #include <asm/mach_apic.h>
+
++#ifndef CONFIG_XEN
+ /* This keeps a track of which one is crashing cpu. */
+ static int crashing_cpu;
+
+-#ifndef CONFIG_XEN
+ #ifdef CONFIG_SMP
+ static atomic_t waiting_for_crash_ipi;
+
+@@ -124,10 +124,10 @@ void machine_crash_shutdown(struct pt_re
+ /* The kernel is broken so disable interrupts */
+ local_irq_disable();
+
++#ifndef CONFIG_XEN
+ /* Make a note of crashing cpu. Will be used in NMI callback.*/
+ crashing_cpu = smp_processor_id();
+
+-#ifndef CONFIG_XEN
+ nmi_shootdown_cpus();
+
+ if(cpu_has_apic)
+--- a/arch/x86_64/kernel/pci-swiotlb-xen.c 2007-08-27 14:01:25.000000000 -0400
++++ b/arch/x86_64/kernel/pci-swiotlb-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -13,6 +13,8 @@ int swiotlb __read_mostly;
+ EXPORT_SYMBOL(swiotlb);
+ #endif
+
++void swiotlb_init(void);
++
+ struct dma_mapping_ops swiotlb_dma_ops = {
+ #if 0
+ .mapping_error = swiotlb_dma_mapping_error,
+--- a/arch/x86_64/kernel/process-xen.c 2007-08-27 14:01:25.000000000 -0400
++++ b/arch/x86_64/kernel/process-xen.c 2007-08-27 14:02:03.000000000 -0400
+@@ -465,13 +465,6 @@ out:
+ return err;
+ }
+
+-static inline void __save_init_fpu( struct task_struct *tsk )
+-{
+- asm volatile( "rex64 ; fxsave %0 ; fnclex"
+- : "=m" (tsk->thread.i387.fxsave));
+- tsk->thread_info->status &= ~TS_USEDFPU;
+-}
+-
+ /*
+ * switch_to(x,y) should switch tasks from x to y.
+ *
+--- a/include/asm-x86_64/i387.h 2007-08-27 12:09:22.000000000 -0400
++++ b/include/asm-x86_64/i387.h 2007-08-27 14:01:25.000000000 -0400
+@@ -191,10 +191,15 @@ static inline void kernel_fpu_end(void)
+ preempt_enable();
+ }
+
+-static inline void save_init_fpu(struct task_struct *tsk)
++static inline void __save_init_fpu(struct task_struct *tsk)
+ {
+ __fxsave_clear(tsk);
+ task_thread_info(tsk)->status &= ~TS_USEDFPU;
++}
++
++static inline void save_init_fpu(struct task_struct *tsk)
++{
++ __save_init_fpu(tsk);
+ stts();
+ }
+
+--- a/include/asm-x86_64/mach-xen/asm/page.h 2007-08-27 14:01:25.000000000 -0400
++++ b/include/asm-x86_64/mach-xen/asm/page.h 2007-08-27 14:01:58.000000000 -0400
+@@ -16,14 +16,6 @@
+ */
+ #define _PAGE_PRESENT 0x001
+
+-#define arch_free_page(_page,_order) \
+-({ int foreign = PageForeign(_page); \
+- if (foreign) \
+- PageForeignDestructor(_page); \
+- foreign; \
+-})
+-#define HAVE_ARCH_FREE_PAGE
+-
+ /* PAGE_SHIFT determines the page size */
+ #define PAGE_SHIFT 12
+ #ifdef __ASSEMBLY__
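A userspace sketch of what the i387.h hunk above does: the fxsave/fnclex step is factored into __save_init_fpu(), and save_init_fpu() becomes that step followed by stts(), replacing the copy that process-xen.c used to open-code. The struct and the two hardware stubs below are inventions of this sketch, not kernel definitions.

    #include <stdbool.h>
    #include <stdio.h>

    struct task { bool ts_usedfpu; };

    static void fxsave_clear(struct task *tsk) { (void)tsk; /* rex64 fxsave; fnclex */ }
    static void stts(void) { /* set CR0.TS so the next FPU use faults */ }

    static void __save_init_fpu(struct task *tsk)
    {
        fxsave_clear(tsk);
        tsk->ts_usedfpu = false;      /* analogous to clearing TS_USEDFPU */
    }

    static void save_init_fpu(struct task *tsk)
    {
        __save_init_fpu(tsk);
        stts();
    }

    int main(void)
    {
        struct task t = { .ts_usedfpu = true };
        save_init_fpu(&t);
        printf("TS_USEDFPU now %d\n", t.ts_usedfpu);
        return 0;
    }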
diff --git a/trunk/2.6.22/20043_xen3-patch-2.6.18.patch1 b/trunk/2.6.22/20043_xen3-patch-2.6.18.patch1
new file mode 100644
index 0000000..3d8dd09
--- /dev/null
+++ b/trunk/2.6.22/20043_xen3-patch-2.6.18.patch1
@@ -0,0 +1,394 @@
+From: www.kernel.org
+Subject: Linux 2.6.18
+Patch-mainline: 2.6.18
+
+Automatically created from "patches.kernel.org/patch-2.6.18" by xen-port-patches.py
+
+Acked-by: jbeulich@novell.com
+
+Index: 10.3-2007-11-26/arch/i386/Kconfig
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/Kconfig 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/Kconfig 2007-09-03 09:52:56.000000000 +0200
+@@ -16,7 +16,6 @@ config X86_32
+
+ config GENERIC_TIME
+ bool
+- depends on !X86_XEN
+ default y
+
+ config CLOCKSOURCE_WATCHDOG
+Index: 10.3-2007-11-26/arch/i386/kernel/Makefile
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/Makefile 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/Makefile 2007-09-03 09:52:56.000000000 +0200
+@@ -98,7 +98,7 @@ include $(srctree)/scripts/Makefile.xen
+
+ obj-y += fixup.o
+ microcode-$(subst m,y,$(CONFIG_MICROCODE)) := microcode-xen.o
+-n-obj-xen := i8259.o timers/ reboot.o smpboot.o trampoline.o
++n-obj-xen := i8253.o i8259.o reboot.o smpboot.o trampoline.o tsc.o
+
+ obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
+ obj-y := $(call cherrypickxen, $(obj-y))
+Index: 10.3-2007-11-26/arch/i386/kernel/time-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/time-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/time-xen.c 2007-12-06 17:31:37.000000000 +0100
+@@ -75,8 +75,13 @@
+
+ #if defined (__i386__)
+ #include <asm/i8259.h>
++#include <asm/i8253.h>
++DEFINE_SPINLOCK(i8253_lock);
++EXPORT_SYMBOL(i8253_lock);
+ #endif
+
++#define XEN_SHIFT 22
++
+ int pit_latch_buggy; /* extern */
+
+ #if defined(__x86_64__)
+@@ -96,10 +101,6 @@ extern unsigned long wall_jiffies;
+ DEFINE_SPINLOCK(rtc_lock);
+ EXPORT_SYMBOL(rtc_lock);
+
+-extern struct init_timer_opts timer_tsc_init;
+-extern struct timer_opts timer_tsc;
+-#define timer_none timer_tsc
+-
+ /* These are peridically updated in shared_info, and then copied here. */
+ struct shadow_time_info {
+ u64 tsc_timestamp; /* TSC at last update of time vals. */
+@@ -244,6 +245,7 @@ static u64 get_nsec_offset(struct shadow
+ return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
+ }
+
++#ifdef CONFIG_X86_64
+ static unsigned long get_usec_offset(struct shadow_time_info *shadow)
+ {
+ u64 now, delta;
+@@ -251,6 +253,7 @@ static unsigned long get_usec_offset(str
+ delta = now - shadow->tsc_timestamp;
+ return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
+ }
++#endif
+
+ static void __update_wallclock(time_t sec, long nsec)
+ {
+@@ -360,6 +363,8 @@ void rtc_cmos_write(unsigned char val, u
+ }
+ EXPORT_SYMBOL(rtc_cmos_write);
+
++#ifdef CONFIG_X86_64
++
+ /*
+ * This version of gettimeofday has microsecond resolution
+ * and better than microsecond precision on fast x86 machines with TSC.
+@@ -488,6 +493,8 @@ int do_settimeofday(struct timespec *tv)
+
+ EXPORT_SYMBOL(do_settimeofday);
+
++#endif
++
+ static void sync_xen_wallclock(unsigned long dummy);
+ static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
+ static void sync_xen_wallclock(unsigned long dummy)
+@@ -539,11 +546,15 @@ static int set_rtc_mmss(unsigned long no
+ return retval;
+ }
+
++#ifdef CONFIG_X86_64
+ /* monotonic_clock(): returns # of nanoseconds passed since time_init()
+ * Note: This function is required to return accurate
+ * time even in the absence of multiple timer ticks.
+ */
+ unsigned long long monotonic_clock(void)
++#else
++unsigned long long sched_clock(void)
++#endif
+ {
+ int cpu = get_cpu();
+ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
+@@ -563,9 +574,9 @@ unsigned long long monotonic_clock(void)
+
+ return time;
+ }
++#ifdef CONFIG_X86_64
+ EXPORT_SYMBOL(monotonic_clock);
+
+-#ifdef __x86_64__
+ unsigned long long sched_clock(void)
+ {
+ return monotonic_clock();
+@@ -735,6 +746,87 @@ irqreturn_t timer_interrupt(int irq, voi
+ return IRQ_HANDLED;
+ }
+
++#ifndef CONFIG_X86_64
++
++void tsc_init(void)
++{
++ init_cpu_khz();
++ printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
++ cpu_khz / 1000, cpu_khz % 1000);
++
++ use_tsc_delay();
++}
++
++#include <linux/clocksource.h>
++
++void mark_tsc_unstable(void)
++{
++#ifndef CONFIG_XEN /* XXX Should tell the hypervisor about this fact. */
++ tsc_unstable = 1;
++#endif
++}
++EXPORT_SYMBOL_GPL(mark_tsc_unstable);
++
++static cycle_t xen_clocksource_read(void)
++{
++ cycle_t ret = sched_clock();
++
++#ifdef CONFIG_SMP
++ for (;;) {
++ static cycle_t last_ret;
++#ifndef CONFIG_64BIT
++ cycle_t last = cmpxchg64(&last_ret, 0, 0);
++#else
++ cycle_t last = last_ret;
++#define cmpxchg64 cmpxchg
++#endif
++
++ if ((s64)(ret - last) < 0) {
++ if (last - ret > permitted_clock_jitter
++ && printk_ratelimit()) {
++ unsigned int cpu = get_cpu();
++ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++
++ printk(KERN_WARNING "clocksource/%u: "
++ "Time went backwards: "
++ "ret=%Lx delta=%Ld shadow=%Lx offset=%Lx\n",
++ cpu, ret, ret - last,
++ shadow->system_timestamp,
++ get_nsec_offset(shadow));
++ put_cpu();
++ }
++ ret = last;
++ }
++ if (cmpxchg64(&last_ret, last, ret) == last)
++ break;
++ }
++#endif
++
++ return ret;
++}
++
++static struct clocksource clocksource_xen = {
++ .name = "xen",
++ .rating = 400,
++ .read = xen_clocksource_read,
++ .mask = CLOCKSOURCE_MASK(64),
++ .mult = 1 << XEN_SHIFT, /* time directly in nanoseconds */
++ .shift = XEN_SHIFT,
++ .is_continuous = 1,
++};
++
++static int __init init_xen_clocksource(void)
++{
++ clocksource_xen.mult = clocksource_khz2mult(cpu_khz,
++ clocksource_xen.shift);
++
++ return clocksource_register(&clocksource_xen);
++}
++
++module_init(init_xen_clocksource);
++
++#endif
++
+ static void init_missing_ticks_accounting(int cpu)
+ {
+ struct vcpu_register_runstate_memory_area area;
+@@ -942,11 +1034,11 @@ void __init time_init(void)
+
+ update_wallclock();
+
++#ifdef CONFIG_X86_64
+ init_cpu_khz();
+ printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
+ cpu_khz / 1000, cpu_khz % 1000);
+
+-#if defined(__x86_64__)
+ vxtime.mode = VXTIME_TSC;
+ vxtime.quot = (1000000L << 32) / vxtime_hz;
+ vxtime.tsc_quot = (1000L << 32) / cpu_khz;
+Index: 10.3-2007-11-26/drivers/xen/Kconfig
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/Kconfig 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/Kconfig 2007-09-03 09:52:56.000000000 +0200
+@@ -216,6 +216,7 @@ config XEN_DISABLE_SERIAL
+ config XEN_SYSFS
+ tristate "Export Xen attributes in sysfs"
+ depends on SYSFS
++ select SYS_HYPERVISOR
+ default y
+ help
+ Xen hypervisor attributes will show up under /sys/hypervisor/.
+Index: 10.3-2007-11-26/drivers/xen/core/Makefile
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/core/Makefile 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/core/Makefile 2007-09-03 09:52:56.000000000 +0200
+@@ -5,7 +5,7 @@
+ obj-y := evtchn.o gnttab.o features.o reboot.o machine_reboot.o firmware.o
+
+ obj-$(CONFIG_PROC_FS) += xen_proc.o
+-obj-$(CONFIG_SYSFS) += hypervisor_sysfs.o
++obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor_sysfs.o
+ obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
+ obj-$(CONFIG_XEN_SYSFS) += xen_sysfs.o
+ obj-$(CONFIG_XEN_SMPBOOT) += smpboot.o
+Index: 10.3-2007-11-26/drivers/xen/core/hypervisor_sysfs.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/core/hypervisor_sysfs.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/core/hypervisor_sysfs.c 2007-09-03 09:52:56.000000000 +0200
+@@ -12,8 +12,6 @@
+ #include <linux/kobject.h>
+ #include <xen/hypervisor_sysfs.h>
+
+-decl_subsys(hypervisor, NULL, NULL);
+-
+ static ssize_t hyp_sysfs_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buffer)
+@@ -37,7 +35,7 @@ static ssize_t hyp_sysfs_store(struct ko
+ return 0;
+ }
+
+-struct sysfs_ops hyp_sysfs_ops = {
++static struct sysfs_ops hyp_sysfs_ops = {
+ .show = hyp_sysfs_show,
+ .store = hyp_sysfs_store,
+ };
+@@ -52,8 +50,7 @@ static int __init hypervisor_subsys_init
+ return -ENODEV;
+
+ hypervisor_subsys.kset.kobj.ktype = &hyp_sysfs_kobj_type;
+- return subsystem_register(&hypervisor_subsys);
++ return 0;
+ }
+
+ device_initcall(hypervisor_subsys_init);
+-EXPORT_SYMBOL_GPL(hypervisor_subsys);
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/processor.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/processor.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/processor.h 2007-09-03 09:52:56.000000000 +0200
+@@ -23,7 +23,7 @@
+ #include <xen/interface/physdev.h>
+
+ /* flag for disabling the tsc */
+-extern int tsc_disable;
++#define tsc_disable 0
+
+ struct desc_struct {
+ unsigned long a,b;
+Index: 10.3-2007-11-26/include/asm-i386/thread_info.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/thread_info.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/thread_info.h 2007-09-03 09:52:56.000000000 +0200
+@@ -160,7 +160,11 @@ static inline struct thread_info *curren
+ #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
+
+ /* flags to check in __switch_to() */
++#ifndef CONFIG_XEN
+ #define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP)
++#else
++#define _TIF_WORK_CTXSW _TIF_DEBUG
++#endif
+
+ /*
+ * Thread-synchronous status.
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/timer.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/timer.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/timer.h 2007-09-03 09:52:56.000000000 +0200
+@@ -2,39 +2,8 @@
+ #define _ASMi386_TIMER_H
+ #include <linux/init.h>
+
+-/**
+- * struct timer_ops - used to define a timer source
+- *
+- * @name: name of the timer.
+- * @init: Probes and initializes the timer. Takes clock= override
+- * string as an argument. Returns 0 on success, anything else
+- * on failure.
+- * @mark_offset: called by the timer interrupt.
+- * @get_offset: called by gettimeofday(). Returns the number of microseconds
+- * since the last timer interupt.
+- * @monotonic_clock: returns the number of nanoseconds since the init of the
+- * timer.
+- * @delay: delays this many clock cycles.
+- */
+-struct timer_opts {
+- char* name;
+- void (*mark_offset)(void);
+- unsigned long (*get_offset)(void);
+- unsigned long long (*monotonic_clock)(void);
+- void (*delay)(unsigned long);
+- unsigned long (*read_timer)(void);
+- int (*suspend)(pm_message_t state);
+- int (*resume)(void);
+-};
+-
+-struct init_timer_opts {
+- int (*init)(char *override);
+- struct timer_opts *opts;
+-};
+-
+ #define TICK_SIZE (tick_nsec / 1000)
+
+-extern struct timer_opts* __init select_timer(void);
+ extern void clock_fallback(void);
+ void setup_pit_timer(void);
+
+@@ -42,26 +11,13 @@ void setup_pit_timer(void);
+
+ extern int pit_latch_buggy;
+
+-extern struct timer_opts *cur_timer;
+ extern int timer_ack;
+
+ /* list of externed timers */
+-extern struct timer_opts timer_none;
+-extern struct timer_opts timer_pit;
+-extern struct init_timer_opts timer_pit_init;
+-extern struct init_timer_opts timer_tsc_init;
+-#ifdef CONFIG_X86_CYCLONE_TIMER
+-extern struct init_timer_opts timer_cyclone_init;
+-#endif
+-
+ extern unsigned long calibrate_tsc(void);
+ extern void init_cpu_khz(void);
+ #ifdef CONFIG_HPET_TIMER
+-extern struct init_timer_opts timer_hpet_init;
+ extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
+ #endif
+
+-#ifdef CONFIG_X86_PM_TIMER
+-extern struct init_timer_opts timer_pmtmr_init;
+-#endif
+ #endif
+Index: 10.3-2007-11-26/include/xen/hypervisor_sysfs.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/xen/hypervisor_sysfs.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/xen/hypervisor_sysfs.h 2007-09-03 09:52:56.000000000 +0200
+@@ -20,8 +20,6 @@ static struct hyp_sysfs_attr _name##_at
+ static struct hyp_sysfs_attr _name##_attr = \
+ __ATTR(_name, 0644, _name##_show, _name##_store)
+
+-extern struct subsystem hypervisor_subsys;
+-
+ struct hyp_sysfs_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct hyp_sysfs_attr *, char *);
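A standalone sketch (not kernel code) of the monotonicity guard used by xen_clocksource_read() in the patch above: readers publish the last value they returned, and a reader whose raw clock is behind that value returns the published value instead, so the clocksource never appears to run backwards across CPUs. raw_clock() stands in for the shadow-time based sched_clock(); it is not the kernel interface.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static _Atomic uint64_t last_ret;

    static uint64_t raw_clock(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    static uint64_t monotonic_read(void)
    {
        uint64_t ret = raw_clock();

        for (;;) {
            uint64_t last = atomic_load(&last_ret);

            if ((int64_t)(ret - last) < 0)   /* raw clock went backwards */
                ret = last;
            if (atomic_compare_exchange_weak(&last_ret, &last, ret))
                return ret;
            /* lost the race: another reader updated last_ret; retry */
        }
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)monotonic_read());
        printf("%llu\n", (unsigned long long)monotonic_read());
        return 0;
    }

The signed subtraction (int64_t)(ret - last) is the same wrap-safe comparison the patch uses; the rate-limited "Time went backwards" warning and the permitted_clock_jitter check are omitted here.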
diff --git a/trunk/2.6.22/20044_xen3-patch-2.6.19.patch1 b/trunk/2.6.22/20044_xen3-patch-2.6.19.patch1
new file mode 100644
index 0000000..908b07d
--- /dev/null
+++ b/trunk/2.6.22/20044_xen3-patch-2.6.19.patch1
@@ -0,0 +1,12637 @@
+From: www.kernel.org
+Subject: Linux 2.6.19
+Patch-mainline: 2.6.19
+
+Automatically created from "patches.kernel.org/patch-2.6.19" by xen-port-patches.py
+
+Acked-by: jbeulich@novell.com
+
+Index: 10.3-2007-11-26/arch/i386/Kconfig
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/Kconfig 2007-09-03 09:52:56.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/Kconfig 2007-10-22 13:53:08.000000000 +0200
+@@ -222,7 +222,7 @@ endchoice
+ config PARAVIRT
+ bool "Paravirtualization support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+- depends on !(X86_VISWS || X86_VOYAGER)
++ depends on !(X86_VISWS || X86_VOYAGER || X86_XEN)
+ help
+ Paravirtualization is a way of running multiple instances of
+ Linux on the same machine, under a hypervisor. This option
+Index: 10.3-2007-11-26/arch/i386/kernel/acpi/Makefile
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/acpi/Makefile 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/acpi/Makefile 2007-10-22 13:53:08.000000000 +0200
+@@ -7,5 +7,7 @@ endif
+
+ ifdef CONFIG_XEN
+ include $(srctree)/scripts/Makefile.xen
++n-obj-xen := cstate.o
++obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
+ obj-y := $(call cherrypickxen, $(obj-y), $(src))
+ endif
+Index: 10.3-2007-11-26/arch/i386/kernel/acpi/boot-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/acpi/boot-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/acpi/boot-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -26,9 +26,12 @@
+ #include <linux/init.h>
+ #include <linux/acpi.h>
+ #include <linux/efi.h>
++#include <linux/cpumask.h>
+ #include <linux/module.h>
+ #include <linux/dmi.h>
+ #include <linux/irq.h>
++#include <linux/bootmem.h>
++#include <linux/ioport.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/io_apic.h>
+@@ -36,11 +39,17 @@
+ #include <asm/io.h>
+ #include <asm/mpspec.h>
+
+-#ifdef CONFIG_X86_64
++static int __initdata acpi_force = 0;
++
++#ifdef CONFIG_ACPI
++int acpi_disabled = 0;
++#else
++int acpi_disabled = 1;
++#endif
++EXPORT_SYMBOL(acpi_disabled);
+
+-extern void __init clustered_apic_check(void);
++#ifdef CONFIG_X86_64
+
+-extern int gsi_irq_sharing(int gsi);
+ #include <asm/proto.h>
+
+ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
+@@ -53,8 +62,6 @@ static inline int acpi_madt_oem_check(ch
+ #include <mach_mpparse.h>
+ #endif /* CONFIG_X86_LOCAL_APIC */
+
+-static inline int gsi_irq_sharing(int gsi) { return gsi; }
+-
+ #endif /* X86 */
+
+ #define BAD_MADT_ENTRY(entry, end) ( \
+@@ -63,7 +70,7 @@ static inline int gsi_irq_sharing(int gs
+
+ #define PREFIX "ACPI: "
+
+-int acpi_noirq __initdata; /* skip ACPI IRQ initialization */
++int acpi_noirq; /* skip ACPI IRQ initialization */
+ int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
+ int acpi_ht __initdata = 1; /* enable HT */
+
+@@ -75,6 +82,7 @@ EXPORT_SYMBOL(acpi_strict);
+ acpi_interrupt_flags acpi_sci_flags __initdata;
+ int acpi_sci_override_gsi __initdata;
+ int acpi_skip_timer_override __initdata;
++int acpi_use_timer_override __initdata;
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
+@@ -327,7 +335,7 @@ acpi_parse_ioapic(acpi_table_entry_heade
+ /*
+ * Parse Interrupt Source Override for the ACPI SCI
+ */
+-static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
++static void acpi_sci_ioapic_setup(u32 bus_irq, u32 gsi, u16 polarity, u16 trigger)
+ {
+ if (trigger == 0) /* compatible SCI trigger is level */
+ trigger = 3;
+@@ -347,13 +355,13 @@ static void acpi_sci_ioapic_setup(u32 gs
+ * If GSI is < 16, this will update its flags,
+ * else it will create a new mp_irqs[] entry.
+ */
+- mp_override_legacy_irq(gsi, polarity, trigger, gsi);
++ mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+
+ /*
+ * stash over-ride to indicate we've been here
+ * and for later update of acpi_fadt
+ */
+- acpi_sci_override_gsi = gsi;
++ acpi_sci_override_gsi = bus_irq;
+ return;
+ }
+
+@@ -371,7 +379,7 @@ acpi_parse_int_src_ovr(acpi_table_entry_
+ acpi_table_print_madt_entry(header);
+
+ if (intsrc->bus_irq == acpi_fadt.sci_int) {
+- acpi_sci_ioapic_setup(intsrc->global_irq,
++ acpi_sci_ioapic_setup(intsrc->bus_irq, intsrc->global_irq,
+ intsrc->flags.polarity,
+ intsrc->flags.trigger);
+ return 0;
+@@ -461,12 +469,7 @@ void __init acpi_pic_sci_set_trigger(uns
+
+ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
+ {
+-#ifdef CONFIG_X86_IO_APIC
+- if (use_pci_vector() && !platform_legacy_irq(gsi))
+- *irq = IO_APIC_VECTOR(gsi);
+- else
+-#endif
+- *irq = gsi_irq_sharing(gsi);
++ *irq = gsi;
+ return 0;
+ }
+
+@@ -508,16 +511,76 @@ EXPORT_SYMBOL(acpi_register_gsi);
+ #ifdef CONFIG_ACPI_HOTPLUG_CPU
+ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
+ {
+- /* TBD */
+- return -EINVAL;
++ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
++ union acpi_object *obj;
++ struct acpi_table_lapic *lapic;
++ cpumask_t tmp_map, new_map;
++ u8 physid;
++ int cpu;
++
++ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
++ return -EINVAL;
++
++ if (!buffer.length || !buffer.pointer)
++ return -EINVAL;
++
++ obj = buffer.pointer;
++ if (obj->type != ACPI_TYPE_BUFFER ||
++ obj->buffer.length < sizeof(*lapic)) {
++ kfree(buffer.pointer);
++ return -EINVAL;
++ }
++
++ lapic = (struct acpi_table_lapic *)obj->buffer.pointer;
++
++ if ((lapic->header.type != ACPI_MADT_LAPIC) ||
++ (!lapic->flags.enabled)) {
++ kfree(buffer.pointer);
++ return -EINVAL;
++ }
++
++ physid = lapic->id;
++
++ kfree(buffer.pointer);
++ buffer.length = ACPI_ALLOCATE_BUFFER;
++ buffer.pointer = NULL;
++
++ tmp_map = cpu_present_map;
++ mp_register_lapic(physid, lapic->flags.enabled);
++
++ /*
++ * If mp_register_lapic successfully generates a new logical cpu
++ * number, then the following will get us exactly what was mapped
++ */
++ cpus_andnot(new_map, cpu_present_map, tmp_map);
++ if (cpus_empty(new_map)) {
++ printk ("Unable to map lapic to logical cpu number\n");
++ return -EINVAL;
++ }
++
++ cpu = first_cpu(new_map);
++
++ *pcpu = cpu;
++ return 0;
+ }
+
+ EXPORT_SYMBOL(acpi_map_lsapic);
+
+ int acpi_unmap_lsapic(int cpu)
+ {
+- /* TBD */
+- return -EINVAL;
++ int i;
++
++ for_each_possible_cpu(i) {
++ if (x86_acpiid_to_apicid[i] == x86_cpu_to_apicid[cpu]) {
++ x86_acpiid_to_apicid[i] = -1;
++ break;
++ }
++ }
++ x86_cpu_to_apicid[cpu] = -1;
++ cpu_clear(cpu, cpu_present_map);
++ num_processors--;
++
++ return (0);
+ }
+
+ EXPORT_SYMBOL(acpi_unmap_lsapic);
+@@ -582,6 +645,8 @@ static int __init acpi_parse_sbf(unsigne
+ static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
+ {
+ struct acpi_table_hpet *hpet_tbl;
++ struct resource *hpet_res;
++ resource_size_t res_start;
+
+ if (!phys || !size)
+ return -EINVAL;
+@@ -597,12 +662,26 @@ static int __init acpi_parse_hpet(unsign
+ "memory.\n");
+ return -1;
+ }
++
++#define HPET_RESOURCE_NAME_SIZE 9
++ hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
++ if (hpet_res) {
++ memset(hpet_res, 0, sizeof(*hpet_res));
++ hpet_res->name = (void *)&hpet_res[1];
++ hpet_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++ snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE,
++ "HPET %u", hpet_tbl->number);
++ hpet_res->end = (1 * 1024) - 1;
++ }
++
+ #ifdef CONFIG_X86_64
+ vxtime.hpet_address = hpet_tbl->addr.addrl |
+ ((long)hpet_tbl->addr.addrh << 32);
+
+ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
+ hpet_tbl->id, vxtime.hpet_address);
++
++ res_start = vxtime.hpet_address;
+ #else /* X86 */
+ {
+ extern unsigned long hpet_address;
+@@ -610,9 +689,17 @@ static int __init acpi_parse_hpet(unsign
+ hpet_address = hpet_tbl->addr.addrl;
+ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
+ hpet_tbl->id, hpet_address);
++
++ res_start = hpet_address;
+ }
+ #endif /* X86 */
+
++ if (hpet_res) {
++ hpet_res->start = res_start;
++ hpet_res->end += res_start;
++ insert_resource(&iomem_resource, hpet_res);
++ }
++
+ return 0;
+ }
+ #else
+@@ -796,7 +883,7 @@ static int __init acpi_parse_madt_ioapic
+ * pretend we got one so we can set the SCI flags.
+ */
+ if (!acpi_sci_override_gsi)
+- acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
++ acpi_sci_ioapic_setup(acpi_fadt.sci_int, acpi_fadt.sci_int, 0, 0);
+
+ /* Fill in identity legacy mapings where no override */
+ mp_config_acpi_legacy_irqs();
+@@ -863,8 +950,6 @@ static void __init acpi_process_madt(voi
+ return;
+ }
+
+-extern int acpi_force;
+-
+ #ifdef __i386__
+
+ static int __init disable_acpi_irq(struct dmi_system_id *d)
+@@ -1166,3 +1251,82 @@ int __init acpi_boot_init(void)
+
+ return 0;
+ }
++
++static int __init parse_acpi(char *arg)
++{
++ if (!arg)
++ return -EINVAL;
++
++ /* "acpi=off" disables both ACPI table parsing and interpreter */
++ if (strcmp(arg, "off") == 0) {
++ disable_acpi();
++ }
++ /* acpi=force to over-ride black-list */
++ else if (strcmp(arg, "force") == 0) {
++ acpi_force = 1;
++ acpi_ht = 1;
++ acpi_disabled = 0;
++ }
++ /* acpi=strict disables out-of-spec workarounds */
++ else if (strcmp(arg, "strict") == 0) {
++ acpi_strict = 1;
++ }
++ /* Limit ACPI just to boot-time to enable HT */
++ else if (strcmp(arg, "ht") == 0) {
++ if (!acpi_force)
++ disable_acpi();
++ acpi_ht = 1;
++ }
++ /* "acpi=noirq" disables ACPI interrupt routing */
++ else if (strcmp(arg, "noirq") == 0) {
++ acpi_noirq_set();
++ } else {
++ /* Core will printk when we return error. */
++ return -EINVAL;
++ }
++ return 0;
++}
++early_param("acpi", parse_acpi);
++
++/* FIXME: Using pci= for an ACPI parameter is a travesty. */
++static int __init parse_pci(char *arg)
++{
++ if (arg && strcmp(arg, "noacpi") == 0)
++ acpi_disable_pci();
++ return 0;
++}
++early_param("pci", parse_pci);
++
++#ifdef CONFIG_X86_IO_APIC
++static int __init parse_acpi_skip_timer_override(char *arg)
++{
++ acpi_skip_timer_override = 1;
++ return 0;
++}
++early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
++
++static int __init parse_acpi_use_timer_override(char *arg)
++{
++ acpi_use_timer_override = 1;
++ return 0;
++}
++early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
++#endif /* CONFIG_X86_IO_APIC */
++
++static int __init setup_acpi_sci(char *s)
++{
++ if (!s)
++ return -EINVAL;
++ if (!strcmp(s, "edge"))
++ acpi_sci_flags.trigger = 1;
++ else if (!strcmp(s, "level"))
++ acpi_sci_flags.trigger = 3;
++ else if (!strcmp(s, "high"))
++ acpi_sci_flags.polarity = 1;
++ else if (!strcmp(s, "low"))
++ acpi_sci_flags.polarity = 3;
++ else
++ return -EINVAL;
++ return 0;
++}
++early_param("acpi_sci", setup_acpi_sci);
+Index: 10.3-2007-11-26/arch/i386/kernel/apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/apic-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/apic-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -54,7 +54,6 @@ static cpumask_t timer_bcast_ipi;
+ /*
+ * Knob to control our willingness to enable the local APIC.
+ */
+-int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */
+
+ /*
+ * Debug level
+@@ -102,7 +101,7 @@ int get_physical_broadcast(void)
+
+ #ifndef CONFIG_XEN
+ #ifndef CONFIG_SMP
+-static void up_apic_timer_interrupt_call(struct pt_regs *regs)
++static void up_apic_timer_interrupt_call(void)
+ {
+ int cpu = smp_processor_id();
+
+@@ -111,11 +110,11 @@ static void up_apic_timer_interrupt_call
+ */
+ per_cpu(irq_stat, cpu).apic_timer_irqs++;
+
+- smp_local_timer_interrupt(regs);
++ smp_local_timer_interrupt();
+ }
+ #endif
+
+-void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
++void smp_send_timer_broadcast_ipi(void)
+ {
+ cpumask_t mask;
+
+@@ -128,7 +127,7 @@ void smp_send_timer_broadcast_ipi(struct
+ * We can directly call the apic timer interrupt handler
+ * in UP case. Minus all irq related functions
+ */
+- up_apic_timer_interrupt_call(regs);
++ up_apic_timer_interrupt_call();
+ #endif
+ }
+ }
+Index: 10.3-2007-11-26/arch/i386/kernel/cpu/common-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/cpu/common-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/cpu/common-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -43,7 +43,7 @@ struct cpu_dev * cpu_devs[X86_VENDOR_NUM
+
+ extern int disable_pse;
+
+-static void default_init(struct cpuinfo_x86 * c)
++static void __cpuinit default_init(struct cpuinfo_x86 * c)
+ {
+ /* Not much we can do here... */
+ /* Check if at least it has cpuid */
+@@ -56,7 +56,7 @@ static void default_init(struct cpuinfo_
+ }
+ }
+
+-static struct cpu_dev default_cpu = {
++static struct cpu_dev __cpuinitdata default_cpu = {
+ .c_init = default_init,
+ .c_vendor = "Unknown",
+ };
+@@ -191,7 +191,16 @@ static void __cpuinit get_cpu_vendor(str
+
+ static int __init x86_fxsr_setup(char * s)
+ {
++ /* Tell all the other CPU's to not use it... */
+ disable_x86_fxsr = 1;
++
++ /*
++ * ... and clear the bits early in the boot_cpu_data
++ * so that the bootup process doesn't try to do this
++ * either.
++ */
++ clear_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability);
++ clear_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability);
+ return 1;
+ }
+ __setup("nofxsr", x86_fxsr_setup);
+@@ -272,7 +281,7 @@ static void __init early_cpu_detect(void
+ }
+ }
+
+-void __cpuinit generic_identify(struct cpuinfo_x86 * c)
++static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
+ {
+ u32 tfms, xlvl;
+ int ebx;
+@@ -698,8 +707,7 @@ old_gdt:
+ */
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+- if (current->mm)
+- BUG();
++ BUG_ON(current->mm);
+ enter_lazy_tlb(&init_mm, current);
+
+ load_esp0(t, thread);
+@@ -712,7 +720,7 @@ old_gdt:
+ #endif
+
+ /* Clear %fs and %gs. */
+- asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
++ asm volatile ("movl %0, %%fs; movl %0, %%gs" : : "r" (0));
+
+ /* Clear all 6 debug registers: */
+ set_debugreg(0, 0);
+Index: 10.3-2007-11-26/arch/i386/kernel/crash.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/crash.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/crash.c 2007-10-22 13:53:08.000000000 +0200
+@@ -135,6 +135,8 @@ void machine_crash_shutdown(struct pt_re
+ #if defined(CONFIG_X86_IO_APIC)
+ disable_IO_APIC();
+ #endif
+-#endif /* CONFIG_XEN */
+ crash_save_cpu(regs, safe_smp_processor_id());
++#else
++ crash_save_cpu(regs, smp_processor_id());
++#endif /* CONFIG_XEN */
+ }
+Index: 10.3-2007-11-26/arch/i386/kernel/entry-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/entry-xen.S 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/entry-xen.S 2007-10-22 13:53:08.000000000 +0200
+@@ -80,8 +80,12 @@ VM_MASK = 0x00020000
+ NMI_MASK = 0x80000000
+
+ #ifndef CONFIG_XEN
+-#define DISABLE_INTERRUPTS cli
+-#define ENABLE_INTERRUPTS sti
++/* These are replaces for paravirtualization */
++#define DISABLE_INTERRUPTS cli
++#define ENABLE_INTERRUPTS sti
++#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
++#define INTERRUPT_RETURN iret
++#define GET_CR0_INTO_EAX movl %cr0, %eax
+ #else
+ /* Offsets into shared_info_t. */
+ #define evtchn_upcall_pending /* 0 */
+@@ -99,15 +103,29 @@ NMI_MASK = 0x80000000
+
+ #define __DISABLE_INTERRUPTS movb $1,evtchn_upcall_mask(%esi)
+ #define __ENABLE_INTERRUPTS movb $0,evtchn_upcall_mask(%esi)
++#define __TEST_PENDING testb $0xFF,evtchn_upcall_pending(%esi)
+ #define DISABLE_INTERRUPTS GET_VCPU_INFO ; \
+ __DISABLE_INTERRUPTS
+ #define ENABLE_INTERRUPTS GET_VCPU_INFO ; \
+ __ENABLE_INTERRUPTS
+-#define __TEST_PENDING testb $0xFF,evtchn_upcall_pending(%esi)
++#define ENABLE_INTERRUPTS_SYSEXIT __ENABLE_INTERRUPTS ; \
++sysexit_scrit: /**** START OF SYSEXIT CRITICAL REGION ****/ ; \
++ __TEST_PENDING ; \
++ jnz 14f # process more events if necessary... ; \
++ movl ESI(%esp), %esi ; \
++ sysexit ; \
++14: __DISABLE_INTERRUPTS ; \
++ TRACE_IRQS_OFF ; \
++sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/ ; \
++ push %esp ; \
++ call evtchn_do_upcall ; \
++ add $4,%esp ; \
++ jmp ret_from_intr
++#define INTERRUPT_RETURN iret
+ #endif
+
+ #ifdef CONFIG_PREEMPT
+-#define preempt_stop cli; TRACE_IRQS_OFF
++#define preempt_stop DISABLE_INTERRUPTS; TRACE_IRQS_OFF
+ #else
+ #define preempt_stop
+ #define resume_kernel restore_nocheck
+@@ -206,18 +224,21 @@ NMI_MASK = 0x80000000
+
+ #define RING0_INT_FRAME \
+ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
+ CFI_DEF_CFA esp, 3*4;\
+ /*CFI_OFFSET cs, -2*4;*/\
+ CFI_OFFSET eip, -3*4
+
+ #define RING0_EC_FRAME \
+ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
+ CFI_DEF_CFA esp, 4*4;\
+ /*CFI_OFFSET cs, -2*4;*/\
+ CFI_OFFSET eip, -3*4
+
+ #define RING0_PTREGS_FRAME \
+ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
+ CFI_DEF_CFA esp, OLDESP-EBX;\
+ /*CFI_OFFSET cs, CS-OLDESP;*/\
+ CFI_OFFSET eip, EIP-OLDESP;\
+@@ -263,8 +284,9 @@ ret_from_intr:
+ check_userspace:
+ movl EFLAGS(%esp), %eax # mix EFLAGS and CS
+ movb CS(%esp), %al
+- testl $(VM_MASK | 2), %eax
+- jz resume_kernel
++ andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
++ cmpl $USER_RPL, %eax
++ jb resume_kernel # not returning to v8086 or userspace
+ ENTRY(resume_userspace)
+ DISABLE_INTERRUPTS # make sure we don't miss an interrupt
+ # setting need_resched or sigpending
+@@ -277,7 +299,7 @@ ENTRY(resume_userspace)
+
+ #ifdef CONFIG_PREEMPT
+ ENTRY(resume_kernel)
+- cli
++ DISABLE_INTERRUPTS
+ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
+ jnz restore_nocheck
+ need_resched:
+@@ -297,6 +319,7 @@ need_resched:
+ # sysenter call handler stub
+ ENTRY(sysenter_entry)
+ CFI_STARTPROC simple
++ CFI_SIGNAL_FRAME
+ CFI_DEF_CFA esp, 0
+ CFI_REGISTER esp, ebp
+ movl SYSENTER_stack_esp0(%esp),%esp
+@@ -305,7 +328,7 @@ sysenter_past_esp:
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs and here we enable it straight after entry:
+ */
+- sti
++ ENABLE_INTERRUPTS
+ pushl $(__USER_DS)
+ CFI_ADJUST_CFA_OFFSET 4
+ /*CFI_REL_OFFSET ss, 0*/
+@@ -359,26 +382,8 @@ sysenter_past_esp:
+ movl EIP(%esp), %edx
+ movl OLDESP(%esp), %ecx
+ xorl %ebp,%ebp
+-#ifdef CONFIG_XEN
+ TRACE_IRQS_ON
+- __ENABLE_INTERRUPTS
+-sysexit_scrit: /**** START OF SYSEXIT CRITICAL REGION ****/
+- __TEST_PENDING
+- jnz 14f # process more events if necessary...
+- movl ESI(%esp), %esi
+- sysexit
+-14: __DISABLE_INTERRUPTS
+- TRACE_IRQS_OFF
+-sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/
+- push %esp
+- call evtchn_do_upcall
+- add $4,%esp
+- jmp ret_from_intr
+-#else
+- TRACE_IRQS_ON
+- sti
+- sysexit
+-#endif /* !CONFIG_XEN */
++ ENABLE_INTERRUPTS_SYSEXIT
+ CFI_ENDPROC
+
+
+@@ -419,8 +424,8 @@ restore_all:
+ # See comments in process.c:copy_thread() for details.
+ movb OLDSS(%esp), %ah
+ movb CS(%esp), %al
+- andl $(VM_MASK | (4 << 8) | 3), %eax
+- cmpl $((4 << 8) | 3), %eax
++ andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
++ cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
+ CFI_REMEMBER_STATE
+ je ldt_ss # returning to user-space with LDT SS
+ restore_nocheck:
+@@ -442,12 +447,11 @@ restore_nocheck_notrace:
+ RESTORE_REGS
+ addl $4, %esp
+ CFI_ADJUST_CFA_OFFSET -4
+-1: iret
++1: INTERRUPT_RETURN
+ .section .fixup,"ax"
+ iret_exc:
+ #ifndef CONFIG_XEN
+- TRACE_IRQS_ON
+- sti
++ ENABLE_INTERRUPTS
+ #endif
+ pushl $0 # no error code
+ pushl $do_iret_error
+@@ -473,7 +477,7 @@ ldt_ss:
+ * dosemu and wine happy. */
+ subl $8, %esp # reserve space for switch16 pointer
+ CFI_ADJUST_CFA_OFFSET 8
+- cli
++ DISABLE_INTERRUPTS
+ TRACE_IRQS_OFF
+ movl %esp, %eax
+ /* Set up the 16bit stack frame with switch32 pointer on top,
+@@ -483,7 +487,7 @@ ldt_ss:
+ TRACE_IRQS_IRET
+ RESTORE_REGS
+ lss 20+4(%esp), %esp # switch to 16bit stack
+-1: iret
++1: INTERRUPT_RETURN
+ .section __ex_table,"a"
+ .align 4
+ .long 1b,iret_exc
+@@ -499,7 +503,7 @@ scrit: /**** START OF CRITICAL REGION **
+ RESTORE_REGS
+ addl $4, %esp
+ CFI_ADJUST_CFA_OFFSET -4
+-1: iret
++1: INTERRUPT_RETURN
+ .section __ex_table,"a"
+ .align 4
+ .long 1b,iret_exc
+@@ -688,11 +692,9 @@ ENTRY(name) \
+ #define UNWIND_ESPFIX_STACK
+ #endif
+
+-ENTRY(divide_error)
+- RING0_INT_FRAME
+- pushl $0 # no error code
+- CFI_ADJUST_CFA_OFFSET 4
+- pushl $do_divide_error
++KPROBE_ENTRY(page_fault)
++ RING0_EC_FRAME
++ pushl $do_page_fault
+ CFI_ADJUST_CFA_OFFSET 4
+ ALIGN
+ error_code:
+@@ -742,6 +744,7 @@ error_code:
+ call *%edi
+ jmp ret_from_exception
+ CFI_ENDPROC
++KPROBE_END(page_fault)
+
+ #ifdef CONFIG_XEN
+ # A note on the "critical region" in our callback handler.
+@@ -901,7 +904,7 @@ ENTRY(device_not_available)
+ CFI_ADJUST_CFA_OFFSET 4
+ SAVE_ALL
+ #ifndef CONFIG_XEN
+- movl %cr0, %eax
++ GET_CR0_INTO_EAX
+ testl $0x4, %eax # EM (math emulation bit)
+ je device_available_emulate
+ pushl $0 # temporary storage for ORIG_EIP
+@@ -936,9 +939,15 @@ device_available_emulate:
+ jne ok; \
+ label: \
+ movl SYSENTER_stack_esp0+offset(%esp),%esp; \
++ CFI_DEF_CFA esp, 0; \
++ CFI_UNDEFINED eip; \
+ pushfl; \
++ CFI_ADJUST_CFA_OFFSET 4; \
+ pushl $__KERNEL_CS; \
+- pushl $sysenter_past_esp
++ CFI_ADJUST_CFA_OFFSET 4; \
++ pushl $sysenter_past_esp; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ CFI_REL_OFFSET eip, 0
+ #endif /* CONFIG_XEN */
+
+ KPROBE_ENTRY(debug)
+@@ -957,7 +966,8 @@ debug_stack_correct:
+ call do_debug
+ jmp ret_from_exception
+ CFI_ENDPROC
+- .previous .text
++KPROBE_END(debug)
++
+ #ifndef CONFIG_XEN
+ /*
+ * NMI is doubly nasty. It can happen _while_ we're handling
+@@ -967,7 +977,7 @@ debug_stack_correct:
+ * check whether we got an NMI on the debug path where the debug
+ * fault happened on the sysenter path.
+ */
+-ENTRY(nmi)
++KPROBE_ENTRY(nmi)
+ RING0_INT_FRAME
+ pushl %eax
+ CFI_ADJUST_CFA_OFFSET 4
+@@ -992,6 +1002,7 @@ ENTRY(nmi)
+ cmpl $sysenter_entry,12(%esp)
+ je nmi_debug_stack_check
+ nmi_stack_correct:
++ /* We have a RING0_INT_FRAME here */
+ pushl %eax
+ CFI_ADJUST_CFA_OFFSET 4
+ SAVE_ALL
+@@ -1002,9 +1013,12 @@ nmi_stack_correct:
+ CFI_ENDPROC
+
+ nmi_stack_fixup:
++ RING0_INT_FRAME
+ FIX_STACK(12,nmi_stack_correct, 1)
+ jmp nmi_stack_correct
++
+ nmi_debug_stack_check:
++ /* We have a RING0_INT_FRAME here */
+ cmpw $__KERNEL_CS,16(%esp)
+ jne nmi_stack_correct
+ cmpl $debug,(%esp)
+@@ -1015,8 +1029,10 @@ nmi_debug_stack_check:
+ jmp nmi_stack_correct
+
+ nmi_16bit_stack:
+- RING0_INT_FRAME
+- /* create the pointer to lss back */
++ /* We have a RING0_INT_FRAME here.
++ *
++ * create the pointer to lss back
++ */
+ pushl %ss
+ CFI_ADJUST_CFA_OFFSET 4
+ pushl %esp
+@@ -1037,14 +1053,14 @@ nmi_16bit_stack:
+ call do_nmi
+ RESTORE_REGS
+ lss 12+4(%esp), %esp # back to 16bit stack
+-1: iret
++1: INTERRUPT_RETURN
+ CFI_ENDPROC
+ .section __ex_table,"a"
+ .align 4
+ .long 1b,iret_exc
+ .previous
+ #else
+-ENTRY(nmi)
++KPROBE_ENTRY(nmi)
+ RING0_INT_FRAME
+ pushl %eax
+ CFI_ADJUST_CFA_OFFSET 4
+@@ -1056,6 +1072,7 @@ ENTRY(nmi)
+ jmp restore_all
+ CFI_ENDPROC
+ #endif
++KPROBE_END(nmi)
+
+ KPROBE_ENTRY(int3)
+ RING0_INT_FRAME
+@@ -1067,7 +1084,7 @@ KPROBE_ENTRY(int3)
+ call do_int3
+ jmp ret_from_exception
+ CFI_ENDPROC
+- .previous .text
++KPROBE_END(int3)
+
+ ENTRY(overflow)
+ RING0_INT_FRAME
+@@ -1132,7 +1149,7 @@ KPROBE_ENTRY(general_protection)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+- .previous .text
++KPROBE_END(general_protection)
+
+ ENTRY(alignment_check)
+ RING0_EC_FRAME
+@@ -1141,13 +1158,14 @@ ENTRY(alignment_check)
+ jmp error_code
+ CFI_ENDPROC
+
+-KPROBE_ENTRY(page_fault)
+- RING0_EC_FRAME
+- pushl $do_page_fault
++ENTRY(divide_error)
++ RING0_INT_FRAME
++ pushl $0 # no error code
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_divide_error
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+- .previous .text
+
+ #ifdef CONFIG_X86_MCE
+ ENTRY(machine_check)
+@@ -1209,6 +1227,19 @@ ENTRY(fixup_4gb_segment)
+ jmp error_code
+ CFI_ENDPROC
+
++ENTRY(kernel_thread_helper)
++ pushl $0 # fake return address for unwinder
++ CFI_STARTPROC
++ movl %edx,%eax
++ push %edx
++ CFI_ADJUST_CFA_OFFSET 4
++ call *%ebx
++ push %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call do_exit
++ CFI_ENDPROC
++ENDPROC(kernel_thread_helper)
++
+ .section .rodata,"a"
+ .align 4
+ #include "syscall_table.S"
+Index: 10.3-2007-11-26/arch/i386/kernel/head-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/head-xen.S 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/head-xen.S 2007-10-22 13:53:08.000000000 +0200
+@@ -62,7 +62,7 @@ ENTRY(startup_32)
+ movl %eax,%gs
+ cld # gcc2 wants the direction flag cleared at all times
+
+- pushl %eax # fake return address
++ pushl $0 # fake return address for unwinder
+ jmp start_kernel
+
+ #define HYPERCALL_PAGE_OFFSET 0x1000
+Index: 10.3-2007-11-26/arch/i386/kernel/io_apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/io_apic-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/io_apic-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -31,6 +31,9 @@
+ #include <linux/acpi.h>
+ #include <linux/module.h>
+ #include <linux/sysdev.h>
++#include <linux/pci.h>
++#include <linux/msi.h>
++#include <linux/htirq.h>
+
+ #include <asm/io.h>
+ #include <asm/smp.h>
+@@ -38,13 +41,15 @@
+ #include <asm/timer.h>
+ #include <asm/i8259.h>
+ #include <asm/nmi.h>
++#include <asm/msidef.h>
++#include <asm/hypertransport.h>
+
+ #include <mach_apic.h>
++#include <mach_apicdef.h>
+
+ #include "io_ports.h"
+
+ #ifdef CONFIG_XEN
+-
+ #include <xen/interface/xen.h>
+ #include <xen/interface/physdev.h>
+
+@@ -55,32 +60,7 @@
+
+ unsigned long io_apic_irqs;
+
+-static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
+-{
+- struct physdev_apic apic_op;
+- int ret;
+-
+- apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
+- apic_op.reg = reg;
+- ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
+- if (ret)
+- return ret;
+- return apic_op.value;
+-}
+-
+-static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+-{
+- struct physdev_apic apic_op;
+-
+- apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
+- apic_op.reg = reg;
+- apic_op.value = value;
+- HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
+-}
+-
+-#define io_apic_read(a,r) xen_io_apic_read(a,r)
+-#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
+-
++#define clear_IO_APIC() ((void)0)
+ #endif /* CONFIG_XEN */
+
+ int (*ioapic_renumber_irq)(int ioapic, int irq);
+@@ -105,7 +85,7 @@ int sis_apic_bug = -1;
+ */
+ int nr_ioapic_registers[MAX_IO_APICS];
+
+-int disable_timer_pin_1 __initdata;
++static int disable_timer_pin_1 __initdata;
+
+ /*
+ * Rough estimation of how many shared IRQs there are, can
+@@ -125,12 +105,122 @@ static struct irq_pin_list {
+ int apic, pin, next;
+ } irq_2_pin[PIN_MAP_SIZE];
+
+-int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
+-#ifdef CONFIG_PCI_MSI
+-#define vector_to_irq(vector) \
+- (platform_legacy_irq(vector) ? vector : vector_irq[vector])
++#ifndef CONFIG_XEN
++struct io_apic {
++ unsigned int index;
++ unsigned int unused[3];
++ unsigned int data;
++};
++
++static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
++{
++ return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
++ + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK);
++}
++#endif
++
++static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
++{
++#ifndef CONFIG_XEN
++ struct io_apic __iomem *io_apic = io_apic_base(apic);
++ writel(reg, &io_apic->index);
++ return readl(&io_apic->data);
+ #else
+-#define vector_to_irq(vector) (vector)
++ struct physdev_apic apic_op;
++ int ret;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
++ if (ret)
++ return ret;
++ return apic_op.value;
++#endif
++}
++
++static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
++{
++#ifndef CONFIG_XEN
++ struct io_apic __iomem *io_apic = io_apic_base(apic);
++ writel(reg, &io_apic->index);
++ writel(value, &io_apic->data);
++#else
++ struct physdev_apic apic_op;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ apic_op.value = value;
++ HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
++#endif
++}
++
++#ifndef CONFIG_XEN
++/*
++ * Re-write a value: to be used for read-modify-write
++ * cycles where the read already set up the index register.
++ *
++ * Older SiS APIC requires we rewrite the index register
++ */
++static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
++{
++ volatile struct io_apic *io_apic = io_apic_base(apic);
++ if (sis_apic_bug)
++ writel(reg, &io_apic->index);
++ writel(value, &io_apic->data);
++}
++#else
++#define io_apic_modify io_apic_write
++#endif
++
++union entry_union {
++ struct { u32 w1, w2; };
++ struct IO_APIC_route_entry entry;
++};
++
++static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
++{
++ union entry_union eu;
++ unsigned long flags;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
++ eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ return eu.entry;
++}
++
++/*
++ * When we write a new IO APIC routing entry, we need to write the high
++ * word first! If the mask bit in the low word is clear, we will enable
++ * the interrupt, and we need to make sure the entry is fully populated
++ * before that happens.
++ */
++static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
++{
++ unsigned long flags;
++ union entry_union eu;
++ eu.entry = e;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11 + 2*pin, eu.w2);
++ io_apic_write(apic, 0x10 + 2*pin, eu.w1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++#ifndef CONFIG_XEN
++/*
++ * When we mask an IO APIC routing entry, we need to write the low
++ * word first, in order to set the mask bit before we change the
++ * high bits!
++ */
++static void ioapic_mask_entry(int apic, int pin)
++{
++ unsigned long flags;
++ union entry_union eu = { .entry.mask = 1 };
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x10 + 2*pin, eu.w1);
++ io_apic_write(apic, 0x11 + 2*pin, eu.w2);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
+ #endif
+
+ /*
+@@ -156,9 +246,7 @@ static void add_pin_to_irq(unsigned int
+ entry->pin = pin;
+ }
+
+-#ifdef CONFIG_XEN
+-#define clear_IO_APIC() ((void)0)
+-#else
++#ifndef CONFIG_XEN
+ /*
+ * Reroute an IRQ to a different pin.
+ */
+@@ -243,25 +331,16 @@ static void unmask_IO_APIC_irq (unsigned
+ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
+ {
+ struct IO_APIC_route_entry entry;
+- unsigned long flags;
+
+ /* Check delivery_mode to be sure we're not clearing an SMI pin */
+- spin_lock_irqsave(&ioapic_lock, flags);
+- *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
+- *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ entry = ioapic_read_entry(apic, pin);
+ if (entry.delivery_mode == dest_SMI)
+ return;
+
+ /*
+ * Disable it in the IO-APIC irq-routing table:
+ */
+- memset(&entry, 0, sizeof(entry));
+- entry.mask = 1;
+- spin_lock_irqsave(&ioapic_lock, flags);
+- io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
+- io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ ioapic_mask_entry(apic, pin);
+ }
+
+ static void clear_IO_APIC (void)
+@@ -301,7 +380,7 @@ static void set_ioapic_affinity_irq(unsi
+ break;
+ entry = irq_2_pin + entry->next;
+ }
+- set_irq_info(irq, cpumask);
++ set_native_irq_info(irq, cpumask);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+
+@@ -1207,40 +1286,40 @@ static inline int IO_APIC_irq_trigger(in
+ /* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
+ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly; /* = { FIRST_DEVICE_VECTOR , 0 }; */
+
+-int assign_irq_vector(int irq)
++static int __assign_irq_vector(int irq)
+ {
+- unsigned long flags;
+ int vector;
+ struct physdev_irq irq_op;
+
+- BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
++ BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
+
+- spin_lock_irqsave(&vector_lock, flags);
+-
+- if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
+- spin_unlock_irqrestore(&vector_lock, flags);
+- return IO_APIC_VECTOR(irq);
+- }
++ if (irq_vector[irq] > 0)
++ return irq_vector[irq];
+
+ irq_op.irq = irq;
+- if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
+- spin_unlock_irqrestore(&vector_lock, flags);
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
+ return -ENOSPC;
+- }
+
+ vector = irq_op.vector;
+- vector_irq[vector] = irq;
+- if (irq != AUTO_ASSIGN)
+- IO_APIC_VECTOR(irq) = vector;
++ irq_vector[irq] = vector;
++
++ return vector;
++}
++
++static int assign_irq_vector(int irq)
++{
++ unsigned long flags;
++ int vector;
+
++ spin_lock_irqsave(&vector_lock, flags);
++ vector = __assign_irq_vector(irq);
+ spin_unlock_irqrestore(&vector_lock, flags);
+
+ return vector;
+ }
+
+ #ifndef CONFIG_XEN
+-static struct hw_interrupt_type ioapic_level_type;
+-static struct hw_interrupt_type ioapic_edge_type;
++static struct irq_chip ioapic_chip;
+
+ #define IOAPIC_AUTO -1
+ #define IOAPIC_EDGE 0
+@@ -1248,16 +1327,16 @@ static struct hw_interrupt_type ioapic_e
+
+ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
+ {
+- unsigned idx;
+-
+- idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
+-
+ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
+ trigger == IOAPIC_LEVEL)
+- irq_desc[idx].chip = &ioapic_level_type;
+- else
+- irq_desc[idx].chip = &ioapic_edge_type;
+- set_intr_gate(vector, interrupt[idx]);
++ set_irq_chip_and_handler_name(irq, &ioapic_chip,
++ handle_fasteoi_irq, "fasteoi");
++ else {
++ irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
++ set_irq_chip_and_handler_name(irq, &ioapic_chip,
++ handle_edge_irq, "edge");
++ }
++ set_intr_gate(vector, interrupt[irq]);
+ }
+ #else
+ #define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
+@@ -1328,9 +1407,8 @@ static void __init setup_IO_APIC_irqs(vo
+ if (!apic && (irq < 16))
+ disable_8259A_irq(irq);
+ }
++ ioapic_write_entry(apic, pin, entry);
+ spin_lock_irqsave(&ioapic_lock, flags);
+- io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
+- io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
+ set_native_irq_info(irq, TARGET_CPUS);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+@@ -1347,7 +1425,6 @@ static void __init setup_IO_APIC_irqs(vo
+ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
+ {
+ struct IO_APIC_route_entry entry;
+- unsigned long flags;
+
+ memset(&entry,0,sizeof(entry));
+
+@@ -1372,15 +1449,13 @@ static void __init setup_ExtINT_IRQ0_pin
+ * The timer IRQ doesn't have to know that behind the
+ * scene we have a 8259A-master in AEOI mode ...
+ */
+- irq_desc[0].chip = &ioapic_edge_type;
++ irq_desc[0].chip = &ioapic_chip;
++ set_irq_handler(0, handle_edge_irq);
+
+ /*
+ * Add it to the IO-APIC irq-routing table:
+ */
+- spin_lock_irqsave(&ioapic_lock, flags);
+- io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
+- io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ ioapic_write_entry(apic, pin, entry);
+
+ enable_8259A_irq(0);
+ }
+@@ -1490,10 +1565,7 @@ void __init print_IO_APIC(void)
+ for (i = 0; i <= reg_01.bits.entries; i++) {
+ struct IO_APIC_route_entry entry;
+
+- spin_lock_irqsave(&ioapic_lock, flags);
+- *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
+- *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ entry = ioapic_read_entry(apic, i);
+
+ printk(KERN_DEBUG " %02x %03X %02X ",
+ i,
+@@ -1513,17 +1585,12 @@ void __init print_IO_APIC(void)
+ );
+ }
+ }
+- if (use_pci_vector())
+- printk(KERN_INFO "Using vector-based indexing\n");
+ printk(KERN_DEBUG "IRQ to pin mappings:\n");
+ for (i = 0; i < NR_IRQS; i++) {
+ struct irq_pin_list *entry = irq_2_pin + i;
+ if (entry->pin < 0)
+ continue;
+- if (use_pci_vector() && !platform_legacy_irq(i))
+- printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
+- else
+- printk(KERN_DEBUG "IRQ%d ", i);
++ printk(KERN_DEBUG "IRQ%d ", i);
+ for (;;) {
+ printk("-> %d:%d", entry->apic, entry->pin);
+ if (!entry->next)
+@@ -1716,10 +1783,7 @@ static void __init enable_IO_APIC(void)
+ /* See if any of the pins is in ExtINT mode */
+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+ struct IO_APIC_route_entry entry;
+- spin_lock_irqsave(&ioapic_lock, flags);
+- *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
+- *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ entry = ioapic_read_entry(apic, pin);
+
+
+ /* If the interrupt line is enabled and in ExtInt mode
+@@ -1777,7 +1841,6 @@ void disable_IO_APIC(void)
+ */
+ if (ioapic_i8259.pin != -1) {
+ struct IO_APIC_route_entry entry;
+- unsigned long flags;
+
+ memset(&entry, 0, sizeof(entry));
+ entry.mask = 0; /* Enabled */
+@@ -1794,12 +1857,7 @@ void disable_IO_APIC(void)
+ /*
+ * Add it to the IO-APIC irq-routing table:
+ */
+- spin_lock_irqsave(&ioapic_lock, flags);
+- io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
+- *(((int *)&entry)+1));
+- io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
+- *(((int *)&entry)+0));
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
+ }
+ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
+ #endif
+@@ -1966,6 +2024,8 @@ static int __init timer_irq_works(void)
+ */
+
+ /*
++ * Startup quirk:
++ *
+ * Starting up a edge-triggered IO-APIC interrupt is
+ * nasty - we need to make sure that we get the edge.
+ * If it is already asserted for some reason, we need
+@@ -1973,8 +2033,10 @@ static int __init timer_irq_works(void)
+ *
+ * This is not complete - we should be able to fake
+ * an edge even if it isn't on the 8259A...
++ *
++ * (We do this for level-triggered IRQs too - it cannot hurt.)
+ */
+-static unsigned int startup_edge_ioapic_irq(unsigned int irq)
++static unsigned int startup_ioapic_irq(unsigned int irq)
+ {
+ int was_pending = 0;
+ unsigned long flags;
+@@ -1991,47 +2053,18 @@ static unsigned int startup_edge_ioapic_
+ return was_pending;
+ }
+
+-/*
+- * Once we have recorded IRQ_PENDING already, we can mask the
+- * interrupt for real. This prevents IRQ storms from unhandled
+- * devices.
+- */
+-static void ack_edge_ioapic_irq(unsigned int irq)
+-{
+- move_irq(irq);
+- if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
+- == (IRQ_PENDING | IRQ_DISABLED))
+- mask_IO_APIC_irq(irq);
+- ack_APIC_irq();
+-}
+-
+-/*
+- * Level triggered interrupts can just be masked,
+- * and shutting down and starting up the interrupt
+- * is the same as enabling and disabling them -- except
+- * with a startup need to return a "was pending" value.
+- *
+- * Level triggered interrupts are special because we
+- * do not touch any IO-APIC register while handling
+- * them. We ack the APIC in the end-IRQ handler, not
+- * in the start-IRQ-handler. Protection against reentrance
+- * from the same interrupt is still provided, both by the
+- * generic IRQ layer and by the fact that an unacked local
+- * APIC does not accept IRQs.
+- */
+-static unsigned int startup_level_ioapic_irq (unsigned int irq)
++static void ack_ioapic_irq(unsigned int irq)
+ {
+- unmask_IO_APIC_irq(irq);
+-
+- return 0; /* don't check for pending */
++ move_native_irq(irq);
++ ack_APIC_irq();
+ }
+
+-static void end_level_ioapic_irq (unsigned int irq)
++static void ack_ioapic_quirk_irq(unsigned int irq)
+ {
+ unsigned long v;
+ int i;
+
+- move_irq(irq);
++ move_native_irq(irq);
+ /*
+ * It appears there is an erratum which affects at least version 0x11
+ * of I/O APIC (that's the 82093AA and cores integrated into various
+@@ -2051,7 +2084,7 @@ static void end_level_ioapic_irq (unsign
+ * operation to prevent an edge-triggered interrupt escaping meanwhile.
+ * The idea is from Manfred Spraul. --macro
+ */
+- i = IO_APIC_VECTOR(irq);
++ i = irq_vector[irq];
+
+ v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
+
+@@ -2066,104 +2099,24 @@ static void end_level_ioapic_irq (unsign
+ }
+ }
+
+-#ifdef CONFIG_PCI_MSI
+-static unsigned int startup_edge_ioapic_vector(unsigned int vector)
+-{
+- int irq = vector_to_irq(vector);
+-
+- return startup_edge_ioapic_irq(irq);
+-}
+-
+-static void ack_edge_ioapic_vector(unsigned int vector)
+-{
+- int irq = vector_to_irq(vector);
+-
+- move_native_irq(vector);
+- ack_edge_ioapic_irq(irq);
+-}
+-
+-static unsigned int startup_level_ioapic_vector (unsigned int vector)
+-{
+- int irq = vector_to_irq(vector);
+-
+- return startup_level_ioapic_irq (irq);
+-}
+-
+-static void end_level_ioapic_vector (unsigned int vector)
+-{
+- int irq = vector_to_irq(vector);
+-
+- move_native_irq(vector);
+- end_level_ioapic_irq(irq);
+-}
+-
+-static void mask_IO_APIC_vector (unsigned int vector)
+-{
+- int irq = vector_to_irq(vector);
+-
+- mask_IO_APIC_irq(irq);
+-}
+-
+-static void unmask_IO_APIC_vector (unsigned int vector)
+-{
+- int irq = vector_to_irq(vector);
+-
+- unmask_IO_APIC_irq(irq);
+-}
+-
+-#ifdef CONFIG_SMP
+-static void set_ioapic_affinity_vector (unsigned int vector,
+- cpumask_t cpu_mask)
++static int ioapic_retrigger_irq(unsigned int irq)
+ {
+- int irq = vector_to_irq(vector);
+-
+- set_native_irq_info(vector, cpu_mask);
+- set_ioapic_affinity_irq(irq, cpu_mask);
+-}
+-#endif
+-#endif
+-
+-static int ioapic_retrigger(unsigned int irq)
+-{
+- send_IPI_self(IO_APIC_VECTOR(irq));
++ send_IPI_self(irq_vector[irq]);
+
+ return 1;
+ }
+
+-/*
+- * Level and edge triggered IO-APIC interrupts need different handling,
+- * so we use two separate IRQ descriptors. Edge triggered IRQs can be
+- * handled with the level-triggered descriptor, but that one has slightly
+- * more overhead. Level-triggered interrupts cannot be handled with the
+- * edge-triggered handler, without risking IRQ storms and other ugly
+- * races.
+- */
+-static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
+- .typename = "IO-APIC-edge",
+- .startup = startup_edge_ioapic,
+- .shutdown = shutdown_edge_ioapic,
+- .enable = enable_edge_ioapic,
+- .disable = disable_edge_ioapic,
+- .ack = ack_edge_ioapic,
+- .end = end_edge_ioapic,
++static struct irq_chip ioapic_chip __read_mostly = {
++ .name = "IO-APIC",
++ .startup = startup_ioapic_irq,
++ .mask = mask_IO_APIC_irq,
++ .unmask = unmask_IO_APIC_irq,
++ .ack = ack_ioapic_irq,
++ .eoi = ack_ioapic_quirk_irq,
+ #ifdef CONFIG_SMP
+- .set_affinity = set_ioapic_affinity,
++ .set_affinity = set_ioapic_affinity_irq,
+ #endif
+- .retrigger = ioapic_retrigger,
+-};
+-
+-static struct hw_interrupt_type ioapic_level_type __read_mostly = {
+- .typename = "IO-APIC-level",
+- .startup = startup_level_ioapic,
+- .shutdown = shutdown_level_ioapic,
+- .enable = enable_level_ioapic,
+- .disable = disable_level_ioapic,
+- .ack = mask_and_ack_level_ioapic,
+- .end = end_level_ioapic,
+-#ifdef CONFIG_SMP
+- .set_affinity = set_ioapic_affinity,
+-#endif
+- .retrigger = ioapic_retrigger,
++ .retrigger = ioapic_retrigger_irq,
+ };
+ #endif /* !CONFIG_XEN */
+
+@@ -2184,12 +2137,7 @@ static inline void init_IO_APIC_traps(vo
+ */
+ for (irq = 0; irq < NR_IRQS ; irq++) {
+ int tmp = irq;
+- if (use_pci_vector()) {
+- if (!platform_legacy_irq(tmp))
+- if ((tmp = vector_to_irq(tmp)) == -1)
+- continue;
+- }
+- if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
++ if (IO_APIC_IRQ(tmp) && !irq_vector[tmp]) {
+ /*
+ * Hmm.. We don't have an entry for this,
+ * so default to an old-fashioned 8259
+@@ -2200,22 +2148,23 @@ static inline void init_IO_APIC_traps(vo
+ #ifndef CONFIG_XEN
+ else
+ /* Strange. Oh, well.. */
+- irq_desc[irq].chip = &no_irq_type;
++ irq_desc[irq].chip = &no_irq_chip;
+ #endif
+ }
+ }
+ }
+
+ #ifndef CONFIG_XEN
+-static void enable_lapic_irq (unsigned int irq)
+-{
+- unsigned long v;
++/*
++ * The local APIC irq-chip implementation:
++ */
+
+- v = apic_read(APIC_LVT0);
+- apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
++static void ack_apic(unsigned int irq)
++{
++ ack_APIC_irq();
+ }
+
+-static void disable_lapic_irq (unsigned int irq)
++static void mask_lapic_irq (unsigned int irq)
+ {
+ unsigned long v;
+
+@@ -2223,21 +2172,19 @@ static void disable_lapic_irq (unsigned
+ apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
+ }
+
+-static void ack_lapic_irq (unsigned int irq)
++static void unmask_lapic_irq (unsigned int irq)
+ {
+- ack_APIC_irq();
+-}
++ unsigned long v;
+
+-static void end_lapic_irq (unsigned int i) { /* nothing */ }
++ v = apic_read(APIC_LVT0);
++ apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
++}
+
+-static struct hw_interrupt_type lapic_irq_type __read_mostly = {
+- .typename = "local-APIC-edge",
+- .startup = NULL, /* startup_irq() not used for IRQ0 */
+- .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
+- .enable = enable_lapic_irq,
+- .disable = disable_lapic_irq,
+- .ack = ack_lapic_irq,
+- .end = end_lapic_irq
++static struct irq_chip lapic_chip __read_mostly = {
++ .name = "local-APIC-edge",
++ .mask = mask_lapic_irq,
++ .unmask = unmask_lapic_irq,
++ .eoi = ack_apic,
+ };
+
+ static void setup_nmi (void)
+@@ -2270,17 +2217,13 @@ static inline void unlock_ExtINT_logic(v
+ int apic, pin, i;
+ struct IO_APIC_route_entry entry0, entry1;
+ unsigned char save_control, save_freq_select;
+- unsigned long flags;
+
+ pin = find_isa_irq_pin(8, mp_INT);
+ apic = find_isa_irq_apic(8, mp_INT);
+ if (pin == -1)
+ return;
+
+- spin_lock_irqsave(&ioapic_lock, flags);
+- *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
+- *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ entry0 = ioapic_read_entry(apic, pin);
+ clear_IO_APIC_pin(apic, pin);
+
+ memset(&entry1, 0, sizeof(entry1));
+@@ -2293,10 +2236,7 @@ static inline void unlock_ExtINT_logic(v
+ entry1.trigger = 0;
+ entry1.vector = 0;
+
+- spin_lock_irqsave(&ioapic_lock, flags);
+- io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
+- io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ ioapic_write_entry(apic, pin, entry1);
+
+ save_control = CMOS_READ(RTC_CONTROL);
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
+@@ -2315,10 +2255,7 @@ static inline void unlock_ExtINT_logic(v
+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+ clear_IO_APIC_pin(apic, pin);
+
+- spin_lock_irqsave(&ioapic_lock, flags);
+- io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
+- io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ ioapic_write_entry(apic, pin, entry0);
+ }
+
+ int timer_uses_ioapic_pin_0;
+@@ -2418,7 +2355,8 @@ static inline void check_timer(void)
+ printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
+
+ disable_8259A_irq(0);
+- irq_desc[0].chip = &lapic_irq_type;
++ set_irq_chip_and_handler_name(0, &lapic_chip, handle_fasteoi_irq,
++ "fasteio");
+ apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
+ enable_8259A_irq(0);
+
+@@ -2530,17 +2468,12 @@ static int ioapic_suspend(struct sys_dev
+ {
+ struct IO_APIC_route_entry *entry;
+ struct sysfs_ioapic_data *data;
+- unsigned long flags;
+ int i;
+
+ data = container_of(dev, struct sysfs_ioapic_data, dev);
+ entry = data->entry;
+- spin_lock_irqsave(&ioapic_lock, flags);
+- for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
+- *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
+- *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
+- }
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++)
++ entry[i] = ioapic_read_entry(dev->id, i);
+
+ return 0;
+ }
+@@ -2562,11 +2495,9 @@ static int ioapic_resume(struct sys_devi
+ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
+ io_apic_write(dev->id, 0, reg_00.raw);
+ }
+- for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
+- io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
+- io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
+- }
+ spin_unlock_irqrestore(&ioapic_lock, flags);
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++)
++ ioapic_write_entry(dev->id, i, entry[i]);
+
+ return 0;
+ }
+@@ -2612,6 +2543,242 @@ static int __init ioapic_init_sysfs(void
+
+ device_initcall(ioapic_init_sysfs);
+
++#ifndef CONFIG_XEN
++/*
++ * Dynamic irq allocate and deallocation
++ */
++int create_irq(void)
++{
++ /* Allocate an unused irq */
++ int irq, new, vector;
++ unsigned long flags;
++
++ irq = -ENOSPC;
++ spin_lock_irqsave(&vector_lock, flags);
++ for (new = (NR_IRQS - 1); new >= 0; new--) {
++ if (platform_legacy_irq(new))
++ continue;
++ if (irq_vector[new] != 0)
++ continue;
++ vector = __assign_irq_vector(new);
++ if (likely(vector > 0))
++ irq = new;
++ break;
++ }
++ spin_unlock_irqrestore(&vector_lock, flags);
++
++ if (irq >= 0) {
++#ifndef CONFIG_XEN
++ set_intr_gate(vector, interrupt[irq]);
++#endif
++ dynamic_irq_init(irq);
++ }
++ return irq;
++}
++
++void destroy_irq(unsigned int irq)
++{
++ unsigned long flags;
++
++ dynamic_irq_cleanup(irq);
++
++ spin_lock_irqsave(&vector_lock, flags);
++ irq_vector[irq] = 0;
++ spin_unlock_irqrestore(&vector_lock, flags);
++}
++#endif
++
++/*
++ * MSI message composition
++ */
++#ifdef CONFIG_PCI_MSI
++static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
++{
++ int vector;
++ unsigned dest;
++
++ vector = assign_irq_vector(irq);
++ if (vector >= 0) {
++ dest = cpu_mask_to_apicid(TARGET_CPUS);
++
++ msg->address_hi = MSI_ADDR_BASE_HI;
++ msg->address_lo =
++ MSI_ADDR_BASE_LO |
++ ((INT_DEST_MODE == 0) ?
++ MSI_ADDR_DEST_MODE_PHYSICAL:
++ MSI_ADDR_DEST_MODE_LOGICAL) |
++ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
++ MSI_ADDR_REDIRECTION_CPU:
++ MSI_ADDR_REDIRECTION_LOWPRI) |
++ MSI_ADDR_DEST_ID(dest);
++
++ msg->data =
++ MSI_DATA_TRIGGER_EDGE |
++ MSI_DATA_LEVEL_ASSERT |
++ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
++ MSI_DATA_DELIVERY_FIXED:
++ MSI_DATA_DELIVERY_LOWPRI) |
++ MSI_DATA_VECTOR(vector);
++ }
++ return vector;
++}
++
++#ifdef CONFIG_SMP
++static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
++{
++ struct msi_msg msg;
++ unsigned int dest;
++ cpumask_t tmp;
++ int vector;
++
++ cpus_and(tmp, mask, cpu_online_map);
++ if (cpus_empty(tmp))
++ tmp = TARGET_CPUS;
++
++ vector = assign_irq_vector(irq);
++ if (vector < 0)
++ return;
++
++ dest = cpu_mask_to_apicid(mask);
++
++ read_msi_msg(irq, &msg);
++
++ msg.data &= ~MSI_DATA_VECTOR_MASK;
++ msg.data |= MSI_DATA_VECTOR(vector);
++ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
++ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
++
++ write_msi_msg(irq, &msg);
++ set_native_irq_info(irq, mask);
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
++ * which implement the MSI or MSI-X Capability Structure.
++ */
++static struct irq_chip msi_chip = {
++ .name = "PCI-MSI",
++ .unmask = unmask_msi_irq,
++ .mask = mask_msi_irq,
++ .ack = ack_ioapic_irq,
++#ifdef CONFIG_SMP
++ .set_affinity = set_msi_irq_affinity,
++#endif
++ .retrigger = ioapic_retrigger_irq,
++};
++
++int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
++{
++ struct msi_msg msg;
++ int ret;
++ ret = msi_compose_msg(dev, irq, &msg);
++ if (ret < 0)
++ return ret;
++
++ write_msi_msg(irq, &msg);
++
++ set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
++ "edge");
++
++ return 0;
++}
++
++void arch_teardown_msi_irq(unsigned int irq)
++{
++ return;
++}
++
++#endif /* CONFIG_PCI_MSI */
++
++/*
++ * Hypertransport interrupt support
++ */
++#ifdef CONFIG_HT_IRQ
++
++#ifdef CONFIG_SMP
++
++static void target_ht_irq(unsigned int irq, unsigned int dest)
++{
++ struct ht_irq_msg msg;
++ fetch_ht_irq_msg(irq, &msg);
++
++ msg.address_lo &= ~(HT_IRQ_LOW_DEST_ID_MASK);
++ msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
++
++ msg.address_lo |= HT_IRQ_LOW_DEST_ID(dest);
++ msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
++
++ write_ht_irq_msg(irq, &msg);
++}
++
++static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
++{
++ unsigned int dest;
++ cpumask_t tmp;
++
++ cpus_and(tmp, mask, cpu_online_map);
++ if (cpus_empty(tmp))
++ tmp = TARGET_CPUS;
++
++ cpus_and(mask, tmp, CPU_MASK_ALL);
++
++ dest = cpu_mask_to_apicid(mask);
++
++ target_ht_irq(irq, dest);
++ set_native_irq_info(irq, mask);
++}
++#endif
++
++static struct irq_chip ht_irq_chip = {
++ .name = "PCI-HT",
++ .mask = mask_ht_irq,
++ .unmask = unmask_ht_irq,
++ .ack = ack_ioapic_irq,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ht_irq_affinity,
++#endif
++ .retrigger = ioapic_retrigger_irq,
++};
++
++int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
++{
++ int vector;
++
++ vector = assign_irq_vector(irq);
++ if (vector >= 0) {
++ struct ht_irq_msg msg;
++ unsigned dest;
++ cpumask_t tmp;
++
++ cpus_clear(tmp);
++ cpu_set(vector >> 8, tmp);
++ dest = cpu_mask_to_apicid(tmp);
++
++ msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
++
++ msg.address_lo =
++ HT_IRQ_LOW_BASE |
++ HT_IRQ_LOW_DEST_ID(dest) |
++ HT_IRQ_LOW_VECTOR(vector) |
++ ((INT_DEST_MODE == 0) ?
++ HT_IRQ_LOW_DM_PHYSICAL :
++ HT_IRQ_LOW_DM_LOGICAL) |
++ HT_IRQ_LOW_RQEOI_EDGE |
++ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
++ HT_IRQ_LOW_MT_FIXED :
++ HT_IRQ_LOW_MT_ARBITRATED) |
++ HT_IRQ_LOW_IRQ_MASKED;
++
++ write_ht_irq_msg(irq, &msg);
++
++ set_irq_chip_and_handler_name(irq, &ht_irq_chip,
++ handle_edge_irq, "edge");
++ }
++ return vector;
++}
++#endif /* CONFIG_HT_IRQ */
++
+ /* --------------------------------------------------------------------------
+ ACPI-based IOAPIC Configuration
+ -------------------------------------------------------------------------- */
+@@ -2765,13 +2932,34 @@ int io_apic_set_pci_routing (int ioapic,
+ if (!ioapic && (irq < 16))
+ disable_8259A_irq(irq);
+
++ ioapic_write_entry(ioapic, pin, entry);
+ spin_lock_irqsave(&ioapic_lock, flags);
+- io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
+- io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
+- set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
++ set_native_irq_info(irq, TARGET_CPUS);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ return 0;
+ }
+
+ #endif /* CONFIG_ACPI */
++
++static int __init parse_disable_timer_pin_1(char *arg)
++{
++ disable_timer_pin_1 = 1;
++ return 0;
++}
++early_param("disable_timer_pin_1", parse_disable_timer_pin_1);
++
++static int __init parse_enable_timer_pin_1(char *arg)
++{
++ disable_timer_pin_1 = -1;
++ return 0;
++}
++early_param("enable_timer_pin_1", parse_enable_timer_pin_1);
++
++static int __init parse_noapic(char *arg)
++{
++ /* disable IO-APIC */
++ disable_ioapic_setup();
++ return 0;
++}
++early_param("noapic", parse_noapic);
+Index: 10.3-2007-11-26/arch/i386/kernel/irq-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/irq-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/irq-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -53,8 +53,10 @@ static union irq_ctx *softirq_ctx[NR_CPU
+ */
+ fastcall unsigned int do_IRQ(struct pt_regs *regs)
+ {
++ struct pt_regs *old_regs;
+ /* high bit used in ret_from_ code */
+ int irq = ~regs->orig_eax;
++ struct irq_desc *desc = irq_desc + irq;
+ #ifdef CONFIG_4KSTACKS
+ union irq_ctx *curctx, *irqctx;
+ u32 *isp;
+@@ -66,6 +68,7 @@ fastcall unsigned int do_IRQ(struct pt_r
+ BUG();
+ }
+
++ old_regs = set_irq_regs(regs);
+ irq_enter();
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+ /* Debugging check for stack overflow: is there less than 1KB free? */
+@@ -110,19 +113,20 @@ fastcall unsigned int do_IRQ(struct pt_r
+ (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+
+ asm volatile(
+- " xchgl %%ebx,%%esp \n"
+- " call __do_IRQ \n"
++ " xchgl %%ebx,%%esp \n"
++ " call *%%edi \n"
+ " movl %%ebx,%%esp \n"
+ : "=a" (arg1), "=d" (arg2), "=b" (ebx)
+- : "0" (irq), "1" (regs), "2" (isp)
+- : "memory", "cc", "ecx"
++ : "0" (irq), "1" (desc), "2" (isp),
++ "D" (desc->handle_irq)
++ : "memory", "cc"
+ );
+ } else
+ #endif
+- __do_IRQ(irq, regs);
++ desc->handle_irq(irq, desc);
+
+ irq_exit();
+-
++ set_irq_regs(old_regs);
+ return 1;
+ }
+
+@@ -253,7 +257,8 @@ int show_interrupts(struct seq_file *p,
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+ #endif
+- seq_printf(p, " %14s", irq_desc[i].chip->typename);
++ seq_printf(p, " %8s", irq_desc[i].chip->name);
++ seq_printf(p, "-%-8s", irq_desc[i].name);
+ seq_printf(p, " %s", action->name);
+
+ for (action=action->next; action; action = action->next)
+Index: 10.3-2007-11-26/arch/i386/kernel/ldt-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/ldt-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/ldt-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -1,5 +1,5 @@
+ /*
+- * linux/kernel/ldt.c
++ * linux/arch/i386/kernel/ldt.c
+ *
+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+Index: 10.3-2007-11-26/arch/i386/kernel/machine_kexec.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/machine_kexec.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/machine_kexec.c 2007-10-22 13:53:08.000000000 +0200
+@@ -137,6 +137,7 @@ NORET_TYPE void machine_kexec(struct kim
+ */
+ static int __init parse_crashkernel(char *arg)
+ {
++#ifndef CONFIG_XEN
+ unsigned long size, base;
+ size = memparse(arg, &arg);
+ if (*arg == '@') {
+@@ -147,6 +148,10 @@ static int __init parse_crashkernel(char
+ crashk_res.start = base;
+ crashk_res.end = base + size - 1;
+ }
++#else
++ printk("Ignoring crashkernel command line, "
++ "parameter will be supplied by xen\n");
++#endif
+ return 0;
+ }
+ early_param("crashkernel", parse_crashkernel);
+Index: 10.3-2007-11-26/arch/i386/kernel/microcode-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/microcode-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/microcode-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -2,6 +2,7 @@
+ * Intel CPU Microcode Update Driver for Linux
+ *
+ * Copyright (C) 2000-2004 Tigran Aivazian
++ * 2006 Shaohua Li <shaohua.li@intel.com>
+ *
+ * This driver allows to upgrade microcode on Intel processors
+ * belonging to IA-32 family - PentiumPro, Pentium II,
+@@ -33,7 +34,9 @@
+ #include <linux/spinlock.h>
+ #include <linux/mm.h>
+ #include <linux/mutex.h>
+-#include <linux/syscalls.h>
++#include <linux/cpu.h>
++#include <linux/firmware.h>
++#include <linux/platform_device.h>
+
+ #include <asm/msr.h>
+ #include <asm/uaccess.h>
+@@ -55,12 +58,7 @@ module_param(verbose, int, 0644);
+ /* no concurrent ->write()s are allowed on /dev/cpu/microcode */
+ static DEFINE_MUTEX(microcode_mutex);
+
+-static int microcode_open (struct inode *unused1, struct file *unused2)
+-{
+- return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+-}
+-
+-
++#ifdef CONFIG_MICROCODE_OLD_INTERFACE
+ static int do_microcode_update (const void __user *ubuf, size_t len)
+ {
+ int err;
+@@ -85,6 +83,11 @@ static int do_microcode_update (const vo
+ return err;
+ }
+
++static int microcode_open (struct inode *unused1, struct file *unused2)
++{
++ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
++
+ static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
+ {
+ ssize_t ret;
+@@ -117,7 +120,7 @@ static struct miscdevice microcode_dev =
+ .fops = &microcode_fops,
+ };
+
+-static int __init microcode_init (void)
++static int __init microcode_dev_init (void)
+ {
+ int error;
+
+@@ -129,6 +132,68 @@ static int __init microcode_init (void)
+ return error;
+ }
+
++ return 0;
++}
++
++static void __exit microcode_dev_exit (void)
++{
++ misc_deregister(&microcode_dev);
++}
++
++MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
++#else
++#define microcode_dev_init() 0
++#define microcode_dev_exit() do { } while(0)
++#endif
++
++/* fake device for request_firmware */
++static struct platform_device *microcode_pdev;
++
++static int request_microcode(void)
++{
++ char name[30];
++ const struct cpuinfo_x86 *c = &boot_cpu_data;
++ const struct firmware *firmware;
++ int error;
++ struct xen_platform_op op;
++
++ sprintf(name,"intel-ucode/%02x-%02x-%02x",
++ c->x86, c->x86_model, c->x86_mask);
++ error = request_firmware(&firmware, name, &microcode_pdev->dev);
++ if (error) {
++ pr_debug("ucode data file %s load failed\n", name);
++ return error;
++ }
++
++ op.cmd = XENPF_microcode_update;
++ set_xen_guest_handle(op.u.microcode.data, (void *)firmware->data);
++ op.u.microcode.length = firmware->size;
++ error = HYPERVISOR_platform_op(&op);
++
++ release_firmware(firmware);
++
++ if (error)
++ pr_debug("ucode load failed\n");
++
++ return error;
++}
++
++static int __init microcode_init (void)
++{
++ int error;
++
++ error = microcode_dev_init();
++ if (error)
++ return error;
++ microcode_pdev = platform_device_register_simple("microcode", -1,
++ NULL, 0);
++ if (IS_ERR(microcode_pdev)) {
++ microcode_dev_exit();
++ return PTR_ERR(microcode_pdev);
++ }
++
++ request_microcode();
++
+ printk(KERN_INFO
+ "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@veritas.com>\n");
+ return 0;
+@@ -136,9 +201,9 @@ static int __init microcode_init (void)
+
+ static void __exit microcode_exit (void)
+ {
+- misc_deregister(&microcode_dev);
++ microcode_dev_exit();
++ platform_device_unregister(microcode_pdev);
+ }
+
+ module_init(microcode_init)
+ module_exit(microcode_exit)
+-MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
+Index: 10.3-2007-11-26/arch/i386/kernel/mpparse-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/mpparse-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/mpparse-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -30,6 +30,7 @@
+ #include <asm/io_apic.h>
+
+ #include <mach_apic.h>
++#include <mach_apicdef.h>
+ #include <mach_mpparse.h>
+ #include <bios_ebda.h>
+
+@@ -68,7 +69,7 @@ unsigned int def_to_bigsmp = 0;
+ /* Processor that is doing the boot up */
+ unsigned int boot_cpu_physical_apicid = -1U;
+ /* Internal processor count */
+-static unsigned int __devinitdata num_processors;
++unsigned int __cpuinitdata num_processors;
+
+ /* Bitmask of physically existing CPUs */
+ physid_mask_t phys_cpu_present_map;
+@@ -235,12 +236,14 @@ static void __init MP_bus_info (struct m
+
+ mpc_oem_bus_info(m, str, translation_table[mpc_record]);
+
++#if MAX_MP_BUSSES < 256
+ if (m->mpc_busid >= MAX_MP_BUSSES) {
+ printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
+ " is too large, max. supported is %d\n",
+ m->mpc_busid, str, MAX_MP_BUSSES - 1);
+ return;
+ }
++#endif
+
+ if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
+@@ -300,19 +303,6 @@ static void __init MP_lintsrc_info (stru
+ m->mpc_irqtype, m->mpc_irqflag & 3,
+ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
+ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
+- /*
+- * Well it seems all SMP boards in existence
+- * use ExtINT/LVT1 == LINT0 and
+- * NMI/LVT2 == LINT1 - the following check
+- * will show us if this assumptions is false.
+- * Until then we do not have to add baggage.
+- */
+- if ((m->mpc_irqtype == mp_ExtINT) &&
+- (m->mpc_destapiclint != 0))
+- BUG();
+- if ((m->mpc_irqtype == mp_NMI) &&
+- (m->mpc_destapiclint != 1))
+- BUG();
+ }
+
+ #ifdef CONFIG_X86_NUMAQ
+@@ -838,8 +828,7 @@ int es7000_plat;
+
+ #ifdef CONFIG_ACPI
+
+-void __init mp_register_lapic_address (
+- u64 address)
++void __init mp_register_lapic_address(u64 address)
+ {
+ #ifndef CONFIG_XEN
+ mp_lapic_addr = (unsigned long) address;
+@@ -853,13 +842,10 @@ void __init mp_register_lapic_address (
+ #endif
+ }
+
+-
+-void __devinit mp_register_lapic (
+- u8 id,
+- u8 enabled)
++void __devinit mp_register_lapic (u8 id, u8 enabled)
+ {
+ struct mpc_config_processor processor;
+- int boot_cpu = 0;
++ int boot_cpu = 0;
+
+ if (MAX_APICS - id <= 0) {
+ printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
+@@ -898,11 +884,9 @@ static struct mp_ioapic_routing {
+ u32 pin_programmed[4];
+ } mp_ioapic_routing[MAX_IO_APICS];
+
+-
+-static int mp_find_ioapic (
+- int gsi)
++static int mp_find_ioapic (int gsi)
+ {
+- int i = 0;
++ int i = 0;
+
+ /* Find the IOAPIC that manages this GSI. */
+ for (i = 0; i < nr_ioapics; i++) {
+@@ -915,15 +899,11 @@ static int mp_find_ioapic (
+
+ return -1;
+ }
+-
+
+-void __init mp_register_ioapic (
+- u8 id,
+- u32 address,
+- u32 gsi_base)
++void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
+ {
+- int idx = 0;
+- int tmpid;
++ int idx = 0;
++ int tmpid;
+
+ if (nr_ioapics >= MAX_IO_APICS) {
+ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
+@@ -971,16 +951,10 @@ void __init mp_register_ioapic (
+ mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
+ mp_ioapic_routing[idx].gsi_base,
+ mp_ioapic_routing[idx].gsi_end);
+-
+- return;
+ }
+
+-
+-void __init mp_override_legacy_irq (
+- u8 bus_irq,
+- u8 polarity,
+- u8 trigger,
+- u32 gsi)
++void __init
++mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
+ {
+ struct mpc_config_intsrc intsrc;
+ int ioapic = -1;
+@@ -1018,15 +992,13 @@ void __init mp_override_legacy_irq (
+ mp_irqs[mp_irq_entries] = intsrc;
+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
+ panic("Max # of irq sources exceeded!\n");
+-
+- return;
+ }
+
+ void __init mp_config_acpi_legacy_irqs (void)
+ {
+ struct mpc_config_intsrc intsrc;
+- int i = 0;
+- int ioapic = -1;
++ int i = 0;
++ int ioapic = -1;
+
+ /*
+ * Fabricate the legacy ISA bus (bus #31).
+@@ -1095,12 +1067,12 @@ void __init mp_config_acpi_legacy_irqs (
+
+ #define MAX_GSI_NUM 4096
+
+-int mp_register_gsi (u32 gsi, int triggering, int polarity)
++int mp_register_gsi(u32 gsi, int triggering, int polarity)
+ {
+- int ioapic = -1;
+- int ioapic_pin = 0;
+- int idx, bit = 0;
+- static int pci_irq = 16;
++ int ioapic = -1;
++ int ioapic_pin = 0;
++ int idx, bit = 0;
++ static int pci_irq = 16;
+ /*
+ * Mapping between Global System Interrups, which
+ * represent all possible interrupts, and IRQs
+Index: 10.3-2007-11-26/arch/i386/kernel/pci-dma-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/pci-dma-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/pci-dma-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -83,8 +83,7 @@ dma_map_sg(struct device *hwdev, struct
+ {
+ int i, rc;
+
+- if (direction == DMA_NONE)
+- BUG();
++ BUG_ON(!valid_dma_direction(direction));
+ WARN_ON(nents == 0 || sg[0].length == 0);
+
+ if (swiotlb) {
+@@ -115,7 +114,7 @@ dma_unmap_sg(struct device *hwdev, struc
+ {
+ int i;
+
+- BUG_ON(direction == DMA_NONE);
++ BUG_ON(!valid_dma_direction(direction));
+ if (swiotlb)
+ swiotlb_unmap_sg(hwdev, sg, nents, direction);
+ else {
+@@ -132,8 +131,7 @@ dma_map_page(struct device *dev, struct
+ {
+ dma_addr_t dma_addr;
+
+- BUG_ON(direction == DMA_NONE);
+-
++ BUG_ON(!valid_dma_direction(direction));
+ if (swiotlb) {
+ dma_addr = swiotlb_map_page(
+ dev, page, offset, size, direction);
+@@ -150,7 +148,7 @@ void
+ dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
+ {
+- BUG_ON(direction == DMA_NONE);
++ BUG_ON(!valid_dma_direction(direction));
+ if (swiotlb)
+ swiotlb_unmap_page(dev, dma_address, size, direction);
+ else
+@@ -332,8 +330,7 @@ dma_map_single(struct device *dev, void
+ {
+ dma_addr_t dma;
+
+- if (direction == DMA_NONE)
+- BUG();
++ BUG_ON(!valid_dma_direction(direction));
+ WARN_ON(size == 0);
+
+ if (swiotlb) {
+@@ -354,8 +351,7 @@ void
+ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+ {
+- if (direction == DMA_NONE)
+- BUG();
++ BUG_ON(!valid_dma_direction(direction));
+ if (swiotlb)
+ swiotlb_unmap_single(dev, dma_addr, size, direction);
+ else
+Index: 10.3-2007-11-26/arch/i386/kernel/process-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/process-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/process-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -37,6 +37,7 @@
+ #include <linux/kallsyms.h>
+ #include <linux/ptrace.h>
+ #include <linux/random.h>
++#include <linux/personality.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -186,7 +187,7 @@ void cpu_idle(void)
+ void cpu_idle_wait(void)
+ {
+ unsigned int cpu, this_cpu = get_cpu();
+- cpumask_t map;
++ cpumask_t map, tmp = current->cpus_allowed;
+
+ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
+ put_cpu();
+@@ -208,6 +209,8 @@ void cpu_idle_wait(void)
+ }
+ cpus_and(map, map, cpu_online_map);
+ } while (!cpus_empty(map));
++
++ set_cpus_allowed(current, tmp);
+ }
+ EXPORT_SYMBOL_GPL(cpu_idle_wait);
+
+@@ -240,9 +243,9 @@ void show_regs(struct pt_regs * regs)
+ if (user_mode_vm(regs))
+ printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
+ printk(" EFLAGS: %08lx %s (%s %.*s)\n",
+- regs->eflags, print_tainted(), system_utsname.release,
+- (int)strcspn(system_utsname.version, " "),
+- system_utsname.version);
++ regs->eflags, print_tainted(), init_utsname()->release,
++ (int)strcspn(init_utsname()->version, " "),
++ init_utsname()->version);
+ printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
+ regs->eax,regs->ebx,regs->ecx,regs->edx);
+ printk("ESI: %08lx EDI: %08lx EBP: %08lx",
+@@ -264,15 +267,6 @@ void show_regs(struct pt_regs * regs)
+ * the "args".
+ */
+ extern void kernel_thread_helper(void);
+-__asm__(".section .text\n"
+- ".align 4\n"
+- "kernel_thread_helper:\n\t"
+- "movl %edx,%eax\n\t"
+- "pushl %edx\n\t"
+- "call *%ebx\n\t"
+- "pushl %eax\n\t"
+- "call do_exit\n"
+- ".previous");
+
+ /*
+ * Create a kernel thread
+@@ -290,7 +284,7 @@ int kernel_thread(int (*fn)(void *), voi
+ regs.xes = __USER_DS;
+ regs.orig_eax = -1;
+ regs.eip = (unsigned long) kernel_thread_helper;
+- regs.xcs = GET_KERNEL_CS();
++ regs.xcs = __KERNEL_CS | get_kernel_rpl();
+ regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
+
+ /* Ok, create the new process.. */
+@@ -368,13 +362,12 @@ int copy_thread(int nr, unsigned long cl
+
+ tsk = current;
+ if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
+- p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++ p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
++ IO_BITMAP_BYTES, GFP_KERNEL);
+ if (!p->thread.io_bitmap_ptr) {
+ p->thread.io_bitmap_max = 0;
+ return -ENOMEM;
+ }
+- memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
+- IO_BITMAP_BYTES);
+ set_tsk_thread_flag(p, TIF_IO_BITMAP);
+ }
+
+@@ -847,7 +840,7 @@ asmlinkage int sys_get_thread_area(struc
+
+ unsigned long arch_align_stack(unsigned long sp)
+ {
+- if (randomize_va_space)
++ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ sp -= get_random_int() % 8192;
+ return sp & ~0xf;
+ }
+Index: 10.3-2007-11-26/arch/i386/kernel/setup-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/setup-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/setup-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -56,6 +56,7 @@
+ #include <asm/apic.h>
+ #include <asm/e820.h>
+ #include <asm/mpspec.h>
++#include <asm/mmzone.h>
+ #include <asm/setup.h>
+ #include <asm/arch_hooks.h>
+ #include <asm/sections.h>
+@@ -105,18 +106,6 @@ EXPORT_SYMBOL(boot_cpu_data);
+
+ unsigned long mmu_cr4_features;
+
+-#ifdef CONFIG_ACPI
+- int acpi_disabled = 0;
+-#else
+- int acpi_disabled = 1;
+-#endif
+-EXPORT_SYMBOL(acpi_disabled);
+-
+-#ifdef CONFIG_ACPI
+-int __initdata acpi_force = 0;
+-extern acpi_interrupt_flags acpi_sci_flags;
+-#endif
+-
+ /* for MCA, but anyone else can use it if they want */
+ unsigned int machine_id;
+ #ifdef CONFIG_MCA
+@@ -170,7 +159,6 @@ struct e820map machine_e820;
+ #endif
+
+ extern void early_cpu_init(void);
+-extern void generic_apic_probe(char *);
+ extern int root_mountflags;
+
+ unsigned long saved_videomode;
+@@ -243,9 +231,6 @@ static struct resource adapter_rom_resou
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+ } };
+
+-#define ADAPTER_ROM_RESOURCES \
+- (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
+-
+ static struct resource video_rom_resource = {
+ .name = "Video ROM",
+ .start = 0xc0000,
+@@ -307,9 +292,6 @@ static struct resource standard_io_resou
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+ } };
+
+-#define STANDARD_IO_RESOURCES \
+- (sizeof standard_io_resources / sizeof standard_io_resources[0])
+-
+ #define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
+
+ static int __init romchecksum(unsigned char *rom, unsigned long length)
+@@ -372,7 +354,7 @@ static void __init probe_roms(void)
+ }
+
+ /* check for adapter roms on 2k boundaries */
+- for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
++ for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; start += 2048) {
+ rom = isa_bus_to_virt(start);
+ if (!romsignature(rom))
+ continue;
+@@ -764,246 +746,152 @@ static inline void copy_edd(void)
+ }
+ #endif
+
+-static void __init parse_cmdline_early (char ** cmdline_p)
++static int __initdata user_defined_memmap = 0;
++
++/*
++ * "mem=nopentium" disables the 4MB page tables.
++ * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
++ * to <mem>, overriding the bios size.
++ * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
++ * <start> to <start>+<mem>, overriding the bios size.
++ *
++ * HPA tells me bootloaders need to parse mem=, so no new
++ * option should be mem= [also see Documentation/i386/boot.txt]
++ */
++static int __init parse_mem(char *arg)
+ {
+- char c = ' ', *to = command_line, *from = saved_command_line;
+- int len = 0, max_cmdline;
+- int userdef = 0;
+-
+- if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
+- max_cmdline = COMMAND_LINE_SIZE;
+- memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
+- /* Save unparsed command line copy for /proc/cmdline */
+- saved_command_line[max_cmdline-1] = '\0';
+-
+- for (;;) {
+- if (c != ' ')
+- goto next_char;
+- /*
+- * "mem=nopentium" disables the 4MB page tables.
+- * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
+- * to <mem>, overriding the bios size.
+- * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
+- * <start> to <start>+<mem>, overriding the bios size.
+- *
+- * HPA tells me bootloaders need to parse mem=, so no new
+- * option should be mem= [also see Documentation/i386/boot.txt]
+- */
+- if (!memcmp(from, "mem=", 4)) {
+- if (to != command_line)
+- to--;
+- if (!memcmp(from+4, "nopentium", 9)) {
+- from += 9+4;
+- clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+- disable_pse = 1;
+- } else {
+- /* If the user specifies memory size, we
+- * limit the BIOS-provided memory map to
+- * that size. exactmap can be used to specify
+- * the exact map. mem=number can be used to
+- * trim the existing memory map.
+- */
+- unsigned long long mem_size;
+-
+- mem_size = memparse(from+4, &from);
+- limit_regions(mem_size);
+- userdef=1;
+- }
+- }
++ if (!arg)
++ return -EINVAL;
+
+- else if (!memcmp(from, "memmap=", 7)) {
+- if (to != command_line)
+- to--;
+- if (!memcmp(from+7, "exactmap", 8)) {
+-#ifdef CONFIG_CRASH_DUMP
+- /* If we are doing a crash dump, we
+- * still need to know the real mem
+- * size before original memory map is
+- * reset.
+- */
+- find_max_pfn();
+- saved_max_pfn = max_pfn;
+-#endif
+- from += 8+7;
+- e820.nr_map = 0;
+- userdef = 1;
+- } else {
+- /* If the user specifies memory size, we
+- * limit the BIOS-provided memory map to
+- * that size. exactmap can be used to specify
+- * the exact map. mem=number can be used to
+- * trim the existing memory map.
+- */
+- unsigned long long start_at, mem_size;
++ if (strcmp(arg, "nopentium") == 0) {
++ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
++ disable_pse = 1;
++ } else {
++ /* If the user specifies memory size, we
++ * limit the BIOS-provided memory map to
++ * that size. exactmap can be used to specify
++ * the exact map. mem=number can be used to
++ * trim the existing memory map.
++ */
++ unsigned long long mem_size;
+
+- mem_size = memparse(from+7, &from);
+- if (*from == '@') {
+- start_at = memparse(from+1, &from);
+- add_memory_region(start_at, mem_size, E820_RAM);
+- } else if (*from == '#') {
+- start_at = memparse(from+1, &from);
+- add_memory_region(start_at, mem_size, E820_ACPI);
+- } else if (*from == '$') {
+- start_at = memparse(from+1, &from);
+- add_memory_region(start_at, mem_size, E820_RESERVED);
+- } else {
+- limit_regions(mem_size);
+- userdef=1;
+- }
+- }
+- }
+-
+- else if (!memcmp(from, "noexec=", 7))
+- noexec_setup(from + 7);
++ mem_size = memparse(arg, &arg);
++ limit_regions(mem_size);
++ user_defined_memmap = 1;
++ }
++ return 0;
++}
++early_param("mem", parse_mem);
+
++static int __init parse_memmap(char *arg)
++{
++ if (!arg)
++ return -EINVAL;
+
+-#ifdef CONFIG_X86_MPPARSE
+- /*
+- * If the BIOS enumerates physical processors before logical,
+- * maxcpus=N at enumeration-time can be used to disable HT.
++ if (strcmp(arg, "exactmap") == 0) {
++#ifdef CONFIG_CRASH_DUMP
++ /* If we are doing a crash dump, we
++ * still need to know the real mem
++ * size before original memory map is
++ * reset.
+ */
+- else if (!memcmp(from, "maxcpus=", 8)) {
+- extern unsigned int maxcpus;
+-
+- maxcpus = simple_strtoul(from + 8, NULL, 0);
+- }
++ find_max_pfn();
++ saved_max_pfn = max_pfn;
+ #endif
++ e820.nr_map = 0;
++ user_defined_memmap = 1;
++ } else {
++ /* If the user specifies memory size, we
++ * limit the BIOS-provided memory map to
++ * that size. exactmap can be used to specify
++ * the exact map. mem=number can be used to
++ * trim the existing memory map.
++ */
++ unsigned long long start_at, mem_size;
+
+-#ifdef CONFIG_ACPI
+- /* "acpi=off" disables both ACPI table parsing and interpreter */
+- else if (!memcmp(from, "acpi=off", 8)) {
+- disable_acpi();
+- }
+-
+- /* acpi=force to over-ride black-list */
+- else if (!memcmp(from, "acpi=force", 10)) {
+- acpi_force = 1;
+- acpi_ht = 1;
+- acpi_disabled = 0;
+- }
+-
+- /* acpi=strict disables out-of-spec workarounds */
+- else if (!memcmp(from, "acpi=strict", 11)) {
+- acpi_strict = 1;
+- }
+-
+- /* Limit ACPI just to boot-time to enable HT */
+- else if (!memcmp(from, "acpi=ht", 7)) {
+- if (!acpi_force)
+- disable_acpi();
+- acpi_ht = 1;
+- }
+-
+- /* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
+- else if (!memcmp(from, "pci=noacpi", 10)) {
+- acpi_disable_pci();
+- }
+- /* "acpi=noirq" disables ACPI interrupt routing */
+- else if (!memcmp(from, "acpi=noirq", 10)) {
+- acpi_noirq_set();
++ mem_size = memparse(arg, &arg);
++ if (*arg == '@') {
++ start_at = memparse(arg+1, &arg);
++ add_memory_region(start_at, mem_size, E820_RAM);
++ } else if (*arg == '#') {
++ start_at = memparse(arg+1, &arg);
++ add_memory_region(start_at, mem_size, E820_ACPI);
++ } else if (*arg == '$') {
++ start_at = memparse(arg+1, &arg);
++ add_memory_region(start_at, mem_size, E820_RESERVED);
++ } else {
++ limit_regions(mem_size);
++ user_defined_memmap = 1;
+ }
++ }
++ return 0;
++}
++early_param("memmap", parse_memmap);
+
+- else if (!memcmp(from, "acpi_sci=edge", 13))
+- acpi_sci_flags.trigger = 1;
+-
+- else if (!memcmp(from, "acpi_sci=level", 14))
+- acpi_sci_flags.trigger = 3;
++#ifdef CONFIG_PROC_VMCORE
++/* elfcorehdr= specifies the location of elf core header
++ * stored by the crashed kernel.
++ */
++static int __init parse_elfcorehdr(char *arg)
++{
++ if (!arg)
++ return -EINVAL;
+
+- else if (!memcmp(from, "acpi_sci=high", 13))
+- acpi_sci_flags.polarity = 1;
++ elfcorehdr_addr = memparse(arg, &arg);
++ return 0;
++}
++early_param("elfcorehdr", parse_elfcorehdr);
++#endif /* CONFIG_PROC_VMCORE */
+
+- else if (!memcmp(from, "acpi_sci=low", 12))
+- acpi_sci_flags.polarity = 3;
++/*
++ * highmem=size forces highmem to be exactly 'size' bytes.
++ * This works even on boxes that have no highmem otherwise.
++ * This also works to reduce highmem size on bigger boxes.
++ */
++static int __init parse_highmem(char *arg)
++{
++ if (!arg)
++ return -EINVAL;
+
+-#ifdef CONFIG_X86_IO_APIC
+- else if (!memcmp(from, "acpi_skip_timer_override", 24))
+- acpi_skip_timer_override = 1;
++ highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
++ return 0;
++}
++early_param("highmem", parse_highmem);
+
+- if (!memcmp(from, "disable_timer_pin_1", 19))
+- disable_timer_pin_1 = 1;
+- if (!memcmp(from, "enable_timer_pin_1", 18))
+- disable_timer_pin_1 = -1;
+-
+- /* disable IO-APIC */
+- else if (!memcmp(from, "noapic", 6))
+- disable_ioapic_setup();
+-#endif /* CONFIG_X86_IO_APIC */
+-#endif /* CONFIG_ACPI */
++/*
++ * vmalloc=size forces the vmalloc area to be exactly 'size'
++ * bytes. This can be used to increase (or decrease) the
++ * vmalloc area - the default is 128m.
++ */
++static int __init parse_vmalloc(char *arg)
++{
++ if (!arg)
++ return -EINVAL;
+
+-#ifdef CONFIG_X86_LOCAL_APIC
+- /* enable local APIC */
+- else if (!memcmp(from, "lapic", 5))
+- lapic_enable();
+-
+- /* disable local APIC */
+- else if (!memcmp(from, "nolapic", 6))
+- lapic_disable();
+-#endif /* CONFIG_X86_LOCAL_APIC */
++ __VMALLOC_RESERVE = memparse(arg, &arg);
++ return 0;
++}
++early_param("vmalloc", parse_vmalloc);
+
+-#ifdef CONFIG_KEXEC
+- /* crashkernel=size@addr specifies the location to reserve for
+- * a crash kernel. By reserving this memory we guarantee
+- * that linux never set's it up as a DMA target.
+- * Useful for holding code to do something appropriate
+- * after a kernel panic.
+- */
+- else if (!memcmp(from, "crashkernel=", 12)) {
+ #ifndef CONFIG_XEN
+- unsigned long size, base;
+- size = memparse(from+12, &from);
+- if (*from == '@') {
+- base = memparse(from+1, &from);
+- /* FIXME: Do I want a sanity check
+- * to validate the memory range?
+- */
+- crashk_res.start = base;
+- crashk_res.end = base + size - 1;
+- }
+-#else
+- printk("Ignoring crashkernel command line, "
+- "parameter will be supplied by xen\n");
+-#endif
+- }
+-#endif
+-#ifdef CONFIG_PROC_VMCORE
+- /* elfcorehdr= specifies the location of elf core header
+- * stored by the crashed kernel.
+- */
+- else if (!memcmp(from, "elfcorehdr=", 11))
+- elfcorehdr_addr = memparse(from+11, &from);
+-#endif
++/*
++ * reservetop=size reserves a hole at the top of the kernel address space which
++ * a hypervisor can load into later. Needed for dynamically loaded hypervisors,
++ * so relocating the fixmap can be done before paging initialization.
++ */
++static int __init parse_reservetop(char *arg)
++{
++ unsigned long address;
+
+- /*
+- * highmem=size forces highmem to be exactly 'size' bytes.
+- * This works even on boxes that have no highmem otherwise.
+- * This also works to reduce highmem size on bigger boxes.
+- */
+- else if (!memcmp(from, "highmem=", 8))
+- highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
+-
+- /*
+- * vmalloc=size forces the vmalloc area to be exactly 'size'
+- * bytes. This can be used to increase (or decrease) the
+- * vmalloc area - the default is 128m.
+- */
+- else if (!memcmp(from, "vmalloc=", 8))
+- __VMALLOC_RESERVE = memparse(from+8, &from);
++ if (!arg)
++ return -EINVAL;
+
+- next_char:
+- c = *(from++);
+- if (!c)
+- break;
+- if (COMMAND_LINE_SIZE <= ++len)
+- break;
+- *(to++) = c;
+- }
+- *to = '\0';
+- *cmdline_p = command_line;
+- if (userdef) {
+- printk(KERN_INFO "user-defined physical RAM map:\n");
+- print_memory_map("user");
+- }
++ address = memparse(arg, &arg);
++ reserve_top_address(address);
++ return 0;
+ }
++early_param("reservetop", parse_reservetop);
++#endif
+
+ /*
+ * Callback for efi_memory_walk.
+@@ -1024,7 +912,7 @@ efi_find_max_pfn(unsigned long start, un
+ static int __init
+ efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
+ {
+- memory_present(0, start, end);
++ memory_present(0, PFN_UP(start), PFN_DOWN(end));
+ return 0;
+ }
+
+@@ -1262,6 +1150,14 @@ static unsigned long __init setup_memory
+ }
+ printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
+ pages_to_mb(highend_pfn - highstart_pfn));
++ num_physpages = highend_pfn;
++ high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
++#else
++ num_physpages = max_low_pfn;
++ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
++#endif
++#ifdef CONFIG_FLATMEM
++ max_mapnr = num_physpages;
+ #endif
+ printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
+ pages_to_mb(max_low_pfn));
+@@ -1273,9 +1169,9 @@ static unsigned long __init setup_memory
+
+ void __init zone_sizes_init(void)
+ {
+- unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+- unsigned int max_dma, low;
++ unsigned long max_zone_pfns[MAX_NR_ZONES];
+
++ memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+ /*
+ * XEN: Our notion of "DMA memory" is fake when running over Xen.
+ * We simply put all RAM in the DMA zone so that those drivers which
+@@ -1283,19 +1179,16 @@ void __init zone_sizes_init(void)
+ * Those drivers that *do* require lowmem are screwed anyway when
+ * running over Xen!
+ */
+- max_dma = max_low_pfn;
+- low = max_low_pfn;
+-
+- if (low < max_dma)
+- zones_size[ZONE_DMA] = low;
+- else {
+- zones_size[ZONE_DMA] = max_dma;
+- zones_size[ZONE_NORMAL] = low - max_dma;
++ max_zone_pfns[ZONE_DMA] = max_low_pfn;
++ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+ #ifdef CONFIG_HIGHMEM
+- zones_size[ZONE_HIGHMEM] = highend_pfn - low;
++ max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
++ add_active_range(0, 0, highend_pfn);
++#else
++ add_active_range(0, 0, max_low_pfn);
+ #endif
+- }
+- free_area_init(zones_size);
++
++ free_area_init_nodes(max_zone_pfns);
+ }
+ #else
+ extern unsigned long __init setup_memory(void);
+@@ -1352,6 +1245,7 @@ void __init setup_bootmem_allocator(void
+ */
+ acpi_reserve_bootmem();
+ #endif
++ numa_kva_reserve();
+ #endif /* !CONFIG_XEN */
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+@@ -1541,7 +1435,7 @@ static int __init request_standard_resou
+ request_resource(&iomem_resource, &video_ram_resource);
+
+ /* request I/O space for devices used on all i[345]86 PCs */
+- for (i = 0; i < STANDARD_IO_RESOURCES; i++)
++ for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
+ request_resource(&ioport_resource, &standard_io_resources[i]);
+ return 0;
+ }
+@@ -1692,17 +1586,19 @@ void __init setup_arch(char **cmdline_p)
+ data_resource.start = virt_to_phys(_etext);
+ data_resource.end = virt_to_phys(_edata)-1;
+
+- parse_cmdline_early(cmdline_p);
++ if ((i = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
++ i = COMMAND_LINE_SIZE;
++ memcpy(saved_command_line, xen_start_info->cmd_line, i);
++ saved_command_line[i - 1] = '\0';
++ parse_early_param();
+
+-#ifdef CONFIG_EARLY_PRINTK
+- {
+- char *s = strstr(*cmdline_p, "earlyprintk=");
+- if (s) {
+- setup_early_printk(strchr(s, '=') + 1);
+- printk("early console enabled\n");
+- }
++ if (user_defined_memmap) {
++ printk(KERN_INFO "user-defined physical RAM map:\n");
++ print_memory_map("user");
+ }
+-#endif
++
++ strlcpy(command_line, saved_command_line, COMMAND_LINE_SIZE);
++ *cmdline_p = command_line;
+
+ max_low_pfn = setup_memory();
+
+@@ -1778,7 +1674,7 @@ void __init setup_arch(char **cmdline_p)
+ dmi_scan_machine();
+
+ #ifdef CONFIG_X86_GENERICARCH
+- generic_apic_probe(*cmdline_p);
++ generic_apic_probe();
+ #endif
+ if (efi_enabled)
+ efi_map_memmap();
+@@ -1799,9 +1695,11 @@ void __init setup_arch(char **cmdline_p)
+ acpi_boot_table_init();
+ #endif
+
++#ifdef CONFIG_PCI
+ #ifdef CONFIG_X86_IO_APIC
+ check_acpi_pci(); /* Checks more than just ACPI actually */
+ #endif
++#endif
+
+ #ifdef CONFIG_ACPI
+ acpi_boot_init();
+Index: 10.3-2007-11-26/arch/i386/kernel/smp-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/smp-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/smp-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -279,8 +279,7 @@ static inline void leave_mm (unsigned lo
+ * 2) Leave the mm if we are in the lazy tlb mode.
+ */
+
+-irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
+- struct pt_regs *regs)
++irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id)
+ {
+ unsigned long cpu;
+
+@@ -443,8 +442,7 @@ void flush_tlb_all(void)
+
+ #else
+
+-irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
+- struct pt_regs *regs)
++irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id)
+ { return 0; }
+ void flush_tlb_current_task(void)
+ { xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
+@@ -586,16 +584,14 @@ void smp_send_stop(void)
+ * all the work is done automatically when
+ * we return from the interrupt.
+ */
+-irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id,
+- struct pt_regs *regs)
++irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
+ {
+
+ return IRQ_HANDLED;
+ }
+
+ #include <linux/kallsyms.h>
+-irqreturn_t smp_call_function_interrupt(int irq, void *dev_id,
+- struct pt_regs *regs)
++irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
+ {
+ void (*func) (void *info) = call_data->func;
+ void *info = call_data->info;
+@@ -622,3 +618,69 @@ irqreturn_t smp_call_function_interrupt(
+ return IRQ_HANDLED;
+ }
+
++/*
++ * this function sends a 'generic call function' IPI to one other CPU
++ * in the system.
++ *
++ * cpu is a standard Linux logical CPU number.
++ */
++static void
++__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
++ int nonatomic, int wait)
++{
++ struct call_data_struct data;
++ int cpus = 1;
++
++ data.func = func;
++ data.info = info;
++ atomic_set(&data.started, 0);
++ data.wait = wait;
++ if (wait)
++ atomic_set(&data.finished, 0);
++
++ call_data = &data;
++ wmb();
++ /* Send a message to all other CPUs and wait for them to respond */
++ send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
++
++ /* Wait for response */
++ while (atomic_read(&data.started) != cpus)
++ cpu_relax();
++
++ if (!wait)
++ return;
++
++ while (atomic_read(&data.finished) != cpus)
++ cpu_relax();
++}
++
++/*
++ * smp_call_function_single - Run a function on another CPU
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: Currently unused.
++ * @wait: If true, wait until function has completed on other CPUs.
++ *
++ * Returns 0 on success, else a negative status code.
++ *
++ * Does not return until the remote CPU is nearly ready to execute <func>
++ * or is or has executed.
++ */
++
++int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
++ int nonatomic, int wait)
++{
++ /* prevent preemption and reschedule on another processor */
++ int me = get_cpu();
++ if (cpu == me) {
++ WARN_ON(1);
++ put_cpu();
++ return -EBUSY;
++ }
++ spin_lock_bh(&call_lock);
++ __smp_call_function_single(cpu, func, info, nonatomic, wait);
++ spin_unlock_bh(&call_lock);
++ put_cpu();
++ return 0;
++}
++EXPORT_SYMBOL(smp_call_function_single);
+Index: 10.3-2007-11-26/arch/i386/kernel/time-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/time-xen.c 2007-12-06 17:31:37.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/time-xen.c 2007-12-06 17:31:58.000000000 +0100
+@@ -88,7 +88,6 @@ int pit_latch_buggy; /* ext
+ unsigned long vxtime_hz = PIT_TICK_RATE;
+ struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */
+ volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
+-unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
+ struct timespec __xtime __section_xtime;
+ struct timezone __sys_tz __section_sys_tz;
+ #endif
+@@ -96,8 +95,6 @@ struct timezone __sys_tz __section_sys_t
+ unsigned int cpu_khz; /* Detected as we calibrate the TSC */
+ EXPORT_SYMBOL(cpu_khz);
+
+-extern unsigned long wall_jiffies;
+-
+ DEFINE_SPINLOCK(rtc_lock);
+ EXPORT_SYMBOL(rtc_lock);
+
+@@ -261,11 +258,10 @@ static void __update_wallclock(time_t se
+ time_t wtm_sec, xtime_sec;
+ u64 tmp, wc_nsec;
+
+- /* Adjust wall-clock time base based on wall_jiffies ticks. */
++ /* Adjust wall-clock time base. */
+ wc_nsec = processed_system_time;
+ wc_nsec += sec * (u64)NSEC_PER_SEC;
+ wc_nsec += nsec;
+- wc_nsec -= (jiffies - wall_jiffies) * (u64)NS_PER_TICK;
+
+ /* Split wallclock base into seconds and nanoseconds. */
+ tmp = wc_nsec;
+@@ -383,13 +379,10 @@ void do_gettimeofday(struct timeval *tv)
+ shadow = &per_cpu(shadow_time, cpu);
+
+ do {
+- unsigned long lost;
+-
+ local_time_version = shadow->version;
+ seq = read_seqbegin(&xtime_lock);
+
+ usec = get_usec_offset(shadow);
+- lost = jiffies - wall_jiffies;
+
+ /*
+ * If time_adjust is negative then NTP is slowing the clock
+@@ -399,12 +392,7 @@ void do_gettimeofday(struct timeval *tv)
+ if (unlikely(time_adjust < 0)) {
+ max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
+ usec = min(usec, max_ntp_tick);
+-
+- if (lost)
+- usec += lost * max_ntp_tick;
+ }
+- else if (unlikely(lost))
+- usec += lost * (USEC_PER_SEC / HZ);
+
+ sec = xtime.tv_sec;
+ usec += (xtime.tv_nsec / NSEC_PER_USEC);
+@@ -509,7 +497,7 @@ static void sync_xen_wallclock(unsigned
+ write_seqlock_irq(&xtime_lock);
+
+ sec = xtime.tv_sec;
+- nsec = xtime.tv_nsec + ((jiffies - wall_jiffies) * (u64)NS_PER_TICK);
++ nsec = xtime.tv_nsec;
+ __normalize_time(&sec, &nsec);
+
+ op.cmd = XENPF_settime;
+@@ -583,7 +571,6 @@ unsigned long long sched_clock(void)
+ }
+ #endif
+
+-#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
+ unsigned long profile_pc(struct pt_regs *regs)
+ {
+ unsigned long pc = instruction_pointer(regs);
+@@ -604,21 +591,38 @@ unsigned long profile_pc(struct pt_regs
+ return ((unsigned long *)regs->rsp)[1];
+ }
+ #else
+- if (!user_mode_vm(regs) && in_lock_functions(pc))
++#ifdef CONFIG_SMP
++ if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++#ifdef CONFIG_FRAME_POINTER
+ return *(unsigned long *)(regs->ebp + 4);
++#else
++ unsigned long *sp;
++ if ((regs->xcs & 2) == 0)
++ sp = (unsigned long *)&regs->esp;
++ else
++ sp = (unsigned long *)regs->esp;
++ /* Return address is either directly at stack pointer
++ or above a saved eflags. Eflags has bits 22-31 zero,
++ kernel addresses don't. */
++ if (sp[0] >> 22)
++ return sp[0];
++ if (sp[1] >> 22)
++ return sp[1];
++#endif
++ }
++#endif
+ #endif
+
+ return pc;
+ }
+ EXPORT_SYMBOL(profile_pc);
+-#endif
+
+ /*
+ * This is the same as the above, except we _also_ save the current
+ * Time Stamp Counter value at the time of the timer interrupt, so that
+ * we later on can estimate the time of day more exactly.
+ */
+-irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
++irqreturn_t timer_interrupt(int irq, void *dev_id)
+ {
+ s64 delta, delta_cpu, stolen, blocked;
+ u64 sched_time;
+@@ -676,10 +680,14 @@ irqreturn_t timer_interrupt(int irq, voi
+ }
+
+ /* System-wide jiffy work. */
+- while (delta >= NS_PER_TICK) {
+- delta -= NS_PER_TICK;
+- processed_system_time += NS_PER_TICK;
+- do_timer(regs);
++ if (delta >= NS_PER_TICK) {
++ do_div(delta, NS_PER_TICK);
++ processed_system_time += delta * NS_PER_TICK;
++ while (delta > HZ) {
++ do_timer(HZ);
++ delta -= HZ;
++ }
++ do_timer(delta);
+ }
+
+ if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
+@@ -724,7 +732,7 @@ irqreturn_t timer_interrupt(int irq, voi
+ if (delta_cpu > 0) {
+ do_div(delta_cpu, NS_PER_TICK);
+ per_cpu(processed_system_time, cpu) += delta_cpu * NS_PER_TICK;
+- if (user_mode_vm(regs))
++ if (user_mode_vm(get_irq_regs()))
+ account_user_time(current, (cputime_t)delta_cpu);
+ else
+ account_system_time(current, HARDIRQ_OFFSET,
+@@ -738,10 +746,10 @@ irqreturn_t timer_interrupt(int irq, voi
+ /* Local timer processing (see update_process_times()). */
+ run_local_timers();
+ if (rcu_pending(cpu))
+- rcu_check_callbacks(cpu, user_mode_vm(regs));
++ rcu_check_callbacks(cpu, user_mode_vm(get_irq_regs()));
+ scheduler_tick();
+ run_posix_cpu_timers(current);
+- profile_tick(CPU_PROFILING, regs);
++ profile_tick(CPU_PROFILING);
+
+ return IRQ_HANDLED;
+ }
+@@ -913,16 +921,19 @@ void notify_arch_cmos_timer(void)
+ mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
+ }
+
+-static long clock_cmos_diff, sleep_start;
++static long clock_cmos_diff;
++static unsigned long sleep_start;
+
+ static int timer_suspend(struct sys_device *dev, pm_message_t state)
+ {
+ /*
+ * Estimate time zone so that set_time can update the clock
+ */
+- clock_cmos_diff = -get_cmos_time();
++ unsigned long ctime = get_cmos_time();
++
++ clock_cmos_diff = -ctime;
+ clock_cmos_diff += get_seconds();
+- sleep_start = get_cmos_time();
++ sleep_start = ctime;
+ return 0;
+ }
+
+@@ -930,19 +941,29 @@ static int timer_resume(struct sys_devic
+ {
+ unsigned long flags;
+ unsigned long sec;
+- unsigned long sleep_length;
+-
++ unsigned long ctime = get_cmos_time();
++ long sleep_length = (ctime - sleep_start) * HZ;
++ struct timespec ts;
++
++ if (sleep_length < 0) {
++ printk(KERN_WARNING "CMOS clock skew detected in timer resume!\n");
++ /* The time after the resume must not be earlier than the time
++ * before the suspend or some nasty things will happen
++ */
++ sleep_length = 0;
++ ctime = sleep_start;
++ }
+ #ifdef CONFIG_HPET_TIMER
+ if (is_hpet_enabled())
+ hpet_reenable();
+ #endif
+- sec = get_cmos_time() + clock_cmos_diff;
+- sleep_length = (get_cmos_time() - sleep_start) * HZ;
++
++ sec = ctime + clock_cmos_diff;
++ ts.tv_sec = sec;
++ ts.tv_nsec = 0;
++ do_settimeofday(&ts);
+ write_seqlock_irqsave(&xtime_lock, flags);
+- xtime.tv_sec = sec;
+- xtime.tv_nsec = 0;
+ jiffies_64 += sleep_length;
+- wall_jiffies += sleep_length;
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+ touch_softlockup_watchdog();
+ return 0;
+@@ -976,10 +997,11 @@ extern void (*late_time_init)(void);
+ /* Duplicate of time_init() below, with hpet_enable part added */
+ static void __init hpet_time_init(void)
+ {
+- xtime.tv_sec = get_cmos_time();
+- xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+- set_normalized_timespec(&wall_to_monotonic,
+- -xtime.tv_sec, -xtime.tv_nsec);
++ struct timespec ts;
++ ts.tv_sec = get_cmos_time();
++ ts.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
++
++ do_settimeofday(&ts);
+
+ if ((hpet_enable() >= 0) && hpet_use_timer) {
+ printk("Using HPET for base-timer\n");
+Index: 10.3-2007-11-26/arch/i386/kernel/traps-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/traps-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/traps-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -28,6 +28,7 @@
+ #include <linux/kprobes.h>
+ #include <linux/kexec.h>
+ #include <linux/unwind.h>
++#include <linux/uaccess.h>
+
+ #ifdef CONFIG_EISA
+ #include <linux/ioport.h>
+@@ -40,7 +41,6 @@
+
+ #include <asm/processor.h>
+ #include <asm/system.h>
+-#include <asm/uaccess.h>
+ #include <asm/io.h>
+ #include <asm/atomic.h>
+ #include <asm/debugreg.h>
+@@ -51,11 +51,14 @@
+ #include <asm/smp.h>
+ #include <asm/arch_hooks.h>
+ #include <asm/kdebug.h>
++#include <asm/stacktrace.h>
+
+ #include <linux/module.h>
+
+ #include "mach_traps.h"
+
++int panic_on_unrecovered_nmi;
++
+ asmlinkage int system_call(void);
+
+ struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
+@@ -124,62 +127,63 @@ static inline int valid_stack_ptr(struct
+ p < (void *)tinfo + THREAD_SIZE - 3;
+ }
+
+-/*
+- * Print one address/symbol entries per line.
+- */
+-static inline void print_addr_and_symbol(unsigned long addr, char *log_lvl)
+-{
+- printk(" [<%08lx>] ", addr);
+-
+- print_symbol("%s\n", addr);
+-}
+-
+ static inline unsigned long print_context_stack(struct thread_info *tinfo,
+ unsigned long *stack, unsigned long ebp,
+- char *log_lvl)
++ struct stacktrace_ops *ops, void *data)
+ {
+ unsigned long addr;
+
+ #ifdef CONFIG_FRAME_POINTER
+ while (valid_stack_ptr(tinfo, (void *)ebp)) {
++ unsigned long new_ebp;
+ addr = *(unsigned long *)(ebp + 4);
+- print_addr_and_symbol(addr, log_lvl);
++ ops->address(data, addr);
+ /*
+ * break out of recursive entries (such as
+- * end_of_stack_stop_unwind_function):
++ * end_of_stack_stop_unwind_function). Also,
++ * we can never allow a frame pointer to
++ * move downwards!
+ */
+- if (ebp == *(unsigned long *)ebp)
++ new_ebp = *(unsigned long *)ebp;
++ if (new_ebp <= ebp)
+ break;
+- ebp = *(unsigned long *)ebp;
++ ebp = new_ebp;
+ }
+ #else
+ while (valid_stack_ptr(tinfo, stack)) {
+ addr = *stack++;
+ if (__kernel_text_address(addr))
+- print_addr_and_symbol(addr, log_lvl);
++ ops->address(data, addr);
+ }
+ #endif
+ return ebp;
+ }
+
++struct ops_and_data {
++ struct stacktrace_ops *ops;
++ void *data;
++};
++
+ static asmlinkage int
+-show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
++dump_trace_unwind(struct unwind_frame_info *info, void *data)
+ {
++ struct ops_and_data *oad = (struct ops_and_data *)data;
+ int n = 0;
+
+ while (unwind(info) == 0 && UNW_PC(info)) {
+ n++;
+- print_addr_and_symbol(UNW_PC(info), log_lvl);
++ oad->ops->address(oad->data, UNW_PC(info));
+ if (arch_unw_user_mode(info))
+ break;
+ }
+ return n;
+ }
+
+-static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+- unsigned long *stack, char *log_lvl)
++void dump_trace(struct task_struct *task, struct pt_regs *regs,
++ unsigned long *stack,
++ struct stacktrace_ops *ops, void *data)
+ {
+- unsigned long ebp;
++ unsigned long ebp = 0;
+
+ if (!task)
+ task = current;
+@@ -187,54 +191,116 @@ static void show_trace_log_lvl(struct ta
+ if (call_trace >= 0) {
+ int unw_ret = 0;
+ struct unwind_frame_info info;
++ struct ops_and_data oad = { .ops = ops, .data = data };
+
+ if (regs) {
+ if (unwind_init_frame_info(&info, task, regs) == 0)
+- unw_ret = show_trace_unwind(&info, log_lvl);
++ unw_ret = dump_trace_unwind(&info, &oad);
+ } else if (task == current)
+- unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl);
++ unw_ret = unwind_init_running(&info, dump_trace_unwind, &oad);
+ else {
+ if (unwind_init_blocked(&info, task) == 0)
+- unw_ret = show_trace_unwind(&info, log_lvl);
++ unw_ret = dump_trace_unwind(&info, &oad);
+ }
+ if (unw_ret > 0) {
+ if (call_trace == 1 && !arch_unw_user_mode(&info)) {
+- print_symbol("DWARF2 unwinder stuck at %s\n",
++ ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
+ UNW_PC(&info));
+ if (UNW_SP(&info) >= PAGE_OFFSET) {
+- printk("Leftover inexact backtrace:\n");
++ ops->warning(data, "Leftover inexact backtrace:\n");
+ stack = (void *)UNW_SP(&info);
++ if (!stack)
++ return;
++ ebp = UNW_FP(&info);
+ } else
+- printk("Full inexact backtrace again:\n");
++ ops->warning(data, "Full inexact backtrace again:\n");
+ } else if (call_trace >= 1)
+ return;
+ else
+- printk("Full inexact backtrace again:\n");
++ ops->warning(data, "Full inexact backtrace again:\n");
+ } else
+- printk("Inexact backtrace:\n");
++ ops->warning(data, "Inexact backtrace:\n");
+ }
+-
+- if (task == current) {
+- /* Grab ebp right from our regs */
+- asm ("movl %%ebp, %0" : "=r" (ebp) : );
+- } else {
+- /* ebp is the last reg pushed by switch_to */
+- ebp = *(unsigned long *) task->thread.esp;
++ if (!stack) {
++ unsigned long dummy;
++ stack = &dummy;
++ if (task && task != current)
++ stack = (unsigned long *)task->thread.esp;
++ }
++
++#ifdef CONFIG_FRAME_POINTER
++ if (!ebp) {
++ if (task == current) {
++ /* Grab ebp right from our regs */
++ asm ("movl %%ebp, %0" : "=r" (ebp) : );
++ } else {
++ /* ebp is the last reg pushed by switch_to */
++ ebp = *(unsigned long *) task->thread.esp;
++ }
+ }
++#endif
+
+ while (1) {
+ struct thread_info *context;
+ context = (struct thread_info *)
+ ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+- ebp = print_context_stack(context, stack, ebp, log_lvl);
++ ebp = print_context_stack(context, stack, ebp, ops, data);
++ /* Should be after the line below, but somewhere
++ in early boot context comes out corrupted and we
++ can't reference it -AK */
++ if (ops->stack(data, "IRQ") < 0)
++ break;
+ stack = (unsigned long*)context->previous_esp;
+ if (!stack)
+ break;
+- printk("%s =======================\n", log_lvl);
+ }
+ }
++EXPORT_SYMBOL(dump_trace);
++
++static void
++print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
++{
++ printk(data);
++ print_symbol(msg, symbol);
++ printk("\n");
++}
++
++static void print_trace_warning(void *data, char *msg)
++{
++ printk("%s%s\n", (char *)data, msg);
++}
+
+-void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack)
++static int print_trace_stack(void *data, char *name)
++{
++ return 0;
++}
++
++/*
++ * Print one address/symbol entries per line.
++ */
++static void print_trace_address(void *data, unsigned long addr)
++{
++ printk("%s [<%08lx>] ", (char *)data, addr);
++ print_symbol("%s\n", addr);
++}
++
++static struct stacktrace_ops print_trace_ops = {
++ .warning = print_trace_warning,
++ .warning_symbol = print_trace_warning_symbol,
++ .stack = print_trace_stack,
++ .address = print_trace_address,
++};
++
++static void
++show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
++ unsigned long * stack, char *log_lvl)
++{
++ dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
++ printk("%s =======================\n", log_lvl);
++}
++
++void show_trace(struct task_struct *task, struct pt_regs *regs,
++ unsigned long * stack)
+ {
+ show_trace_log_lvl(task, regs, stack, "");
+ }
+@@ -297,12 +363,13 @@ void show_registers(struct pt_regs *regs
+ ss = regs->xss & 0xffff;
+ }
+ print_modules();
+- printk(KERN_EMERG "CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\n"
+- "EFLAGS: %08lx (%s %.*s) \n",
++ printk(KERN_EMERG "CPU: %d\n"
++ KERN_EMERG "EIP: %04x:[<%08lx>] %s VLI\n"
++ KERN_EMERG "EFLAGS: %08lx (%s %.*s)\n",
+ smp_processor_id(), 0xffff & regs->xcs, regs->eip,
+- print_tainted(), regs->eflags, system_utsname.release,
+- (int)strcspn(system_utsname.version, " "),
+- system_utsname.version);
++ print_tainted(), regs->eflags, init_utsname()->release,
++ (int)strcspn(init_utsname()->version, " "),
++ init_utsname()->version);
+ print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
+ printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
+ regs->eax, regs->ebx, regs->ecx, regs->edx);
+@@ -319,6 +386,8 @@ void show_registers(struct pt_regs *regs
+ */
+ if (in_kernel) {
+ u8 __user *eip;
++ int code_bytes = 64;
++ unsigned char c;
+
+ printk("\n" KERN_EMERG "Stack: ");
+ show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
+@@ -326,9 +395,12 @@ void show_registers(struct pt_regs *regs
+ printk(KERN_EMERG "Code: ");
+
+ eip = (u8 __user *)regs->eip - 43;
+- for (i = 0; i < 64; i++, eip++) {
+- unsigned char c;
+-
++ if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
++ /* try starting at EIP */
++ eip = (u8 __user *)regs->eip;
++ code_bytes = 32;
++ }
++ for (i = 0; i < code_bytes; i++, eip++) {
+ if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
+ printk(" Bad EIP value.");
+ break;
+@@ -349,7 +421,7 @@ static void handle_BUG(struct pt_regs *r
+
+ if (eip < PAGE_OFFSET)
+ return;
+- if (__get_user(ud2, (unsigned short __user *)eip))
++ if (probe_kernel_address((unsigned short __user *)eip, ud2))
+ return;
+ if (ud2 != 0x0b0f)
+ return;
+@@ -362,7 +434,8 @@ static void handle_BUG(struct pt_regs *r
+ char *file;
+ char c;
+
+- if (__get_user(line, (unsigned short __user *)(eip + 2)))
++ if (probe_kernel_address((unsigned short __user *)(eip + 2),
++ line))
+ break;
+ if (__get_user(file, (char * __user *)(eip + 4)) ||
+ (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
+@@ -604,18 +677,24 @@ gp_in_kernel:
+ }
+ }
+
+-static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
++static __kprobes void
++mem_parity_error(unsigned char reason, struct pt_regs * regs)
+ {
+- printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
+- "to continue\n");
++ printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
++ "CPU %d.\n", reason, smp_processor_id());
+ printk(KERN_EMERG "You probably have a hardware problem with your RAM "
+ "chips\n");
++ if (panic_on_unrecovered_nmi)
++ panic("NMI: Not continuing");
++
++ printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
+
+ /* Clear and disable the memory parity error line. */
+ clear_mem_error(reason);
+ }
+
+-static void io_check_error(unsigned char reason, struct pt_regs * regs)
++static __kprobes void
++io_check_error(unsigned char reason, struct pt_regs * regs)
+ {
+ printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
+ show_registers(regs);
+@@ -624,7 +703,8 @@ static void io_check_error(unsigned char
+ clear_io_check_error(reason);
+ }
+
+-static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
++static __kprobes void
++unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+ {
+ #ifdef CONFIG_MCA
+ /* Might actually be able to figure out what the guilty party
+@@ -634,15 +714,18 @@ static void unknown_nmi_error(unsigned c
+ return;
+ }
+ #endif
+- printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
+- reason, smp_processor_id());
+- printk("Dazed and confused, but trying to continue\n");
+- printk("Do you have a strange power saving mode enabled?\n");
++ printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
++ "CPU %d.\n", reason, smp_processor_id());
++ printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
++ if (panic_on_unrecovered_nmi)
++ panic("NMI: Not continuing");
++
++ printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
+ }
+
+ static DEFINE_SPINLOCK(nmi_print_lock);
+
+-void die_nmi (struct pt_regs *regs, const char *msg)
++void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
+ {
+ if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
+ NOTIFY_STOP)
+@@ -674,7 +757,7 @@ void die_nmi (struct pt_regs *regs, cons
+ do_exit(SIGSEGV);
+ }
+
+-static void default_do_nmi(struct pt_regs * regs)
++static __kprobes void default_do_nmi(struct pt_regs * regs)
+ {
+ unsigned char reason = 0;
+
+@@ -691,12 +774,12 @@ static void default_do_nmi(struct pt_reg
+ * Ok, so this is none of the documented NMI sources,
+ * so it must be the NMI watchdog.
+ */
+- if (nmi_watchdog) {
+- nmi_watchdog_tick(regs);
++ if (nmi_watchdog_tick(regs, reason))
+ return;
+- }
++ if (!do_nmi_callback(regs, smp_processor_id()))
+ #endif
+- unknown_nmi_error(reason, regs);
++ unknown_nmi_error(reason, regs);
++
+ return;
+ }
+ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
+@@ -712,14 +795,7 @@ static void default_do_nmi(struct pt_reg
+ reassert_nmi();
+ }
+
+-static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
+-{
+- return 0;
+-}
+-
+-static nmi_callback_t nmi_callback = dummy_nmi_callback;
+-
+-fastcall void do_nmi(struct pt_regs * regs, long error_code)
++fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code)
+ {
+ int cpu;
+
+@@ -729,25 +805,11 @@ fastcall void do_nmi(struct pt_regs * re
+
+ ++nmi_count(cpu);
+
+- if (!rcu_dereference(nmi_callback)(regs, cpu))
+- default_do_nmi(regs);
++ default_do_nmi(regs);
+
+ nmi_exit();
+ }
+
+-void set_nmi_callback(nmi_callback_t callback)
+-{
+- vmalloc_sync_all();
+- rcu_assign_pointer(nmi_callback, callback);
+-}
+-EXPORT_SYMBOL_GPL(set_nmi_callback);
+-
+-void unset_nmi_callback(void)
+-{
+- nmi_callback = dummy_nmi_callback;
+-}
+-EXPORT_SYMBOL_GPL(unset_nmi_callback);
+-
+ #ifdef CONFIG_KPROBES
+ fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
+ {
+Index: 10.3-2007-11-26/arch/i386/mach-xen/setup.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mach-xen/setup.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/mach-xen/setup.c 2007-10-22 13:53:08.000000000 +0200
+@@ -133,8 +133,10 @@ void __init machine_specific_arch_setup(
+ }
+ #endif
+
+- if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
+- set_fixaddr_top(pp.virt_start);
++ if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) {
++ hypervisor_virt_start = pp.virt_start;
++ reserve_top_address(0UL - pp.virt_start);
++ }
+
+ machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
+ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
+Index: 10.3-2007-11-26/arch/i386/mm/fault-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/fault-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/mm/fault-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -27,21 +27,24 @@
+ #include <asm/uaccess.h>
+ #include <asm/desc.h>
+ #include <asm/kdebug.h>
++#include <asm/segment.h>
+
+ extern void die(const char *,struct pt_regs *,long);
+
+-#ifdef CONFIG_KPROBES
+-ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
++static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
++
+ int register_page_fault_notifier(struct notifier_block *nb)
+ {
+ vmalloc_sync_all();
+ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+ }
++EXPORT_SYMBOL_GPL(register_page_fault_notifier);
+
+ int unregister_page_fault_notifier(struct notifier_block *nb)
+ {
+ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+ }
++EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
+
+ static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+@@ -55,14 +58,6 @@ static inline int notify_page_fault(enum
+ };
+ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+ }
+-#else
+-static inline int notify_page_fault(enum die_val val, const char *str,
+- struct pt_regs *regs, long err, int trap, int sig)
+-{
+- return NOTIFY_DONE;
+-}
+-#endif
+-
+
+ /*
+ * Unlock any spinlocks which will prevent us from getting the
+@@ -119,10 +114,10 @@ static inline unsigned long get_segment_
+ }
+
+ /* The standard kernel/user address space limit. */
+- *eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
++ *eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;
+
+ /* By far the most common cases. */
+- if (likely(seg == __USER_CS || seg == GET_KERNEL_CS()))
++ if (likely(SEGMENT_IS_FLAT_CODE(seg)))
+ return eip;
+
+ /* Check the segment exists, is within the current LDT/GDT size,
+@@ -559,11 +554,7 @@ good_area:
+ write = 0;
+ switch (error_code & 3) {
+ default: /* 3: write, present */
+-#ifdef TEST_VERIFY_AREA
+- if (regs->cs == GET_KERNEL_CS())
+- printk("WP fault at %08lx\n", regs->eip);
+-#endif
+- /* fall through */
++ /* fall through */
+ case 2: /* write, not present */
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+@@ -572,7 +563,7 @@ good_area:
+ case 1: /* read, present */
+ goto bad_area;
+ case 0: /* read, not present */
+- if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
++ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+ goto bad_area;
+ }
+
+@@ -704,7 +695,7 @@ no_context:
+ */
+ out_of_memory:
+ up_read(&mm->mmap_sem);
+- if (tsk->pid == 1) {
++ if (is_init(tsk)) {
+ yield();
+ down_read(&mm->mmap_sem);
+ goto survive;
+Index: 10.3-2007-11-26/arch/i386/mm/highmem-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/highmem-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/mm/highmem-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -38,11 +38,9 @@ static void *__kmap_atomic(struct page *
+
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+-#ifdef CONFIG_DEBUG_HIGHMEM
+ if (!pte_none(*(kmap_pte-idx)))
+ BUG();
+-#endif
+- set_pte_at_sync(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
++ set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
+
+ return (void*) vaddr;
+ }
+@@ -62,36 +60,26 @@ void *kmap_atomic_pte(struct page *page,
+
+ void kunmap_atomic(void *kvaddr, enum km_type type)
+ {
+-#if defined(CONFIG_DEBUG_HIGHMEM) || defined(CONFIG_XEN)
+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+- if (vaddr < FIXADDR_START) { // FIXME
++#ifdef CONFIG_DEBUG_HIGHMEM
++ if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
+ dec_preempt_count();
+ preempt_check_resched();
+ return;
+ }
+-#endif
+
+-#if defined(CONFIG_DEBUG_HIGHMEM)
+ if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
+ BUG();
+-
+- /*
+- * force other mappings to Oops if they'll try to access
+- * this pte without first remap it
+- */
+- pte_clear(&init_mm, vaddr, kmap_pte-idx);
+- __flush_tlb_one(vaddr);
+-#elif defined(CONFIG_XEN)
++#endif
+ /*
+- * We must ensure there are no dangling pagetable references when
+- * returning memory to Xen (decrease_reservation).
+- * XXX TODO: We could make this faster by only zapping when
+- * kmap_flush_unused is called but that is trickier and more invasive.
++ * Force other mappings to Oops if they'll try to access this pte
++ * without first remap it. Keeping stale mappings around is a bad idea
++ * also, in case the page changes cacheability attributes or becomes
++ * a protected page in a hypervisor.
+ */
+- pte_clear(&init_mm, vaddr, kmap_pte-idx);
+-#endif
++ kpte_clear_flush(kmap_pte-idx, vaddr);
+
+ dec_preempt_count();
+ preempt_check_resched();
+@@ -110,7 +98,6 @@ void *kmap_atomic_pfn(unsigned long pfn,
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+- __flush_tlb_one(vaddr);
+
+ return (void*) vaddr;
+ }
+Index: 10.3-2007-11-26/arch/i386/mm/init-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/init-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/mm/init-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -467,16 +467,22 @@ EXPORT_SYMBOL(__supported_pte_mask);
+ * on Enable
+ * off Disable
+ */
+-void __init noexec_setup(const char *str)
++static int __init noexec_setup(char *str)
+ {
+- if (!strncmp(str, "on",2) && cpu_has_nx) {
+- __supported_pte_mask |= _PAGE_NX;
+- disable_nx = 0;
+- } else if (!strncmp(str,"off",3)) {
++ if (!str || !strcmp(str, "on")) {
++ if (cpu_has_nx) {
++ __supported_pte_mask |= _PAGE_NX;
++ disable_nx = 0;
++ }
++ } else if (!strcmp(str,"off")) {
+ disable_nx = 1;
+ __supported_pte_mask &= ~_PAGE_NX;
+- }
++ } else
++ return -EINVAL;
++
++ return 0;
+ }
++early_param("noexec", noexec_setup);
+
+ int nx_enabled = 0;
+ #ifdef CONFIG_X86_PAE
+@@ -519,6 +525,7 @@ int __init set_kernel_exec(unsigned long
+ pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
+ else
+ pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
++ pte_update_defer(&init_mm, vaddr, pte);
+ __flush_tlb_all();
+ out:
+ return ret;
+@@ -601,18 +608,6 @@ static void __init test_wp_bit(void)
+ }
+ }
+
+-static void __init set_max_mapnr_init(void)
+-{
+-#ifdef CONFIG_HIGHMEM
+- num_physpages = highend_pfn;
+-#else
+- num_physpages = max_low_pfn;
+-#endif
+-#ifdef CONFIG_FLATMEM
+- max_mapnr = num_physpages;
+-#endif
+-}
+-
+ static struct kcore_list kcore_mem, kcore_vmalloc;
+
+ void __init mem_init(void)
+@@ -633,8 +628,7 @@ void __init mem_init(void)
+ #endif
+
+ #ifdef CONFIG_FLATMEM
+- if (!mem_map)
+- BUG();
++ BUG_ON(!mem_map);
+ #endif
+
+ bad_ppro = ppro_with_ram_bug();
+@@ -649,13 +643,6 @@ void __init mem_init(void)
+ }
+ #endif
+
+- set_max_mapnr_init();
+-
+-#ifdef CONFIG_HIGHMEM
+- high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
+-#else
+- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
+-#endif
+ printk("vmalloc area: %lx-%lx, maxmem %lx\n",
+ VMALLOC_START,VMALLOC_END,MAXMEM);
+ BUG_ON(VMALLOC_START > VMALLOC_END);
+@@ -697,6 +684,48 @@ void __init mem_init(void)
+ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
+ );
+
++#if 1 /* double-sanity-check paranoia */
++ printk("virtual kernel memory layout:\n"
++ " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
++#ifdef CONFIG_HIGHMEM
++ " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
++#endif
++ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
++ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
++ " .init : 0x%08lx - 0x%08lx (%4ld kB)\n"
++ " .data : 0x%08lx - 0x%08lx (%4ld kB)\n"
++ " .text : 0x%08lx - 0x%08lx (%4ld kB)\n",
++ FIXADDR_START, FIXADDR_TOP,
++ (FIXADDR_TOP - FIXADDR_START) >> 10,
++
++#ifdef CONFIG_HIGHMEM
++ PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
++ (LAST_PKMAP*PAGE_SIZE) >> 10,
++#endif
++
++ VMALLOC_START, VMALLOC_END,
++ (VMALLOC_END - VMALLOC_START) >> 20,
++
++ (unsigned long)__va(0), (unsigned long)high_memory,
++ ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
++
++ (unsigned long)&__init_begin, (unsigned long)&__init_end,
++ ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,
++
++ (unsigned long)&_etext, (unsigned long)&_edata,
++ ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
++
++ (unsigned long)&_text, (unsigned long)&_etext,
++ ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
++
++#ifdef CONFIG_HIGHMEM
++ BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
++ BUG_ON(VMALLOC_END > PKMAP_BASE);
++#endif
++ BUG_ON(VMALLOC_START > VMALLOC_END);
++ BUG_ON((unsigned long)high_memory > VMALLOC_START);
++#endif /* double-sanity-check paranoia */
++
+ #ifdef CONFIG_X86_PAE
+ if (!cpu_has_pae)
+ panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
+@@ -727,7 +756,7 @@ void __init mem_init(void)
+ int arch_add_memory(int nid, u64 start, u64 size)
+ {
+ struct pglist_data *pgdata = &contig_page_data;
+- struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
++ struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+
+Index: 10.3-2007-11-26/arch/i386/mm/ioremap-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/ioremap-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/mm/ioremap-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -12,7 +12,7 @@
+ #include <linux/init.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
+-#include <asm/io.h>
++#include <linux/io.h>
+ #include <asm/fixmap.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
+@@ -118,7 +118,7 @@ int direct_remap_pfn_range(struct vm_are
+ if (domid == DOMID_SELF)
+ return -EINVAL;
+
+- vma->vm_flags |= VM_IO | VM_RESERVED;
++ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+
+ vma->vm_mm->context.has_foreign_mappings = 1;
+
+@@ -203,6 +203,7 @@ void __iomem * __ioremap(unsigned long p
+ void __iomem * addr;
+ struct vm_struct * area;
+ unsigned long offset, last_addr;
++ pgprot_t prot;
+ domid_t domid = DOMID_IO;
+
+ /* Don't allow wraparound or zero size */
+@@ -234,6 +235,8 @@ void __iomem * __ioremap(unsigned long p
+ domid = DOMID_SELF;
+ }
+
++ prot = __pgprot(_KERNPG_TABLE | flags);
++
+ /*
+ * Mappings have to be page-aligned
+ */
+@@ -249,10 +252,9 @@ void __iomem * __ioremap(unsigned long p
+ return NULL;
+ area->phys_addr = phys_addr;
+ addr = (void __iomem *) area->addr;
+- flags |= _KERNPG_TABLE;
+ if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
+ phys_addr>>PAGE_SHIFT,
+- size, __pgprot(flags), domid)) {
++ size, prot, domid)) {
+ vunmap((void __force *) addr);
+ return NULL;
+ }
+Index: 10.3-2007-11-26/arch/i386/mm/pgtable-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/pgtable-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/mm/pgtable-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -68,7 +68,9 @@ void show_mem(void)
+ printk(KERN_INFO "%lu pages writeback\n",
+ global_page_state(NR_WRITEBACK));
+ printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
+- printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));
++ printk(KERN_INFO "%lu pages slab\n",
++ global_page_state(NR_SLAB_RECLAIMABLE) +
++ global_page_state(NR_SLAB_UNRECLAIMABLE));
+ printk(KERN_INFO "%lu pages pagetables\n",
+ global_page_state(NR_PAGETABLE));
+ }
+@@ -189,18 +191,11 @@ void set_pmd_pfn(unsigned long vaddr, un
+ __flush_tlb_one(vaddr);
+ }
+
+-static int nr_fixmaps = 0;
++static int fixmaps = 0;
+ unsigned long hypervisor_virt_start = HYPERVISOR_VIRT_START;
+-unsigned long __FIXADDR_TOP = (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE);
++unsigned long __FIXADDR_TOP = (HYPERVISOR_VIRT_START - PAGE_SIZE);
+ EXPORT_SYMBOL(__FIXADDR_TOP);
+
+-void __init set_fixaddr_top(unsigned long top)
+-{
+- BUG_ON(nr_fixmaps > 0);
+- hypervisor_virt_start = top;
+- __FIXADDR_TOP = hypervisor_virt_start - 2 * PAGE_SIZE;
+-}
+-
+ void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
+ {
+ unsigned long address = __fix_to_virt(idx);
+@@ -221,7 +216,21 @@ void __set_fixmap (enum fixed_addresses
+ set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
+ break;
+ }
+- nr_fixmaps++;
++ fixmaps++;
++}
++
++/**
++ * reserve_top_address - reserves a hole in the top of kernel address space
++ * @reserve - size of hole to reserve
++ *
++ * Can be used to relocate the fixmap area and poke a hole in the top
++ * of kernel address space to make room for a hypervisor.
++ */
++void __init reserve_top_address(unsigned long reserve)
++{
++ BUG_ON(fixmaps > 0);
++ __FIXADDR_TOP = -reserve - PAGE_SIZE;
++ __VMALLOC_RESERVE += reserve;
+ }
+
+ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+Index: 10.3-2007-11-26/arch/i386/pci/irq-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/pci/irq-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/pci/irq-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -985,10 +985,6 @@ static void __init pcibios_fixup_irqs(vo
+ pci_name(bridge), 'A' + pin, irq);
+ }
+ if (irq >= 0) {
+- if (use_pci_vector() &&
+- !platform_legacy_irq(irq))
+- irq = IO_APIC_VECTOR(irq);
+-
+ printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
+ pci_name(dev), 'A' + pin, irq);
+ dev->irq = irq;
+@@ -1149,10 +1145,6 @@ static int pirq_enable_irq(struct pci_de
+ }
+ dev = temp_dev;
+ if (irq >= 0) {
+-#ifdef CONFIG_PCI_MSI
+- if (!platform_legacy_irq(irq))
+- irq = IO_APIC_VECTOR(irq);
+-#endif
+ printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
+ pci_name(dev), 'A' + pin, irq);
+ dev->irq = irq;
+@@ -1173,33 +1165,3 @@ static int pirq_enable_irq(struct pci_de
+ }
+ return 0;
+ }
+-
+-int pci_vector_resources(int last, int nr_released)
+-{
+- int count = nr_released;
+-
+- int next = last;
+- int offset = (last % 8);
+-
+- while (next < FIRST_SYSTEM_VECTOR) {
+- next += 8;
+-#ifdef CONFIG_X86_64
+- if (next == IA32_SYSCALL_VECTOR)
+- continue;
+-#else
+- if (next == SYSCALL_VECTOR)
+- continue;
+-#endif
+- count++;
+- if (next >= FIRST_SYSTEM_VECTOR) {
+- if (offset%8) {
+- next = FIRST_DEVICE_VECTOR + offset;
+- offset++;
+- continue;
+- }
+- count--;
+- }
+- }
+-
+- return count;
+-}
+Index: 10.3-2007-11-26/arch/x86_64/ia32/ia32entry-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/ia32/ia32entry-xen.S 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/ia32/ia32entry-xen.S 2007-10-22 13:53:08.000000000 +0200
+@@ -88,6 +88,7 @@
+ */
+ ENTRY(ia32_sysenter_target)
+ CFI_STARTPROC32 simple
++ CFI_SIGNAL_FRAME
+ CFI_DEF_CFA rsp,0
+ CFI_REGISTER rsp,rbp
+ __swapgs
+@@ -206,6 +207,7 @@ ENDPROC(ia32_sysenter_target)
+ */
+ ENTRY(ia32_cstar_target)
+ CFI_STARTPROC32 simple
++ CFI_SIGNAL_FRAME
+ CFI_DEF_CFA rsp,PDA_STACKOFFSET
+ CFI_REGISTER rip,rcx
+ /*CFI_REGISTER rflags,r11*/
+@@ -315,6 +317,7 @@ ia32_badarg:
+
+ ENTRY(ia32_syscall)
+ CFI_STARTPROC simple
++ CFI_SIGNAL_FRAME
+ CFI_DEF_CFA rsp,SS+8-RIP
+ /*CFI_REL_OFFSET ss,SS-RIP*/
+ CFI_REL_OFFSET rsp,RSP-RIP
+@@ -397,6 +400,7 @@ ENTRY(ia32_ptregs_common)
+ popq %r11
+ CFI_ENDPROC
+ CFI_STARTPROC32 simple
++ CFI_SIGNAL_FRAME
+ CFI_DEF_CFA rsp,SS+8-ARGOFFSET
+ CFI_REL_OFFSET rax,RAX-ARGOFFSET
+ CFI_REL_OFFSET rcx,RCX-ARGOFFSET
+@@ -730,8 +734,8 @@ ia32_sys_call_table:
+ .quad sys_readlinkat /* 305 */
+ .quad sys_fchmodat
+ .quad sys_faccessat
+- .quad quiet_ni_syscall /* pselect6 for now */
+- .quad quiet_ni_syscall /* ppoll for now */
++ .quad compat_sys_pselect6
++ .quad compat_sys_ppoll
+ .quad sys_unshare /* 310 */
+ .quad compat_sys_set_robust_list
+ .quad compat_sys_get_robust_list
+@@ -740,4 +744,5 @@ ia32_sys_call_table:
+ .quad sys_tee
+ .quad compat_sys_vmsplice
+ .quad compat_sys_move_pages
++ .quad sys_getcpu
+ ia32_syscall_end:
+Index: 10.3-2007-11-26/arch/x86_64/kernel/Makefile
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/Makefile 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/Makefile 2007-10-22 13:53:08.000000000 +0200
+@@ -4,7 +4,7 @@
+
+ extra-y := head.o head64.o init_task.o vmlinux.lds
+ EXTRA_AFLAGS := -traditional
+-obj-y := process.o signal.o entry.o traps.o irq.o \
++obj-y := process.o signal.o entry.o traps.o irq.o nmi.o \
+ ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \
+ x8664_ksyms.o i387.o syscall.o vsyscall.o \
+ setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \
+@@ -21,9 +21,9 @@ obj-$(CONFIG_X86_MSR) += msr.o
+ obj-$(CONFIG_MICROCODE) += microcode.o
+ obj-$(CONFIG_X86_CPUID) += cpuid.o
+ obj-$(CONFIG_SMP) += smp.o smpboot.o trampoline.o tsc_sync.o
+-obj-y += apic.o nmi.o
++obj-$(CONFIG_X86_LOCAL_APIC) += apic.o
+ obj-$(CONFIG_X86_XEN_GENAPIC) += genapic.o genapic_xen.o
+-obj-y += io_apic.o mpparse.o genapic.o genapic_flat.o
++obj-$(CONFIG_X86_IO_APIC) += io_apic.o mpparse.o genapic.o genapic_flat.o
+ obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
+ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+ obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend.o
+@@ -68,7 +68,7 @@ pci-dma-y += ../../i386/kernel/pci-dma
+ microcode-$(subst m,y,$(CONFIG_MICROCODE)) := ../../i386/kernel/microcode-xen.o
+ quirks-y := ../../i386/kernel/quirks-xen.o
+
+-n-obj-xen := i8259.o reboot.o i8237.o smpboot.o trampoline.o
++n-obj-xen := early-quirks.o i8259.o reboot.o i8237.o smpboot.o trampoline.o
+
+ include $(srctree)/scripts/Makefile.xen
+
+Index: 10.3-2007-11-26/arch/x86_64/kernel/acpi/Makefile
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/acpi/Makefile 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/acpi/Makefile 2007-10-22 13:53:08.000000000 +0200
+@@ -8,3 +8,4 @@ processor-y := ../../../i386/kernel/acp
+ endif
+
+ boot-$(CONFIG_XEN) := ../../../i386/kernel/acpi/boot-xen.o
++processor-$(CONFIG_XEN) := ../../../i386/kernel/acpi/processor.o
+Index: 10.3-2007-11-26/arch/x86_64/kernel/apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/apic-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/apic-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -43,7 +43,7 @@ int apic_verbosity;
+ */
+ void ack_bad_irq(unsigned int irq)
+ {
+- printk("unexpected IRQ trap at vector %02x\n", irq);
++ printk("unexpected IRQ trap at irq %02x\n", irq);
+ /*
+ * Currently unexpected vectors happen only on SMP and APIC.
+ * We _must_ ack these because every local APIC has only N
+@@ -62,19 +62,19 @@ int setup_profiling_timer(unsigned int m
+ return -EINVAL;
+ }
+
+-void smp_local_timer_interrupt(struct pt_regs *regs)
++void smp_local_timer_interrupt(void)
+ {
+- profile_tick(CPU_PROFILING, regs);
++ profile_tick(CPU_PROFILING);
+ #ifndef CONFIG_XEN
+ #ifdef CONFIG_SMP
+- update_process_times(user_mode(regs));
++ update_process_times(user_mode(get_irq_regs()));
+ #endif
+ #endif
+ /*
+ * We take the 'long' return path, and there every subsystem
+ * grabs the appropriate locks (kernel lock/ irq lock).
+ *
+- * we might want to decouple profiling from the 'long path',
++ * We might want to decouple profiling from the 'long path',
+ * and do the profiling totally in assembly.
+ *
+ * Currently this isn't too much of an issue (performance wise),
+@@ -92,6 +92,8 @@ void smp_local_timer_interrupt(struct pt
+ */
+ void smp_apic_timer_interrupt(struct pt_regs *regs)
+ {
++ struct pt_regs *old_regs = set_irq_regs(regs);
++
+ /*
+ * the NMI deadlock-detector uses this.
+ */
+@@ -109,8 +111,9 @@ void smp_apic_timer_interrupt(struct pt_
+ */
+ exit_idle();
+ irq_enter();
+- smp_local_timer_interrupt(regs);
++ smp_local_timer_interrupt();
+ irq_exit();
++ set_irq_regs(old_regs);
+ }
+
+ /*
+@@ -188,9 +191,8 @@ int disable_apic;
+ int __init APIC_init_uniprocessor (void)
+ {
+ #ifdef CONFIG_X86_IO_APIC
+- if (smp_found_config)
+- if (!skip_ioapic_setup && nr_ioapics)
+- setup_IO_APIC();
++ if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
++ setup_IO_APIC();
+ #endif
+
+ return 1;
+Index: 10.3-2007-11-26/arch/x86_64/kernel/e820-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/e820-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/e820-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -16,6 +16,7 @@
+ #include <linux/string.h>
+ #include <linux/kexec.h>
+ #include <linux/module.h>
++#include <linux/mm.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/page.h>
+@@ -25,6 +26,11 @@
+ #include <asm/sections.h>
+ #include <xen/interface/memory.h>
+
++struct e820map e820 __initdata;
++#ifdef CONFIG_XEN
++struct e820map machine_e820 __initdata;
++#endif
++
+ /*
+ * PFN of last memory page.
+ */
+@@ -41,7 +47,7 @@ unsigned long end_pfn_map;
+ /*
+ * Last pfn which the user wants to use.
+ */
+-unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;
++static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT;
+
+ extern struct resource code_resource, data_resource;
+
+@@ -53,13 +59,13 @@ static inline int bad_addr(unsigned long
+ #ifndef CONFIG_XEN
+ /* various gunk below that needed for SMP startup */
+ if (addr < 0x8000) {
+- *addrp = 0x8000;
++ *addrp = PAGE_ALIGN(0x8000);
+ return 1;
+ }
+
+ /* direct mapping tables of the kernel */
+ if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
+- *addrp = table_end << PAGE_SHIFT;
++ *addrp = PAGE_ALIGN(table_end << PAGE_SHIFT);
+ return 1;
+ }
+
+@@ -67,23 +73,18 @@ static inline int bad_addr(unsigned long
+ #ifdef CONFIG_BLK_DEV_INITRD
+ if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
+ addr < INITRD_START+INITRD_SIZE) {
+- *addrp = INITRD_START + INITRD_SIZE;
++ *addrp = PAGE_ALIGN(INITRD_START + INITRD_SIZE);
+ return 1;
+ }
+ #endif
+- /* kernel code + 640k memory hole (later should not be needed, but
+- be paranoid for now) */
+- if (last >= 640*1024 && addr < 1024*1024) {
+- *addrp = 1024*1024;
+- return 1;
+- }
+- if (last >= __pa_symbol(&_text) && last < __pa_symbol(&_end)) {
+- *addrp = __pa_symbol(&_end);
++ /* kernel code */
++ if (last >= __pa_symbol(&_text) && addr < __pa_symbol(&_end)) {
++ *addrp = PAGE_ALIGN(__pa_symbol(&_end));
+ return 1;
+ }
+
+ if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
+- *addrp = ebda_addr + ebda_size;
++ *addrp = PAGE_ALIGN(ebda_addr + ebda_size);
+ return 1;
+ }
+
+@@ -132,8 +133,6 @@ int __init e820_all_mapped(unsigned long
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ #else
+- extern struct e820map machine_e820;
+-
+ if (!is_initial_xendomain())
+ return 0;
+ for (i = 0; i < machine_e820.nr_map; i++) {
+@@ -175,7 +174,7 @@ unsigned long __init find_e820_area(unsi
+ continue;
+ while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
+ ;
+- last = addr + size;
++ last = PAGE_ALIGN(addr) + size;
+ if (last > ei->addr + ei->size)
+ continue;
+ if (last > end)
+@@ -185,59 +184,14 @@ unsigned long __init find_e820_area(unsi
+ return -1UL;
+ }
+
+-/*
+- * Free bootmem based on the e820 table for a node.
+- */
+-void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
+-{
+- int i;
+- for (i = 0; i < e820.nr_map; i++) {
+- struct e820entry *ei = &e820.map[i];
+- unsigned long last, addr;
+-
+- if (ei->type != E820_RAM ||
+- ei->addr+ei->size <= start ||
+- ei->addr >= end)
+- continue;
+-
+- addr = round_up(ei->addr, PAGE_SIZE);
+- if (addr < start)
+- addr = start;
+-
+- last = round_down(ei->addr + ei->size, PAGE_SIZE);
+- if (last >= end)
+- last = end;
+-
+- if (last > addr && last-addr >= PAGE_SIZE)
+- free_bootmem_node(pgdat, addr, last-addr);
+- }
+-}
+-
+ /*
+ * Find the highest page frame number we have available
+ */
+ unsigned long __init e820_end_of_ram(void)
+ {
+- int i;
+ unsigned long end_pfn = 0;
++ end_pfn = find_max_pfn_with_active_regions();
+
+- for (i = 0; i < e820.nr_map; i++) {
+- struct e820entry *ei = &e820.map[i];
+- unsigned long start, end;
+-
+- start = round_up(ei->addr, PAGE_SIZE);
+- end = round_down(ei->addr + ei->size, PAGE_SIZE);
+- if (start >= end)
+- continue;
+- if (ei->type == E820_RAM) {
+- if (end > end_pfn<<PAGE_SHIFT)
+- end_pfn = end>>PAGE_SHIFT;
+- } else {
+- if (end > end_pfn_map<<PAGE_SHIFT)
+- end_pfn_map = end>>PAGE_SHIFT;
+- }
+- }
+-
+ if (end_pfn > end_pfn_map)
+ end_pfn_map = end_pfn;
+ if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
+@@ -247,43 +201,10 @@ unsigned long __init e820_end_of_ram(voi
+ if (end_pfn > end_pfn_map)
+ end_pfn = end_pfn_map;
+
++ printk("end_pfn_map = %lu\n", end_pfn_map);
+ return end_pfn;
+ }
+
+-/*
+- * Compute how much memory is missing in a range.
+- * Unlike the other functions in this file the arguments are in page numbers.
+- */
+-unsigned long __init
+-e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
+-{
+- unsigned long ram = 0;
+- unsigned long start = start_pfn << PAGE_SHIFT;
+- unsigned long end = end_pfn << PAGE_SHIFT;
+- int i;
+- for (i = 0; i < e820.nr_map; i++) {
+- struct e820entry *ei = &e820.map[i];
+- unsigned long last, addr;
+-
+- if (ei->type != E820_RAM ||
+- ei->addr+ei->size <= start ||
+- ei->addr >= end)
+- continue;
+-
+- addr = round_up(ei->addr, PAGE_SIZE);
+- if (addr < start)
+- addr = start;
+-
+- last = round_down(ei->addr + ei->size, PAGE_SIZE);
+- if (last >= end)
+- last = end;
+-
+- if (last > addr)
+- ram += last - addr;
+- }
+- return ((end - start) - ram) >> PAGE_SHIFT;
+-}
+-
+ /*
+ * Mark e820 reserved areas as busy for the resource manager.
+ */
+@@ -324,6 +245,98 @@ void __init e820_reserve_resources(struc
+ }
+ }
+
++#ifndef CONFIG_XEN
++/* Mark pages corresponding to given address range as nosave */
++static void __init
++e820_mark_nosave_range(unsigned long start, unsigned long end)
++{
++ unsigned long pfn, max_pfn;
++
++ if (start >= end)
++ return;
++
++ printk("Nosave address range: %016lx - %016lx\n", start, end);
++ max_pfn = end >> PAGE_SHIFT;
++ for (pfn = start >> PAGE_SHIFT; pfn < max_pfn; pfn++)
++ if (pfn_valid(pfn))
++ SetPageNosave(pfn_to_page(pfn));
++}
++
++/*
++ * Find the ranges of physical addresses that do not correspond to
++ * e820 RAM areas and mark the corresponding pages as nosave for software
++ * suspend and suspend to RAM.
++ *
++ * This function requires the e820 map to be sorted and without any
++ * overlapping entries and assumes the first e820 area to be RAM.
++ */
++void __init e820_mark_nosave_regions(void)
++{
++ int i;
++ unsigned long paddr;
++
++ paddr = round_down(e820.map[0].addr + e820.map[0].size, PAGE_SIZE);
++ for (i = 1; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++
++ if (paddr < ei->addr)
++ e820_mark_nosave_range(paddr,
++ round_up(ei->addr, PAGE_SIZE));
++
++ paddr = round_down(ei->addr + ei->size, PAGE_SIZE);
++ if (ei->type != E820_RAM)
++ e820_mark_nosave_range(round_up(ei->addr, PAGE_SIZE),
++ paddr);
++
++ if (paddr >= (end_pfn << PAGE_SHIFT))
++ break;
++ }
++}
++#endif
++
++/* Walk the e820 map and register active regions within a node */
++void __init
++e820_register_active_regions(int nid, unsigned long start_pfn,
++ unsigned long end_pfn)
++{
++ int i;
++ unsigned long ei_startpfn, ei_endpfn;
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++ ei_startpfn = round_up(ei->addr, PAGE_SIZE) >> PAGE_SHIFT;
++ ei_endpfn = round_down(ei->addr + ei->size, PAGE_SIZE)
++ >> PAGE_SHIFT;
++
++ /* Skip map entries smaller than a page */
++ if (ei_startpfn >= ei_endpfn)
++ continue;
++
++ /* Check if end_pfn_map should be updated */
++ if (ei->type != E820_RAM && ei_endpfn > end_pfn_map)
++ end_pfn_map = ei_endpfn;
++
++ /* Skip if map is outside the node */
++ if (ei->type != E820_RAM ||
++ ei_endpfn <= start_pfn ||
++ ei_startpfn >= end_pfn)
++ continue;
++
++ /* Check for overlaps */
++ if (ei_startpfn < start_pfn)
++ ei_startpfn = start_pfn;
++ if (ei_endpfn > end_pfn)
++ ei_endpfn = end_pfn;
++
++ /* Obey end_user_pfn to save on memmap */
++ if (ei_startpfn >= end_user_pfn)
++ continue;
++ if (ei_endpfn > end_user_pfn)
++ ei_endpfn = end_user_pfn;
++
++ add_active_range(nid, ei_startpfn, ei_endpfn);
++ }
++}
++
+ /*
+ * Add a memory region to the kernel e820 map.
+ */
+@@ -544,13 +557,6 @@ static int __init sanitize_e820_map(stru
+ * If we're lucky and live on a modern system, the setup code
+ * will have given us a memory map that we can use to properly
+ * set up memory. If we aren't, we'll fake a memory map.
+- *
+- * We check to see that the memory map contains at least 2 elements
+- * before we'll use it, because the detection code in setup.S may
+- * not be perfect and most every PC known to man has two memory
+- * regions: one from 0 to 640k, and one from 1mb up. (The IBM
+- * thinkpad 560x, for example, does not cooperate with the memory
+- * detection code.)
+ */
+ static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+ {
+@@ -572,37 +578,20 @@ static int __init copy_e820_map(struct e
+ if (start > end)
+ return -1;
+
+-#ifndef CONFIG_XEN
+- /*
+- * Some BIOSes claim RAM in the 640k - 1M region.
+- * Not right. Fix it up.
+- *
+- * This should be removed on Hammer which is supposed to not
+- * have non e820 covered ISA mappings there, but I had some strange
+- * problems so it stays for now. -AK
+- */
+- if (type == E820_RAM) {
+- if (start < 0x100000ULL && end > 0xA0000ULL) {
+- if (start < 0xA0000ULL)
+- add_memory_region(start, 0xA0000ULL-start, type);
+- if (end <= 0x100000ULL)
+- continue;
+- start = 0x100000ULL;
+- size = end - start;
+- }
+- }
+-#endif
+-
+ add_memory_region(start, size, type);
+ } while (biosmap++,--nr_map);
+ return 0;
+ }
+
++void early_panic(char *msg)
++{
++ early_printk(msg);
++ panic(msg);
++}
++
+ #ifndef CONFIG_XEN
+ void __init setup_memory_region(void)
+ {
+- char *who = "BIOS-e820";
+-
+ /*
+ * Try to copy the BIOS-supplied E820-map.
+ *
+@@ -610,24 +599,10 @@ void __init setup_memory_region(void)
+ * the next section from 1mb->appropriate_mem_k
+ */
+ sanitize_e820_map(E820_MAP, &E820_MAP_NR);
+- if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
+- unsigned long mem_size;
+-
+- /* compare results from other methods and take the greater */
+- if (ALT_MEM_K < EXT_MEM_K) {
+- mem_size = EXT_MEM_K;
+- who = "BIOS-88";
+- } else {
+- mem_size = ALT_MEM_K;
+- who = "BIOS-e801";
+- }
+-
+- e820.nr_map = 0;
+- add_memory_region(0, LOWMEMSIZE(), E820_RAM);
+- add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
+- }
++ if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0)
++ early_panic("Cannot find a valid memory map");
+ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
+- e820_print_map(who);
++ e820_print_map("BIOS-e820");
+ }
+
+ #else /* CONFIG_XEN */
+@@ -659,20 +634,23 @@ void __init setup_memory_region(void)
+
+ sanitize_e820_map(map, (char *)&memmap.nr_entries);
+
+- BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
++ if (copy_e820_map(map, (char)memmap.nr_entries) < 0)
++ early_panic("Cannot find a valid memory map");
+
+ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
+ e820_print_map("Xen");
+ }
+ #endif
+
+-void __init parse_memopt(char *p, char **from)
+-{
++static int __init parse_memopt(char *p)
++{
+ int i;
+ unsigned long current_end;
+ unsigned long end;
+
+- end_user_pfn = memparse(p, from);
++ if (!p)
++ return -EINVAL;
++ end_user_pfn = memparse(p, &p);
+ end_user_pfn >>= PAGE_SHIFT;
+
+ end = end_user_pfn<<PAGE_SHIFT;
+@@ -689,27 +667,61 @@ void __init parse_memopt(char *p, char *
+ else
+ add_memory_region(current_end, end - current_end, E820_RAM);
+ }
++
++ return 0;
+ }
++early_param("mem", parse_memopt);
++
++static int userdef __initdata;
+
+-void __init parse_memmapopt(char *p, char **from)
++static int __init parse_memmap_opt(char *p)
+ {
++ char *oldp;
+ unsigned long long start_at, mem_size;
+
+- mem_size = memparse(p, from);
+- p = *from;
++ if (!strcmp(p, "exactmap")) {
++#ifdef CONFIG_CRASH_DUMP
++ /* If we are doing a crash dump, we
++ * still need to know the real mem
++ * size before original memory map is
++ * reset.
++ */
++ e820_register_active_regions(0, 0, -1UL);
++ saved_max_pfn = e820_end_of_ram();
++ remove_all_active_ranges();
++#endif
++ end_pfn_map = 0;
++ e820.nr_map = 0;
++ userdef = 1;
++ return 0;
++ }
++
++ oldp = p;
++ mem_size = memparse(p, &p);
++ if (p == oldp)
++ return -EINVAL;
+ if (*p == '@') {
+- start_at = memparse(p+1, from);
++ start_at = memparse(p+1, &p);
+ add_memory_region(start_at, mem_size, E820_RAM);
+ } else if (*p == '#') {
+- start_at = memparse(p+1, from);
++ start_at = memparse(p+1, &p);
+ add_memory_region(start_at, mem_size, E820_ACPI);
+ } else if (*p == '$') {
+- start_at = memparse(p+1, from);
++ start_at = memparse(p+1, &p);
+ add_memory_region(start_at, mem_size, E820_RESERVED);
+ } else {
+ end_user_pfn = (mem_size >> PAGE_SHIFT);
+ }
+- p = *from;
++ return *p == '\0' ? 0 : -EINVAL;
++}
++early_param("memmap", parse_memmap_opt);
++
++void finish_e820_parsing(void)
++{
++ if (userdef) {
++ printk(KERN_INFO "user-defined physical RAM map:\n");
++ e820_print_map("user");
++ }
+ }
+
+ unsigned long pci_mem_start = 0xaeedbabe;
+Index: 10.3-2007-11-26/arch/x86_64/kernel/early_printk-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/early_printk-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/early_printk-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -244,20 +244,16 @@ void early_printk(const char *fmt, ...)
+
+ static int __initdata keep_early;
+
+-int __init setup_early_printk(char *opt)
++static int __init setup_early_printk(char *buf)
+ {
+- char *space;
+- char buf[256];
++ if (!buf)
++ return 0;
+
+ if (early_console_initialized)
+- return 1;
+-
+- strlcpy(buf,opt,sizeof(buf));
+- space = strchr(buf, ' ');
+- if (space)
+- *space = 0;
++ return 0;
++ early_console_initialized = 1;
+
+- if (strstr(buf,"keep"))
++ if (strstr(buf, "keep"))
+ keep_early = 1;
+
+ if (!strncmp(buf, "serial", 6)) {
+@@ -281,11 +277,12 @@ int __init setup_early_printk(char *opt)
+ early_console = &simnow_console;
+ keep_early = 1;
+ }
+- early_console_initialized = 1;
+ register_console(early_console);
+ return 0;
+ }
+
++early_param("earlyprintk", setup_early_printk);
++
+ void __init disable_early_printk(void)
+ {
+ if (!early_console_initialized || !early_console)
+@@ -299,4 +296,3 @@ void __init disable_early_printk(void)
+ }
+ }
+
+-__setup("earlyprintk=", setup_early_printk);
+Index: 10.3-2007-11-26/arch/x86_64/kernel/entry-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/entry-xen.S 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/entry-xen.S 2007-10-22 13:53:08.000000000 +0200
+@@ -4,9 +4,6 @@
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
+- *
+- * $Id$
+- *
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Asit Mallick <asit.k.mallick@intel.com>
+ * Modified for Xen
+@@ -26,15 +23,25 @@
+ * at the top of the kernel process stack.
+ * - partial stack frame: partially saved registers upto R11.
+ * - full stack frame: Like partial stack frame, but all register saved.
+- *
+- * TODO:
+- * - schedule it carefully for the final hardware.
++ *
++ * Some macro usage:
++ * - CFI macros are used to generate dwarf2 unwind information for better
++ * backtraces. They don't change any code.
++ * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
++ * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
++ * There are unfortunately lots of special cases where some registers
++ * not touched. The macro is a big mess that should be cleaned up.
++ * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
++ * Gives a full stack frame.
++ * - ENTRY/END Define functions in the symbol table.
++ * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
++ * frame that is otherwise undefined after a SYSCALL
++ * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
++ * - errorentry/paranoidentry/zeroentry - Define exception entry points.
+ */
+
+-#define ASSEMBLY 1
+ #include <linux/linkage.h>
+ #include <asm/segment.h>
+-#include <asm/smp.h>
+ #include <asm/cache.h>
+ #include <asm/errno.h>
+ #include <asm/dwarf2.h>
+@@ -119,6 +126,7 @@ NMI_MASK = 0x80000000
+ .macro CFI_DEFAULT_STACK start=1,adj=0
+ .if \start
+ CFI_STARTPROC simple
++ CFI_SIGNAL_FRAME
+ CFI_DEF_CFA rsp,SS+8-(\adj*ARGOFFSET)
+ .else
+ CFI_DEF_CFA_OFFSET SS+8-(\adj*ARGOFFSET)
+@@ -180,6 +188,10 @@ NMI_MASK = 0x80000000
+ /* rdi: prev */
+ ENTRY(ret_from_fork)
+ CFI_DEFAULT_STACK
++ push kernel_eflags(%rip)
++ CFI_ADJUST_CFA_OFFSET 4
++ popf # reset kernel eflags
++ CFI_ADJUST_CFA_OFFSET -4
+ call schedule_tail
+ GET_THREAD_INFO(%rcx)
+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
+@@ -205,6 +217,7 @@ END(ret_from_fork)
+ */
+ .macro _frame ref
+ CFI_STARTPROC simple
++ CFI_SIGNAL_FRAME
+ CFI_DEF_CFA rsp,SS+8-\ref
+ /*CFI_REL_OFFSET ss,SS-\ref*/
+ CFI_REL_OFFSET rsp,RSP-\ref
+@@ -337,6 +350,8 @@ tracesys:
+ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+ cmpq $__NR_syscall_max,%rax
++ movq $-ENOSYS,%rcx
++ cmova %rcx,%rax
+ ja 1f
+ movq %r10,%rcx /* fixup for C */
+ call *sys_call_table(,%rax,8)
+@@ -352,6 +367,7 @@ END(system_call)
+ */
+ ENTRY(int_ret_from_sys_call)
+ CFI_STARTPROC simple
++ CFI_SIGNAL_FRAME
+ CFI_DEF_CFA rsp,SS+8-ARGOFFSET
+ /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
+ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
+@@ -586,8 +602,7 @@ retint_signal:
+ #ifdef CONFIG_PREEMPT
+ /* Returning to kernel space. Check if we need preemption */
+ /* rcx: threadinfo. interrupts off. */
+- .p2align
+-retint_kernel:
++ENTRY(retint_kernel)
+ cmpl $0,threadinfo_preempt_count(%rcx)
+ jnz retint_restore_args
+ bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
+@@ -647,7 +662,6 @@ ENTRY(call_function_interrupt)
+ END(call_function_interrupt)
+ #endif
+
+-#ifdef CONFIG_X86_LOCAL_APIC
+ ENTRY(apic_timer_interrupt)
+ apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
+ END(apic_timer_interrupt)
+@@ -659,7 +673,6 @@ END(error_interrupt)
+ ENTRY(spurious_interrupt)
+ apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
+ END(spurious_interrupt)
+-#endif
+ #endif /* !CONFIG_XEN */
+
+ /*
+@@ -758,7 +771,9 @@ paranoid_exit\trace:
+ testl $3,CS(%rsp)
+ jnz paranoid_userspace\trace
+ paranoid_swapgs\trace:
++ .if \trace
+ TRACE_IRQS_IRETQ 0
++ .endif
+ swapgs
+ paranoid_restore\trace:
+ RESTORE_ALL 8
+@@ -805,7 +820,7 @@ paranoid_schedule\trace:
+ * Exception entry point. This expects an error code/orig_rax on the stack
+ * and the exception handler in %rax.
+ */
+-ENTRY(error_entry)
++KPROBE_ENTRY(error_entry)
+ _frame RDI
+ CFI_REL_OFFSET rax,0
+ /* rdi slot contains rax, oldrax contains error code */
+@@ -899,7 +914,7 @@ error_kernelspace:
+ jmp error_sti
+ #endif
+ CFI_ENDPROC
+-END(error_entry)
++KPROBE_END(error_entry)
+
+ ENTRY(hypervisor_callback)
+ zeroentry do_hypervisor_callback
+@@ -939,26 +954,6 @@ ENTRY(do_hypervisor_callback) # do_hyp
+ CFI_ENDPROC
+ END(do_hypervisor_callback)
+
+-#ifdef CONFIG_X86_LOCAL_APIC
+-KPROBE_ENTRY(nmi)
+- zeroentry do_nmi_callback
+-ENTRY(do_nmi_callback)
+- CFI_STARTPROC
+- addq $8, %rsp
+- CFI_ENDPROC
+- CFI_DEFAULT_STACK
+- call do_nmi
+- orl $NMI_MASK,EFLAGS(%rsp)
+- RESTORE_REST
+- XEN_BLOCK_EVENTS(%rsi)
+- TRACE_IRQS_OFF
+- GET_THREAD_INFO(%rcx)
+- jmp retint_restore_args
+- CFI_ENDPROC
+- .previous .text
+-END(nmi)
+-#endif
+-
+ ALIGN
+ restore_all_enable_events:
+ CFI_DEFAULT_STACK adj=1
+@@ -1124,7 +1119,7 @@ ENDPROC(child_rip)
+ * do_sys_execve asm fallback arguments:
+ * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
+ */
+-ENTRY(execve)
++ENTRY(kernel_execve)
+ CFI_STARTPROC
+ FAKE_STACK_FRAME $0
+ SAVE_ALL
+@@ -1138,12 +1133,11 @@ ENTRY(execve)
+ UNFAKE_STACK_FRAME
+ ret
+ CFI_ENDPROC
+-ENDPROC(execve)
++ENDPROC(kernel_execve)
+
+ KPROBE_ENTRY(page_fault)
+ errorentry do_page_fault
+-END(page_fault)
+- .previous .text
++KPROBE_END(page_fault)
+
+ ENTRY(coprocessor_error)
+ zeroentry do_coprocessor_error
+@@ -1165,25 +1159,25 @@ KPROBE_ENTRY(debug)
+ zeroentry do_debug
+ /* paranoidexit
+ CFI_ENDPROC */
+-END(debug)
+- .previous .text
++KPROBE_END(debug)
+
+-#if 0
+- /* runs on exception stack */
+ KPROBE_ENTRY(nmi)
+- INTR_FRAME
+- pushq $-1
+- CFI_ADJUST_CFA_OFFSET 8
+- paranoidentry do_nmi, 0, 0
+-#ifdef CONFIG_TRACE_IRQFLAGS
+- paranoidexit 0
+-#else
+- jmp paranoid_exit1
+- CFI_ENDPROC
+-#endif
+-END(nmi)
+- .previous .text
+-#endif
++ zeroentry do_nmi_callback
++KPROBE_END(nmi)
++do_nmi_callback:
++ CFI_STARTPROC
++ addq $8, %rsp
++ CFI_ENDPROC
++ CFI_DEFAULT_STACK
++ call do_nmi
++ orl $NMI_MASK,EFLAGS(%rsp)
++ RESTORE_REST
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ GET_THREAD_INFO(%rcx)
++ jmp retint_restore_args
++ CFI_ENDPROC
++END(do_nmi_callback)
+
+ KPROBE_ENTRY(int3)
+ /* INTR_FRAME
+@@ -1192,8 +1186,7 @@ KPROBE_ENTRY(int3)
+ zeroentry do_int3
+ /* jmp paranoid_exit1
+ CFI_ENDPROC */
+-END(int3)
+- .previous .text
++KPROBE_END(int3)
+
+ ENTRY(overflow)
+ zeroentry do_overflow
+@@ -1244,8 +1237,7 @@ END(stack_segment)
+
+ KPROBE_ENTRY(general_protection)
+ errorentry do_general_protection
+-END(general_protection)
+- .previous .text
++KPROBE_END(general_protection)
+
+ ENTRY(alignment_check)
+ errorentry do_alignment_check
+Index: 10.3-2007-11-26/arch/x86_64/kernel/genapic_xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/genapic_xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/genapic_xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -71,6 +71,13 @@ static cpumask_t xen_target_cpus(void)
+ return cpu_online_map;
+ }
+
++static cpumask_t xen_vector_allocation_domain(int cpu)
++{
++ cpumask_t domain = CPU_MASK_NONE;
++ cpu_set(cpu, domain);
++ return domain;
++}
++
+ /*
+ * Set up the logical destination ID.
+ * Do nothing, not called now.
+@@ -147,8 +154,8 @@ struct genapic apic_xen = {
+ .int_delivery_mode = dest_LowestPrio,
+ #endif
+ .int_dest_mode = (APIC_DEST_LOGICAL != 0),
+- .int_delivery_dest = APIC_DEST_LOGICAL | APIC_DM_LOWEST,
+ .target_cpus = xen_target_cpus,
++ .vector_allocation_domain = xen_vector_allocation_domain,
+ #ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ .apic_id_registered = xen_apic_id_registered,
+ #endif
+Index: 10.3-2007-11-26/arch/x86_64/kernel/head-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/head-xen.S 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/head-xen.S 2007-10-22 13:53:08.000000000 +0200
+@@ -5,9 +5,6 @@
+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
+ * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
+- *
+- * $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
+- *
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Modified for Xen
+ */
+@@ -138,7 +135,7 @@ ENTRY(cpu_gdt_table)
+ .quad 0,0 /* TSS */
+ .quad 0,0 /* LDT */
+ .quad 0,0,0 /* three TLS descriptors */
+- .quad 0 /* unused */
++ .quad 0x0000f40000000000 /* node/CPU stored in limit */
+ gdt_end:
+ /* asm/segment.h:GDT_ENTRIES must match this */
+ /* This should be a multiple of the cache line size */
+Index: 10.3-2007-11-26/arch/x86_64/kernel/head64-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/head64-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/head64-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -54,11 +54,9 @@ static void __init copy_bootdata(char *r
+ new_data = *(int *) (x86_boot_params + NEW_CL_POINTER);
+ if (!new_data) {
+ if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) {
+- printk("so old bootloader that it does not support commandline?!\n");
+ return;
+ }
+ new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
+- printk("old bootloader convention, maybe loadlin?\n");
+ }
+ command_line = (char *) ((u64)(new_data));
+ memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
+@@ -70,25 +68,6 @@ static void __init copy_bootdata(char *r
+ memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
+ saved_command_line[max_cmdline-1] = '\0';
+ #endif
+- printk("Bootdata ok (command line is %s)\n", saved_command_line);
+-}
+-
+-static void __init setup_boot_cpu_data(void)
+-{
+- unsigned int dummy, eax;
+-
+- /* get vendor info */
+- cpuid(0, (unsigned int *)&boot_cpu_data.cpuid_level,
+- (unsigned int *)&boot_cpu_data.x86_vendor_id[0],
+- (unsigned int *)&boot_cpu_data.x86_vendor_id[8],
+- (unsigned int *)&boot_cpu_data.x86_vendor_id[4]);
+-
+- /* get cpu type */
+- cpuid(1, &eax, &dummy, &dummy,
+- (unsigned int *) &boot_cpu_data.x86_capability);
+- boot_cpu_data.x86 = (eax >> 8) & 0xf;
+- boot_cpu_data.x86_model = (eax >> 4) & 0xf;
+- boot_cpu_data.x86_mask = eax & 0xf;
+ }
+
+ #include <xen/interface/memory.h>
+@@ -101,7 +80,6 @@ void __init x86_64_start_kernel(char * r
+ {
+ struct xen_machphys_mapping mapping;
+ unsigned long machine_to_phys_nr_ents;
+- char *s;
+ int i;
+
+ setup_xen_features();
+@@ -128,10 +106,7 @@ void __init x86_64_start_kernel(char * r
+ asm volatile("lidt %0" :: "m" (idt_descr));
+ #endif
+
+- /*
+- * This must be called really, really early:
+- */
+- lockdep_init();
++ early_printk("Kernel alive\n");
+
+ for (i = 0; i < NR_CPUS; i++)
+ cpu_pda(i) = &boot_cpu_pda[i];
+@@ -141,22 +116,5 @@ void __init x86_64_start_kernel(char * r
+ #ifdef CONFIG_SMP
+ cpu_set(0, cpu_online_map);
+ #endif
+- s = strstr(saved_command_line, "earlyprintk=");
+- if (s != NULL)
+- setup_early_printk(strchr(s, '=') + 1);
+-#ifdef CONFIG_NUMA
+- s = strstr(saved_command_line, "numa=");
+- if (s != NULL)
+- numa_setup(s+5);
+-#endif
+-#ifdef CONFIG_X86_IO_APIC
+- if (strstr(saved_command_line, "disableapic"))
+- disable_apic = 1;
+-#endif
+- /* You need early console to see that */
+- if (__pa_symbol(&_end) >= KERNEL_TEXT_SIZE)
+- panic("Kernel too big for kernel mapping\n");
+-
+- setup_boot_cpu_data();
+ start_kernel();
+ }
+Index: 10.3-2007-11-26/arch/x86_64/kernel/io_apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/io_apic-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/io_apic-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -26,9 +26,12 @@
+ #include <linux/delay.h>
+ #include <linux/sched.h>
+ #include <linux/smp_lock.h>
++#include <linux/pci.h>
+ #include <linux/mc146818rtc.h>
+ #include <linux/acpi.h>
+ #include <linux/sysdev.h>
++#include <linux/msi.h>
++#include <linux/htirq.h>
+ #ifdef CONFIG_ACPI
+ #include <acpi/acpi_bus.h>
+ #endif
+@@ -41,6 +44,10 @@
+ #include <asm/acpi.h>
+ #include <asm/dma.h>
+ #include <asm/nmi.h>
++#include <asm/msidef.h>
++#include <asm/hypertransport.h>
++
++static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result);
+
+ #define __apicdebuginit __init
+
+@@ -48,17 +55,29 @@ int sis_apic_bug; /* not actually suppor
+
+ static int no_timer_check;
+
+-int disable_timer_pin_1 __initdata;
++static int disable_timer_pin_1 __initdata;
+
+-#ifndef CONFIG_XEN
+-int timer_over_8254 __initdata = 0;
++#ifdef CONFIG_XEN
++#include <xen/interface/xen.h>
++#include <xen/interface/physdev.h>
++
++/* Fake i8259 */
++#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
++#define disable_8259A_irq(_irq) ((void)0)
++#define i8259A_irq_pending(_irq) (0)
++
++unsigned long io_apic_irqs;
++
++#define clear_IO_APIC() ((void)0)
++#else
++int timer_over_8254 __initdata = 1;
+
+ /* Where if anywhere is the i8259 connect in external int mode */
+ static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
+ #endif
+
+ static DEFINE_SPINLOCK(ioapic_lock);
+-static DEFINE_SPINLOCK(vector_lock);
++DEFINE_SPINLOCK(vector_lock);
+
+ /*
+ * # of IRQ routing registers
+@@ -83,28 +102,27 @@ static struct irq_pin_list {
+ short apic, pin, next;
+ } irq_2_pin[PIN_MAP_SIZE];
+
+-int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
+-#ifdef CONFIG_PCI_MSI
+-#define vector_to_irq(vector) \
+- (platform_legacy_irq(vector) ? vector : vector_irq[vector])
+-#else
+-#define vector_to_irq(vector) (vector)
+-#endif
+-
+-#ifdef CONFIG_XEN
+-
+-#include <xen/interface/xen.h>
+-#include <xen/interface/physdev.h>
+-
+-/* Fake i8259 */
+-#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
+-#define disable_8259A_irq(_irq) ((void)0)
+-#define i8259A_irq_pending(_irq) (0)
++#ifndef CONFIG_XEN
++struct io_apic {
++ unsigned int index;
++ unsigned int unused[3];
++ unsigned int data;
++};
+
+-unsigned long io_apic_irqs;
++static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
++{
++ return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
++ + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK);
++}
++#endif
+
+-static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
++static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+ {
++#ifndef CONFIG_XEN
++ struct io_apic __iomem *io_apic = io_apic_base(apic);
++ writel(reg, &io_apic->index);
++ return readl(&io_apic->data);
++#else
+ struct physdev_apic apic_op;
+ int ret;
+
+@@ -114,31 +132,131 @@ static inline unsigned int xen_io_apic_r
+ if (ret)
+ return ret;
+ return apic_op.value;
++#endif
+ }
+
+-static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
++static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+ {
++#ifndef CONFIG_XEN
++ struct io_apic __iomem *io_apic = io_apic_base(apic);
++ writel(reg, &io_apic->index);
++ writel(value, &io_apic->data);
++#else
+ struct physdev_apic apic_op;
+
+ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
+ apic_op.reg = reg;
+ apic_op.value = value;
+ HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
++#endif
++}
++
++#ifndef CONFIG_XEN
++/*
++ * Re-write a value: to be used for read-modify-write
++ * cycles where the read already set up the index register.
++ */
++static inline void io_apic_modify(unsigned int apic, unsigned int value)
++{
++ struct io_apic __iomem *io_apic = io_apic_base(apic);
++ writel(value, &io_apic->data);
+ }
++#else
++#define io_apic_modify io_apic_write
++#endif
+
+-#define io_apic_read(a,r) xen_io_apic_read(a,r)
+-#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
++/*
++ * Synchronize the IO-APIC and the CPU by doing
++ * a dummy read from the IO-APIC
++ */
++static inline void io_apic_sync(unsigned int apic)
++{
++#ifndef CONFIG_XEN
++ struct io_apic __iomem *io_apic = io_apic_base(apic);
++ readl(&io_apic->data);
++#endif
++}
+
+-#define clear_IO_APIC() ((void)0)
++union entry_union {
++ struct { u32 w1, w2; };
++ struct IO_APIC_route_entry entry;
++};
+
+-#else
++static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
++{
++ union entry_union eu;
++ unsigned long flags;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
++ eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ return eu.entry;
++}
++
++/*
++ * When we write a new IO APIC routing entry, we need to write the high
++ * word first! If the mask bit in the low word is clear, we will enable
++ * the interrupt, and we need to make sure the entry is fully populated
++ * before that happens.
++ */
++static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
++{
++ unsigned long flags;
++ union entry_union eu;
++ eu.entry = e;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11 + 2*pin, eu.w2);
++ io_apic_write(apic, 0x10 + 2*pin, eu.w1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++#ifndef CONFIG_XEN
++/*
++ * When we mask an IO APIC routing entry, we need to write the low
++ * word first, in order to set the mask bit before we change the
++ * high bits!
++ */
++static void ioapic_mask_entry(int apic, int pin)
++{
++ unsigned long flags;
++ union entry_union eu = { .entry.mask = 1 };
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x10 + 2*pin, eu.w1);
++ io_apic_write(apic, 0x11 + 2*pin, eu.w2);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
+
+ #ifdef CONFIG_SMP
++static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
++{
++ int apic, pin;
++ struct irq_pin_list *entry = irq_2_pin + irq;
++
++ BUG_ON(irq >= NR_IRQS);
++ for (;;) {
++ unsigned int reg;
++ apic = entry->apic;
++ pin = entry->pin;
++ if (pin == -1)
++ break;
++ io_apic_write(apic, 0x11 + pin*2, dest);
++ reg = io_apic_read(apic, 0x10 + pin*2);
++ reg &= ~0x000000ff;
++ reg |= vector;
++ io_apic_modify(apic, reg);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++}
++
+ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+ {
+ unsigned long flags;
+ unsigned int dest;
+ cpumask_t tmp;
++ int vector;
+
+ cpus_and(tmp, mask, cpu_online_map);
+ if (cpus_empty(tmp))
+@@ -146,7 +264,11 @@ static void set_ioapic_affinity_irq(unsi
+
+ cpus_and(mask, tmp, CPU_MASK_ALL);
+
+- dest = cpu_mask_to_apicid(mask);
++ vector = assign_irq_vector(irq, mask, &tmp);
++ if (vector < 0)
++ return;
++
++ dest = cpu_mask_to_apicid(tmp);
+
+ /*
+ * Only the high 8 bits are valid.
+@@ -154,13 +276,12 @@ static void set_ioapic_affinity_irq(unsi
+ dest = SET_APIC_LOGICAL_ID(dest);
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+- __DO_ACTION(1, = dest, )
+- set_irq_info(irq, mask);
++ __target_IO_APIC_irq(irq, dest, vector);
++ set_native_irq_info(irq, mask);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+ #endif
+-
+-#endif /* !CONFIG_XEN */
++#endif
+
+ /*
+ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
+@@ -240,24 +361,15 @@ static void unmask_IO_APIC_irq (unsigned
+ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
+ {
+ struct IO_APIC_route_entry entry;
+- unsigned long flags;
+
+ /* Check delivery_mode to be sure we're not clearing an SMI pin */
+- spin_lock_irqsave(&ioapic_lock, flags);
+- *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
+- *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ entry = ioapic_read_entry(apic, pin);
+ if (entry.delivery_mode == dest_SMI)
+ return;
+ /*
+ * Disable it in the IO-APIC irq-routing table:
+ */
+- memset(&entry, 0, sizeof(entry));
+- entry.mask = 1;
+- spin_lock_irqsave(&ioapic_lock, flags);
+- io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
+- io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ ioapic_mask_entry(apic, pin);
+ }
+
+ static void clear_IO_APIC (void)
+@@ -271,16 +383,6 @@ static void clear_IO_APIC (void)
+
+ #endif /* !CONFIG_XEN */
+
+-static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
+-
+-/*
+- * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
+- * specific CPU-side IRQs.
+- */
+-
+-#define MAX_PIRQS 8
+-static int pirq_entries [MAX_PIRQS];
+-static int pirqs_enabled;
+ int skip_ioapic_setup;
+ int ioapic_force;
+
+@@ -289,18 +391,17 @@ int ioapic_force;
+ static int __init disable_ioapic_setup(char *str)
+ {
+ skip_ioapic_setup = 1;
+- return 1;
++ return 0;
+ }
++early_param("noapic", disable_ioapic_setup);
+
+-static int __init enable_ioapic_setup(char *str)
++/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
++static int __init disable_timer_pin_setup(char *arg)
+ {
+- ioapic_force = 1;
+- skip_ioapic_setup = 0;
++ disable_timer_pin_1 = 1;
+ return 1;
+ }
+-
+-__setup("noapic", disable_ioapic_setup);
+-__setup("apic", enable_ioapic_setup);
++__setup("disable_timer_pin_1", disable_timer_pin_setup);
+
+ #ifndef CONFIG_XEN
+ static int __init setup_disable_8254_timer(char *s)
+@@ -318,137 +419,6 @@ __setup("disable_8254_timer", setup_disa
+ __setup("enable_8254_timer", setup_enable_8254_timer);
+ #endif /* !CONFIG_XEN */
+
+-#include <asm/pci-direct.h>
+-#include <linux/pci_ids.h>
+-#include <linux/pci.h>
+-
+-
+-#ifdef CONFIG_ACPI
+-
+-static int nvidia_hpet_detected __initdata;
+-
+-static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+-{
+- nvidia_hpet_detected = 1;
+- return 0;
+-}
+-#endif
+-
+-/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
+- off. Check for an Nvidia or VIA PCI bridge and turn it off.
+- Use pci direct infrastructure because this runs before the PCI subsystem.
+-
+- Can be overwritten with "apic"
+-
+- And another hack to disable the IOMMU on VIA chipsets.
+-
+- ... and others. Really should move this somewhere else.
+-
+- Kludge-O-Rama. */
+-void __init check_ioapic(void)
+-{
+- int num,slot,func;
+- /* Poor man's PCI discovery */
+- for (num = 0; num < 32; num++) {
+- for (slot = 0; slot < 32; slot++) {
+- for (func = 0; func < 8; func++) {
+- u32 class;
+- u32 vendor;
+- u8 type;
+- class = read_pci_config(num,slot,func,
+- PCI_CLASS_REVISION);
+- if (class == 0xffffffff)
+- break;
+-
+- if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
+- continue;
+-
+- vendor = read_pci_config(num, slot, func,
+- PCI_VENDOR_ID);
+- vendor &= 0xffff;
+- switch (vendor) {
+- case PCI_VENDOR_ID_VIA:
+-#ifdef CONFIG_IOMMU
+- if ((end_pfn > MAX_DMA32_PFN ||
+- force_iommu) &&
+- !iommu_aperture_allowed) {
+- printk(KERN_INFO
+- "Looks like a VIA chipset. Disabling IOMMU. Override with \"iommu=allowed\"\n");
+- iommu_aperture_disabled = 1;
+- }
+-#endif
+- return;
+- case PCI_VENDOR_ID_NVIDIA:
+-#ifdef CONFIG_ACPI
+- /*
+- * All timer overrides on Nvidia are
+- * wrong unless HPET is enabled.
+- */
+- nvidia_hpet_detected = 0;
+- acpi_table_parse(ACPI_HPET,
+- nvidia_hpet_check);
+- if (nvidia_hpet_detected == 0) {
+- acpi_skip_timer_override = 1;
+- printk(KERN_INFO "Nvidia board "
+- "detected. Ignoring ACPI "
+- "timer override.\n");
+- }
+-#endif
+- /* RED-PEN skip them on mptables too? */
+- return;
+- case PCI_VENDOR_ID_ATI:
+-
+- /* This should be actually default, but
+- for 2.6.16 let's do it for ATI only where
+- it's really needed. */
+-#ifndef CONFIG_XEN
+- if (timer_over_8254 == 1) {
+- timer_over_8254 = 0;
+- printk(KERN_INFO
+- "ATI board detected. Disabling timer routing over 8254.\n");
+- }
+-#endif
+- return;
+- }
+-
+-
+- /* No multi-function device? */
+- type = read_pci_config_byte(num,slot,func,
+- PCI_HEADER_TYPE);
+- if (!(type & 0x80))
+- break;
+- }
+- }
+- }
+-}
+-
+-static int __init ioapic_pirq_setup(char *str)
+-{
+- int i, max;
+- int ints[MAX_PIRQS+1];
+-
+- get_options(str, ARRAY_SIZE(ints), ints);
+-
+- for (i = 0; i < MAX_PIRQS; i++)
+- pirq_entries[i] = -1;
+-
+- pirqs_enabled = 1;
+- apic_printk(APIC_VERBOSE, "PIRQ redirection, working around broken MP-BIOS.\n");
+- max = MAX_PIRQS;
+- if (ints[0] < MAX_PIRQS)
+- max = ints[0];
+-
+- for (i = 0; i < max; i++) {
+- apic_printk(APIC_VERBOSE, "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
+- /*
+- * PIRQs are mapped upside down, usually.
+- */
+- pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
+- }
+- return 1;
+-}
+-
+-__setup("pirq=", ioapic_pirq_setup);
+
+ /*
+ * Find the IRQ entry number of a certain pin.
+@@ -478,9 +448,7 @@ static int __init find_isa_irq_pin(int i
+ for (i = 0; i < mp_irq_entries; i++) {
+ int lbus = mp_irqs[i].mpc_srcbus;
+
+- if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
+- mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
+- mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
++ if (test_bit(lbus, mp_bus_not_pci) &&
+ (mp_irqs[i].mpc_irqtype == type) &&
+ (mp_irqs[i].mpc_srcbusirq == irq))
+
+@@ -496,9 +464,7 @@ static int __init find_isa_irq_apic(int
+ for (i = 0; i < mp_irq_entries; i++) {
+ int lbus = mp_irqs[i].mpc_srcbus;
+
+- if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
+- mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
+- mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
++ if (test_bit(lbus, mp_bus_not_pci) &&
+ (mp_irqs[i].mpc_irqtype == type) &&
+ (mp_irqs[i].mpc_srcbusirq == irq))
+ break;
+@@ -539,7 +505,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
+ break;
+
+- if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
++ if (!test_bit(lbus, mp_bus_not_pci) &&
+ !mp_irqs[i].mpc_irqtype &&
+ (bus == lbus) &&
+ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
+@@ -562,27 +528,6 @@ int IO_APIC_get_PCI_irq_vector(int bus,
+ return best_guess;
+ }
+
+-/*
+- * EISA Edge/Level control register, ELCR
+- */
+-static int EISA_ELCR(unsigned int irq)
+-{
+- if (irq < 16) {
+- unsigned int port = 0x4d0 + (irq >> 3);
+- return (inb(port) >> (irq & 7)) & 1;
+- }
+- apic_printk(APIC_VERBOSE, "Broken MPtable reports ISA irq %d\n", irq);
+- return 0;
+-}
+-
+-/* EISA interrupts are always polarity zero and can be edge or level
+- * trigger depending on the ELCR value. If an interrupt is listed as
+- * EISA conforming in the MP table, that means its trigger type must
+- * be read in from the ELCR */
+-
+-#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
+-#define default_EISA_polarity(idx) (0)
+-
+ /* ISA interrupts are always polarity zero edge triggered,
+ * when listed as conforming in the MP table. */
+
+@@ -595,12 +540,6 @@ static int EISA_ELCR(unsigned int irq)
+ #define default_PCI_trigger(idx) (1)
+ #define default_PCI_polarity(idx) (1)
+
+-/* MCA interrupts are always polarity zero level triggered,
+- * when listed as conforming in the MP table. */
+-
+-#define default_MCA_trigger(idx) (1)
+-#define default_MCA_polarity(idx) (0)
+-
+ static int __init MPBIOS_polarity(int idx)
+ {
+ int bus = mp_irqs[idx].mpc_srcbus;
+@@ -612,38 +551,11 @@ static int __init MPBIOS_polarity(int id
+ switch (mp_irqs[idx].mpc_irqflag & 3)
+ {
+ case 0: /* conforms, ie. bus-type dependent polarity */
+- {
+- switch (mp_bus_id_to_type[bus])
+- {
+- case MP_BUS_ISA: /* ISA pin */
+- {
+- polarity = default_ISA_polarity(idx);
+- break;
+- }
+- case MP_BUS_EISA: /* EISA pin */
+- {
+- polarity = default_EISA_polarity(idx);
+- break;
+- }
+- case MP_BUS_PCI: /* PCI pin */
+- {
+- polarity = default_PCI_polarity(idx);
+- break;
+- }
+- case MP_BUS_MCA: /* MCA pin */
+- {
+- polarity = default_MCA_polarity(idx);
+- break;
+- }
+- default:
+- {
+- printk(KERN_WARNING "broken BIOS!!\n");
+- polarity = 1;
+- break;
+- }
+- }
++ if (test_bit(bus, mp_bus_not_pci))
++ polarity = default_ISA_polarity(idx);
++ else
++ polarity = default_PCI_polarity(idx);
+ break;
+- }
+ case 1: /* high active */
+ {
+ polarity = 0;
+@@ -681,38 +593,11 @@ static int MPBIOS_trigger(int idx)
+ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
+ {
+ case 0: /* conforms, ie. bus-type dependent */
+- {
+- switch (mp_bus_id_to_type[bus])
+- {
+- case MP_BUS_ISA: /* ISA pin */
+- {
+- trigger = default_ISA_trigger(idx);
+- break;
+- }
+- case MP_BUS_EISA: /* EISA pin */
+- {
+- trigger = default_EISA_trigger(idx);
+- break;
+- }
+- case MP_BUS_PCI: /* PCI pin */
+- {
+- trigger = default_PCI_trigger(idx);
+- break;
+- }
+- case MP_BUS_MCA: /* MCA pin */
+- {
+- trigger = default_MCA_trigger(idx);
+- break;
+- }
+- default:
+- {
+- printk(KERN_WARNING "broken BIOS!!\n");
+- trigger = 1;
+- break;
+- }
+- }
++ if (test_bit(bus, mp_bus_not_pci))
++ trigger = default_ISA_trigger(idx);
++ else
++ trigger = default_PCI_trigger(idx);
+ break;
+- }
+ case 1: /* edge */
+ {
+ trigger = 0;
+@@ -749,64 +634,6 @@ static inline int irq_trigger(int idx)
+ return MPBIOS_trigger(idx);
+ }
+
+-static int next_irq = 16;
+-
+-/*
+- * gsi_irq_sharing -- Name overload! "irq" can be either a legacy IRQ
+- * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
+- * from ACPI, which can reach 800 in large boxen.
+- *
+- * Compact the sparse GSI space into a sequential IRQ series and reuse
+- * vectors if possible.
+- */
+-int gsi_irq_sharing(int gsi)
+-{
+- int i, tries, vector;
+-
+- BUG_ON(gsi >= NR_IRQ_VECTORS);
+-
+- if (platform_legacy_irq(gsi))
+- return gsi;
+-
+- if (gsi_2_irq[gsi] != 0xFF)
+- return (int)gsi_2_irq[gsi];
+-
+- tries = NR_IRQS;
+- try_again:
+- vector = assign_irq_vector(gsi);
+-
+- /*
+- * Sharing vectors means sharing IRQs, so scan irq_vectors for previous
+- * use of vector and if found, return that IRQ. However, we never want
+- * to share legacy IRQs, which usually have a different trigger mode
+- * than PCI.
+- */
+- for (i = 0; i < NR_IRQS; i++)
+- if (IO_APIC_VECTOR(i) == vector)
+- break;
+- if (platform_legacy_irq(i)) {
+- if (--tries >= 0) {
+- IO_APIC_VECTOR(i) = 0;
+- goto try_again;
+- }
+- panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
+- }
+- if (i < NR_IRQS) {
+- gsi_2_irq[gsi] = i;
+- printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
+- gsi, vector, i);
+- return i;
+- }
+-
+- i = next_irq++;
+- BUG_ON(i >= NR_IRQS);
+- gsi_2_irq[gsi] = i;
+- IO_APIC_VECTOR(i) = vector;
+- printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
+- gsi, vector, i);
+- return i;
+-}
+-
+ static int pin_2_irq(int idx, int apic, int pin)
+ {
+ int irq, i;
+@@ -818,49 +645,16 @@ static int pin_2_irq(int idx, int apic,
+ if (mp_irqs[idx].mpc_dstirq != pin)
+ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
+
+- switch (mp_bus_id_to_type[bus])
+- {
+- case MP_BUS_ISA: /* ISA pin */
+- case MP_BUS_EISA:
+- case MP_BUS_MCA:
+- {
+- irq = mp_irqs[idx].mpc_srcbusirq;
+- break;
+- }
+- case MP_BUS_PCI: /* PCI pin */
+- {
+- /*
+- * PCI IRQs are mapped in order
+- */
+- i = irq = 0;
+- while (i < apic)
+- irq += nr_ioapic_registers[i++];
+- irq += pin;
+- irq = gsi_irq_sharing(irq);
+- break;
+- }
+- default:
+- {
+- printk(KERN_ERR "unknown bus type %d.\n",bus);
+- irq = 0;
+- break;
+- }
+- }
+- BUG_ON(irq >= NR_IRQS);
+-
+- /*
+- * PCI IRQ command line redirection. Yes, limits are hardcoded.
+- */
+- if ((pin >= 16) && (pin <= 23)) {
+- if (pirq_entries[pin-16] != -1) {
+- if (!pirq_entries[pin-16]) {
+- apic_printk(APIC_VERBOSE, "disabling PIRQ%d\n", pin-16);
+- } else {
+- irq = pirq_entries[pin-16];
+- apic_printk(APIC_VERBOSE, "using PIRQ%d -> IRQ %d\n",
+- pin-16, irq);
+- }
+- }
++ if (test_bit(bus, mp_bus_not_pci)) {
++ irq = mp_irqs[idx].mpc_srcbusirq;
++ } else {
++ /*
++ * PCI IRQs are mapped in order
++ */
++ i = irq = 0;
++ while (i < apic)
++ irq += nr_ioapic_registers[i++];
++ irq += pin;
+ }
+ BUG_ON(irq >= NR_IRQS);
+ return irq;
+@@ -884,43 +678,68 @@ static inline int IO_APIC_irq_trigger(in
+ }
+
+ /* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
+-u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
++static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
+
+-int assign_irq_vector(int irq)
++static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
+ {
+- unsigned long flags;
+ int vector;
+ struct physdev_irq irq_op;
+
+- BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
++ BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
+
+- spin_lock_irqsave(&vector_lock, flags);
++ cpus_and(*result, mask, cpu_online_map);
+
+- if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
+- spin_unlock_irqrestore(&vector_lock, flags);
+- return IO_APIC_VECTOR(irq);
+- }
++ if (irq_vector[irq] > 0)
++ return irq_vector[irq];
+
+ irq_op.irq = irq;
+- if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
+- spin_unlock_irqrestore(&vector_lock, flags);
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
+ return -ENOSPC;
+- }
+
+ vector = irq_op.vector;
+- vector_irq[vector] = irq;
+- if (irq != AUTO_ASSIGN)
+- IO_APIC_VECTOR(irq) = vector;
++ irq_vector[irq] = vector;
+
+- spin_unlock_irqrestore(&vector_lock, flags);
++ return vector;
++}
+
++static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
++{
++ int vector;
++ unsigned long flags;
++
++ spin_lock_irqsave(&vector_lock, flags);
++ vector = __assign_irq_vector(irq, mask, result);
++ spin_unlock_irqrestore(&vector_lock, flags);
+ return vector;
+ }
+
+-extern void (*interrupt[NR_IRQS])(void);
+ #ifndef CONFIG_XEN
+-static struct hw_interrupt_type ioapic_level_type;
+-static struct hw_interrupt_type ioapic_edge_type;
++void __setup_vector_irq(int cpu)
++{
++ /* Initialize vector_irq on a new cpu */
++ /* This function must be called with vector_lock held */
++ int irq, vector;
++
++ /* Mark the inuse vectors */
++ for (irq = 0; irq < NR_IRQ_VECTORS; ++irq) {
++ if (!cpu_isset(cpu, irq_domain[irq]))
++ continue;
++ vector = irq_vector[irq];
++ per_cpu(vector_irq, cpu)[vector] = irq;
++ }
++ /* Mark the free vectors */
++ for (vector = 0; vector < NR_VECTORS; ++vector) {
++ irq = per_cpu(vector_irq, cpu)[vector];
++ if (irq < 0)
++ continue;
++ if (!cpu_isset(cpu, irq_domain[irq]))
++ per_cpu(vector_irq, cpu)[vector] = -1;
++ }
++}
++
++extern void (*interrupt[NR_IRQS])(void);
++
++static struct irq_chip ioapic_chip;
+
+ #define IOAPIC_AUTO -1
+ #define IOAPIC_EDGE 0
+@@ -928,16 +747,15 @@ static struct hw_interrupt_type ioapic_e
+
+ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
+ {
+- unsigned idx;
+-
+- idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
+-
+ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
+ trigger == IOAPIC_LEVEL)
+- irq_desc[idx].chip = &ioapic_level_type;
+- else
+- irq_desc[idx].chip = &ioapic_edge_type;
+- set_intr_gate(vector, interrupt[idx]);
++ set_irq_chip_and_handler_name(irq, &ioapic_chip,
++ handle_fasteoi_irq, "fasteoi");
++ else {
++ irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
++ set_irq_chip_and_handler_name(irq, &ioapic_chip,
++ handle_edge_irq, "edge");
++ }
+ }
+ #else
+ #define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
+@@ -990,16 +808,21 @@ static void __init setup_IO_APIC_irqs(vo
+ continue;
+
+ if (IO_APIC_IRQ(irq)) {
+- vector = assign_irq_vector(irq);
++ cpumask_t mask;
++ vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
++ if (vector < 0)
++ continue;
++
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
+ entry.vector = vector;
+
+ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
+ if (!apic && (irq < 16))
+ disable_8259A_irq(irq);
+ }
++ ioapic_write_entry(apic, pin, entry);
++
+ spin_lock_irqsave(&ioapic_lock, flags);
+- io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
+- io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
+ set_native_irq_info(irq, TARGET_CPUS);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+@@ -1042,7 +865,7 @@ static void __init setup_ExtINT_IRQ0_pin
+ * The timer IRQ doesn't have to know that behind the
+ * scene we have a 8259A-master in AEOI mode ...
+ */
+- irq_desc[0].chip = &ioapic_edge_type;
++ set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
+
+ /*
+ * Add it to the IO-APIC irq-routing table:
+@@ -1138,10 +961,7 @@ void __apicdebuginit print_IO_APIC(void)
+ for (i = 0; i <= reg_01.bits.entries; i++) {
+ struct IO_APIC_route_entry entry;
+
+- spin_lock_irqsave(&ioapic_lock, flags);
+- *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
+- *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ entry = ioapic_read_entry(apic, i);
+
+ printk(KERN_DEBUG " %02x %03X %02X ",
+ i,
+@@ -1161,17 +981,12 @@ void __apicdebuginit print_IO_APIC(void)
+ );
+ }
+ }
+- if (use_pci_vector())
+- printk(KERN_INFO "Using vector-based indexing\n");
+ printk(KERN_DEBUG "IRQ to pin mappings:\n");
+ for (i = 0; i < NR_IRQS; i++) {
+ struct irq_pin_list *entry = irq_2_pin + i;
+ if (entry->pin < 0)
+ continue;
+- if (use_pci_vector() && !platform_legacy_irq(i))
+- printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
+- else
+- printk(KERN_DEBUG "IRQ%d ", i);
++ printk(KERN_DEBUG "IRQ%d ", i);
+ for (;;) {
+ printk("-> %d:%d", entry->apic, entry->pin);
+ if (!entry->next)
+@@ -1342,9 +1157,6 @@ static void __init enable_IO_APIC(void)
+ irq_2_pin[i].pin = -1;
+ irq_2_pin[i].next = 0;
+ }
+- if (!pirqs_enabled)
+- for (i = 0; i < MAX_PIRQS; i++)
+- pirq_entries[i] = -1;
+
+ /*
+ * The number of IO-APIC IRQ registers (== #pins):
+@@ -1361,11 +1173,7 @@ static void __init enable_IO_APIC(void)
+ /* See if any of the pins is in ExtINT mode */
+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+ struct IO_APIC_route_entry entry;
+- spin_lock_irqsave(&ioapic_lock, flags);
+- *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
+- *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
+- spin_unlock_irqrestore(&ioapic_lock, flags);
+-
++ entry = ioapic_read_entry(apic, pin);
+
+ /* If the interrupt line is enabled and in ExtInt mode
+ * I have found the pin where the i8259 is connected.
+@@ -1419,7 +1227,6 @@ void disable_IO_APIC(void)
+ */
+ if (ioapic_i8259.pin != -1) {
+ struct IO_APIC_route_entry entry;
+- unsigned long flags;
+
+ memset(&entry, 0, sizeof(entry));
+ entry.mask = 0; /* Enabled */
+@@ -1436,12 +1243,7 @@ void disable_IO_APIC(void)
+ /*
+ * Add it to the IO-APIC irq-routing table:
+ */
+- spin_lock_irqsave(&ioapic_lock, flags);
+- io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
+- *(((int *)&entry)+1));
+- io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
+- *(((int *)&entry)+0));
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
+ }
+
+ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
+@@ -1449,76 +1251,6 @@ void disable_IO_APIC(void)
+ }
+
+ /*
+- * function to set the IO-APIC physical IDs based on the
+- * values stored in the MPC table.
+- *
+- * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
+- */
+-
+-#ifndef CONFIG_XEN
+-static void __init setup_ioapic_ids_from_mpc (void)
+-{
+- union IO_APIC_reg_00 reg_00;
+- int apic;
+- int i;
+- unsigned char old_id;
+- unsigned long flags;
+-
+- /*
+- * Set the IOAPIC ID to the value stored in the MPC table.
+- */
+- for (apic = 0; apic < nr_ioapics; apic++) {
+-
+- /* Read the register 0 value */
+- spin_lock_irqsave(&ioapic_lock, flags);
+- reg_00.raw = io_apic_read(apic, 0);
+- spin_unlock_irqrestore(&ioapic_lock, flags);
+-
+- old_id = mp_ioapics[apic].mpc_apicid;
+-
+-
+- printk(KERN_INFO "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
+-
+-
+- /*
+- * We need to adjust the IRQ routing table
+- * if the ID changed.
+- */
+- if (old_id != mp_ioapics[apic].mpc_apicid)
+- for (i = 0; i < mp_irq_entries; i++)
+- if (mp_irqs[i].mpc_dstapic == old_id)
+- mp_irqs[i].mpc_dstapic
+- = mp_ioapics[apic].mpc_apicid;
+-
+- /*
+- * Read the right value from the MPC table and
+- * write it into the ID register.
+- */
+- apic_printk(APIC_VERBOSE,KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
+- mp_ioapics[apic].mpc_apicid);
+-
+- reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
+- spin_lock_irqsave(&ioapic_lock, flags);
+- io_apic_write(apic, 0, reg_00.raw);
+- spin_unlock_irqrestore(&ioapic_lock, flags);
+-
+- /*
+- * Sanity check
+- */
+- spin_lock_irqsave(&ioapic_lock, flags);
+- reg_00.raw = io_apic_read(apic, 0);
+- spin_unlock_irqrestore(&ioapic_lock, flags);
+- if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
+- printk("could not set ID!\n");
+- else
+- apic_printk(APIC_VERBOSE," ok.\n");
+- }
+-}
+-#else
+-static void __init setup_ioapic_ids_from_mpc(void) { }
+-#endif
+-
+-/*
+ * There is a nasty bug in some older SMP boards, their mptable lies
+ * about the timer IRQ. We do the following to work around the situation:
+ *
+@@ -1572,7 +1304,7 @@ static int __init timer_irq_works(void)
+ * an edge even if it isn't on the 8259A...
+ */
+
+-static unsigned int startup_edge_ioapic_irq(unsigned int irq)
++static unsigned int startup_ioapic_irq(unsigned int irq)
+ {
+ int was_pending = 0;
+ unsigned long flags;
+@@ -1589,107 +1321,19 @@ static unsigned int startup_edge_ioapic_
+ return was_pending;
+ }
+
+-/*
+- * Once we have recorded IRQ_PENDING already, we can mask the
+- * interrupt for real. This prevents IRQ storms from unhandled
+- * devices.
+- */
+-static void ack_edge_ioapic_irq(unsigned int irq)
+-{
+- move_irq(irq);
+- if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
+- == (IRQ_PENDING | IRQ_DISABLED))
+- mask_IO_APIC_irq(irq);
+- ack_APIC_irq();
+-}
+-
+-/*
+- * Level triggered interrupts can just be masked,
+- * and shutting down and starting up the interrupt
+- * is the same as enabling and disabling them -- except
+- * with a startup need to return a "was pending" value.
+- *
+- * Level triggered interrupts are special because we
+- * do not touch any IO-APIC register while handling
+- * them. We ack the APIC in the end-IRQ handler, not
+- * in the start-IRQ-handler. Protection against reentrance
+- * from the same interrupt is still provided, both by the
+- * generic IRQ layer and by the fact that an unacked local
+- * APIC does not accept IRQs.
+- */
+-static unsigned int startup_level_ioapic_irq (unsigned int irq)
+-{
+- unmask_IO_APIC_irq(irq);
+-
+- return 0; /* don't check for pending */
+-}
+-
+-static void end_level_ioapic_irq (unsigned int irq)
+-{
+- move_irq(irq);
+- ack_APIC_irq();
+-}
+-
+-#ifdef CONFIG_PCI_MSI
+-static unsigned int startup_edge_ioapic_vector(unsigned int vector)
+-{
+- int irq = vector_to_irq(vector);
+-
+- return startup_edge_ioapic_irq(irq);
+-}
+-
+-static void ack_edge_ioapic_vector(unsigned int vector)
+-{
+- int irq = vector_to_irq(vector);
+-
+- move_native_irq(vector);
+- ack_edge_ioapic_irq(irq);
+-}
+-
+-static unsigned int startup_level_ioapic_vector (unsigned int vector)
+-{
+- int irq = vector_to_irq(vector);
+-
+- return startup_level_ioapic_irq (irq);
+-}
+-
+-static void end_level_ioapic_vector (unsigned int vector)
+-{
+- int irq = vector_to_irq(vector);
+-
+- move_native_irq(vector);
+- end_level_ioapic_irq(irq);
+-}
+-
+-static void mask_IO_APIC_vector (unsigned int vector)
+-{
+- int irq = vector_to_irq(vector);
+-
+- mask_IO_APIC_irq(irq);
+-}
+-
+-static void unmask_IO_APIC_vector (unsigned int vector)
+-{
+- int irq = vector_to_irq(vector);
+-
+- unmask_IO_APIC_irq(irq);
+-}
+-
+-#ifdef CONFIG_SMP
+-static void set_ioapic_affinity_vector (unsigned int vector,
+- cpumask_t cpu_mask)
++static int ioapic_retrigger_irq(unsigned int irq)
+ {
+- int irq = vector_to_irq(vector);
++ cpumask_t mask;
++ unsigned vector;
++ unsigned long flags;
+
+- set_native_irq_info(vector, cpu_mask);
+- set_ioapic_affinity_irq(irq, cpu_mask);
+-}
+-#endif // CONFIG_SMP
+-#endif // CONFIG_PCI_MSI
++ spin_lock_irqsave(&vector_lock, flags);
++ vector = irq_vector[irq];
++ cpus_clear(mask);
++ cpu_set(first_cpu(irq_domain[irq]), mask);
+
+-static int ioapic_retrigger(unsigned int irq)
+-{
+- send_IPI_self(IO_APIC_VECTOR(irq));
++ send_IPI_mask(mask, vector);
++ spin_unlock_irqrestore(&vector_lock, flags);
+
+ return 1;
+ }
+@@ -1703,32 +1347,47 @@ static int ioapic_retrigger(unsigned int
+ * races.
+ */
+
+-static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
+- .typename = "IO-APIC-edge",
+- .startup = startup_edge_ioapic,
+- .shutdown = shutdown_edge_ioapic,
+- .enable = enable_edge_ioapic,
+- .disable = disable_edge_ioapic,
+- .ack = ack_edge_ioapic,
+- .end = end_edge_ioapic,
+-#ifdef CONFIG_SMP
+- .set_affinity = set_ioapic_affinity,
++static void ack_apic_edge(unsigned int irq)
++{
++ move_native_irq(irq);
++ ack_APIC_irq();
++}
++
++static void ack_apic_level(unsigned int irq)
++{
++ int do_unmask_irq = 0;
++
++#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
++ /* If we are moving the irq we need to mask it */
++ if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
++ do_unmask_irq = 1;
++ mask_IO_APIC_irq(irq);
++ }
+ #endif
+- .retrigger = ioapic_retrigger,
+-};
+
+-static struct hw_interrupt_type ioapic_level_type __read_mostly = {
+- .typename = "IO-APIC-level",
+- .startup = startup_level_ioapic,
+- .shutdown = shutdown_level_ioapic,
+- .enable = enable_level_ioapic,
+- .disable = disable_level_ioapic,
+- .ack = mask_and_ack_level_ioapic,
+- .end = end_level_ioapic,
++ /*
++ * We must acknowledge the irq before we move it or the acknowledge will
++ * not propagate properly.
++ */
++ ack_APIC_irq();
++
++ /* Now we can move and re-enable the irq */
++ move_masked_irq(irq);
++ if (unlikely(do_unmask_irq))
++ unmask_IO_APIC_irq(irq);
++}
++
++static struct irq_chip ioapic_chip __read_mostly = {
++ .name = "IO-APIC",
++ .startup = startup_ioapic_irq,
++ .mask = mask_IO_APIC_irq,
++ .unmask = unmask_IO_APIC_irq,
++ .ack = ack_apic_edge,
++ .eoi = ack_apic_level,
+ #ifdef CONFIG_SMP
+- .set_affinity = set_ioapic_affinity,
++ .set_affinity = set_ioapic_affinity_irq,
+ #endif
+- .retrigger = ioapic_retrigger,
++ .retrigger = ioapic_retrigger_irq,
+ };
+ #endif /* !CONFIG_XEN */
+
+@@ -1749,12 +1408,7 @@ static inline void init_IO_APIC_traps(vo
+ */
+ for (irq = 0; irq < NR_IRQS ; irq++) {
+ int tmp = irq;
+- if (use_pci_vector()) {
+- if (!platform_legacy_irq(tmp))
+- if ((tmp = vector_to_irq(tmp)) == -1)
+- continue;
+- }
+- if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
++ if (IO_APIC_IRQ(tmp) && !irq_vector[tmp]) {
+ /*
+ * Hmm.. We don't have an entry for this,
+ * so default to an old-fashioned 8259
+@@ -1765,7 +1419,7 @@ static inline void init_IO_APIC_traps(vo
+ #ifndef CONFIG_XEN
+ else
+ /* Strange. Oh, well.. */
+- irq_desc[irq].chip = &no_irq_type;
++ irq_desc[irq].chip = &no_irq_chip;
+ #endif
+ }
+ }
+@@ -1886,8 +1540,6 @@ static inline void unlock_ExtINT_logic(v
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+
+-int timer_uses_ioapic_pin_0;
+-
+ /*
+ * This code may look a bit paranoid, but it's supposed to cooperate with
+ * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
+@@ -1900,13 +1552,13 @@ static inline void check_timer(void)
+ {
+ int apic1, pin1, apic2, pin2;
+ int vector;
++ cpumask_t mask;
+
+ /*
+ * get/set the timer IRQ vector:
+ */
+ disable_8259A_irq(0);
+- vector = assign_irq_vector(0);
+- set_intr_gate(vector, interrupt[0]);
++ vector = assign_irq_vector(0, TARGET_CPUS, &mask);
+
+ /*
+ * Subtle, code in do_timer_interrupt() expects an AEOI
+@@ -1925,9 +1577,6 @@ static inline void check_timer(void)
+ pin2 = ioapic_i8259.pin;
+ apic2 = ioapic_i8259.apic;
+
+- if (pin1 == 0)
+- timer_uses_ioapic_pin_0 = 1;
+-
+ apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
+ vector, apic1, pin1, apic2, pin2);
+
+@@ -2042,11 +1691,6 @@ void __init setup_IO_APIC(void)
+
+ apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
+
+- /*
+- * Set up the IO-APIC IRQ routing table.
+- */
+- if (!acpi_ioapic)
+- setup_ioapic_ids_from_mpc();
+ #ifndef CONFIG_XEN
+ sync_Arb_IDs();
+ #endif /* !CONFIG_XEN */
+@@ -2067,17 +1711,12 @@ static int ioapic_suspend(struct sys_dev
+ {
+ struct IO_APIC_route_entry *entry;
+ struct sysfs_ioapic_data *data;
+- unsigned long flags;
+ int i;
+
+ data = container_of(dev, struct sysfs_ioapic_data, dev);
+ entry = data->entry;
+- spin_lock_irqsave(&ioapic_lock, flags);
+- for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
+- *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
+- *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
+- }
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ )
++ *entry = ioapic_read_entry(dev->id, i);
+
+ return 0;
+ }
+@@ -2099,11 +1738,9 @@ static int ioapic_resume(struct sys_devi
+ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
+ io_apic_write(dev->id, 0, reg_00.raw);
+ }
+- for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
+- io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
+- io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
+- }
+ spin_unlock_irqrestore(&ioapic_lock, flags);
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
++ ioapic_write_entry(dev->id, i, entry[i]);
+
+ return 0;
+ }
+@@ -2149,26 +1786,254 @@ static int __init ioapic_init_sysfs(void
+
+ device_initcall(ioapic_init_sysfs);
+
+-/* --------------------------------------------------------------------------
+- ACPI-based IOAPIC Configuration
+- -------------------------------------------------------------------------- */
++#ifndef CONFIG_XEN
++/*
++ * Dynamic irq allocate and deallocation
++ */
++int create_irq(void)
++{
++ /* Allocate an unused irq */
++ int irq;
++ int new;
++ int vector = 0;
++ unsigned long flags;
++ cpumask_t mask;
+
+-#ifdef CONFIG_ACPI
++ irq = -ENOSPC;
++ spin_lock_irqsave(&vector_lock, flags);
++ for (new = (NR_IRQS - 1); new >= 0; new--) {
++ if (platform_legacy_irq(new))
++ continue;
++ if (irq_vector[new] != 0)
++ continue;
++ vector = __assign_irq_vector(new, TARGET_CPUS, &mask);
++ if (likely(vector > 0))
++ irq = new;
++ break;
++ }
++ spin_unlock_irqrestore(&vector_lock, flags);
+
+-#define IO_APIC_MAX_ID 0xFE
++ if (irq >= 0) {
++ dynamic_irq_init(irq);
++ }
++ return irq;
++}
+
+-int __init io_apic_get_version (int ioapic)
++void destroy_irq(unsigned int irq)
+ {
+- union IO_APIC_reg_01 reg_01;
+ unsigned long flags;
+
+- spin_lock_irqsave(&ioapic_lock, flags);
+- reg_01.raw = io_apic_read(ioapic, 1);
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ dynamic_irq_cleanup(irq);
++
++ spin_lock_irqsave(&vector_lock, flags);
++ irq_vector[irq] = 0;
++ spin_unlock_irqrestore(&vector_lock, flags);
++}
++#endif
++
++/*
++ * MSI message composition
++ */
++#ifdef CONFIG_PCI_MSI
++static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
++{
++ int vector;
++ unsigned dest;
++ cpumask_t tmp;
++
++ vector = assign_irq_vector(irq, TARGET_CPUS, &tmp);
++ if (vector >= 0) {
++ dest = cpu_mask_to_apicid(tmp);
++
++ msg->address_hi = MSI_ADDR_BASE_HI;
++ msg->address_lo =
++ MSI_ADDR_BASE_LO |
++ ((INT_DEST_MODE == 0) ?
++ MSI_ADDR_DEST_MODE_PHYSICAL:
++ MSI_ADDR_DEST_MODE_LOGICAL) |
++ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
++ MSI_ADDR_REDIRECTION_CPU:
++ MSI_ADDR_REDIRECTION_LOWPRI) |
++ MSI_ADDR_DEST_ID(dest);
++
++ msg->data =
++ MSI_DATA_TRIGGER_EDGE |
++ MSI_DATA_LEVEL_ASSERT |
++ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
++ MSI_DATA_DELIVERY_FIXED:
++ MSI_DATA_DELIVERY_LOWPRI) |
++ MSI_DATA_VECTOR(vector);
++ }
++ return vector;
++}
++
++#ifdef CONFIG_SMP
++static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
++{
++ struct msi_msg msg;
++ unsigned int dest;
++ cpumask_t tmp;
++ int vector;
++
++ cpus_and(tmp, mask, cpu_online_map);
++ if (cpus_empty(tmp))
++ tmp = TARGET_CPUS;
++
++ cpus_and(mask, tmp, CPU_MASK_ALL);
++
++ vector = assign_irq_vector(irq, mask, &tmp);
++ if (vector < 0)
++ return;
++
++ dest = cpu_mask_to_apicid(tmp);
++
++ read_msi_msg(irq, &msg);
++
++ msg.data &= ~MSI_DATA_VECTOR_MASK;
++ msg.data |= MSI_DATA_VECTOR(vector);
++ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
++ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
++
++ write_msi_msg(irq, &msg);
++ set_native_irq_info(irq, mask);
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
++ * which implement the MSI or MSI-X Capability Structure.
++ */
++static struct irq_chip msi_chip = {
++ .name = "PCI-MSI",
++ .unmask = unmask_msi_irq,
++ .mask = mask_msi_irq,
++ .ack = ack_apic_edge,
++#ifdef CONFIG_SMP
++ .set_affinity = set_msi_irq_affinity,
++#endif
++ .retrigger = ioapic_retrigger_irq,
++};
++
++int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
++{
++ struct msi_msg msg;
++ int ret;
++ ret = msi_compose_msg(dev, irq, &msg);
++ if (ret < 0)
++ return ret;
++
++ write_msi_msg(irq, &msg);
++
++ set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
++
++ return 0;
++}
++
++void arch_teardown_msi_irq(unsigned int irq)
++{
++ return;
++}
++
++#endif /* CONFIG_PCI_MSI */
++
++/*
++ * Hypertransport interrupt support
++ */
++#ifdef CONFIG_HT_IRQ
++
++#ifdef CONFIG_SMP
++
++static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
++{
++ struct ht_irq_msg msg;
++ fetch_ht_irq_msg(irq, &msg);
++
++ msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
++ msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
+
+- return reg_01.bits.version;
++ msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
++ msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
++
++ write_ht_irq_msg(irq, &msg);
+ }
+
++static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
++{
++ unsigned int dest;
++ cpumask_t tmp;
++ int vector;
++
++ cpus_and(tmp, mask, cpu_online_map);
++ if (cpus_empty(tmp))
++ tmp = TARGET_CPUS;
++
++ cpus_and(mask, tmp, CPU_MASK_ALL);
++
++ vector = assign_irq_vector(irq, mask, &tmp);
++ if (vector < 0)
++ return;
++
++ dest = cpu_mask_to_apicid(tmp);
++
++ target_ht_irq(irq, dest, vector);
++ set_native_irq_info(irq, mask);
++}
++#endif
++
++static struct irq_chip ht_irq_chip = {
++ .name = "PCI-HT",
++ .mask = mask_ht_irq,
++ .unmask = unmask_ht_irq,
++ .ack = ack_apic_edge,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ht_irq_affinity,
++#endif
++ .retrigger = ioapic_retrigger_irq,
++};
++
++int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
++{
++ int vector;
++ cpumask_t tmp;
++
++ vector = assign_irq_vector(irq, TARGET_CPUS, &tmp);
++ if (vector >= 0) {
++ struct ht_irq_msg msg;
++ unsigned dest;
++
++ dest = cpu_mask_to_apicid(tmp);
++
++ msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
++
++ msg.address_lo =
++ HT_IRQ_LOW_BASE |
++ HT_IRQ_LOW_DEST_ID(dest) |
++ HT_IRQ_LOW_VECTOR(vector) |
++ ((INT_DEST_MODE == 0) ?
++ HT_IRQ_LOW_DM_PHYSICAL :
++ HT_IRQ_LOW_DM_LOGICAL) |
++ HT_IRQ_LOW_RQEOI_EDGE |
++ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
++ HT_IRQ_LOW_MT_FIXED :
++ HT_IRQ_LOW_MT_ARBITRATED) |
++ HT_IRQ_LOW_IRQ_MASKED;
++
++ write_ht_irq_msg(irq, &msg);
++
++ set_irq_chip_and_handler_name(irq, &ht_irq_chip,
++ handle_edge_irq, "edge");
++ }
++ return vector;
++}
++#endif /* CONFIG_HT_IRQ */
++
++/* --------------------------------------------------------------------------
++ ACPI-based IOAPIC Configuration
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++#define IO_APIC_MAX_ID 0xFE
+
+ int __init io_apic_get_redir_entries (int ioapic)
+ {
+@@ -2187,6 +2052,8 @@ int io_apic_set_pci_routing (int ioapic,
+ {
+ struct IO_APIC_route_entry entry;
+ unsigned long flags;
++ int vector;
++ cpumask_t mask;
+
+ if (!IO_APIC_IRQ(irq)) {
+ apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
+@@ -2195,6 +2062,17 @@ int io_apic_set_pci_routing (int ioapic,
+ }
+
+ /*
++ * IRQs < 16 are already in the irq_2_pin[] map
++ */
++ if (irq >= 16)
++ add_pin_to_irq(irq, ioapic, pin);
++
++
++ vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
++ if (vector < 0)
++ return vector;
++
++ /*
+ * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
+ * Note that we mask (disable) IRQs now -- these get enabled when the
+ * corresponding device driver registers for this IRQ.
+@@ -2204,19 +2082,11 @@ int io_apic_set_pci_routing (int ioapic,
+
+ entry.delivery_mode = INT_DELIVERY_MODE;
+ entry.dest_mode = INT_DEST_MODE;
+- entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
+ entry.trigger = edge_level;
+ entry.polarity = active_high_low;
+ entry.mask = 1; /* Disabled (masked) */
+-
+- irq = gsi_irq_sharing(irq);
+- /*
+- * IRQs < 16 are already in the irq_2_pin[] map
+- */
+- if (irq >= 16)
+- add_pin_to_irq(irq, ioapic, pin);
+-
+- entry.vector = assign_irq_vector(irq);
++ entry.vector = vector & 0xff;
+
+ apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
+ "IRQ %d Mode:%i Active:%i)\n", ioapic,
+@@ -2228,10 +2098,10 @@ int io_apic_set_pci_routing (int ioapic,
+ if (!ioapic && (irq < 16))
+ disable_8259A_irq(irq);
+
++ ioapic_write_entry(ioapic, pin, entry);
++
+ spin_lock_irqsave(&ioapic_lock, flags);
+- io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
+- io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
+- set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
++ set_native_irq_info(irq, TARGET_CPUS);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ return 0;
+Index: 10.3-2007-11-26/arch/x86_64/kernel/ioport-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/ioport-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/ioport-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -58,6 +58,7 @@ asmlinkage long sys_ioperm(unsigned long
+
+ memset(bitmap, 0xff, IO_BITMAP_BYTES);
+ t->io_bitmap_ptr = bitmap;
++ set_thread_flag(TIF_IO_BITMAP);
+
+ set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
+ set_iobitmap.nr_ports = IO_BITMAP_BITS;
+Index: 10.3-2007-11-26/arch/x86_64/kernel/irq-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/irq-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/irq-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -20,11 +20,6 @@
+ #include <asm/idle.h>
+
+ atomic_t irq_err_count;
+-#ifdef CONFIG_X86_IO_APIC
+-#ifdef APIC_MISMATCH_DEBUG
+-atomic_t irq_mis_count;
+-#endif
+-#endif
+
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+ /*
+@@ -79,7 +74,8 @@ int show_interrupts(struct seq_file *p,
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+ #endif
+- seq_printf(p, " %14s", irq_desc[i].chip->typename);
++ seq_printf(p, " %8s", irq_desc[i].chip->name);
++ seq_printf(p, "-%-8s", irq_desc[i].name);
+
+ seq_printf(p, " %s", action->name);
+ for (action=action->next; action; action = action->next)
+@@ -99,11 +95,6 @@ skip:
+ seq_putc(p, '\n');
+ #endif
+ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+-#ifdef CONFIG_X86_IO_APIC
+-#ifdef APIC_MISMATCH_DEBUG
+- seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
+-#endif
+-#endif
+ }
+ return 0;
+ }
+@@ -114,24 +105,28 @@ skip:
+ * handlers).
+ */
+ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
+-{
++{
++ struct pt_regs *old_regs = set_irq_regs(regs);
++
+ /* high bit used in ret_from_ code */
+ unsigned irq = ~regs->orig_rax;
+
+- if (unlikely(irq >= NR_IRQS)) {
+- printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
+- __FUNCTION__, irq);
+- BUG();
+- }
+-
+ exit_idle();
+ irq_enter();
++
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+ stack_overflow_check(regs);
+ #endif
+- __do_IRQ(irq, regs);
++
++ if (likely(irq < NR_IRQS))
++ generic_handle_irq(irq);
++ else
++ printk(KERN_EMERG "%s: %d.%d No irq handler for irq\n",
++ __func__, smp_processor_id(), irq);
++
+ irq_exit();
+
++ set_irq_regs(old_regs);
+ return 1;
+ }
+
+@@ -192,6 +187,6 @@ EXPORT_SYMBOL(do_softirq);
+ */
+ void ack_bad_irq(unsigned int irq)
+ {
+- printk("unexpected IRQ trap at vector %02x\n", irq);
++ printk("unexpected IRQ trap at irq %02x\n", irq);
+ }
+ #endif
+Index: 10.3-2007-11-26/arch/x86_64/kernel/machine_kexec.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/machine_kexec.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/machine_kexec.c 2007-10-22 13:53:08.000000000 +0200
+@@ -286,6 +286,7 @@ NORET_TYPE void machine_kexec(struct kim
+ */
+ static int __init setup_crashkernel(char *arg)
+ {
++#ifndef CONFIG_XEN
+ unsigned long size, base;
+ char *p;
+ if (!arg)
+@@ -301,6 +302,10 @@ static int __init setup_crashkernel(char
+ crashk_res.start = base;
+ crashk_res.end = base + size - 1;
+ }
++#else
++ printk("Ignoring crashkernel command line, "
++ "parameter will be supplied by xen\n");
++#endif
+ return 0;
+ }
+ early_param("crashkernel", setup_crashkernel);
+Index: 10.3-2007-11-26/arch/x86_64/kernel/mpparse-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/mpparse-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/mpparse-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -41,8 +41,7 @@ int acpi_found_madt;
+ * Various Linux-internal data structures created from the
+ * MP-table.
+ */
+-unsigned char apic_version [MAX_APICS];
+-unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
+ int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
+
+ static int mp_current_pci_id = 0;
+@@ -56,7 +55,6 @@ struct mpc_config_intsrc mp_irqs[MAX_IRQ
+ int mp_irq_entries;
+
+ int nr_ioapics;
+-int pic_mode;
+ unsigned long mp_lapic_addr = 0;
+
+
+@@ -71,19 +69,6 @@ unsigned disabled_cpus __initdata;
+ /* Bitmask of physically existing CPUs */
+ physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
+
+-/* ACPI MADT entry parsing functions */
+-#ifdef CONFIG_ACPI
+-extern struct acpi_boot_flags acpi_boot;
+-#ifdef CONFIG_X86_LOCAL_APIC
+-extern int acpi_parse_lapic (acpi_table_entry_header *header);
+-extern int acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header);
+-extern int acpi_parse_lapic_nmi (acpi_table_entry_header *header);
+-#endif /*CONFIG_X86_LOCAL_APIC*/
+-#ifdef CONFIG_X86_IO_APIC
+-extern int acpi_parse_ioapic (acpi_table_entry_header *header);
+-#endif /*CONFIG_X86_IO_APIC*/
+-#endif /*CONFIG_ACPI*/
+-
+ u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+
+
+@@ -109,24 +94,20 @@ static int __init mpf_checksum(unsigned
+ static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
+ {
+ int cpu;
+- unsigned char ver;
+ cpumask_t tmp_map;
++ char *bootup_cpu = "";
+
+ if (!(m->mpc_cpuflag & CPU_ENABLED)) {
+ disabled_cpus++;
+ return;
+ }
+-
+- printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n",
+- m->mpc_apicid,
+- (m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
+- (m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
+- m->mpc_apicver);
+-
+ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
+- Dprintk(" Bootup CPU\n");
++ bootup_cpu = " (Bootup-CPU)";
+ boot_cpu_id = m->mpc_apicid;
+ }
++
++ printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu);
++
+ if (num_processors >= NR_CPUS) {
+ printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
+ " Processor ignored.\n", NR_CPUS);
+@@ -137,24 +118,7 @@ static void __cpuinit MP_processor_info
+ cpus_complement(tmp_map, cpu_present_map);
+ cpu = first_cpu(tmp_map);
+
+-#if MAX_APICS < 255
+- if ((int)m->mpc_apicid > MAX_APICS) {
+- printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
+- m->mpc_apicid, MAX_APICS);
+- return;
+- }
+-#endif
+- ver = m->mpc_apicver;
+-
+ physid_set(m->mpc_apicid, phys_cpu_present_map);
+- /*
+- * Validate version
+- */
+- if (ver == 0x0) {
+- printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
+- ver = 0x10;
+- }
+- apic_version[m->mpc_apicid] = ver;
+ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
+ /*
+ * bios_cpu_apicid is required to have processors listed
+@@ -185,37 +149,42 @@ static void __init MP_bus_info (struct m
+ Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
+
+ if (strncmp(str, "ISA", 3) == 0) {
+- mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
+- } else if (strncmp(str, "EISA", 4) == 0) {
+- mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
++ set_bit(m->mpc_busid, mp_bus_not_pci);
+ } else if (strncmp(str, "PCI", 3) == 0) {
+- mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
++ clear_bit(m->mpc_busid, mp_bus_not_pci);
+ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
+ mp_current_pci_id++;
+- } else if (strncmp(str, "MCA", 3) == 0) {
+- mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
+ } else {
+ printk(KERN_ERR "Unknown bustype %s\n", str);
+ }
+ }
+
++static int bad_ioapic(unsigned long address)
++{
++ if (nr_ioapics >= MAX_IO_APICS) {
++ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
++ "(found %d)\n", MAX_IO_APICS, nr_ioapics);
++ panic("Recompile kernel with bigger MAX_IO_APICS!\n");
++ }
++ if (!address) {
++ printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
++ " found in table, skipping!\n");
++ return 1;
++ }
++ return 0;
++}
++
+ static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
+ {
+ if (!(m->mpc_flags & MPC_APIC_USABLE))
+ return;
+
+- printk("I/O APIC #%d Version %d at 0x%X.\n",
+- m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
+- if (nr_ioapics >= MAX_IO_APICS) {
+- printk(KERN_ERR "Max # of I/O APICs (%d) exceeded (found %d).\n",
+- MAX_IO_APICS, nr_ioapics);
+- panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
+- }
+- if (!m->mpc_apicaddr) {
+- printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
+- " found in MP table, skipping!\n");
++ printk("I/O APIC #%d at 0x%X.\n",
++ m->mpc_apicid, m->mpc_apicaddr);
++
++ if (bad_ioapic(m->mpc_apicaddr))
+ return;
+- }
++
+ mp_ioapics[nr_ioapics] = *m;
+ nr_ioapics++;
+ }
+@@ -239,19 +208,6 @@ static void __init MP_lintsrc_info (stru
+ m->mpc_irqtype, m->mpc_irqflag & 3,
+ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
+ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
+- /*
+- * Well it seems all SMP boards in existence
+- * use ExtINT/LVT1 == LINT0 and
+- * NMI/LVT2 == LINT1 - the following check
+- * will show us if this assumptions is false.
+- * Until then we do not have to add baggage.
+- */
+- if ((m->mpc_irqtype == mp_ExtINT) &&
+- (m->mpc_destapiclint != 0))
+- BUG();
+- if ((m->mpc_irqtype == mp_NMI) &&
+- (m->mpc_destapiclint != 1))
+- BUG();
+ }
+
+ /*
+@@ -265,7 +221,7 @@ static int __init smp_read_mpc(struct mp
+ unsigned char *mpt=((unsigned char *)mpc)+count;
+
+ if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
+- printk("SMP mptable: bad signature [%c%c%c%c]!\n",
++ printk("MPTABLE: bad signature [%c%c%c%c]!\n",
+ mpc->mpc_signature[0],
+ mpc->mpc_signature[1],
+ mpc->mpc_signature[2],
+@@ -273,31 +229,31 @@ static int __init smp_read_mpc(struct mp
+ return 0;
+ }
+ if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
+- printk("SMP mptable: checksum error!\n");
++ printk("MPTABLE: checksum error!\n");
+ return 0;
+ }
+ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
+- printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
++ printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n",
+ mpc->mpc_spec);
+ return 0;
+ }
+ if (!mpc->mpc_lapic) {
+- printk(KERN_ERR "SMP mptable: null local APIC address!\n");
++ printk(KERN_ERR "MPTABLE: null local APIC address!\n");
+ return 0;
+ }
+ memcpy(str,mpc->mpc_oem,8);
+- str[8]=0;
+- printk(KERN_INFO "OEM ID: %s ",str);
++ str[8] = 0;
++ printk(KERN_INFO "MPTABLE: OEM ID: %s ",str);
+
+ memcpy(str,mpc->mpc_productid,12);
+- str[12]=0;
+- printk("Product ID: %s ",str);
++ str[12] = 0;
++ printk("MPTABLE: Product ID: %s ",str);
+
+- printk("APIC at: 0x%X\n",mpc->mpc_lapic);
++ printk("MPTABLE: APIC at: 0x%X\n",mpc->mpc_lapic);
+
+ /* save the local APIC address, it might be non-default */
+ if (!acpi_lapic)
+- mp_lapic_addr = mpc->mpc_lapic;
++ mp_lapic_addr = mpc->mpc_lapic;
+
+ /*
+ * Now process the configuration blocks.
+@@ -309,7 +265,7 @@ static int __init smp_read_mpc(struct mp
+ struct mpc_config_processor *m=
+ (struct mpc_config_processor *)mpt;
+ if (!acpi_lapic)
+- MP_processor_info(m);
++ MP_processor_info(m);
+ mpt += sizeof(*m);
+ count += sizeof(*m);
+ break;
+@@ -328,8 +284,8 @@ static int __init smp_read_mpc(struct mp
+ struct mpc_config_ioapic *m=
+ (struct mpc_config_ioapic *)mpt;
+ MP_ioapic_info(m);
+- mpt+=sizeof(*m);
+- count+=sizeof(*m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
+ break;
+ }
+ case MP_INTSRC:
+@@ -338,8 +294,8 @@ static int __init smp_read_mpc(struct mp
+ (struct mpc_config_intsrc *)mpt;
+
+ MP_intsrc_info(m);
+- mpt+=sizeof(*m);
+- count+=sizeof(*m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
+ break;
+ }
+ case MP_LINTSRC:
+@@ -347,15 +303,15 @@ static int __init smp_read_mpc(struct mp
+ struct mpc_config_lintsrc *m=
+ (struct mpc_config_lintsrc *)mpt;
+ MP_lintsrc_info(m);
+- mpt+=sizeof(*m);
+- count+=sizeof(*m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
+ break;
+ }
+ }
+ }
+ clustered_apic_check();
+ if (!num_processors)
+- printk(KERN_ERR "SMP mptable: no processors registered!\n");
++ printk(KERN_ERR "MPTABLE: no processors registered!\n");
+ return num_processors;
+ }
+
+@@ -451,13 +407,10 @@ static inline void __init construct_defa
+ * 2 CPUs, numbered 0 & 1.
+ */
+ processor.mpc_type = MP_PROCESSOR;
+- /* Either an integrated APIC or a discrete 82489DX. */
+- processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++ processor.mpc_apicver = 0;
+ processor.mpc_cpuflag = CPU_ENABLED;
+- processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
+- (boot_cpu_data.x86_model << 4) |
+- boot_cpu_data.x86_mask;
+- processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++ processor.mpc_cpufeature = 0;
++ processor.mpc_featureflag = 0;
+ processor.mpc_reserved[0] = 0;
+ processor.mpc_reserved[1] = 0;
+ for (i = 0; i < 2; i++) {
+@@ -476,14 +429,6 @@ static inline void __init construct_defa
+ case 5:
+ memcpy(bus.mpc_bustype, "ISA ", 6);
+ break;
+- case 2:
+- case 6:
+- case 3:
+- memcpy(bus.mpc_bustype, "EISA ", 6);
+- break;
+- case 4:
+- case 7:
+- memcpy(bus.mpc_bustype, "MCA ", 6);
+ }
+ MP_bus_info(&bus);
+ if (mpc_default_type > 4) {
+@@ -494,7 +439,7 @@ static inline void __init construct_defa
+
+ ioapic.mpc_type = MP_IOAPIC;
+ ioapic.mpc_apicid = 2;
+- ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++ ioapic.mpc_apicver = 0;
+ ioapic.mpc_flags = MPC_APIC_USABLE;
+ ioapic.mpc_apicaddr = 0xFEC00000;
+ MP_ioapic_info(&ioapic);
+@@ -537,13 +482,6 @@ void __init get_smp_config (void)
+ printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
+
+ printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
+- if (mpf->mpf_feature2 & (1<<7)) {
+- printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
+- pic_mode = 1;
+- } else {
+- printk(KERN_INFO " Virtual Wire compatibility mode.\n");
+- pic_mode = 0;
+- }
+
+ /*
+ * Now see if we need to read further.
+@@ -620,7 +558,7 @@ static int __init smp_scan_config (unsig
+ return 0;
+ }
+
+-void __init find_intel_smp (void)
++void __init find_smp_config(void)
+ {
+ unsigned int address;
+
+@@ -637,9 +575,7 @@ void __init find_intel_smp (void)
+ smp_scan_config(0xF0000,0x10000))
+ return;
+ /*
+- * If it is an SMP machine we should know now, unless the
+- * configuration is in an EISA/MCA bus machine with an
+- * extended bios data area.
++ * If it is an SMP machine we should know now.
+ *
+ * there is a real-mode segmented pointer pointing to the
+ * 4K EBDA area at 0x40E, calculate and scan it here.
+@@ -660,64 +596,38 @@ void __init find_intel_smp (void)
+ printk(KERN_INFO "No mptable found.\n");
+ }
+
+-/*
+- * - Intel MP Configuration Table
+- */
+-void __init find_smp_config (void)
+-{
+-#ifdef CONFIG_X86_LOCAL_APIC
+- find_intel_smp();
+-#endif
+-}
+-
+-
+ /* --------------------------------------------------------------------------
+ ACPI-based MP Configuration
+ -------------------------------------------------------------------------- */
+
+ #ifdef CONFIG_ACPI
+
+-void __init mp_register_lapic_address (
+- u64 address)
++void __init mp_register_lapic_address(u64 address)
+ {
+ #ifndef CONFIG_XEN
+ mp_lapic_addr = (unsigned long) address;
+-
+ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
+-
+ if (boot_cpu_id == -1U)
+ boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
+-
+- Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
+ #endif
+ }
+
+-
+-void __cpuinit mp_register_lapic (
+- u8 id,
+- u8 enabled)
++void __cpuinit mp_register_lapic (u8 id, u8 enabled)
+ {
+ struct mpc_config_processor processor;
+ int boot_cpu = 0;
+
+- if (id >= MAX_APICS) {
+- printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
+- id, MAX_APICS);
+- return;
+- }
+-
+- if (id == boot_cpu_physical_apicid)
++ if (id == boot_cpu_id)
+ boot_cpu = 1;
+
+ #ifndef CONFIG_XEN
+ processor.mpc_type = MP_PROCESSOR;
+ processor.mpc_apicid = id;
+- processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
++ processor.mpc_apicver = 0;
+ processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
+ processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
+- processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
+- (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
+- processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++ processor.mpc_cpufeature = 0;
++ processor.mpc_featureflag = 0;
+ processor.mpc_reserved[0] = 0;
+ processor.mpc_reserved[1] = 0;
+ #endif
+@@ -725,8 +635,6 @@ void __cpuinit mp_register_lapic (
+ MP_processor_info(&processor);
+ }
+
+-#ifdef CONFIG_X86_IO_APIC
+-
+ #define MP_ISA_BUS 0
+ #define MP_MAX_IOAPIC_PIN 127
+
+@@ -737,11 +645,9 @@ static struct mp_ioapic_routing {
+ u32 pin_programmed[4];
+ } mp_ioapic_routing[MAX_IO_APICS];
+
+-
+-static int mp_find_ioapic (
+- int gsi)
++static int mp_find_ioapic(int gsi)
+ {
+- int i = 0;
++ int i = 0;
+
+ /* Find the IOAPIC that manages this GSI. */
+ for (i = 0; i < nr_ioapics; i++) {
+@@ -751,28 +657,15 @@ static int mp_find_ioapic (
+ }
+
+ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
+-
+ return -1;
+ }
+-
+
+-void __init mp_register_ioapic (
+- u8 id,
+- u32 address,
+- u32 gsi_base)
++void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
+ {
+- int idx = 0;
++ int idx = 0;
+
+- if (nr_ioapics >= MAX_IO_APICS) {
+- printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
+- "(found %d)\n", MAX_IO_APICS, nr_ioapics);
+- panic("Recompile kernel with bigger MAX_IO_APICS!\n");
+- }
+- if (!address) {
+- printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
+- " found in MADT table, skipping!\n");
++ if (bad_ioapic(address))
+ return;
+- }
+
+ idx = nr_ioapics++;
+
+@@ -784,7 +677,7 @@ void __init mp_register_ioapic (
+ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
+ #endif
+ mp_ioapics[idx].mpc_apicid = id;
+- mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
++ mp_ioapics[idx].mpc_apicver = 0;
+
+ /*
+ * Build basic IRQ lookup table to facilitate gsi->io_apic lookups
+@@ -795,21 +688,15 @@ void __init mp_register_ioapic (
+ mp_ioapic_routing[idx].gsi_end = gsi_base +
+ io_apic_get_redir_entries(idx);
+
+- printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
++ printk(KERN_INFO "IOAPIC[%d]: apic_id %d, address 0x%x, "
+ "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
+- mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
++ mp_ioapics[idx].mpc_apicaddr,
+ mp_ioapic_routing[idx].gsi_start,
+ mp_ioapic_routing[idx].gsi_end);
+-
+- return;
+ }
+
+-
+-void __init mp_override_legacy_irq (
+- u8 bus_irq,
+- u8 polarity,
+- u8 trigger,
+- u32 gsi)
++void __init
++mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
+ {
+ struct mpc_config_intsrc intsrc;
+ int ioapic = -1;
+@@ -847,22 +734,18 @@ void __init mp_override_legacy_irq (
+ mp_irqs[mp_irq_entries] = intsrc;
+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
+ panic("Max # of irq sources exceeded!\n");
+-
+- return;
+ }
+
+-
+-void __init mp_config_acpi_legacy_irqs (void)
++void __init mp_config_acpi_legacy_irqs(void)
+ {
+ struct mpc_config_intsrc intsrc;
+- int i = 0;
+- int ioapic = -1;
++ int i = 0;
++ int ioapic = -1;
+
+ /*
+ * Fabricate the legacy ISA bus (bus #31).
+ */
+- mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
+- Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
++ set_bit(MP_ISA_BUS, mp_bus_not_pci);
+
+ /*
+ * Locate the IOAPIC that manages the ISA IRQs (0-15).
+@@ -915,24 +798,13 @@ void __init mp_config_acpi_legacy_irqs (
+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
+ panic("Max # of irq sources exceeded!\n");
+ }
+-
+- return;
+ }
+
+-#define MAX_GSI_NUM 4096
+-
+ int mp_register_gsi(u32 gsi, int triggering, int polarity)
+ {
+- int ioapic = -1;
+- int ioapic_pin = 0;
+- int idx, bit = 0;
+- static int pci_irq = 16;
+- /*
+- * Mapping between Global System Interrupts, which
+- * represent all possible interrupts, to the IRQs
+- * assigned to actual devices.
+- */
+- static int gsi_to_irq[MAX_GSI_NUM];
++ int ioapic = -1;
++ int ioapic_pin = 0;
++ int idx, bit = 0;
+
+ if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
+ return gsi;
+@@ -965,47 +837,14 @@ int mp_register_gsi(u32 gsi, int trigger
+ if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
+ Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
+ mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
+- return gsi_to_irq[gsi];
++ return gsi;
+ }
+
+ mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
+
+- if (triggering == ACPI_LEVEL_SENSITIVE) {
+- /*
+- * For PCI devices assign IRQs in order, avoiding gaps
+- * due to unused I/O APIC pins.
+- */
+- int irq = gsi;
+- if (gsi < MAX_GSI_NUM) {
+- /*
+- * Retain the VIA chipset work-around (gsi > 15), but
+- * avoid a problem where the 8254 timer (IRQ0) is setup
+- * via an override (so it's not on pin 0 of the ioapic),
+- * and at the same time, the pin 0 interrupt is a PCI
+- * type. The gsi > 15 test could cause these two pins
+- * to be shared as IRQ0, and they are not shareable.
+- * So test for this condition, and if necessary, avoid
+- * the pin collision.
+- */
+- if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
+- gsi = pci_irq++;
+- /*
+- * Don't assign IRQ used by ACPI SCI
+- */
+- if (gsi == acpi_fadt.sci_int)
+- gsi = pci_irq++;
+- gsi_to_irq[irq] = gsi;
+- } else {
+- printk(KERN_ERR "GSI %u is too high\n", gsi);
+- return gsi;
+- }
+- }
+-
+ io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
+ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
+ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
+ return gsi;
+ }
+-
+-#endif /*CONFIG_X86_IO_APIC*/
+ #endif /*CONFIG_ACPI*/
+Index: 10.3-2007-11-26/arch/x86_64/kernel/pci-swiotlb-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/pci-swiotlb-xen.c 2007-05-31 14:39:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/pci-swiotlb-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -3,7 +3,8 @@
+ #include <linux/pci.h>
+ #include <linux/cache.h>
+ #include <linux/module.h>
+-#include <asm/dma-mapping.h>
++#include <linux/dma-mapping.h>
++
+ #include <asm/proto.h>
+ #include <asm/swiotlb.h>
+ #include <asm/dma.h>
+Index: 10.3-2007-11-26/arch/x86_64/kernel/process-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/process-xen.c 2007-05-31 14:39:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/process-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -89,25 +89,24 @@ void idle_notifier_unregister(struct not
+ }
+ EXPORT_SYMBOL(idle_notifier_unregister);
+
+-enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
+-static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
+-
+ void enter_idle(void)
+ {
+- __get_cpu_var(idle_state) = CPU_IDLE;
++ write_pda(isidle, 1);
+ atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+ }
+
+ static void __exit_idle(void)
+ {
+- __get_cpu_var(idle_state) = CPU_NOT_IDLE;
++ if (test_and_clear_bit_pda(0, isidle) == 0)
++ return;
+ atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+ }
+
+ /* Called from interrupts to signify idle end */
+ void exit_idle(void)
+ {
+- if (current->pid | read_pda(irqcount))
++ /* idle loop has pid 0 */
++ if (current->pid)
+ return;
+ __exit_idle();
+ }
+@@ -184,6 +183,9 @@ void cpu_idle (void)
+ play_dead();
+ enter_idle();
+ idle();
++ /* In many cases the interrupt that ended idle
++ has already called exit_idle. But some idle
++ loops can be woken up without interrupt. */
+ __exit_idle();
+ }
+
+@@ -196,7 +198,7 @@ void cpu_idle (void)
+ void cpu_idle_wait(void)
+ {
+ unsigned int cpu, this_cpu = get_cpu();
+- cpumask_t map;
++ cpumask_t map, tmp = current->cpus_allowed;
+
+ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
+ put_cpu();
+@@ -219,6 +221,8 @@ void cpu_idle_wait(void)
+ }
+ cpus_and(map, map, cpu_online_map);
+ } while (!cpus_empty(map));
++
++ set_cpus_allowed(current, tmp);
+ }
+ EXPORT_SYMBOL_GPL(cpu_idle_wait);
+
+@@ -250,9 +254,9 @@ void __show_regs(struct pt_regs * regs)
+ print_modules();
+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+ current->pid, current->comm, print_tainted(),
+- system_utsname.release,
+- (int)strcspn(system_utsname.version, " "),
+- system_utsname.version);
++ init_utsname()->release,
++ (int)strcspn(init_utsname()->version, " "),
++ init_utsname()->version);
+ printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
+ printk_address(regs->rip);
+ printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
+@@ -310,6 +314,7 @@ void exit_thread(void)
+
+ kfree(t->io_bitmap_ptr);
+ t->io_bitmap_ptr = NULL;
++ clear_thread_flag(TIF_IO_BITMAP);
+ /*
+ * Careful, clear this in the TSS too:
+ */
+@@ -339,6 +344,7 @@ void flush_thread(void)
+ if (t->flags & _TIF_IA32)
+ current_thread_info()->status |= TS_COMPAT;
+ }
++ t->flags &= ~_TIF_DEBUG;
+
+ tsk->thread.debugreg0 = 0;
+ tsk->thread.debugreg1 = 0;
+@@ -431,7 +437,7 @@ int copy_thread(int nr, unsigned long cl
+ asm("mov %%es,%0" : "=m" (p->thread.es));
+ asm("mov %%ds,%0" : "=m" (p->thread.ds));
+
+- if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
++ if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
+ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
+ if (!p->thread.io_bitmap_ptr) {
+ p->thread.io_bitmap_max = 0;
+@@ -439,6 +445,7 @@ int copy_thread(int nr, unsigned long cl
+ }
+ memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
+ IO_BITMAP_BYTES);
++ set_tsk_thread_flag(p, TIF_IO_BITMAP);
+ }
+
+ /*
+@@ -466,6 +473,30 @@ out:
+ }
+
+ /*
++ * This special macro can be used to load a debugging register
++ */
++#define loaddebug(thread,r) set_debugreg(thread->debugreg ## r, r)
++
++static inline void __switch_to_xtra(struct task_struct *prev_p,
++ struct task_struct *next_p)
++{
++ struct thread_struct *prev, *next;
++
++ prev = &prev_p->thread,
++ next = &next_p->thread;
++
++ if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
++ loaddebug(next, 0);
++ loaddebug(next, 1);
++ loaddebug(next, 2);
++ loaddebug(next, 3);
++ /* no 4 and 5 */
++ loaddebug(next, 6);
++ loaddebug(next, 7);
++ }
++}
++
++/*
+ * switch_to(x,y) should switch tasks from x to y.
+ *
+ * This could still be optimized:
+@@ -487,6 +518,10 @@ __switch_to(struct task_struct *prev_p,
+ struct physdev_set_iobitmap iobmp_op;
+ multicall_entry_t _mcl[8], *mcl = _mcl;
+
++ /* we're going to use this soon, after a few expensive things */
++ if (next_p->fpu_counter>5)
++ prefetch(&next->i387.fxsave);
++
+ /*
+ * This is basically '__unlazy_fpu', except that we queue a
+ * multicall to indicate FPU task switch, rather than
+@@ -499,7 +534,8 @@ __switch_to(struct task_struct *prev_p,
+ mcl->op = __HYPERVISOR_fpu_taskswitch;
+ mcl->args[0] = 1;
+ mcl++;
+- }
++ } else
++ prev_p->fpu_counter = 0;
+
+ /*
+ * Reload esp0, LDT and the page table pointer:
+@@ -576,21 +612,29 @@ __switch_to(struct task_struct *prev_p,
+ write_pda(oldrsp, next->userrsp);
+ write_pda(pcurrent, next_p);
+ write_pda(kernelstack,
+- task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
++ (unsigned long)task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
++#ifdef CONFIG_CC_STACKPROTECTOR
++ write_pda(stack_canary, next_p->stack_canary);
++
++ /*
++ * Build time only check to make sure the stack_canary is at
++ * offset 40 in the pda; this is a gcc ABI requirement
++ */
++ BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
++#endif
+
+ /*
+ * Now maybe reload the debug registers
+ */
+- if (unlikely(next->debugreg7)) {
+- set_debugreg(next->debugreg0, 0);
+- set_debugreg(next->debugreg1, 1);
+- set_debugreg(next->debugreg2, 2);
+- set_debugreg(next->debugreg3, 3);
+- /* no 4 and 5 */
+- set_debugreg(next->debugreg6, 6);
+- set_debugreg(next->debugreg7, 7);
+- }
++ if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
++ __switch_to_xtra(prev_p, next_p);
+
++ /* If the task has used fpu the last 5 timeslices, just do a full
++ * restore of the math state immediately to avoid the trap; the
++ * chances of needing FPU soon are obviously high now
++ */
++ if (next_p->fpu_counter>5)
++ math_state_restore();
+ return prev_p;
+ }
+
+@@ -810,7 +854,7 @@ int dump_task_regs(struct task_struct *t
+
+ unsigned long arch_align_stack(unsigned long sp)
+ {
+- if (randomize_va_space)
++ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ sp -= get_random_int() % 8192;
+ return sp & ~0xf;
+ }
+Index: 10.3-2007-11-26/arch/x86_64/kernel/setup-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/setup-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/setup-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -118,16 +118,6 @@ EXPORT_SYMBOL(boot_cpu_data);
+
+ unsigned long mmu_cr4_features;
+
+-int acpi_disabled;
+-EXPORT_SYMBOL(acpi_disabled);
+-#ifdef CONFIG_ACPI
+-extern int __initdata acpi_ht;
+-extern acpi_interrupt_flags acpi_sci_flags;
+-int __initdata acpi_force = 0;
+-#endif
+-
+-int acpi_numa __initdata;
+-
+ /* Boot loader ID as an integer, for the benefit of proc_dointvec */
+ int bootloader_type;
+
+@@ -151,10 +141,6 @@ struct sys_desc_table_struct {
+
+ struct edid_info edid_info;
+ EXPORT_SYMBOL_GPL(edid_info);
+-struct e820map e820;
+-#ifdef CONFIG_XEN
+-struct e820map machine_e820;
+-#endif
+
+ extern int root_mountflags;
+
+@@ -181,9 +167,6 @@ struct resource standard_io_resources[]
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO }
+ };
+
+-#define STANDARD_IO_RESOURCES \
+- (sizeof standard_io_resources / sizeof standard_io_resources[0])
+-
+ #define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
+
+ struct resource data_resource = {
+@@ -230,9 +213,6 @@ static struct resource adapter_rom_resou
+ .flags = IORESOURCE_ROM }
+ };
+
+-#define ADAPTER_ROM_RESOURCES \
+- (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
+-
+ static struct resource video_rom_resource = {
+ .name = "Video ROM",
+ .start = 0xc0000,
+@@ -309,7 +289,8 @@ static void __init probe_roms(void)
+ }
+
+ /* check for adapter roms on 2k boundaries */
+- for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
++ for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper;
++ start += 2048) {
+ rom = isa_bus_to_virt(start);
+ if (!romsignature(rom))
+ continue;
+@@ -329,186 +310,21 @@ static void __init probe_roms(void)
+ }
+ }
+
+-/* Check for full argument with no trailing characters */
+-static int fullarg(char *p, char *arg)
++#ifdef CONFIG_PROC_VMCORE
++/* elfcorehdr= specifies the location of elf core header
++ * stored by the crashed kernel. This option will be passed
++ * by kexec loader to the capture kernel.
++ */
++static int __init setup_elfcorehdr(char *arg)
+ {
+- int l = strlen(arg);
+- return !memcmp(p, arg, l) && (p[l] == 0 || isspace(p[l]));
++ char *end;
++ if (!arg)
++ return -EINVAL;
++ elfcorehdr_addr = memparse(arg, &end);
++ return end > arg ? 0 : -EINVAL;
+ }
+-
+-static __init void parse_cmdline_early (char ** cmdline_p)
+-{
+- char c = ' ', *to = command_line, *from = COMMAND_LINE;
+- int len = 0;
+- int userdef = 0;
+-
+- for (;;) {
+- if (c != ' ')
+- goto next_char;
+-
+-#ifdef CONFIG_SMP
+- /*
+- * If the BIOS enumerates physical processors before logical,
+- * maxcpus=N at enumeration-time can be used to disable HT.
+- */
+- else if (!memcmp(from, "maxcpus=", 8)) {
+- extern unsigned int maxcpus;
+-
+- maxcpus = simple_strtoul(from + 8, NULL, 0);
+- }
+-#endif
+-#ifdef CONFIG_ACPI
+- /* "acpi=off" disables both ACPI table parsing and interpreter init */
+- if (fullarg(from,"acpi=off"))
+- disable_acpi();
+-
+- if (fullarg(from, "acpi=force")) {
+- /* add later when we do DMI horrors: */
+- acpi_force = 1;
+- acpi_disabled = 0;
+- }
+-
+- /* acpi=ht just means: do ACPI MADT parsing
+- at bootup, but don't enable the full ACPI interpreter */
+- if (fullarg(from, "acpi=ht")) {
+- if (!acpi_force)
+- disable_acpi();
+- acpi_ht = 1;
+- }
+- else if (fullarg(from, "pci=noacpi"))
+- acpi_disable_pci();
+- else if (fullarg(from, "acpi=noirq"))
+- acpi_noirq_set();
+-
+- else if (fullarg(from, "acpi_sci=edge"))
+- acpi_sci_flags.trigger = 1;
+- else if (fullarg(from, "acpi_sci=level"))
+- acpi_sci_flags.trigger = 3;
+- else if (fullarg(from, "acpi_sci=high"))
+- acpi_sci_flags.polarity = 1;
+- else if (fullarg(from, "acpi_sci=low"))
+- acpi_sci_flags.polarity = 3;
+-
+- /* acpi=strict disables out-of-spec workarounds */
+- else if (fullarg(from, "acpi=strict")) {
+- acpi_strict = 1;
+- }
+-#ifdef CONFIG_X86_IO_APIC
+- else if (fullarg(from, "acpi_skip_timer_override"))
+- acpi_skip_timer_override = 1;
+-#endif
+-#endif
+-
+-#ifndef CONFIG_XEN
+- if (fullarg(from, "nolapic") || fullarg(from, "disableapic")) {
+- clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
+- disable_apic = 1;
+- }
+-
+- if (fullarg(from, "noapic"))
+- skip_ioapic_setup = 1;
+-
+- if (fullarg(from,"apic")) {
+- skip_ioapic_setup = 0;
+- ioapic_force = 1;
+- }
++early_param("elfcorehdr", setup_elfcorehdr);
+ #endif
+-
+- if (!memcmp(from, "mem=", 4))
+- parse_memopt(from+4, &from);
+-
+- if (!memcmp(from, "memmap=", 7)) {
+- /* exactmap option is for used defined memory */
+- if (!memcmp(from+7, "exactmap", 8)) {
+-#ifdef CONFIG_CRASH_DUMP
+- /* If we are doing a crash dump, we
+- * still need to know the real mem
+- * size before original memory map is
+- * reset.
+- */
+- saved_max_pfn = e820_end_of_ram();
+-#endif
+- from += 8+7;
+- end_pfn_map = 0;
+- e820.nr_map = 0;
+- userdef = 1;
+- }
+- else {
+- parse_memmapopt(from+7, &from);
+- userdef = 1;
+- }
+- }
+-
+-#ifdef CONFIG_NUMA
+- if (!memcmp(from, "numa=", 5))
+- numa_setup(from+5);
+-#endif
+-
+- if (!memcmp(from,"iommu=",6)) {
+- iommu_setup(from+6);
+- }
+-
+- if (fullarg(from,"oops=panic"))
+- panic_on_oops = 1;
+-
+- if (!memcmp(from, "noexec=", 7))
+- nonx_setup(from + 7);
+-
+-#ifdef CONFIG_KEXEC
+- /* crashkernel=size@addr specifies the location to reserve for
+- * a crash kernel. By reserving this memory we guarantee
+- * that linux never set's it up as a DMA target.
+- * Useful for holding code to do something appropriate
+- * after a kernel panic.
+- */
+- else if (!memcmp(from, "crashkernel=", 12)) {
+-#ifndef CONFIG_XEN
+- unsigned long size, base;
+- size = memparse(from+12, &from);
+- if (*from == '@') {
+- base = memparse(from+1, &from);
+- /* FIXME: Do I want a sanity check
+- * to validate the memory range?
+- */
+- crashk_res.start = base;
+- crashk_res.end = base + size - 1;
+- }
+-#else
+- printk("Ignoring crashkernel command line, "
+- "parameter will be supplied by xen\n");
+-#endif
+- }
+-#endif
+-
+-#ifdef CONFIG_PROC_VMCORE
+- /* elfcorehdr= specifies the location of elf core header
+- * stored by the crashed kernel. This option will be passed
+- * by kexec loader to the capture kernel.
+- */
+- else if(!memcmp(from, "elfcorehdr=", 11))
+- elfcorehdr_addr = memparse(from+11, &from);
+-#endif
+-
+-#if defined(CONFIG_HOTPLUG_CPU) && !defined(CONFIG_XEN)
+- else if (!memcmp(from, "additional_cpus=", 16))
+- setup_additional_cpus(from+16);
+-#endif
+-
+- next_char:
+- c = *(from++);
+- if (!c)
+- break;
+- if (COMMAND_LINE_SIZE <= ++len)
+- break;
+- *(to++) = c;
+- }
+- if (userdef) {
+- printk(KERN_INFO "user-defined physical RAM map:\n");
+- e820_print_map("user");
+- }
+- *to = '\0';
+- *cmdline_p = command_line;
+-}
+
+ #ifndef CONFIG_NUMA
+ static void __init
+@@ -521,10 +337,11 @@ contig_initmem_init(unsigned long start_
+ if (bootmap == -1L)
+ panic("Cannot find bootmem map of size %ld\n",bootmap_size);
+ bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
++ e820_register_active_regions(0, start_pfn, end_pfn);
+ #ifdef CONFIG_XEN
+- e820_bootmem_free(NODE_DATA(0), 0, xen_start_info->nr_pages<<PAGE_SHIFT);
++ free_bootmem_with_active_regions(0, xen_start_info->nr_pages);
+ #else
+- e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
++ free_bootmem_with_active_regions(0, end_pfn);
+ #endif
+ reserve_bootmem(bootmap, bootmap_size);
+ }
+@@ -587,6 +404,10 @@ static void discover_ebda(void)
+ void __init setup_arch(char **cmdline_p)
+ {
+ #ifdef CONFIG_XEN
++ extern struct e820map machine_e820;
++
++ printk(KERN_INFO "Command line: %s\n", saved_command_line);
++
+ /* Register a call for panic conditions. */
+ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
+
+@@ -633,6 +454,8 @@ void __init setup_arch(char **cmdline_p)
+
+ ARCH_SETUP
+ #else
++ printk(KERN_INFO "Command line: %s\n", saved_command_line);
++
+ ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
+ screen_info = SCREEN_INFO;
+ edid_info = EDID_INFO;
+@@ -660,16 +483,22 @@ void __init setup_arch(char **cmdline_p)
+ data_resource.start = virt_to_phys(&_etext);
+ data_resource.end = virt_to_phys(&_edata)-1;
+
+- parse_cmdline_early(cmdline_p);
+-
+ early_identify_cpu(&boot_cpu_data);
+
++ strlcpy(command_line, saved_command_line, COMMAND_LINE_SIZE);
++ *cmdline_p = command_line;
++
++ parse_early_param();
++
++ finish_e820_parsing();
++
++ e820_register_active_regions(0, 0, -1UL);
+ /*
+ * partially used pages are not usable - thus
+ * we are rounding upwards:
+ */
+ end_pfn = e820_end_of_ram();
+- num_physpages = end_pfn; /* for pfn_valid */
++ num_physpages = end_pfn;
+
+ check_efer();
+
+@@ -680,6 +509,14 @@ void __init setup_arch(char **cmdline_p)
+ if (is_initial_xendomain())
+ dmi_scan_machine();
+
++ /* How many end-of-memory variables you have, grandma! */
++ max_low_pfn = end_pfn;
++ max_pfn = end_pfn;
++ high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;
++
++ /* Remove active ranges so rediscovery with NUMA-awareness happens */
++ remove_all_active_ranges();
++
+ #ifdef CONFIG_ACPI_NUMA
+ /*
+ * Parse SRAT to discover nodes.
+@@ -838,16 +675,16 @@ void __init setup_arch(char **cmdline_p)
+
+ }
+
++#ifdef CONFIG_ACPI
+ if (!is_initial_xendomain()) {
+ acpi_disabled = 1;
+-#ifdef CONFIG_ACPI
+ acpi_ht = 0;
+-#endif
+ }
+ #endif
++#endif
+
+-#ifndef CONFIG_XEN
+- check_ioapic();
++#if defined(CONFIG_PCI) && !defined(CONFIG_XEN)
++ early_quirks();
+ #endif
+
+ zap_low_mappings(0);
+@@ -907,6 +744,7 @@ void __init setup_arch(char **cmdline_p)
+ }
+ #else
+ e820_reserve_resources(e820.map, e820.nr_map);
++ e820_mark_nosave_regions();
+ #endif
+
+ request_resource(&iomem_resource, &video_ram_resource);
+@@ -914,7 +752,7 @@ void __init setup_arch(char **cmdline_p)
+ {
+ unsigned i;
+ /* request I/O space for devices used on all i[345]86 PCs */
+- for (i = 0; i < STANDARD_IO_RESOURCES; i++)
++ for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
+ request_resource(&ioport_resource, &standard_io_resources[i]);
+ }
+
+@@ -1099,7 +937,7 @@ static void __init amd_detect_cmp(struct
+ #endif
+ }
+
+-static void __init init_amd(struct cpuinfo_x86 *c)
++static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+ {
+ unsigned level;
+
+@@ -1155,6 +993,12 @@ static void __init init_amd(struct cpuin
+
+ /* Fix cpuid4 emulation for more */
+ num_cache_leaves = 3;
++
++ /* When there is only one core no need to synchronize RDTSC */
++ if (num_possible_cpus() == 1)
++ set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
++ else
++ clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+ }
+
+ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+@@ -1236,8 +1080,7 @@ static void srat_detect_node(void)
+ node = first_node(node_online_map);
+ numa_set_node(cpu, node);
+
+- if (acpi_numa > 0)
+- printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
++ printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+ #endif
+ }
+
+@@ -1271,6 +1114,8 @@ static void __cpuinit init_intel(struct
+ if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+ (c->x86 == 0x6 && c->x86_model >= 0x0e))
+ set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
++ if (c->x86 == 6)
++ set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
+ set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+ c->x86_max_cores = intel_num_cpu_cores(c);
+
+@@ -1489,8 +1334,8 @@ static int show_cpuinfo(struct seq_file
+
+ /* Intel-defined (#2) */
+ "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
+- "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
+- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
++ NULL, NULL, "dca", NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* VIA/Cyrix/Centaur-defined */
+Index: 10.3-2007-11-26/arch/x86_64/kernel/setup64-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/setup64-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/setup64-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -31,7 +31,7 @@
+ #include <asm/hypervisor.h>
+ #endif
+
+-char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
++char x86_boot_params[BOOT_PARAM_SIZE] __initdata;
+
+ cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
+
+@@ -55,8 +55,10 @@ Control non executable mappings for 64bi
+ on Enable(default)
+ off Disable
+ */
+-int __init nonx_setup(char *str)
++static int __init nonx_setup(char *str)
+ {
++ if (!str)
++ return -EINVAL;
+ if (!strncmp(str, "on", 2)) {
+ __supported_pte_mask |= _PAGE_NX;
+ do_not_nx = 0;
+@@ -64,9 +66,9 @@ int __init nonx_setup(char *str)
+ do_not_nx = 1;
+ __supported_pte_mask &= ~_PAGE_NX;
+ }
+- return 1;
++ return 0;
+ }
+-__setup("noexec=", nonx_setup); /* parsed early actually */
++early_param("noexec", nonx_setup);
+
+ int force_personality32 = 0;
+
+@@ -102,12 +104,9 @@ void __init setup_per_cpu_areas(void)
+ #endif
+
+ /* Copy section for each CPU (we discard the original) */
+- size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
+-#ifdef CONFIG_MODULES
+- if (size < PERCPU_ENOUGH_ROOM)
+- size = PERCPU_ENOUGH_ROOM;
+-#endif
++ size = PERCPU_ENOUGH_ROOM;
+
++ printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
+ for_each_cpu_mask (i, cpu_possible_map) {
+ char *ptr;
+
+@@ -169,7 +168,10 @@ void pda_init(int cpu)
+ /* Setup up data that may be needed in __get_free_pages early */
+ asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
+ #ifndef CONFIG_XEN
++ /* Memory clobbers used to order PDA accessed */
++ mb();
+ wrmsrl(MSR_GS_BASE, pda);
++ mb();
+ #else
+ HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL, (unsigned long)pda);
+ #endif
+@@ -234,6 +236,8 @@ void __cpuinit check_efer(void)
+ }
+ }
+
++unsigned long kernel_eflags;
++
+ /*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+@@ -298,28 +302,17 @@ void __cpuinit cpu_init (void)
+ * set up and load the per-CPU TSS
+ */
+ for (v = 0; v < N_EXCEPTION_STACKS; v++) {
++ static const unsigned int order[N_EXCEPTION_STACKS] = {
++ [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
++ [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
++ };
+ if (cpu) {
+- static const unsigned int order[N_EXCEPTION_STACKS] = {
+- [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
+- [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
+- };
+-
+ estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
+ if (!estacks)
+ panic("Cannot allocate exception stack %ld %d\n",
+ v, cpu);
+ }
+- switch (v + 1) {
+-#if DEBUG_STKSZ > EXCEPTION_STKSZ
+- case DEBUG_STACK:
+- cpu_pda(cpu)->debugstack = (unsigned long)estacks;
+- estacks += DEBUG_STKSZ;
+- break;
+-#endif
+- default:
+- estacks += EXCEPTION_STKSZ;
+- break;
+- }
++ estacks += PAGE_SIZE << order[v];
+ orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
+ }
+
+@@ -358,4 +351,6 @@ void __cpuinit cpu_init (void)
+ set_debugreg(0UL, 7);
+
+ fpu_init();
++
++ raw_local_save_flags(kernel_eflags);
+ }
+Index: 10.3-2007-11-26/arch/x86_64/kernel/smp-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/smp-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/smp-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -394,9 +394,8 @@ int smp_call_function_single (int cpu, v
+ /* prevent preemption and reschedule on another processor */
+ int me = get_cpu();
+ if (cpu == me) {
+- WARN_ON(1);
+ put_cpu();
+- return -EBUSY;
++ return 0;
+ }
+ spin_lock_bh(&call_lock);
+ __smp_call_function_single(cpu, func, info, nonatomic, wait);
+@@ -526,7 +525,7 @@ void smp_send_stop(void)
+ #ifndef CONFIG_XEN
+ asmlinkage void smp_reschedule_interrupt(void)
+ #else
+-asmlinkage irqreturn_t smp_reschedule_interrupt(void)
++asmlinkage irqreturn_t smp_reschedule_interrupt(int irq, void *ctx)
+ #endif
+ {
+ #ifndef CONFIG_XEN
+@@ -539,7 +538,7 @@ asmlinkage irqreturn_t smp_reschedule_in
+ #ifndef CONFIG_XEN
+ asmlinkage void smp_call_function_interrupt(void)
+ #else
+-asmlinkage irqreturn_t smp_call_function_interrupt(void)
++asmlinkage irqreturn_t smp_call_function_interrupt(int irq, void *ctx)
+ #endif
+ {
+ void (*func) (void *info) = call_data->func;
+@@ -570,31 +569,3 @@ asmlinkage irqreturn_t smp_call_function
+ return IRQ_HANDLED;
+ #endif
+ }
+-
+-int safe_smp_processor_id(void)
+-{
+-#ifdef CONFIG_XEN
+- return smp_processor_id();
+-#else
+- unsigned apicid, i;
+-
+- if (disable_apic)
+- return 0;
+-
+- apicid = hard_smp_processor_id();
+- if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
+- return apicid;
+-
+- for (i = 0; i < NR_CPUS; ++i) {
+- if (x86_cpu_to_apicid[i] == apicid)
+- return i;
+- }
+-
+- /* No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
+- * or called too early. Either way, we must be CPU 0. */
+- if (x86_cpu_to_apicid[0] == BAD_APICID)
+- return 0;
+-
+- return 0; /* Should not happen */
+-#endif
+-}
+Index: 10.3-2007-11-26/arch/x86_64/kernel/traps-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/traps-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/traps-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -23,6 +23,7 @@
+ #include <linux/delay.h>
+ #include <linux/spinlock.h>
+ #include <linux/interrupt.h>
++#include <linux/kallsyms.h>
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
+ #include <linux/nmi.h>
+@@ -45,6 +46,7 @@
+ #include <asm/pda.h>
+ #include <asm/proto.h>
+ #include <asm/nmi.h>
++#include <asm/stacktrace.h>
+
+ asmlinkage void divide_error(void);
+ asmlinkage void debug(void);
+@@ -114,7 +116,6 @@ static int call_trace = 1;
+ #endif
+
+ #ifdef CONFIG_KALLSYMS
+-# include <linux/kallsyms.h>
+ void printk_address(unsigned long address)
+ {
+ unsigned long offset = 0, symsize;
+@@ -142,7 +143,7 @@ void printk_address(unsigned long addres
+ #endif
+
+ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
+- unsigned *usedp, const char **idp)
++ unsigned *usedp, char **idp)
+ {
+ #ifndef CONFIG_X86_NO_TSS
+ static char ids[][8] = {
+@@ -162,26 +163,7 @@ static unsigned long *in_exception_stack
+ * 'stack' is in one of them:
+ */
+ for (k = 0; k < N_EXCEPTION_STACKS; k++) {
+- unsigned long end;
+-
+- /*
+- * set 'end' to the end of the exception stack.
+- */
+- switch (k + 1) {
+- /*
+- * TODO: this block is not needed i think, because
+- * setup64.c:cpu_init() sets up t->ist[DEBUG_STACK]
+- * properly too.
+- */
+-#if DEBUG_STKSZ > EXCEPTION_STKSZ
+- case DEBUG_STACK:
+- end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
+- break;
+-#endif
+- default:
+- end = per_cpu(orig_ist, cpu).ist[k];
+- break;
+- }
++ unsigned long end = per_cpu(orig_ist, cpu).ist[k];
+ /*
+ * Is 'stack' above this exception frame's end?
+ * If yes then skip to the next frame.
+@@ -236,13 +218,19 @@ static unsigned long *in_exception_stack
+ return NULL;
+ }
+
+-static int show_trace_unwind(struct unwind_frame_info *info, void *context)
++struct ops_and_data {
++ struct stacktrace_ops *ops;
++ void *data;
++};
++
++static int dump_trace_unwind(struct unwind_frame_info *info, void *context)
+ {
++ struct ops_and_data *oad = (struct ops_and_data *)context;
+ int n = 0;
+
+ while (unwind(info) == 0 && UNW_PC(info)) {
+ n++;
+- printk_address(UNW_PC(info));
++ oad->ops->address(oad->data, UNW_PC(info));
+ if (arch_unw_user_mode(info))
+ break;
+ }
+@@ -256,13 +244,19 @@ static int show_trace_unwind(struct unwi
+ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
+ */
+
+-void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack)
++static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
+ {
+- const unsigned cpu = safe_smp_processor_id();
++ void *t = (void *)tinfo;
++ return p > t && p < t + THREAD_SIZE - 3;
++}
++
++void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack,
++ struct stacktrace_ops *ops, void *data)
++{
++ const unsigned cpu = smp_processor_id();
+ unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
+ unsigned used = 0;
+-
+- printk("\nCall Trace:\n");
++ struct thread_info *tinfo;
+
+ if (!tsk)
+ tsk = current;
+@@ -270,32 +264,47 @@ void show_trace(struct task_struct *tsk,
+ if (call_trace >= 0) {
+ int unw_ret = 0;
+ struct unwind_frame_info info;
++ struct ops_and_data oad = { .ops = ops, .data = data };
+
+ if (regs) {
+ if (unwind_init_frame_info(&info, tsk, regs) == 0)
+- unw_ret = show_trace_unwind(&info, NULL);
++ unw_ret = dump_trace_unwind(&info, &oad);
+ } else if (tsk == current)
+- unw_ret = unwind_init_running(&info, show_trace_unwind, NULL);
++ unw_ret = unwind_init_running(&info, dump_trace_unwind, &oad);
+ else {
+ if (unwind_init_blocked(&info, tsk) == 0)
+- unw_ret = show_trace_unwind(&info, NULL);
++ unw_ret = dump_trace_unwind(&info, &oad);
+ }
+ if (unw_ret > 0) {
+ if (call_trace == 1 && !arch_unw_user_mode(&info)) {
+- print_symbol("DWARF2 unwinder stuck at %s\n",
++ ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
+ UNW_PC(&info));
+ if ((long)UNW_SP(&info) < 0) {
+- printk("Leftover inexact backtrace:\n");
++ ops->warning(data, "Leftover inexact backtrace:\n");
+ stack = (unsigned long *)UNW_SP(&info);
++ if (!stack)
++ return;
+ } else
+- printk("Full inexact backtrace again:\n");
++ ops->warning(data, "Full inexact backtrace again:\n");
+ } else if (call_trace >= 1)
+ return;
+ else
+- printk("Full inexact backtrace again:\n");
++ ops->warning(data, "Full inexact backtrace again:\n");
+ } else
+- printk("Inexact backtrace:\n");
++ ops->warning(data, "Inexact backtrace:\n");
++ }
++ if (!stack) {
++ unsigned long dummy;
++ stack = &dummy;
++ if (tsk && tsk != current)
++ stack = (unsigned long *)tsk->thread.rsp;
+ }
++ /*
++ * Align the stack pointer on word boundary, later loops
++ * rely on that (and corruption / debug info bugs can cause
++ * unaligned values here):
++ */
++ stack = (unsigned long *)((unsigned long)stack & ~(sizeof(long)-1));
+
+ /*
+ * Print function call entries within a stack. 'cond' is the
+@@ -305,7 +314,9 @@ void show_trace(struct task_struct *tsk,
+ #define HANDLE_STACK(cond) \
+ do while (cond) { \
+ unsigned long addr = *stack++; \
+- if (kernel_text_address(addr)) { \
++ if (oops_in_progress ? \
++ __kernel_text_address(addr) : \
++ kernel_text_address(addr)) { \
+ /* \
+ * If the address is either in the text segment of the \
+ * kernel, or in the region which contains vmalloc'ed \
+@@ -314,7 +325,7 @@ void show_trace(struct task_struct *tsk,
+ * down the cause of the crash will be able to figure \
+ * out the call path that was taken. \
+ */ \
+- printk_address(addr); \
++ ops->address(data, addr); \
+ } \
+ } while (0)
+
+@@ -323,16 +334,17 @@ void show_trace(struct task_struct *tsk,
+ * current stack address. If the stacks consist of nested
+ * exceptions
+ */
+- for ( ; ; ) {
+- const char *id;
++ for (;;) {
++ char *id;
+ unsigned long *estack_end;
+ estack_end = in_exception_stack(cpu, (unsigned long)stack,
+ &used, &id);
+
+ if (estack_end) {
+- printk(" <%s>", id);
++ if (ops->stack(data, id) < 0)
++ break;
+ HANDLE_STACK (stack < estack_end);
+- printk(" <EOE>");
++ ops->stack(data, "<EOE>");
+ /*
+ * We link to the next stack via the
+ * second-to-last pointer (index -2 to end) in the
+@@ -347,7 +359,8 @@ void show_trace(struct task_struct *tsk,
+ (IRQSTACKSIZE - 64) / sizeof(*irqstack);
+
+ if (stack >= irqstack && stack < irqstack_end) {
+- printk(" <IRQ>");
++ if (ops->stack(data, "IRQ") < 0)
++ break;
+ HANDLE_STACK (stack < irqstack_end);
+ /*
+ * We link to the next stack (which would be
+@@ -356,7 +369,7 @@ void show_trace(struct task_struct *tsk,
+ */
+ stack = (unsigned long *) (irqstack_end[-1]);
+ irqstack_end = NULL;
+- printk(" <EOI>");
++ ops->stack(data, "EOI");
+ continue;
+ }
+ }
+@@ -364,19 +377,58 @@ void show_trace(struct task_struct *tsk,
+ }
+
+ /*
+- * This prints the process stack:
++ * This handles the process stack:
+ */
+- HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
++ tinfo = current_thread_info();
++ HANDLE_STACK (valid_stack_ptr(tinfo, stack));
+ #undef HANDLE_STACK
++}
++EXPORT_SYMBOL(dump_trace);
++
++static void
++print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
++{
++ print_symbol(msg, symbol);
++ printk("\n");
++}
++
++static void print_trace_warning(void *data, char *msg)
++{
++ printk("%s\n", msg);
++}
++
++static int print_trace_stack(void *data, char *name)
++{
++ printk(" <%s> ", name);
++ return 0;
++}
++
++static void print_trace_address(void *data, unsigned long addr)
++{
++ printk_address(addr);
++}
++
++static struct stacktrace_ops print_trace_ops = {
++ .warning = print_trace_warning,
++ .warning_symbol = print_trace_warning_symbol,
++ .stack = print_trace_stack,
++ .address = print_trace_address,
++};
+
++void
++show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack)
++{
++ printk("\nCall Trace:\n");
++ dump_trace(tsk, regs, stack, &print_trace_ops, NULL);
+ printk("\n");
+ }
+
+-static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long * rsp)
++static void
++_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
+ {
+ unsigned long *stack;
+ int i;
+- const int cpu = safe_smp_processor_id();
++ const int cpu = smp_processor_id();
+ unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
+ unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
+
+@@ -430,7 +482,7 @@ void show_registers(struct pt_regs *regs
+ int i;
+ int in_kernel = !user_mode(regs);
+ unsigned long rsp;
+- const int cpu = safe_smp_processor_id();
++ const int cpu = smp_processor_id();
+ struct task_struct *cur = cpu_pda(cpu)->pcurrent;
+
+ rsp = regs->rsp;
+@@ -505,9 +557,11 @@ static unsigned int die_nest_count;
+
+ unsigned __kprobes long oops_begin(void)
+ {
+- int cpu = safe_smp_processor_id();
++ int cpu = smp_processor_id();
+ unsigned long flags;
+
++ oops_enter();
++
+ /* racy, but better than risking deadlock. */
+ local_irq_save(flags);
+ if (!spin_trylock(&die_lock)) {
+@@ -536,6 +590,7 @@ void __kprobes oops_end(unsigned long fl
+ spin_unlock_irqrestore(&die_lock, flags);
+ if (panic_on_oops)
+ panic("Fatal exception");
++ oops_exit();
+ }
+
+ void __kprobes __die(const char * str, struct pt_regs * regs, long err)
+@@ -573,7 +628,7 @@ void die(const char * str, struct pt_reg
+ }
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+-void __kprobes die_nmi(char *str, struct pt_regs *regs)
++void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
+ {
+ unsigned long flags = oops_begin();
+
+@@ -581,13 +636,12 @@ void __kprobes die_nmi(char *str, struct
+ * We are in trouble anyway, lets at least try
+ * to get a message out.
+ */
+- printk(str, safe_smp_processor_id());
++ printk(str, smp_processor_id());
+ show_registers(regs);
+ if (kexec_should_crash(current))
+ crash_kexec(regs);
+- if (panic_on_timeout || panic_on_oops)
+- panic("nmi watchdog");
+- printk("console shuts up ...\n");
++ if (do_panic || panic_on_oops)
++ panic("Non maskable interrupt");
+ oops_end(flags);
+ nmi_exit();
+ local_irq_enable();
+@@ -734,8 +788,15 @@ asmlinkage void __kprobes do_general_pro
+ static __kprobes void
+ mem_parity_error(unsigned char reason, struct pt_regs * regs)
+ {
+- printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
+- printk("You probably have a hardware problem with your RAM chips\n");
++ printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
++ reason);
++ printk(KERN_EMERG "You probably have a hardware problem with your "
++ "RAM chips\n");
++
++ if (panic_on_unrecovered_nmi)
++ panic("NMI: Not continuing");
++
++ printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
+
+ #if 0 /* XEN */
+ /* Clear and disable the memory parity error line. */
+@@ -762,9 +823,15 @@ io_check_error(unsigned char reason, str
+
+ static __kprobes void
+ unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+-{ printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
+- printk("Dazed and confused, but trying to continue\n");
+- printk("Do you have a strange power saving mode enabled?\n");
++{
++ printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
++ reason);
++ printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
++
++ if (panic_on_unrecovered_nmi)
++ panic("NMI: Not continuing");
++
++ printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
+ }
+
+ /* Runs on IST stack. This code must keep interrupts off all the time.
+@@ -789,12 +856,12 @@ asmlinkage __kprobes void default_do_nmi
+ * Ok, so this is none of the documented NMI sources,
+ * so it must be the NMI watchdog.
+ */
+- if (nmi_watchdog > 0) {
+- nmi_watchdog_tick(regs,reason);
++ if (nmi_watchdog_tick(regs,reason))
+ return;
+- }
+ #endif
+- unknown_nmi_error(reason, regs);
++ if (!do_nmi_callback(regs,cpu))
++ unknown_nmi_error(reason, regs);
++
+ return;
+ }
+ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
+@@ -1081,6 +1148,7 @@ asmlinkage void math_state_restore(void)
+ init_fpu(me);
+ restore_fpu_checking(&me->thread.i387.fxsave);
+ task_thread_info(me)->status |= TS_USEDFPU;
++ me->fpu_counter++;
+ }
+
+
+@@ -1143,24 +1211,30 @@ void smp_trap_init(trap_info_t *trap_ctx
+ }
+
+
+-/* Actual parsing is done early in setup.c. */
+-static int __init oops_dummy(char *s)
++static int __init oops_setup(char *s)
+ {
+- panic_on_oops = 1;
+- return 1;
++ if (!s)
++ return -EINVAL;
++ if (!strcmp(s, "panic"))
++ panic_on_oops = 1;
++ return 0;
+ }
+-__setup("oops=", oops_dummy);
++early_param("oops", oops_setup);
+
+ static int __init kstack_setup(char *s)
+ {
++ if (!s)
++ return -EINVAL;
+ kstack_depth_to_print = simple_strtoul(s,NULL,0);
+- return 1;
++ return 0;
+ }
+-__setup("kstack=", kstack_setup);
++early_param("kstack", kstack_setup);
+
+ #ifdef CONFIG_STACK_UNWIND
+ static int __init call_trace_setup(char *s)
+ {
++ if (!s)
++ return -EINVAL;
+ if (strcmp(s, "old") == 0)
+ call_trace = -1;
+ else if (strcmp(s, "both") == 0)
+@@ -1169,7 +1243,7 @@ static int __init call_trace_setup(char
+ call_trace = 1;
+ else if (strcmp(s, "new") == 0)
+ call_trace = 2;
+- return 1;
++ return 0;
+ }
+-__setup("call_trace=", call_trace_setup);
++early_param("call_trace", call_trace_setup);
+ #endif
+Index: 10.3-2007-11-26/arch/x86_64/kernel/vsyscall-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/vsyscall-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/vsyscall-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -26,6 +26,10 @@
+ #include <linux/seqlock.h>
+ #include <linux/jiffies.h>
+ #include <linux/sysctl.h>
++#include <linux/getcpu.h>
++#include <linux/cpu.h>
++#include <linux/smp.h>
++#include <linux/notifier.h>
+
+ #include <asm/vsyscall.h>
+ #include <asm/pgtable.h>
+@@ -33,11 +37,15 @@
+ #include <asm/fixmap.h>
+ #include <asm/errno.h>
+ #include <asm/io.h>
++#include <asm/segment.h>
++#include <asm/desc.h>
++#include <asm/topology.h>
+
+ #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
+
+ int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
+ seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
++int __vgetcpu_mode __section_vgetcpu_mode;
+
+ #include <asm/unistd.h>
+
+@@ -61,8 +69,7 @@ static __always_inline void do_vgettimeo
+ sequence = read_seqbegin(&__xtime_lock);
+
+ sec = __xtime.tv_sec;
+- usec = (__xtime.tv_nsec / 1000) +
+- (__jiffies - __wall_jiffies) * (1000000 / HZ);
++ usec = __xtime.tv_nsec / 1000;
+
+ if (__vxtime.mode != VXTIME_HPET) {
+ t = get_cycles_sync();
+@@ -72,7 +79,8 @@ static __always_inline void do_vgettimeo
+ __vxtime.tsc_quot) >> 32;
+ /* See comment in x86_64 do_gettimeofday. */
+ } else {
+- usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
++ usec += ((readl((void __iomem *)
++ fix_to_virt(VSYSCALL_HPET) + 0xf0) -
+ __vxtime.last) * __vxtime.quot) >> 32;
+ }
+ } while (read_seqretry(&__xtime_lock, sequence));
+@@ -127,9 +135,46 @@ time_t __vsyscall(1) vtime(time_t *t)
+ return __xtime.tv_sec;
+ }
+
+-long __vsyscall(2) venosys_0(void)
+-{
+- return -ENOSYS;
++/* Fast way to get current CPU and node.
++ This helps to do per node and per CPU caches in user space.
++ The result is not guaranteed without CPU affinity, but usually
++ works out because the scheduler tries to keep a thread on the same
++ CPU.
++
++ tcache must point to a two element sized long array.
++ All arguments can be NULL. */
++long __vsyscall(2)
++vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
++{
++ unsigned int dummy, p;
++ unsigned long j = 0;
++
++ /* Fast cache - only recompute value once per jiffies and avoid
++ relatively costly rdtscp/cpuid otherwise.
++ This works because the scheduler usually keeps the process
++ on the same CPU and this syscall doesn't guarantee its
++ results anyways.
++ We do this here because otherwise user space would do it on
++ its own in a likely inferior way (no access to jiffies).
++ If you don't like it pass NULL. */
++ if (tcache && tcache->blob[0] == (j = __jiffies)) {
++ p = tcache->blob[1];
++ } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
++ /* Load per CPU data from RDTSCP */
++ rdtscp(dummy, dummy, p);
++ } else {
++ /* Load per CPU data from GDT */
++ asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
++ }
++ if (tcache) {
++ tcache->blob[0] = j;
++ tcache->blob[1] = p;
++ }
++ if (cpu)
++ *cpu = p & 0xfff;
++ if (node)
++ *node = p >> 12;
++ return 0;
+ }
+
+ long __vsyscall(3) venosys_1(void)
+@@ -149,7 +194,8 @@ static int vsyscall_sysctl_change(ctl_ta
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+ extern u16 vsysc1, vsysc2;
+- u16 *map1, *map2;
++ u16 __iomem *map1;
++ u16 __iomem *map2;
+ int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ if (!write)
+ return ret;
+@@ -164,11 +210,11 @@ static int vsyscall_sysctl_change(ctl_ta
+ goto out;
+ }
+ if (!sysctl_vsyscall) {
+- *map1 = SYSCALL;
+- *map2 = SYSCALL;
++ writew(SYSCALL, map1);
++ writew(SYSCALL, map2);
+ } else {
+- *map1 = NOP2;
+- *map2 = NOP2;
++ writew(NOP2, map1);
++ writew(NOP2, map2);
+ }
+ iounmap(map2);
+ out:
+@@ -200,6 +246,45 @@ static ctl_table kernel_root_table2[] =
+
+ #endif
+
++/* Assume __initcall executes before all user space. Hopefully kmod
++ doesn't violate that. We'll find out if it does. */
++static void __cpuinit vsyscall_set_cpu(int cpu)
++{
++ unsigned long d;
++ unsigned long node = 0;
++#ifdef CONFIG_NUMA
++ node = cpu_to_node[cpu];
++#endif
++ if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
++ write_rdtscp_aux((node << 12) | cpu);
++
++ /* Store cpu number in limit so that it can be loaded quickly
++ in user space in vgetcpu.
++ 12 bits for the CPU and 8 bits for the node. */
++ d = 0x0f40000000000ULL;
++ d |= cpu;
++ d |= (node & 0xf) << 12;
++ d |= (node >> 4) << 48;
++ HYPERVISOR_update_descriptor(virt_to_machine(&cpu_gdt(cpu)[GDT_ENTRY_PER_CPU]), d);
++}
++
++static void __cpuinit cpu_vsyscall_init(void *arg)
++{
++ /* preemption should be already off */
++ vsyscall_set_cpu(raw_smp_processor_id());
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static int __cpuinit
++cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
++{
++ long cpu = (long)arg;
++ if (action == CPU_ONLINE)
++ smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
++ return NOTIFY_DONE;
++}
++#endif
++
+ static void __init map_vsyscall(void)
+ {
+ extern char __vsyscall_0;
+@@ -225,14 +310,21 @@ static int __init vsyscall_init(void)
+ VSYSCALL_ADDR(__NR_vgettimeofday)));
+ BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
+ BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
++ BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
+ map_vsyscall();
+ #ifdef CONFIG_XEN
+ map_vsyscall_user();
+ sysctl_vsyscall = 0; /* disable vgettimeofay() */
++ if (boot_cpu_has(X86_FEATURE_RDTSCP))
++ vgetcpu_mode = VGETCPU_RDTSCP;
++ else
++ vgetcpu_mode = VGETCPU_LSL;
+ #endif
+ #ifdef CONFIG_SYSCTL
+ register_sysctl_table(kernel_root_table2, 0);
+ #endif
++ on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
++ hotcpu_notifier(cpu_vsyscall_notifier, 0);
+ return 0;
+ }
+
+Index: 10.3-2007-11-26/arch/x86_64/mm/fault-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/mm/fault-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/mm/fault-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -40,8 +40,7 @@
+ #define PF_RSVD (1<<3)
+ #define PF_INSTR (1<<4)
+
+-#ifdef CONFIG_KPROBES
+-ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
++static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
+ /* Hook to register for page fault notifications */
+ int register_page_fault_notifier(struct notifier_block *nb)
+@@ -49,11 +48,13 @@ int register_page_fault_notifier(struct
+ vmalloc_sync_all();
+ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+ }
++EXPORT_SYMBOL_GPL(register_page_fault_notifier);
+
+ int unregister_page_fault_notifier(struct notifier_block *nb)
+ {
+ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+ }
++EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
+
+ static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+@@ -67,13 +68,6 @@ static inline int notify_page_fault(enum
+ };
+ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+ }
+-#else
+-static inline int notify_page_fault(enum die_val val, const char *str,
+- struct pt_regs *regs, long err, int trap, int sig)
+-{
+- return NOTIFY_DONE;
+-}
+-#endif
+
+ void bust_spinlocks(int yes)
+ {
+@@ -102,7 +96,7 @@ void bust_spinlocks(int yes)
+ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
+ unsigned long error_code)
+ {
+- unsigned char *instr;
++ unsigned char __user *instr;
+ int scan_more = 1;
+ int prefetch = 0;
+ unsigned char *max_instr;
+@@ -111,7 +105,7 @@ static noinline int is_prefetch(struct p
+ if (error_code & PF_INSTR)
+ return 0;
+
+- instr = (unsigned char *)convert_rip_to_linear(current, regs);
++ instr = (unsigned char __user *)convert_rip_to_linear(current, regs);
+ max_instr = instr + 15;
+
+ if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
+@@ -122,7 +116,7 @@ static noinline int is_prefetch(struct p
+ unsigned char instr_hi;
+ unsigned char instr_lo;
+
+- if (__get_user(opcode, instr))
++ if (__get_user(opcode, (char __user *)instr))
+ break;
+
+ instr_hi = opcode & 0xf0;
+@@ -160,7 +154,7 @@ static noinline int is_prefetch(struct p
+ case 0x00:
+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
+ scan_more = 0;
+- if (__get_user(opcode, instr))
++ if (__get_user(opcode, (char __user *)instr))
+ break;
+ prefetch = (instr_lo == 0xF) &&
+ (opcode == 0x0D || opcode == 0x18);
+@@ -176,7 +170,7 @@ static noinline int is_prefetch(struct p
+ static int bad_address(void *p)
+ {
+ unsigned long dummy;
+- return __get_user(dummy, (unsigned long *)p);
++ return __get_user(dummy, (unsigned long __user *)p);
+ }
+
+ void dump_pagetable(unsigned long address)
+@@ -248,7 +242,7 @@ static int is_errata93(struct pt_regs *r
+
+ int unhandled_signal(struct task_struct *tsk, int sig)
+ {
+- if (tsk->pid == 1)
++ if (is_init(tsk))
+ return 1;
+ if (tsk->ptrace & PT_PTRACED)
+ return 0;
+@@ -300,7 +294,7 @@ static int vmalloc_fault(unsigned long a
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
+ else
+- BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
++ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+
+ /* Below here mismatches are bugs because these lower tables
+ are shared */
+@@ -309,7 +303,7 @@ static int vmalloc_fault(unsigned long a
+ pud_ref = pud_offset(pgd_ref, address);
+ if (pud_none(*pud_ref))
+ return -1;
+- if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
++ if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
+ BUG();
+ pmd = pmd_offset(pud, address);
+ pmd_ref = pmd_offset(pud_ref, address);
+@@ -531,7 +525,7 @@ good_area:
+ case PF_PROT: /* read, present */
+ goto bad_area;
+ case 0: /* read, not present */
+- if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
++ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+ goto bad_area;
+ }
+
+@@ -647,7 +641,7 @@ no_context:
+ */
+ out_of_memory:
+ up_read(&mm->mmap_sem);
+- if (current->pid == 1) {
++ if (is_init(current)) {
+ yield();
+ goto again;
+ }
+@@ -702,7 +696,7 @@ void vmalloc_sync_all(void)
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
+ else
+- BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
++ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+ }
+ spin_unlock(&pgd_lock);
+ set_bit(pgd_index(address), insync);
+Index: 10.3-2007-11-26/arch/x86_64/mm/init-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/mm/init-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/mm/init-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -61,8 +61,6 @@ EXPORT_SYMBOL(__kernel_page_user);
+
+ extern unsigned long *contiguous_bitmap;
+
+-static unsigned long dma_reserve __initdata;
+-
+ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+ extern unsigned long start_pfn;
+
+@@ -440,7 +438,6 @@ __init void *early_ioremap(unsigned long
+
+ /* actually usually some more */
+ if (size >= LARGE_PAGE_SIZE) {
+- printk("SMBIOS area too long %lu\n", size);
+ return NULL;
+ }
+ set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
+@@ -462,13 +459,15 @@ __init void early_iounmap(void *addr, un
+ #endif
+
+ static void __meminit
+-phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
++phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
+ {
+- int i, k;
++ int i = pmd_index(address);
+
+- for (i = 0; i < PTRS_PER_PMD; pmd++, i++) {
++ for (; i < PTRS_PER_PMD; i++) {
+ unsigned long pte_phys;
++ pmd_t *pmd = pmd_page + pmd_index(address);
+ pte_t *pte, *pte_save;
++ int k;
+
+ if (address >= end) {
+ if (!after_bootmem)
+@@ -476,6 +475,10 @@ phys_pmd_init(pmd_t *pmd, unsigned long
+ set_pmd(pmd, __pmd(0));
+ break;
+ }
++
++ if (pmd_val(*pmd))
++ continue;
++
+ pte = alloc_static_page(&pte_phys);
+ pte_save = pte;
+ for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
+@@ -501,40 +504,35 @@ phys_pmd_init(pmd_t *pmd, unsigned long
+ static void __meminit
+ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
+ {
+- pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
+-
+- if (pmd_none(*pmd)) {
+- spin_lock(&init_mm.page_table_lock);
+- phys_pmd_init(pmd, address, end);
+- spin_unlock(&init_mm.page_table_lock);
+- __flush_tlb_all();
+- }
++ pmd_t *pmd = pmd_offset(pud,0);
++ spin_lock(&init_mm.page_table_lock);
++ phys_pmd_init(pmd, address, end);
++ spin_unlock(&init_mm.page_table_lock);
++ __flush_tlb_all();
+ }
+
+-static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
++static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
+ {
+- long i = pud_index(address);
+-
+- pud = pud + i;
+-
+- if (after_bootmem && pud_val(*pud)) {
+- phys_pmd_update(pud, address, end);
+- return;
+- }
++ int i = pud_index(addr);
+
+- for (; i < PTRS_PER_PUD; pud++, i++) {
+- unsigned long paddr, pmd_phys;
++ for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
++ unsigned long pmd_phys;
++ pud_t *pud = pud_page + pud_index(addr);
+ pmd_t *pmd;
+
+- paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
+- if (paddr >= end)
++ if (addr >= end)
+ break;
+
++ if (pud_val(*pud)) {
++ phys_pmd_update(pud, addr, end);
++ continue;
++ }
++
+ pmd = alloc_static_page(&pmd_phys);
+ early_make_page_readonly(pmd, XENFEAT_writable_page_tables);
+ spin_lock(&init_mm.page_table_lock);
+ set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
+- phys_pmd_init(pmd, paddr, end);
++ phys_pmd_init(pmd, addr, end);
+ spin_unlock(&init_mm.page_table_lock);
+ }
+ __flush_tlb();
+@@ -797,77 +795,19 @@ void __cpuinit zap_low_mappings(int cpu)
+ #endif
+ }
+
+-/* Compute zone sizes for the DMA and DMA32 zones in a node. */
+-__init void
+-size_zones(unsigned long *z, unsigned long *h,
+- unsigned long start_pfn, unsigned long end_pfn)
+-{
+- int i;
+-#ifndef CONFIG_XEN
+- unsigned long w;
+-#endif
+-
+- for (i = 0; i < MAX_NR_ZONES; i++)
+- z[i] = 0;
+-
+-#ifndef CONFIG_XEN
+- if (start_pfn < MAX_DMA_PFN)
+- z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
+- if (start_pfn < MAX_DMA32_PFN) {
+- unsigned long dma32_pfn = MAX_DMA32_PFN;
+- if (dma32_pfn > end_pfn)
+- dma32_pfn = end_pfn;
+- z[ZONE_DMA32] = dma32_pfn - start_pfn;
+- }
+- z[ZONE_NORMAL] = end_pfn - start_pfn;
+-
+- /* Remove lower zones from higher ones. */
+- w = 0;
+- for (i = 0; i < MAX_NR_ZONES; i++) {
+- if (z[i])
+- z[i] -= w;
+- w += z[i];
+- }
+-
+- /* Compute holes */
+- w = start_pfn;
+- for (i = 0; i < MAX_NR_ZONES; i++) {
+- unsigned long s = w;
+- w += z[i];
+- h[i] = e820_hole_size(s, w);
+- }
+-
+- /* Add the space pace needed for mem_map to the holes too. */
+- for (i = 0; i < MAX_NR_ZONES; i++)
+- h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;
+-
+- /* The 16MB DMA zone has the kernel and other misc mappings.
+- Account them too */
+- if (h[ZONE_DMA]) {
+- h[ZONE_DMA] += dma_reserve;
+- if (h[ZONE_DMA] >= z[ZONE_DMA]) {
+- printk(KERN_WARNING
+- "Kernel too large and filling up ZONE_DMA?\n");
+- h[ZONE_DMA] = z[ZONE_DMA];
+- }
+- }
+-#else
+- z[ZONE_DMA] = end_pfn;
+- for (i = 0; i < MAX_NR_ZONES; i++)
+- h[i] = 0;
+-#endif
+-}
+-
+ #ifndef CONFIG_NUMA
+ void __init paging_init(void)
+ {
+- unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
++ unsigned long max_zone_pfns[MAX_NR_ZONES];
++
++ memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
++ max_zone_pfns[ZONE_DMA] = end_pfn;
++ max_zone_pfns[ZONE_DMA32] = end_pfn;
++ max_zone_pfns[ZONE_NORMAL] = end_pfn;
+
+ memory_present(0, 0, end_pfn);
+ sparse_init();
+- size_zones(zones, holes, 0, end_pfn);
+- free_area_init_node(0, NODE_DATA(0), zones,
+- __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
++ free_area_init_nodes(max_zone_pfns);
+
+ init_mm.context.pinned = 1;
+ }
+@@ -921,36 +861,23 @@ void online_page(struct page *page)
+
+ #ifdef CONFIG_MEMORY_HOTPLUG
+ /*
+- * XXX: memory_add_physaddr_to_nid() is to find node id from physical address
+- * via probe interface of sysfs. If acpi notifies hot-add event, then it
+- * can tell node id by searching dsdt. But, probe interface doesn't have
+- * node id. So, return 0 as node id at this time.
+- */
+-#ifdef CONFIG_NUMA
+-int memory_add_physaddr_to_nid(u64 start)
+-{
+- return 0;
+-}
+-#endif
+-
+-/*
+ * Memory is added always to NORMAL zone. This means you will never get
+ * additional DMA/DMA32 memory.
+ */
+ int arch_add_memory(int nid, u64 start, u64 size)
+ {
+ struct pglist_data *pgdat = NODE_DATA(nid);
+- struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
++ struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ int ret;
+
++ init_memory_mapping(start, (start + size -1));
++
+ ret = __add_pages(zone, start_pfn, nr_pages);
+ if (ret)
+ goto error;
+
+- init_memory_mapping(start, (start + size -1));
+-
+ return ret;
+ error:
+ printk("%s: Problem encountered in __add_pages!\n", __func__);
+@@ -964,7 +891,17 @@ int remove_memory(u64 start, u64 size)
+ }
+ EXPORT_SYMBOL_GPL(remove_memory);
+
+-#else /* CONFIG_MEMORY_HOTPLUG */
++#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
++int memory_add_physaddr_to_nid(u64 start)
++{
++ return 0;
++}
++EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
++#endif
++
++#endif /* CONFIG_MEMORY_HOTPLUG */
++
++#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
+ /*
+ * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
+ * just online the pages.
+@@ -990,7 +927,7 @@ int __add_pages(struct zone *z, unsigned
+ }
+ return err;
+ }
+-#endif /* CONFIG_MEMORY_HOTPLUG */
++#endif
+
+ static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
+ kcore_vsyscall;
+@@ -1007,12 +944,6 @@ void __init mem_init(void)
+
+ pci_iommu_alloc();
+
+- /* How many end-of-memory variables you have, grandma! */
+- max_low_pfn = end_pfn;
+- max_pfn = end_pfn;
+- num_physpages = end_pfn;
+- high_memory = (void *) __va(end_pfn * PAGE_SIZE);
+-
+ /* clear the zero-page */
+ memset(empty_zero_page, 0, PAGE_SIZE);
+
+@@ -1030,7 +961,8 @@ void __init mem_init(void)
+ init_page_count(pfn_to_page(pfn));
+ totalram_pages++;
+ }
+- reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);
++ reservedpages = end_pfn - totalram_pages -
++ absent_pages_in_range(0, end_pfn);
+
+ after_bootmem = 1;
+
+@@ -1137,15 +1069,34 @@ void free_initrd_mem(unsigned long start
+
+ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
+ {
+- /* Should check here against the e820 map to avoid double free */
+ #ifdef CONFIG_NUMA
+ int nid = phys_to_nid(phys);
++#endif
++ unsigned long pfn = phys >> PAGE_SHIFT;
++ if (pfn >= end_pfn) {
++ /* This can happen with kdump kernels when accessing firmware
++ tables. */
++ if (pfn < end_pfn_map)
++ return;
++ printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
++ phys, len);
++ return;
++ }
++
++ /* Should check here against the e820 map to avoid double free */
++#ifdef CONFIG_NUMA
+ reserve_bootmem_node(NODE_DATA(nid), phys, len);
+ #else
+ reserve_bootmem(phys, len);
+ #endif
+- if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
++#ifndef CONFIG_XEN
++ if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
++ static unsigned long dma_reserve __initdata;
++
+ dma_reserve += len / PAGE_SIZE;
++ set_dma_reserve(dma_reserve);
++ }
++#endif
+ }
+
+ int kern_addr_valid(unsigned long addr)
+Index: 10.3-2007-11-26/arch/x86_64/mm/pageattr-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/mm/pageattr-xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/mm/pageattr-xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -302,8 +302,8 @@ static void revert_page(unsigned long ad
+ BUG_ON(pud_none(*pud));
+ pmd = pmd_offset(pud, address);
+ BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
+- pgprot_val(ref_prot) |= _PAGE_PSE;
+ large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
++ large_pte = pte_mkhuge(large_pte);
+ set_pte((pte_t *)pmd, large_pte);
+ }
+
+@@ -313,32 +313,28 @@ __change_page_attr(unsigned long address
+ {
+ pte_t *kpte;
+ struct page *kpte_page;
+- unsigned kpte_flags;
+ pgprot_t ref_prot2;
+ kpte = lookup_address(address);
+ if (!kpte) return 0;
+ kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
+- kpte_flags = pte_val(*kpte);
+ if (pgprot_val(prot) != pgprot_val(ref_prot)) {
+- if ((kpte_flags & _PAGE_PSE) == 0) {
++ if (!pte_huge(*kpte)) {
+ set_pte(kpte, pfn_pte(pfn, prot));
+ } else {
+ /*
+ * split_large_page will take the reference for this
+ * change_page_attr on the split page.
+ */
+-
+ struct page *split;
+- ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
+-
++ ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
+ split = split_large_page(address, prot, ref_prot2);
+ if (!split)
+ return -ENOMEM;
+- set_pte(kpte,mk_pte(split, ref_prot2));
++ set_pte(kpte, mk_pte(split, ref_prot2));
+ kpte_page = split;
+- }
++ }
+ page_private(kpte_page)++;
+- } else if ((kpte_flags & _PAGE_PSE) == 0) {
++ } else if (!pte_huge(*kpte)) {
+ set_pte(kpte, pfn_pte(pfn, ref_prot));
+ BUG_ON(page_private(kpte_page) == 0);
+ page_private(kpte_page)--;
+@@ -395,10 +391,12 @@ int change_page_attr_addr(unsigned long
+ * lowmem */
+ if (__pa(address) < KERNEL_TEXT_SIZE) {
+ unsigned long addr2;
+- pgprot_t prot2 = prot;
++ pgprot_t prot2;
+ addr2 = __START_KERNEL_map + __pa(address);
+- pgprot_val(prot2) &= ~_PAGE_NX;
+- err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
++ /* Make sure the kernel mappings stay executable */
++ prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
++ err = __change_page_attr(addr2, pfn, prot2,
++ PAGE_KERNEL_EXEC);
+ }
+ }
+ up_write(&init_mm.mmap_sem);
+Index: 10.3-2007-11-26/drivers/char/tpm/tpm_xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/char/tpm/tpm_xen.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/char/tpm/tpm_xen.c 2007-10-22 13:53:08.000000000 +0200
+@@ -85,8 +85,7 @@ static struct tpm_private *my_priv;
+
+ /* local function prototypes */
+ static irqreturn_t tpmif_int(int irq,
+- void *tpm_priv,
+- struct pt_regs *ptregs);
++ void *tpm_priv);
+ static void tpmif_rx_action(unsigned long unused);
+ static int tpmif_connect(struct xenbus_device *dev,
+ struct tpm_private *tp,
+@@ -558,7 +557,7 @@ static void tpmif_rx_action(unsigned lon
+ }
+
+
+-static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
++static irqreturn_t tpmif_int(int irq, void *tpm_priv)
+ {
+ struct tpm_private *tp = tpm_priv;
+ unsigned long flags;
+Index: 10.3-2007-11-26/drivers/pci/Kconfig
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/pci/Kconfig 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/pci/Kconfig 2007-10-22 13:53:08.000000000 +0200
+@@ -34,7 +34,7 @@ config PCI_DEBUG
+ config HT_IRQ
+ bool "Interrupts on hypertransport devices"
+ default y
+- depends on PCI && X86_LOCAL_APIC && X86_IO_APIC
++ depends on PCI && X86_LOCAL_APIC && X86_IO_APIC && !XEN
+ help
+ This allows native hypertransport devices to use interrupts.
+
+Index: 10.3-2007-11-26/drivers/xen/Kconfig
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/Kconfig 2007-09-03 09:52:56.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/Kconfig 2007-10-22 13:53:08.000000000 +0200
+@@ -249,6 +249,9 @@ config HAVE_IRQ_IGNORE_UNHANDLED
+ bool
+ default y
+
++config GENERIC_HARDIRQS_NO__DO_IRQ
++ def_bool y
++
+ config NO_IDLE_HZ
+ bool
+ default y
+Index: 10.3-2007-11-26/drivers/xen/balloon/balloon.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/balloon/balloon.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/balloon/balloon.c 2007-10-22 13:53:08.000000000 +0200
+@@ -83,7 +83,13 @@ static unsigned long frame_list[PAGE_SIZ
+
+ /* VM /proc information for memory */
+ extern unsigned long totalram_pages;
++#ifdef CONFIG_HIGHMEM
+ extern unsigned long totalhigh_pages;
++#define totalhigh_pages(op) (totalhigh_pages op)
++#else
++#undef totalhigh_pages
++#define totalhigh_pages(op)
++#endif
+
+ /* List of ballooned pages, threaded through the mem_map array. */
+ static LIST_HEAD(ballooned_pages);
+@@ -119,7 +125,7 @@ static void balloon_append(struct page *
+ if (PageHighMem(page)) {
+ list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
+ bs.balloon_high++;
+- totalhigh_pages--;
++ totalhigh_pages(--);
+ } else {
+ list_add(PAGE_TO_LIST(page), &ballooned_pages);
+ bs.balloon_low++;
+@@ -139,7 +145,7 @@ static struct page *balloon_retrieve(voi
+
+ if (PageHighMem(page)) {
+ bs.balloon_high--;
+- totalhigh_pages++;
++ totalhigh_pages(++);
+ }
+ else
+ bs.balloon_low--;
+Index: 10.3-2007-11-26/drivers/xen/blkback/blkback.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/blkback/blkback.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/blkback/blkback.c 2007-10-22 13:53:08.000000000 +0200
+@@ -287,7 +287,7 @@ static void blkif_notify_work(blkif_t *b
+ wake_up(&blkif->wq);
+ }
+
+-irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++irqreturn_t blkif_be_int(int irq, void *dev_id)
+ {
+ blkif_notify_work(dev_id);
+ return IRQ_HANDLED;
+Index: 10.3-2007-11-26/drivers/xen/blkback/common.h
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/blkback/common.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/blkback/common.h 2007-10-22 13:53:08.000000000 +0200
+@@ -130,7 +130,7 @@ void blkif_interface_init(void);
+
+ void blkif_xenbus_init(void);
+
+-irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++irqreturn_t blkif_be_int(int irq, void *dev_id);
+ int blkif_schedule(void *arg);
+
+ int blkback_barrier(struct xenbus_transaction xbt,
+Index: 10.3-2007-11-26/drivers/xen/blkfront/blkfront.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/blkfront/blkfront.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/blkfront/blkfront.c 2007-10-22 13:53:08.000000000 +0200
+@@ -69,7 +69,7 @@ static int setup_blkring(struct xenbus_d
+
+ static void kick_pending_request_queues(struct blkfront_info *);
+
+-static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs);
++static irqreturn_t blkif_int(int irq, void *dev_id);
+ static void blkif_restart_queue(void *arg);
+ static void blkif_recover(struct blkfront_info *);
+ static void blkif_completion(struct blk_shadow *);
+@@ -688,7 +688,7 @@ void do_blkif_request(request_queue_t *r
+ }
+
+
+-static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
++static irqreturn_t blkif_int(int irq, void *dev_id)
+ {
+ struct request *req;
+ blkif_response_t *bret;
+Index: 10.3-2007-11-26/drivers/xen/blktap/blktap.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/blktap/blktap.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/blktap/blktap.c 2007-10-22 13:53:08.000000000 +0200
+@@ -1059,7 +1059,7 @@ static void blkif_notify_work(blkif_t *b
+ wake_up(&blkif->wq);
+ }
+
+-irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++irqreturn_t tap_blkif_be_int(int irq, void *dev_id)
+ {
+ blkif_notify_work(dev_id);
+ return IRQ_HANDLED;
+Index: 10.3-2007-11-26/drivers/xen/blktap/common.h
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/blktap/common.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/blktap/common.h 2007-10-22 13:53:08.000000000 +0200
+@@ -112,7 +112,7 @@ void tap_blkif_interface_init(void);
+
+ void tap_blkif_xenbus_init(void);
+
+-irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++irqreturn_t tap_blkif_be_int(int irq, void *dev_id);
+ int tap_blkif_schedule(void *arg);
+
+ int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif);
+Index: 10.3-2007-11-26/drivers/xen/console/console.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/console/console.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/console/console.c 2007-10-22 13:53:08.000000000 +0200
+@@ -335,7 +335,7 @@ static struct tty_struct *xencons_tty;
+ static int xencons_priv_irq;
+ static char x_char;
+
+-void xencons_rx(char *buf, unsigned len, struct pt_regs *regs)
++void xencons_rx(char *buf, unsigned len)
+ {
+ int i;
+ unsigned long flags;
+@@ -360,8 +360,7 @@ void xencons_rx(char *buf, unsigned len,
+ if (time_before(jiffies, sysrq_timeout)) {
+ spin_unlock_irqrestore(
+ &xencons_lock, flags);
+- handle_sysrq(
+- buf[i], regs, xencons_tty);
++ handle_sysrq(buf[i], xencons_tty);
+ spin_lock_irqsave(
+ &xencons_lock, flags);
+ continue;
+@@ -426,14 +425,13 @@ void xencons_tx(void)
+ }
+
+ /* Privileged receive callback and transmit kicker. */
+-static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
+- struct pt_regs *regs)
++static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id)
+ {
+ static char rbuf[16];
+ int l;
+
+ while ((l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0)
+- xencons_rx(rbuf, l, regs);
++ xencons_rx(rbuf, l);
+
+ xencons_tx();
+
+Index: 10.3-2007-11-26/drivers/xen/console/xencons_ring.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/console/xencons_ring.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/console/xencons_ring.c 2007-10-22 13:53:08.000000000 +0200
+@@ -83,7 +83,7 @@ int xencons_ring_send(const char *data,
+ return sent;
+ }
+
+-static irqreturn_t handle_input(int irq, void *unused, struct pt_regs *regs)
++static irqreturn_t handle_input(int irq, void *unused)
+ {
+ struct xencons_interface *intf = xencons_interface();
+ XENCONS_RING_IDX cons, prod;
+@@ -94,7 +94,7 @@ static irqreturn_t handle_input(int irq,
+ BUG_ON((prod - cons) > sizeof(intf->in));
+
+ while (cons != prod) {
+- xencons_rx(intf->in+MASK_XENCONS_IDX(cons,intf->in), 1, regs);
++ xencons_rx(intf->in+MASK_XENCONS_IDX(cons,intf->in), 1);
+ cons++;
+ }
+
+Index: 10.3-2007-11-26/drivers/xen/core/evtchn.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/core/evtchn.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/core/evtchn.c 2007-10-22 13:53:08.000000000 +0200
+@@ -462,7 +462,7 @@ static void unbind_from_irq(unsigned int
+
+ int bind_caller_port_to_irqhandler(
+ unsigned int caller_port,
+- irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id)
+@@ -485,7 +485,7 @@ EXPORT_SYMBOL_GPL(bind_caller_port_to_ir
+
+ int bind_listening_port_to_irqhandler(
+ unsigned int remote_domain,
+- irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id)
+@@ -509,7 +509,7 @@ EXPORT_SYMBOL_GPL(bind_listening_port_to
+ int bind_interdomain_evtchn_to_irqhandler(
+ unsigned int remote_domain,
+ unsigned int remote_port,
+- irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id)
+@@ -533,7 +533,7 @@ EXPORT_SYMBOL_GPL(bind_interdomain_evtch
+ int bind_virq_to_irqhandler(
+ unsigned int virq,
+ unsigned int cpu,
+- irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id)
+@@ -557,7 +557,7 @@ EXPORT_SYMBOL_GPL(bind_virq_to_irqhandle
+ int bind_ipi_to_irqhandler(
+ unsigned int ipi,
+ unsigned int cpu,
+- irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id)
+@@ -644,15 +644,7 @@ static unsigned int startup_dynirq(unsig
+ return 0;
+ }
+
+-static void shutdown_dynirq(unsigned int irq)
+-{
+- int evtchn = evtchn_from_irq(irq);
+-
+- if (VALID_EVTCHN(evtchn))
+- mask_evtchn(evtchn);
+-}
+-
+-static void enable_dynirq(unsigned int irq)
++static void unmask_dynirq(unsigned int irq)
+ {
+ int evtchn = evtchn_from_irq(irq);
+
+@@ -660,7 +652,7 @@ static void enable_dynirq(unsigned int i
+ unmask_evtchn(evtchn);
+ }
+
+-static void disable_dynirq(unsigned int irq)
++static void mask_dynirq(unsigned int irq)
+ {
+ int evtchn = evtchn_from_irq(irq);
+
+@@ -688,12 +680,12 @@ static void end_dynirq(unsigned int irq)
+ unmask_evtchn(evtchn);
+ }
+
+-static struct hw_interrupt_type dynirq_type = {
+- .typename = "Dynamic-irq",
++static struct irq_chip dynirq_chip = {
++ .name = "Dynamic-irq",
+ .startup = startup_dynirq,
+- .shutdown = shutdown_dynirq,
+- .enable = enable_dynirq,
+- .disable = disable_dynirq,
++ .mask = mask_dynirq,
++ .unmask = unmask_dynirq,
++ .mask_ack = ack_dynirq,
+ .ack = ack_dynirq,
+ .end = end_dynirq,
+ #ifdef CONFIG_SMP
+@@ -776,7 +768,7 @@ static void shutdown_pirq(unsigned int i
+ irq_info[irq] = IRQ_UNBOUND;
+ }
+
+-static void enable_pirq(unsigned int irq)
++static void unmask_pirq(unsigned int irq)
+ {
+ int evtchn = evtchn_from_irq(irq);
+
+@@ -786,7 +778,7 @@ static void enable_pirq(unsigned int irq
+ }
+ }
+
+-static void disable_pirq(unsigned int irq)
++static void mask_pirq(unsigned int irq)
+ {
+ int evtchn = evtchn_from_irq(irq);
+
+@@ -816,12 +808,14 @@ static void end_pirq(unsigned int irq)
+ }
+ }
+
+-static struct hw_interrupt_type pirq_type = {
++static struct irq_chip pirq_chip = {
++ .name = "Phys-irq",
+ .typename = "Phys-irq",
+ .startup = startup_pirq,
+ .shutdown = shutdown_pirq,
+- .enable = enable_pirq,
+- .disable = disable_pirq,
++ .mask = mask_pirq,
++ .unmask = unmask_pirq,
++ .mask_ack = ack_pirq,
+ .ack = ack_pirq,
+ .end = end_pirq,
+ #ifdef CONFIG_SMP
+@@ -994,7 +988,8 @@ void __init xen_init_IRQ(void)
+ irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
+ irq_desc[dynirq_to_irq(i)].action = NULL;
+ irq_desc[dynirq_to_irq(i)].depth = 1;
+- irq_desc[dynirq_to_irq(i)].chip = &dynirq_type;
++ set_irq_chip_and_handler_name(dynirq_to_irq(i), &dynirq_chip,
++ handle_level_irq, "level");
+ }
+
+ /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
+@@ -1010,6 +1005,7 @@ void __init xen_init_IRQ(void)
+ irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
+ irq_desc[pirq_to_irq(i)].action = NULL;
+ irq_desc[pirq_to_irq(i)].depth = 1;
+- irq_desc[pirq_to_irq(i)].chip = &pirq_type;
++ set_irq_chip_and_handler_name(pirq_to_irq(i), &pirq_chip,
++ handle_level_irq, "level");
+ }
+ }
+Index: 10.3-2007-11-26/drivers/xen/core/reboot.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/core/reboot.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/core/reboot.c 2007-10-22 13:53:08.000000000 +0200
+@@ -180,7 +180,7 @@ static void sysrq_handler(struct xenbus_
+
+ #ifdef CONFIG_MAGIC_SYSRQ
+ if (sysrq_key != '\0')
+- handle_sysrq(sysrq_key, NULL, NULL);
++ handle_sysrq(sysrq_key, NULL);
+ #endif
+ }
+
+Index: 10.3-2007-11-26/drivers/xen/core/smpboot.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/core/smpboot.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/core/smpboot.c 2007-10-22 13:53:08.000000000 +0200
+@@ -25,8 +25,8 @@
+ #include <xen/cpu_hotplug.h>
+ #include <xen/xenbus.h>
+
+-extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
+-extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
++extern irqreturn_t smp_reschedule_interrupt(int, void *);
++extern irqreturn_t smp_call_function_interrupt(int, void *);
+
+ extern int local_setup_timer(unsigned int cpu);
+ extern void local_teardown_timer(unsigned int cpu);
+@@ -72,8 +72,6 @@ EXPORT_SYMBOL(cpu_core_map);
+ #if defined(__i386__)
+ u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
+ EXPORT_SYMBOL(x86_cpu_to_apicid);
+-#elif !defined(CONFIG_X86_IO_APIC)
+-unsigned int maxcpus = NR_CPUS;
+ #endif
+
+ void __init prefill_possible_map(void)
+Index: 10.3-2007-11-26/drivers/xen/fbfront/xenfb.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/fbfront/xenfb.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/fbfront/xenfb.c 2007-10-22 13:53:08.000000000 +0200
+@@ -417,8 +417,7 @@ static struct fb_ops xenfb_fb_ops = {
+ .fb_mmap = xenfb_mmap,
+ };
+
+-static irqreturn_t xenfb_event_handler(int rq, void *dev_id,
+- struct pt_regs *regs)
++static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
+ {
+ /*
+ * No in events recognized, simply ignore them all.
+Index: 10.3-2007-11-26/drivers/xen/fbfront/xenkbd.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/fbfront/xenkbd.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/fbfront/xenkbd.c 2007-10-22 13:53:08.000000000 +0200
+@@ -46,7 +46,7 @@ static void xenkbd_disconnect_backend(st
+ * to do that.
+ */
+
+-static irqreturn_t input_handler(int rq, void *dev_id, struct pt_regs *regs)
++static irqreturn_t input_handler(int rq, void *dev_id)
+ {
+ struct xenkbd_info *info = dev_id;
+ struct xenkbd_page *page = info->page;
+Index: 10.3-2007-11-26/drivers/xen/gntdev/gntdev.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/gntdev/gntdev.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/gntdev/gntdev.c 2007-10-22 13:53:08.000000000 +0200
+@@ -701,9 +701,6 @@ static pte_t gntdev_clear_pte(struct vm_
+ BUG();
+ }
+
+- /* Copy the existing value of the PTE for returning. */
+- copy = *ptep;
+-
+ /* Calculate the grant relating to this PTE. */
+ slot_index = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
+
+@@ -718,6 +715,10 @@ static pte_t gntdev_clear_pte(struct vm_
+ GNTDEV_INVALID_HANDLE &&
+ !xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* NOT USING SHADOW PAGE TABLES. */
++
++ /* Copy the existing value of the PTE for returning. */
++ copy = *ptep;
++
+ gnttab_set_unmap_op(&op, virt_to_machine(ptep),
+ GNTMAP_contains_pte,
+ private_data->grants[slot_index]
+@@ -730,7 +731,7 @@ static pte_t gntdev_clear_pte(struct vm_
+ op.status);
+ } else {
+ /* USING SHADOW PAGE TABLES. */
+- pte_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
++ copy = ptep_get_and_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
+ }
+
+ /* Finally, we unmap the grant from kernel space. */
+@@ -758,7 +759,7 @@ static pte_t gntdev_clear_pte(struct vm_
+ >> PAGE_SHIFT, INVALID_P2M_ENTRY);
+
+ } else {
+- pte_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
++ copy = ptep_get_and_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
+ }
+
+ return copy;
+Index: 10.3-2007-11-26/drivers/xen/privcmd/privcmd.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/privcmd/privcmd.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/privcmd/privcmd.c 2007-10-22 13:53:08.000000000 +0200
+@@ -234,7 +234,7 @@ static int privcmd_mmap(struct file * fi
+ return -ENOSYS;
+
+ /* DONTCOPY is essential for Xen as copy_page_range is broken. */
+- vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTCOPY;
+ vma->vm_ops = &privcmd_vm_ops;
+ vma->vm_private_data = NULL;
+
+Index: 10.3-2007-11-26/drivers/xen/netback/common.h
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/netback/common.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/netback/common.h 2007-10-22 13:53:08.000000000 +0200
+@@ -140,7 +140,7 @@ void netif_deschedule_work(netif_t *neti
+
+ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
+ struct net_device_stats *netif_be_get_stats(struct net_device *dev);
+-irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++irqreturn_t netif_be_int(int irq, void *dev_id);
+
+ static inline int netbk_can_queue(struct net_device *dev)
+ {
+Index: 10.3-2007-11-26/drivers/xen/netback/loopback.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/netback/loopback.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/netback/loopback.c 2007-10-22 13:53:08.000000000 +0200
+@@ -151,7 +151,7 @@ static int loopback_start_xmit(struct sk
+ np->stats.rx_bytes += skb->len;
+ np->stats.rx_packets++;
+
+- if (skb->ip_summed == CHECKSUM_HW) {
++ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ /* Defer checksum calculation. */
+ skb->proto_csum_blank = 1;
+ /* Must be a local packet: assert its integrity. */
+Index: 10.3-2007-11-26/drivers/xen/netback/netback.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/netback/netback.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/netback/netback.c 2007-10-22 13:53:08.000000000 +0200
+@@ -649,7 +649,7 @@ static void net_rx_action(unsigned long
+ id = meta[npo.meta_cons].id;
+ flags = nr_frags ? NETRXF_more_data : 0;
+
+- if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
++ if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
+ flags |= NETRXF_csum_blank | NETRXF_data_validated;
+ else if (skb->proto_data_valid) /* remote but checksummed? */
+ flags |= NETRXF_data_validated;
+@@ -1339,7 +1339,7 @@ static void netif_page_release(struct pa
+ netif_idx_release(netif_page_index(page));
+ }
+
+-irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++irqreturn_t netif_be_int(int irq, void *dev_id)
+ {
+ netif_t *netif = dev_id;
+
+@@ -1406,7 +1406,7 @@ static netif_rx_response_t *make_rx_resp
+ }
+
+ #ifdef NETBE_DEBUG_INTERRUPT
+-static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
++static irqreturn_t netif_be_dbg(int irq, void *dev_id)
+ {
+ struct list_head *ent;
+ netif_t *netif;
+Index: 10.3-2007-11-26/drivers/xen/netfront/netfront.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/netfront/netfront.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/netfront/netfront.c 2007-10-22 13:53:08.000000000 +0200
+@@ -135,7 +135,7 @@ static inline int netif_needs_gso(struct
+ {
+ return skb_is_gso(skb) &&
+ (!skb_gso_ok(skb, dev->features) ||
+- unlikely(skb->ip_summed != CHECKSUM_HW));
++ unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
+ }
+ #else
+ #define netif_needs_gso(dev, skb) 0
+@@ -268,7 +268,7 @@ static void network_tx_buf_gc(struct net
+ static void network_alloc_rx_buffers(struct net_device *);
+ static int send_fake_arp(struct net_device *);
+
+-static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);
++static irqreturn_t netif_int(int irq, void *dev_id);
+
+ #ifdef CONFIG_SYSFS
+ static int xennet_sysfs_addif(struct net_device *netdev);
+@@ -978,7 +978,7 @@ static int network_start_xmit(struct sk_
+ tx->flags = 0;
+ extra = NULL;
+
+- if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
++ if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
+ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
+ #ifdef CONFIG_XEN
+ if (skb->proto_data_valid) /* remote but checksummed? */
+@@ -1034,7 +1034,7 @@ static int network_start_xmit(struct sk_
+ return 0;
+ }
+
+-static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
++static irqreturn_t netif_int(int irq, void *dev_id)
+ {
+ struct net_device *dev = dev_id;
+ struct netfront_info *np = netdev_priv(dev);
+Index: 10.3-2007-11-26/drivers/xen/pciback/pciback.h
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/pciback/pciback.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/pciback/pciback.h 2007-10-22 13:53:08.000000000 +0200
+@@ -83,7 +83,7 @@ int pciback_publish_pci_roots(struct pci
+ void pciback_release_devices(struct pciback_device *pdev);
+
+ /* Handles events from front-end */
+-irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs);
++irqreturn_t pciback_handle_event(int irq, void *dev_id);
+ void pciback_do_op(void *data);
+
+ int pciback_xenbus_register(void);
+Index: 10.3-2007-11-26/drivers/xen/pciback/pciback_ops.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/pciback/pciback_ops.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/pciback/pciback_ops.c 2007-10-22 13:53:08.000000000 +0200
+@@ -85,7 +85,7 @@ void pciback_do_op(void *data)
+ test_and_schedule_op(pdev);
+ }
+
+-irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs)
++irqreturn_t pciback_handle_event(int irq, void *dev_id)
+ {
+ struct pciback_device *pdev = dev_id;
+
+Index: 10.3-2007-11-26/drivers/xen/tpmback/common.h
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/tpmback/common.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/tpmback/common.h 2007-10-22 13:53:08.000000000 +0200
+@@ -61,7 +61,7 @@ void tpmif_deschedule_work(tpmif_t * tpm
+ void tpmif_xenbus_init(void);
+ void tpmif_xenbus_exit(void);
+ int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn);
+-irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++irqreturn_t tpmif_be_int(int irq, void *dev_id);
+
+ long int tpmback_get_instance(struct backend_info *bi);
+
+Index: 10.3-2007-11-26/drivers/xen/tpmback/tpmback.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/tpmback/tpmback.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/tpmback/tpmback.c 2007-10-22 13:53:08.000000000 +0200
+@@ -502,7 +502,7 @@ static ssize_t vtpm_op_read(struct file
+ list_del(&pak->next);
+ write_unlock_irqrestore(&dataex.pak_lock, flags);
+
+- DPRINTK("size given by app: %d, available: %d\n", size, left);
++ DPRINTK("size given by app: %zu, available: %u\n", size, left);
+
+ ret_size = min_t(size_t, size, left);
+
+@@ -899,7 +899,7 @@ static void tpm_tx_action(unsigned long
+ }
+ }
+
+-irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++irqreturn_t tpmif_be_int(int irq, void *dev_id)
+ {
+ tpmif_t *tpmif = (tpmif_t *) dev_id;
+
+Index: 10.3-2007-11-26/drivers/xen/xenbus/xenbus_comms.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/xenbus/xenbus_comms.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/xenbus/xenbus_comms.c 2007-10-22 13:53:08.000000000 +0200
+@@ -54,7 +54,7 @@ static DECLARE_WORK(probe_work, xenbus_p
+
+ static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
+
+-static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs)
++static irqreturn_t wake_waiting(int irq, void *unused)
+ {
+ if (unlikely(xenstored_ready == 0)) {
+ xenstored_ready = 1;
+Index: 10.3-2007-11-26/drivers/xen/xenoprof/xenoprofile.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/xenoprof/xenoprofile.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/xenoprof/xenoprofile.c 2007-10-22 13:53:08.000000000 +0200
+@@ -177,7 +177,7 @@ done:
+ }
+
+ static irqreturn_t
+-xenoprof_ovf_interrupt(int irq, void * dev_id, struct pt_regs * regs)
++xenoprof_ovf_interrupt(int irq, void * dev_id)
+ {
+ struct xenoprof_buf * buf;
+ int cpu;
+Index: 10.3-2007-11-26/include/asm-i386/acpi.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/acpi.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/acpi.h 2007-10-22 13:53:08.000000000 +0200
+@@ -141,7 +141,9 @@ extern void acpi_reserve_bootmem(void);
+
+ #endif /*CONFIG_ACPI_SLEEP*/
+
++#ifndef CONFIG_XEN
+ #define ARCH_HAS_POWER_INIT 1
++#endif
+
+ #endif /*__KERNEL__*/
+
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/desc.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/desc.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/desc.h 2007-10-22 13:53:08.000000000 +0200
+@@ -32,52 +32,108 @@ static inline struct desc_struct *get_cp
+ return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
+ }
+
++/*
++ * This is the ldt that every process will get unless we need
++ * something other than this.
++ */
++extern struct desc_struct default_ldt[];
++extern struct desc_struct idt_table[];
++extern void set_intr_gate(unsigned int irq, void * addr);
++
++static inline void pack_descriptor(__u32 *a, __u32 *b,
++ unsigned long base, unsigned long limit, unsigned char type, unsigned char flags)
++{
++ *a = ((base & 0xffff) << 16) | (limit & 0xffff);
++ *b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
++ (limit & 0x000f0000) | ((type & 0xff) << 8) | ((flags & 0xf) << 20);
++}
++
++static inline void pack_gate(__u32 *a, __u32 *b,
++ unsigned long base, unsigned short seg, unsigned char type, unsigned char flags)
++{
++ *a = (seg << 16) | (base & 0xffff);
++ *b = (base & 0xffff0000) | ((type & 0xff) << 8) | (flags & 0xff);
++}
++
++#define DESCTYPE_LDT 0x82 /* present, system, DPL-0, LDT */
++#define DESCTYPE_TSS 0x89 /* present, system, DPL-0, 32-bit TSS */
++#define DESCTYPE_TASK 0x85 /* present, system, DPL-0, task gate */
++#define DESCTYPE_INT 0x8e /* present, system, DPL-0, interrupt gate */
++#define DESCTYPE_TRAP 0x8f /* present, system, DPL-0, trap gate */
++#define DESCTYPE_DPL3 0x60 /* DPL-3 */
++#define DESCTYPE_S 0x10 /* !system */
++
+ #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
+ #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
+
+ #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
+ #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
+-#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
+-#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
++#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
++#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
+
+ #define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
+ #define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
+-#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
+-#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
++#define store_tr(tr) __asm__ ("str %0":"=m" (tr))
++#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
+
+-/*
+- * This is the ldt that every process will get unless we need
+- * something other than this.
+- */
+-extern struct desc_struct default_ldt[];
+-extern void set_intr_gate(unsigned int irq, void * addr);
++#if TLS_SIZE != 24
++# error update this code.
++#endif
++
++static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
++{
++#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), *(u64 *)&t->tls_array[i])
++ C(0); C(1); C(2);
++#undef C
++}
+
+-#define _set_tssldt_desc(n,addr,limit,type) \
+-__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
+- "movw %w1,2(%2)\n\t" \
+- "rorl $16,%1\n\t" \
+- "movb %b1,4(%2)\n\t" \
+- "movb %4,5(%2)\n\t" \
+- "movb $0,6(%2)\n\t" \
+- "movb %h1,7(%2)\n\t" \
+- "rorl $16,%1" \
+- : "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
++#ifndef CONFIG_XEN
++static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
++{
++ __u32 *lp = (__u32 *)((char *)dt + entry*8);
++ *lp = entry_a;
++ *(lp+1) = entry_b;
++}
+
+-#ifndef CONFIG_X86_NO_TSS
+-static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
++#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
++#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
++#else
++extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
++extern int write_gdt_entry(void *gdt, int entry, __u32 entry_a, __u32 entry_b);
++#endif
++#ifndef CONFIG_X86_NO_IDT
++#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
++
++static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
+ {
+- _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
+- offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
++ __u32 a, b;
++ pack_gate(&a, &b, (unsigned long)addr, seg, type, 0);
++ write_idt_entry(idt_table, gate, a, b);
+ }
++#endif
+
+-#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
++#ifndef CONFIG_X86_NO_TSS
++static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr)
++{
++ __u32 a, b;
++ pack_descriptor(&a, &b, (unsigned long)addr,
++ offsetof(struct tss_struct, __cacheline_filler) - 1,
++ DESCTYPE_TSS, 0);
++ write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
++}
+ #endif
+
+-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
++static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries)
+ {
+- _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
++ __u32 a, b;
++ pack_descriptor(&a, &b, (unsigned long)addr,
++ entries * sizeof(struct desc_struct) - 1,
++ DESCTYPE_LDT, 0);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
+ }
+
++#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
++
+ #define LDT_entry_a(info) \
+ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+
+@@ -103,19 +159,6 @@ static inline void set_ldt_desc(unsigned
+ (info)->seg_not_present == 1 && \
+ (info)->useable == 0 )
+
+-extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
+-
+-#if TLS_SIZE != 24
+-# error update this code.
+-#endif
+-
+-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
+-{
+-#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), *(u64 *)&t->tls_array[i])
+- C(0); C(1); C(2);
+-#undef C
+-}
+-
+ static inline void clear_LDT(void)
+ {
+ int cpu = get_cpu();
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/fixmap.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/fixmap.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/fixmap.h 2007-10-22 13:53:08.000000000 +0200
+@@ -55,7 +55,7 @@ enum fixed_addresses {
+ #ifdef CONFIG_X86_LOCAL_APIC
+ FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
+ #endif
+-#ifdef CONFIG_X86_IO_APIC
++#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_XEN)
+ FIX_IO_APIC_BASE_0,
+ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
+ #endif
+@@ -95,10 +95,9 @@ enum fixed_addresses {
+ __end_of_fixed_addresses
+ };
+
+-extern void set_fixaddr_top(unsigned long top);
+-
+ extern void __set_fixmap(enum fixed_addresses idx,
+ maddr_t phys, pgprot_t flags);
++extern void reserve_top_address(unsigned long reserve);
+
+ #define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL)
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/floppy.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/floppy.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/floppy.h 2007-10-22 13:53:08.000000000 +0200
+@@ -43,14 +43,14 @@ static char *virtual_dma_addr;
+ static int virtual_dma_mode;
+ static int doing_pdma;
+
+-static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
++static irqreturn_t floppy_hardint(int irq, void *dev_id)
+ {
+ register unsigned char st;
+ register int lcount;
+ register char *lptr;
+
+ if (!doing_pdma)
+- return floppy_interrupt(irq, dev_id, regs);
++ return floppy_interrupt(irq, dev_id);
+
+ st = 1;
+ for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
+@@ -73,7 +73,7 @@ static irqreturn_t floppy_hardint(int ir
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count=0;
+ doing_pdma = 0;
+- floppy_interrupt(irq, dev_id, regs);
++ floppy_interrupt(irq, dev_id);
+ return IRQ_HANDLED;
+ }
+ return IRQ_HANDLED;
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/hw_irq.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/hw_irq.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/hw_irq.h 2007-10-22 13:53:08.000000000 +0200
+@@ -17,8 +17,6 @@
+ #include <asm/irq.h>
+ #include <asm/sections.h>
+
+-struct hw_interrupt_type;
+-
+ #define NMI_VECTOR 0x02
+
+ /*
+@@ -28,10 +26,6 @@ struct hw_interrupt_type;
+ * Interrupt entry/exit code at both C and assembly level
+ */
+
+-extern u8 irq_vector[NR_IRQ_VECTORS];
+-#define IO_APIC_VECTOR(irq) (irq_vector[irq])
+-#define AUTO_ASSIGN -1
+-
+ extern void (*interrupt[NR_IRQS])(void);
+
+ #ifdef CONFIG_SMP
+@@ -44,7 +38,7 @@ fastcall void call_function_interrupt(vo
+ fastcall void apic_timer_interrupt(void);
+ fastcall void error_interrupt(void);
+ fastcall void spurious_interrupt(void);
+-fastcall void thermal_interrupt(struct pt_regs *);
++fastcall void thermal_interrupt(void);
+ #define platform_legacy_irq(irq) ((irq) < 16)
+ #endif
+
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/io.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/io.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/io.h 2007-10-22 13:53:08.000000000 +0200
+@@ -238,33 +238,6 @@ static inline void memcpy_toio(volatile
+
+ #define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(b),(c),(d))
+
+-/**
+- * check_signature - find BIOS signatures
+- * @io_addr: mmio address to check
+- * @signature: signature block
+- * @length: length of signature
+- *
+- * Perform a signature comparison with the mmio address io_addr. This
+- * address should have been obtained by ioremap.
+- * Returns 1 on a match.
+- */
+-
+-static inline int check_signature(volatile void __iomem * io_addr,
+- const unsigned char *signature, int length)
+-{
+- int retval = 0;
+- do {
+- if (readb(io_addr) != *signature)
+- goto out;
+- io_addr++;
+- signature++;
+- length--;
+- } while (length);
+- retval = 1;
+-out:
+- return retval;
+-}
+-
+ /*
+ * Cache management
+ *
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/page.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/page.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/page.h 2007-10-22 13:53:08.000000000 +0200
+@@ -196,7 +196,7 @@ extern int page_is_ram(unsigned long pag
+
+ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+ #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
+-#define MAXMEM (__FIXADDR_TOP-__PAGE_OFFSET-__VMALLOC_RESERVE)
++#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
+ #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
+ #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+ #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable-2level.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/pgtable-2level.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable-2level.h 2007-10-22 13:53:08.000000000 +0200
+@@ -21,14 +21,6 @@
+ set_pte((ptep), (pteval)); \
+ } while (0)
+
+-#define set_pte_at_sync(_mm,addr,ptep,pteval) do { \
+- if (((_mm) != current->mm && (_mm) != &init_mm) || \
+- HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
+- set_pte((ptep), (pteval)); \
+- xen_invlpg((addr)); \
+- } \
+-} while (0)
+-
+ #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
+
+ #define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
+@@ -38,6 +30,7 @@
+
+ #define pte_none(x) (!(x).pte_low)
+
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+ pte_t pte = *ptep;
+@@ -49,6 +42,7 @@ static inline pte_t ptep_get_and_clear(s
+ return pte;
+ }
+
++#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
+ #define ptep_clear_flush(vma, addr, ptep) \
+ ({ \
+ pte_t *__ptep = (ptep); \
+@@ -64,8 +58,6 @@ static inline pte_t ptep_get_and_clear(s
+ __res; \
+ })
+
+-#define pte_same(a, b) ((a).pte_low == (b).pte_low)
+-
+ #define __pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
+ #define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
+ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable-3level.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/pgtable-3level.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable-3level.h 2007-10-22 13:53:08.000000000 +0200
+@@ -50,7 +50,6 @@ static inline int pte_exec_kernel(pte_t
+ * not possible, use pte_get_and_clear to obtain the old pte
+ * value and then use set_pte to update it. -ben
+ */
+-#define __HAVE_ARCH_SET_PTE_ATOMIC
+
+ static inline void set_pte(pte_t *ptep, pte_t pte)
+ {
+@@ -67,14 +66,6 @@ static inline void set_pte(pte_t *ptep,
+ set_pte((ptep), (pteval)); \
+ } while (0)
+
+-#define set_pte_at_sync(_mm,addr,ptep,pteval) do { \
+- if (((_mm) != current->mm && (_mm) != &init_mm) || \
+- HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
+- set_pte((ptep), (pteval)); \
+- xen_invlpg((addr)); \
+- } \
+-} while (0)
+-
+ #define set_pmd(pmdptr,pmdval) \
+ xen_l2_entry_update((pmdptr), (pmdval))
+ #define set_pud(pudptr,pudval) \
+@@ -91,7 +82,7 @@ static inline void pud_clear (pud_t * pu
+ #define pud_page(pud) \
+ ((struct page *) __va(pud_val(pud) & PAGE_MASK))
+
+-#define pud_page_kernel(pud) \
++#define pud_page_vaddr(pud) \
+ ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
+
+
+@@ -121,6 +112,7 @@ static inline void pte_clear(struct mm_s
+
+ #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+ pte_t pte = *ptep;
+@@ -139,6 +131,7 @@ static inline pte_t ptep_get_and_clear(s
+ return pte;
+ }
+
++#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
+ #define ptep_clear_flush(vma, addr, ptep) \
+ ({ \
+ pte_t *__ptep = (ptep); \
+@@ -156,6 +149,7 @@ static inline pte_t ptep_get_and_clear(s
+ __res; \
+ })
+
++#define __HAVE_ARCH_PTE_SAME
+ static inline int pte_same(pte_t a, pte_t b)
+ {
+ return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/pgtable.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable.h 2007-10-22 13:53:08.000000000 +0200
+@@ -256,31 +256,89 @@ static inline pte_t pte_mkhuge(pte_t pte
+ # include <asm/pgtable-2level.h>
+ #endif
+
+-#define ptep_test_and_clear_dirty(vma, addr, ptep) \
++/*
++ * Rules for using pte_update - it must be called after any PTE update which
++ * has not been done using the set_pte / clear_pte interfaces. It is used by
++ * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
++ * updates should either be sets, clears, or set_pte_atomic for P->P
++ * transitions, which means this hook should only be called for user PTEs.
++ * This hook implies a P->P protection or access change has taken place, which
++ * requires a subsequent TLB flush. The notification can optionally be delayed
++ * until the TLB flush event by using the pte_update_defer form of the
++ * interface, but care must be taken to assure that the flush happens while
++ * still holding the same page table lock so that the shadow and primary pages
++ * do not become out of sync on SMP.
++ */
++#define pte_update(mm, addr, ptep) do { } while (0)
++#define pte_update_defer(mm, addr, ptep) do { } while (0)
++
++
++/*
++ * We only update the dirty/accessed state if we set
++ * the dirty bit by hand in the kernel, since the hardware
++ * will do the accessed bit for us, and we don't want to
++ * race with other CPU's that might be updating the dirty
++ * bit at the same time.
++ */
++#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
++#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
++do { \
++ if (dirty) \
++ ptep_establish(vma, address, ptep, entry); \
++} while (0)
++
++/*
++ * We don't actually have these, but we want to advertise them so that
++ * we can encompass the flush here.
++ */
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
++
++/*
++ * Rules for using ptep_establish: the pte MUST be a user pte, and
++ * must be a present->present transition.
++ */
++#define __HAVE_ARCH_PTEP_ESTABLISH
++#define ptep_establish(vma, address, ptep, pteval) \
++do { \
++ if ( likely((vma)->vm_mm == current->mm) ) { \
++ BUG_ON(HYPERVISOR_update_va_mapping(address, \
++ pteval, \
++ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++ UVMF_INVLPG|UVMF_MULTI)); \
++ } else { \
++ xen_l1_entry_update(ptep, pteval); \
++ flush_tlb_page(vma, address); \
++ } \
++} while (0)
++
++#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
++#define ptep_clear_flush_dirty(vma, address, ptep) \
+ ({ \
+ pte_t __pte = *(ptep); \
+- int __ret = pte_dirty(__pte); \
+- if (__ret) { \
+- __pte = pte_mkclean(__pte); \
+- if ((vma)->vm_mm != current->mm || \
+- HYPERVISOR_update_va_mapping(addr, __pte, 0)) \
+- (ptep)->pte_low = __pte.pte_low; \
+- } \
+- __ret; \
++ int __dirty = pte_dirty(__pte); \
++ __pte = pte_mkclean(__pte); \
++ if (test_bit(PG_pinned, &virt_to_page((vma)->vm_mm->pgd)->flags)) \
++ ptep_set_access_flags(vma, address, ptep, __pte, __dirty); \
++ else if (__dirty) \
++ (ptep)->pte_low = __pte.pte_low; \
++ __dirty; \
+ })
+
+-#define ptep_test_and_clear_young(vma, addr, ptep) \
++#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
++#define ptep_clear_flush_young(vma, address, ptep) \
+ ({ \
+ pte_t __pte = *(ptep); \
+- int __ret = pte_young(__pte); \
+- if (__ret) \
+- __pte = pte_mkold(__pte); \
+- if ((vma)->vm_mm != current->mm || \
+- HYPERVISOR_update_va_mapping(addr, __pte, 0)) \
+- (ptep)->pte_low = __pte.pte_low; \
+- __ret; \
++ int __young = pte_young(__pte); \
++ __pte = pte_mkold(__pte); \
++ if (test_bit(PG_pinned, &virt_to_page((vma)->vm_mm->pgd)->flags)) \
++ ptep_set_access_flags(vma, address, ptep, __pte, __young); \
++ else if (__young) \
++ (ptep)->pte_low = __pte.pte_low; \
++ __young; \
+ })
+
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+ #define ptep_get_and_clear_full(mm, addr, ptep, full) \
+ ((full) ? ({ \
+ pte_t __res = *(ptep); \
+@@ -292,6 +350,7 @@ static inline pte_t pte_mkhuge(pte_t pte
+ }) : \
+ ptep_get_and_clear(mm, addr, ptep))
+
++#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+ pte_t pte = *ptep;
+@@ -387,11 +446,11 @@ static inline pte_t pte_modify(pte_t pte
+ #define pte_index(address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+ #define pte_offset_kernel(dir, address) \
+- ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
++ ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
+
+ #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+
+-#define pmd_page_kernel(pmd) \
++#define pmd_page_vaddr(pmd) \
+ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+ /*
+@@ -414,8 +473,6 @@ extern pte_t *lookup_address(unsigned lo
+ static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
+ #endif
+
+-extern void noexec_setup(const char *str);
+-
+ #if defined(CONFIG_HIGHPTE)
+ #define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
+@@ -433,37 +490,15 @@ extern void noexec_setup(const char *str
+ #define pte_unmap_nested(pte) do { } while (0)
+ #endif
+
+-#define __HAVE_ARCH_PTEP_ESTABLISH
+-#define ptep_establish(vma, address, ptep, pteval) \
+- do { \
+- if ( likely((vma)->vm_mm == current->mm) ) { \
+- BUG_ON(HYPERVISOR_update_va_mapping(address, \
+- pteval, \
+- (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
+- UVMF_INVLPG|UVMF_MULTI)); \
+- } else { \
+- xen_l1_entry_update(ptep, pteval); \
+- flush_tlb_page(vma, address); \
+- } \
+- } while (0)
++/* Clear a kernel PTE and flush it from the TLB */
++#define kpte_clear_flush(ptep, vaddr) \
++ HYPERVISOR_update_va_mapping(vaddr, __pte(0), UVMF_INVLPG)
+
+ /*
+ * The i386 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+- *
+- * Also, we only update the dirty/accessed state if we set
+- * the dirty bit by hand in the kernel, since the hardware
+- * will do the accessed bit for us, and we don't want to
+- * race with other CPU's that might be updating the dirty
+- * bit at the same time.
+ */
+ #define update_mmu_cache(vma,address,pte) do { } while (0)
+-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+-#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
+- do { \
+- if (dirty) \
+- ptep_establish(vma, address, ptep, entry); \
+- } while (0)
+
+ #include <xen/features.h>
+ void make_lowmem_page_readonly(void *va, unsigned int feature);
+@@ -518,13 +553,6 @@ direct_remap_pfn_range(vma,from,pfn,size
+ #define GET_IOSPACE(pfn) 0
+ #define GET_PFN(pfn) (pfn)
+
+-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+-#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
+-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+-#define __HAVE_ARCH_PTE_SAME
+ #include <asm-generic/pgtable.h>
+
+ #endif /* _I386_PGTABLE_H */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/processor.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/processor.h 2007-09-03 09:52:56.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/processor.h 2007-10-22 13:53:08.000000000 +0200
+@@ -146,6 +146,18 @@ static inline void detect_ht(struct cpui
+ #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+ #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+
++static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
++ unsigned int *ecx, unsigned int *edx)
++{
++ /* ecx is often an input as well as an output. */
++ __asm__(XEN_CPUID
++ : "=a" (*eax),
++ "=b" (*ebx),
++ "=c" (*ecx),
++ "=d" (*edx)
++ : "0" (*eax), "2" (*ecx));
++}
++
+ /*
+ * Generic CPUID function
+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
+@@ -153,24 +165,18 @@ static inline void detect_ht(struct cpui
+ */
+ static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
+ {
+- __asm__(XEN_CPUID
+- : "=a" (*eax),
+- "=b" (*ebx),
+- "=c" (*ecx),
+- "=d" (*edx)
+- : "0" (op), "c"(0));
++ *eax = op;
++ *ecx = 0;
++ __cpuid(eax, ebx, ecx, edx);
+ }
+
+ /* Some CPUID calls want 'count' to be placed in ecx */
+ static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
+- int *edx)
++ int *edx)
+ {
+- __asm__(XEN_CPUID
+- : "=a" (*eax),
+- "=b" (*ebx),
+- "=c" (*ecx),
+- "=d" (*edx)
+- : "0" (op), "c" (count));
++ *eax = op;
++ *ecx = count;
++ __cpuid(eax, ebx, ecx, edx);
+ }
+
+ /*
+@@ -178,42 +184,30 @@ static inline void cpuid_count(int op, i
+ */
+ static inline unsigned int cpuid_eax(unsigned int op)
+ {
+- unsigned int eax;
++ unsigned int eax, ebx, ecx, edx;
+
+- __asm__(XEN_CPUID
+- : "=a" (eax)
+- : "0" (op)
+- : "bx", "cx", "dx");
++ cpuid(op, &eax, &ebx, &ecx, &edx);
+ return eax;
+ }
+ static inline unsigned int cpuid_ebx(unsigned int op)
+ {
+- unsigned int eax, ebx;
++ unsigned int eax, ebx, ecx, edx;
+
+- __asm__(XEN_CPUID
+- : "=a" (eax), "=b" (ebx)
+- : "0" (op)
+- : "cx", "dx" );
++ cpuid(op, &eax, &ebx, &ecx, &edx);
+ return ebx;
+ }
+ static inline unsigned int cpuid_ecx(unsigned int op)
+ {
+- unsigned int eax, ecx;
++ unsigned int eax, ebx, ecx, edx;
+
+- __asm__(XEN_CPUID
+- : "=a" (eax), "=c" (ecx)
+- : "0" (op)
+- : "bx", "dx" );
++ cpuid(op, &eax, &ebx, &ecx, &edx);
+ return ecx;
+ }
+ static inline unsigned int cpuid_edx(unsigned int op)
+ {
+- unsigned int eax, edx;
++ unsigned int eax, ebx, ecx, edx;
+
+- __asm__(XEN_CPUID
+- : "=a" (eax), "=d" (edx)
+- : "0" (op)
+- : "bx", "cx");
++ cpuid(op, &eax, &ebx, &ecx, &edx);
+ return edx;
+ }
+
+@@ -315,6 +309,8 @@ static inline void __mwait(unsigned long
+ : :"a" (eax), "c" (ecx));
+ }
+
++extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
++
+ /* from system description table in BIOS. Mostly for MCA use, but
+ others may find it useful. */
+ extern unsigned int machine_id;
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/ptrace.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/ptrace.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/ptrace.h 2007-10-22 13:53:08.000000000 +0200
+@@ -1,24 +1,7 @@
+ #ifndef _I386_PTRACE_H
+ #define _I386_PTRACE_H
+
+-#define EBX 0
+-#define ECX 1
+-#define EDX 2
+-#define ESI 3
+-#define EDI 4
+-#define EBP 5
+-#define EAX 6
+-#define DS 7
+-#define ES 8
+-#define FS 9
+-#define GS 10
+-#define ORIG_EAX 11
+-#define EIP 12
+-#define CS 13
+-#define EFL 14
+-#define UESP 15
+-#define SS 16
+-#define FRAME_SIZE 17
++#include <asm/ptrace-abi.h>
+
+ /* this struct defines the way the registers are stored on the
+ stack during a system call. */
+@@ -41,25 +24,10 @@ struct pt_regs {
+ int xss;
+ };
+
+-/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+-#define PTRACE_GETREGS 12
+-#define PTRACE_SETREGS 13
+-#define PTRACE_GETFPREGS 14
+-#define PTRACE_SETFPREGS 15
+-#define PTRACE_GETFPXREGS 18
+-#define PTRACE_SETFPXREGS 19
+-
+-#define PTRACE_OLDSETOPTIONS 21
+-
+-#define PTRACE_GET_THREAD_AREA 25
+-#define PTRACE_SET_THREAD_AREA 26
+-
+-#define PTRACE_SYSEMU 31
+-#define PTRACE_SYSEMU_SINGLESTEP 32
+-
+ #ifdef __KERNEL__
+
+ #include <asm/vm86.h>
++#include <asm/segment.h>
+
+ struct task_struct;
+ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
+@@ -73,18 +41,17 @@ extern void send_sigtrap(struct task_str
+ */
+ static inline int user_mode(struct pt_regs *regs)
+ {
+- return (regs->xcs & 2) != 0;
++ return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL;
+ }
+ static inline int user_mode_vm(struct pt_regs *regs)
+ {
+- return ((regs->xcs & 2) | (regs->eflags & VM_MASK)) != 0;
++ return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
+ }
++
+ #define instruction_pointer(regs) ((regs)->eip)
+-#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
++#define regs_return_value(regs) ((regs)->eax)
++
+ extern unsigned long profile_pc(struct pt_regs *regs);
+-#else
+-#define profile_pc(regs) instruction_pointer(regs)
+-#endif
+ #endif /* __KERNEL__ */
+
+ #endif
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/segment.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/segment.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/segment.h 2007-10-22 13:53:08.000000000 +0200
+@@ -61,11 +61,9 @@
+
+ #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
+ #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
+-#define GET_KERNEL_CS() (__KERNEL_CS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
+
+ #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
+ #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
+-#define GET_KERNEL_DS() (__KERNEL_DS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
+
+ #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
+ #define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5)
+@@ -85,6 +83,11 @@
+
+ #define GDT_SIZE (GDT_ENTRIES * 8)
+
++/* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */
++#define SEGMENT_IS_FLAT_CODE(x) (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8)
++/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
++#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
++
+ /* Simple and small GDT entries for booting only */
+
+ #define GDT_ENTRY_BOOT_CS 2
+@@ -114,4 +117,16 @@
+ */
+ #define IDT_ENTRIES 256
+
++/* Bottom two bits of selector give the ring privilege level */
++#define SEGMENT_RPL_MASK 0x3
++/* Bit 2 is table indicator (LDT/GDT) */
++#define SEGMENT_TI_MASK 0x4
++
++/* User mode is privilege level 3 */
++#define USER_RPL 0x3
++/* LDT segment has TI set, GDT has it cleared */
++#define SEGMENT_LDT 0x4
++#define SEGMENT_GDT 0x0
++
++#define get_kernel_rpl() (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1)
+ #endif
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/smp.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/smp.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/smp.h 2007-10-22 13:53:08.000000000 +0200
+@@ -79,25 +79,36 @@ static inline int hard_smp_processor_id(
+ return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
+ }
+ #endif
+-
+-static __inline int logical_smp_processor_id(void)
+-{
+- /* we don't want to mark this access volatile - bad code generation */
+- return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+-}
+-
+ #endif
+
++extern int safe_smp_processor_id(void);
+ extern int __cpu_disable(void);
+ extern void __cpu_die(unsigned int cpu);
+ extern void prefill_possible_map(void);
++extern unsigned int num_processors;
++
+ #endif /* !__ASSEMBLY__ */
+
+ #else /* CONFIG_SMP */
+
++#define safe_smp_processor_id() 0
+ #define cpu_physical_id(cpu) boot_cpu_physical_apicid
+
+ #define NO_PROC_ID 0xFF /* No processor magic marker */
+
+ #endif
++
++#ifndef __ASSEMBLY__
++
++extern u8 apicid_2_node[];
++
++#ifdef CONFIG_X86_LOCAL_APIC
++static __inline int logical_smp_processor_id(void)
++{
++ /* we don't want to mark this access volatile - bad code generation */
++ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
++}
++#endif
++#endif
++
+ #endif
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/spinlock.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/spinlock.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/spinlock.h 2007-10-22 13:53:08.000000000 +0200
+@@ -4,8 +4,12 @@
+ #include <asm/atomic.h>
+ #include <asm/rwlock.h>
+ #include <asm/page.h>
++#include <asm/processor.h>
+ #include <linux/compiler.h>
+
++#define CLI_STRING "#cli"
++#define STI_STRING "#sti"
++
+ /*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ *
+@@ -17,67 +21,64 @@
+ * (the type definitions are in asm/spinlock_types.h)
+ */
+
+-#define __raw_spin_is_locked(x) \
+- (*(volatile signed char *)(&(x)->slock) <= 0)
+-
+-#define __raw_spin_lock_string \
+- "\n1:\t" \
+- LOCK_PREFIX " ; decb %0\n\t" \
+- "jns 3f\n" \
+- "2:\t" \
+- "rep;nop\n\t" \
+- "cmpb $0,%0\n\t" \
+- "jle 2b\n\t" \
+- "jmp 1b\n" \
+- "3:\n\t"
+-
+-/*
+- * NOTE: there's an irqs-on section here, which normally would have to be
+- * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
+- * __raw_spin_lock_string_flags().
+- */
+-#define __raw_spin_lock_string_flags \
+- "\n1:\t" \
+- LOCK_PREFIX " ; decb %0\n\t" \
+- "jns 5f\n" \
+- "2:\t" \
+- "testl $0x200, %1\n\t" \
+- "jz 4f\n\t" \
+- "#sti\n" \
+- "3:\t" \
+- "rep;nop\n\t" \
+- "cmpb $0, %0\n\t" \
+- "jle 3b\n\t" \
+- "#cli\n\t" \
+- "jmp 1b\n" \
+- "4:\t" \
+- "rep;nop\n\t" \
+- "cmpb $0, %0\n\t" \
+- "jg 1b\n\t" \
+- "jmp 4b\n" \
+- "5:\n\t"
++static inline int __raw_spin_is_locked(raw_spinlock_t *x)
++{
++ return *(volatile signed char *)(&(x)->slock) <= 0;
++}
+
+ static inline void __raw_spin_lock(raw_spinlock_t *lock)
+ {
+- asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory");
++ asm volatile("\n1:\n" \
++ LOCK_PREFIX "decb %0\n\t"
++ "jns 3f\n"
++ "2:\t"
++ "rep;nop\n\t"
++ "cmpb $0,%0\n\t"
++ "jle 2b\n\t"
++ "jmp 1b\n"
++ "3:\n\t"
++ : "+m" (lock->slock) : : "memory");
+ }
+
+ /*
+ * It is easier for the lock validator if interrupts are not re-enabled
+ * in the middle of a lock-acquire. This is a performance feature anyway
+ * so we turn it off:
++ *
++ * NOTE: there's an irqs-on section here, which normally would have to be
++ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
+ */
+ #ifndef CONFIG_PROVE_LOCKING
+ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+ {
+- asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory");
++ asm volatile(
++ "\n1:\t"
++ LOCK_PREFIX "decb %0\n\t"
++ "jns 5f\n"
++ "2:\t"
++ "testl $0x200, %1\n\t"
++ "jz 4f\n\t"
++ STI_STRING "\n"
++ "3:\t"
++ "rep;nop\n\t"
++ "cmpb $0, %0\n\t"
++ "jle 3b\n\t"
++ CLI_STRING "\n\t"
++ "jmp 1b\n"
++ "4:\t"
++ "rep;nop\n\t"
++ "cmpb $0, %0\n\t"
++ "jg 1b\n\t"
++ "jmp 4b\n"
++ "5:\n\t"
++ : "+m" (lock->slock) : "r" (flags) : "memory");
+ }
+ #endif
+
+ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+ {
+ char oldval;
+- __asm__ __volatile__(
++ asm volatile(
+ "xchgb %b0,%1"
+ :"=q" (oldval), "+m" (lock->slock)
+ :"0" (0) : "memory");
+@@ -93,38 +94,29 @@ static inline int __raw_spin_trylock(raw
+
+ #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
+
+-#define __raw_spin_unlock_string \
+- "movb $1,%0" \
+- :"+m" (lock->slock) : : "memory"
+-
+-
+ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+ {
+- __asm__ __volatile__(
+- __raw_spin_unlock_string
+- );
++ asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory");
+ }
+
+ #else
+
+-#define __raw_spin_unlock_string \
+- "xchgb %b0, %1" \
+- :"=q" (oldval), "+m" (lock->slock) \
+- :"0" (oldval) : "memory"
+-
+ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+ {
+ char oldval = 1;
+
+- __asm__ __volatile__(
+- __raw_spin_unlock_string
+- );
++ asm volatile("xchgb %b0, %1"
++ : "=q" (oldval), "+m" (lock->slock)
++ : "0" (oldval) : "memory");
+ }
+
+ #endif
+
+-#define __raw_spin_unlock_wait(lock) \
+- do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
++static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
++{
++ while (__raw_spin_is_locked(lock))
++ cpu_relax();
++}
+
+ /*
+ * Read-write spinlocks, allowing multiple readers
+@@ -151,22 +143,36 @@ static inline void __raw_spin_unlock(raw
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+-#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
++static inline int __raw_read_can_lock(raw_rwlock_t *x)
++{
++ return (int)(x)->lock > 0;
++}
+
+ /**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
++static inline int __raw_write_can_lock(raw_rwlock_t *x)
++{
++ return (x)->lock == RW_LOCK_BIAS;
++}
+
+ static inline void __raw_read_lock(raw_rwlock_t *rw)
+ {
+- __build_read_lock(rw, "__read_lock_failed");
++ asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
++ "jns 1f\n"
++ "call __read_lock_failed\n\t"
++ "1:\n"
++ ::"a" (rw) : "memory");
+ }
+
+ static inline void __raw_write_lock(raw_rwlock_t *rw)
+ {
+- __build_write_lock(rw, "__write_lock_failed");
++ asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t"
++ "jz 1f\n"
++ "call __write_lock_failed\n\t"
++ "1:\n"
++ ::"a" (rw) : "memory");
+ }
+
+ static inline int __raw_read_trylock(raw_rwlock_t *lock)
+@@ -199,4 +205,8 @@ static inline void __raw_write_unlock(ra
+ : "+m" (rw->lock) : : "memory");
+ }
+
++#define _raw_spin_relax(lock) cpu_relax()
++#define _raw_read_relax(lock) cpu_relax()
++#define _raw_write_relax(lock) cpu_relax()
++
+ #endif /* __ASM_SPINLOCK_H */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/system.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/system.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/system.h 2007-10-22 13:53:08.000000000 +0200
+@@ -267,6 +267,9 @@ static inline unsigned long __xchg(unsig
+ #define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
++#define sync_cmpxchg(ptr,o,n)\
++ ((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
++ (unsigned long)(n),sizeof(*(ptr))))
+ #endif
+
+ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+@@ -296,6 +299,39 @@ static inline unsigned long __cmpxchg(vo
+ return old;
+ }
+
++/*
++ * Always use locked operations when touching memory shared with a
++ * hypervisor, since the system may be SMP even if the guest kernel
++ * isn't.
++ */
++static inline unsigned long __sync_cmpxchg(volatile void *ptr,
++ unsigned long old,
++ unsigned long new, int size)
++{
++ unsigned long prev;
++ switch (size) {
++ case 1:
++ __asm__ __volatile__("lock; cmpxchgb %b1,%2"
++ : "=a"(prev)
++ : "q"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 2:
++ __asm__ __volatile__("lock; cmpxchgw %w1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 4:
++ __asm__ __volatile__("lock; cmpxchgl %1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ }
++ return old;
++}
++
+ #ifndef CONFIG_X86_CMPXCHG
+ /*
+ * Building a kernel capable running on 80386. It may be necessary to
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/tlbflush.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/tlbflush.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/tlbflush.h 2007-10-22 13:53:08.000000000 +0200
+@@ -8,8 +8,6 @@
+ #define __flush_tlb_global() xen_tlb_flush()
+ #define __flush_tlb_all() xen_tlb_flush()
+
+-extern unsigned long pgkern_mask;
+-
+ #define cpu_has_invlpg (boot_cpu_data.x86 > 3)
+
+ #define __flush_tlb_single(addr) xen_invlpg(addr)
+Index: 10.3-2007-11-26/include/asm-x86_64/acpi.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/acpi.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/acpi.h 2007-10-22 13:53:08.000000000 +0200
+@@ -136,7 +136,9 @@ extern void acpi_reserve_bootmem(void);
+ extern int acpi_disabled;
+ extern int acpi_pci_disabled;
+
++#ifndef CONFIG_XEN
+ #define ARCH_HAS_POWER_INIT 1
++#endif
+
+ extern int acpi_skip_timer_override;
+ extern int acpi_use_timer_override;
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/arch_hooks.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/arch_hooks.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/arch_hooks.h 2007-10-22 13:53:08.000000000 +0200
+@@ -14,7 +14,7 @@
+ extern void init_ISA_irqs(void);
+ extern void apic_intr_init(void);
+ extern void smp_intr_init(void);
+-extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
++extern irqreturn_t timer_interrupt(int irq, void *dev_id);
+
+ /* these are the defined hooks */
+ extern void intr_init_hook(void);
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/dma-mapping.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/dma-mapping.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/dma-mapping.h 2007-10-22 13:53:08.000000000 +0200
+@@ -55,13 +55,6 @@ extern dma_addr_t bad_dma_address;
+ extern struct dma_mapping_ops* dma_ops;
+ extern int iommu_merge;
+
+-static inline int valid_dma_direction(int dma_direction)
+-{
+- return ((dma_direction == DMA_BIDIRECTIONAL) ||
+- (dma_direction == DMA_TO_DEVICE) ||
+- (dma_direction == DMA_FROM_DEVICE));
+-}
+-
+ #if 0
+ static inline int dma_mapping_error(dma_addr_t dma_addr)
+ {
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/e820.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/e820.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/e820.h 2007-10-22 13:53:08.000000000 +0200
+@@ -19,13 +19,9 @@
+
+ #define E820_RAM 1
+ #define E820_RESERVED 2
+-#define E820_ACPI 3 /* usable as RAM once ACPI tables have been read */
++#define E820_ACPI 3
+ #define E820_NVS 4
+
+-#define HIGH_MEMORY (1024*1024)
+-
+-#define LOWMEMSIZE() (0x9f000)
+-
+ #ifndef __ASSEMBLY__
+ struct e820entry {
+ u64 addr; /* start of memory segment */
+@@ -46,17 +42,16 @@ extern void setup_memory_region(void);
+ extern void contig_e820_setup(void);
+ extern unsigned long e820_end_of_ram(void);
+ extern void e820_reserve_resources(struct e820entry *e820, int nr_map);
++extern void e820_mark_nosave_regions(void);
+ extern void e820_print_map(char *who);
+ extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
+ extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
+
+-extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end);
+ extern void e820_setup_gap(struct e820entry *e820, int nr_map);
+-extern unsigned long e820_hole_size(unsigned long start_pfn,
+- unsigned long end_pfn);
++extern void e820_register_active_regions(int nid,
++ unsigned long start_pfn, unsigned long end_pfn);
+
+-extern void __init parse_memopt(char *p, char **end);
+-extern void __init parse_memmapopt(char *p, char **end);
++extern void finish_e820_parsing(void);
+
+ extern struct e820map e820;
+
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/fixmap.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/fixmap.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/fixmap.h 2007-10-22 13:53:08.000000000 +0200
+@@ -41,7 +41,7 @@ enum fixed_addresses {
+ #ifdef CONFIG_X86_LOCAL_APIC
+ FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
+ #endif
+-#ifdef CONFIG_X86_IO_APIC
++#ifndef CONFIG_XEN
+ FIX_IO_APIC_BASE_0,
+ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
+ #endif
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/floppy.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/floppy.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/floppy.h 2007-10-22 13:53:08.000000000 +0200
+@@ -54,7 +54,7 @@ static char *virtual_dma_addr;
+ static int virtual_dma_mode;
+ static int doing_pdma;
+
+-static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
++static irqreturn_t floppy_hardint(int irq, void *dev_id)
+ {
+ register unsigned char st;
+
+@@ -66,7 +66,7 @@ static irqreturn_t floppy_hardint(int ir
+ static int dma_wait=0;
+ #endif
+ if (!doing_pdma)
+- return floppy_interrupt(irq, dev_id, regs);
++ return floppy_interrupt(irq, dev_id);
+
+ #ifdef TRACE_FLPY_INT
+ if(!calls)
+@@ -109,7 +109,7 @@ static irqreturn_t floppy_hardint(int ir
+ dma_wait=0;
+ #endif
+ doing_pdma = 0;
+- floppy_interrupt(irq, dev_id, regs);
++ floppy_interrupt(irq, dev_id);
+ return IRQ_HANDLED;
+ }
+ #ifdef TRACE_FLPY_INT
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/hw_irq.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/hw_irq.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/hw_irq.h 2007-10-22 13:53:08.000000000 +0200
+@@ -19,8 +19,7 @@
+ #include <asm/irq.h>
+ #include <linux/profile.h>
+ #include <linux/smp.h>
+-
+-struct hw_interrupt_type;
++#include <linux/percpu.h>
+ #endif
+
+ #define NMI_VECTOR 0x02
+@@ -77,9 +76,10 @@ struct hw_interrupt_type;
+
+
+ #ifndef __ASSEMBLY__
+-extern u8 irq_vector[NR_IRQ_VECTORS];
+-#define IO_APIC_VECTOR(irq) (irq_vector[irq])
+-#define AUTO_ASSIGN -1
++typedef int vector_irq_t[NR_VECTORS];
++DECLARE_PER_CPU(vector_irq_t, vector_irq);
++extern void __setup_vector_irq(int cpu);
++extern spinlock_t vector_lock;
+
+ /*
+ * Various low-level irq details needed by irq.c, process.c,
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/io.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/io.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/io.h 2007-10-22 13:53:08.000000000 +0200
+@@ -273,33 +273,6 @@ void memset_io(volatile void __iomem *a,
+
+ #define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d))
+
+-/**
+- * check_signature - find BIOS signatures
+- * @io_addr: mmio address to check
+- * @signature: signature block
+- * @length: length of signature
+- *
+- * Perform a signature comparison with the mmio address io_addr. This
+- * address should have been obtained by ioremap.
+- * Returns 1 on a match.
+- */
+-
+-static inline int check_signature(void __iomem *io_addr,
+- const unsigned char *signature, int length)
+-{
+- int retval = 0;
+- do {
+- if (readb(io_addr) != *signature)
+- goto out;
+- io_addr++;
+- signature++;
+- length--;
+- } while (length);
+- retval = 1;
+-out:
+- return retval;
+-}
+-
+ /* Nothing to do */
+
+ #define dma_cache_inv(_start,_size) do { } while (0)
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/msr.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/msr.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/msr.h 2007-10-22 13:53:08.000000000 +0200
+@@ -66,14 +66,25 @@
+ #define rdtscl(low) \
+ __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
+
++#define rdtscp(low,high,aux) \
++ asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
++
+ #define rdtscll(val) do { \
+ unsigned int __a,__d; \
+ asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+ (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
+ } while(0)
+
++#define rdtscpll(val, aux) do { \
++ unsigned long __a, __d; \
++ asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
++ (val) = (__d << 32) | __a; \
++} while (0)
++
+ #define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
++#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
++
+ #define rdpmc(counter,low,high) \
+ __asm__ __volatile__("rdpmc" \
+ : "=a" (low), "=d" (high) \
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/nmi.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/nmi.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/nmi.h 2007-10-22 13:53:08.000000000 +0200
+@@ -9,24 +9,13 @@
+
+ #include <xen/interface/nmi.h>
+
+-struct pt_regs;
+-
+-typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
+-
+-/**
+- * set_nmi_callback
+- *
+- * Set a handler for an NMI. Only one handler may be
+- * set. Return 1 if the NMI was handled.
+- */
+-void set_nmi_callback(nmi_callback_t callback);
+-
+ /**
+- * unset_nmi_callback
++ * do_nmi_callback
+ *
+- * Remove the handler previously set.
++ * Check to see if a callback exists and execute it. Return 1
++ * if the handler exists and was handled successfully.
+ */
+-void unset_nmi_callback(void);
++int do_nmi_callback(struct pt_regs *regs, int cpu);
+
+ #ifdef CONFIG_PM
+
+@@ -50,7 +39,7 @@ static inline void unset_nmi_pm_callback
+ #endif /* CONFIG_PM */
+
+ extern void default_do_nmi(struct pt_regs *);
+-extern void die_nmi(char *str, struct pt_regs *regs);
++extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
+
+ static inline unsigned char get_nmi_reason(void)
+ {
+@@ -70,19 +59,26 @@ static inline unsigned char get_nmi_reas
+
+ extern int panic_on_timeout;
+ extern int unknown_nmi_panic;
++extern int nmi_watchdog_enabled;
+
+ extern int check_nmi_watchdog(void);
+-
+-extern void setup_apic_nmi_watchdog (void);
+-extern int reserve_lapic_nmi(void);
+-extern void release_lapic_nmi(void);
++extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
++extern int avail_to_resrv_perfctr_nmi(unsigned int);
++extern int reserve_perfctr_nmi(unsigned int);
++extern void release_perfctr_nmi(unsigned int);
++extern int reserve_evntsel_nmi(unsigned int);
++extern void release_evntsel_nmi(unsigned int);
++
++extern void setup_apic_nmi_watchdog (void *);
++extern void stop_apic_nmi_watchdog (void *);
+ extern void disable_timer_nmi_watchdog(void);
+ extern void enable_timer_nmi_watchdog(void);
+-extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
++extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
+
+ extern void nmi_watchdog_default(void);
+ extern int setup_nmi_watchdog(char *);
+
++extern atomic_t nmi_active;
+ extern unsigned int nmi_watchdog;
+ #define NMI_DEFAULT -1
+ #define NMI_NONE 0
+@@ -90,4 +86,11 @@ extern unsigned int nmi_watchdog;
+ #define NMI_LOCAL_APIC 2
+ #define NMI_INVALID 3
+
++struct ctl_table;
++struct file;
++extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
++ void __user *, size_t *, loff_t *);
++
++extern int unknown_nmi_panic;
++
+ #endif /* ASM_NMI_H */
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/pgtable.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/pgtable.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/pgtable.h 2007-10-22 13:53:08.000000000 +0200
+@@ -44,12 +44,9 @@ extern unsigned long __supported_pte_mas
+
+ #define swapper_pg_dir init_level4_pgt
+
+-extern int nonx_setup(char *str);
+ extern void paging_init(void);
+ extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
+
+-extern unsigned long pgkern_mask;
+-
+ /*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+@@ -115,9 +112,6 @@ static inline void pgd_clear (pgd_t * pg
+ set_pgd(__user_pgd(pgd), __pgd(0));
+ }
+
+-#define pud_page(pud) \
+- ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
+-
+ #define pte_same(a, b) ((a).pte == (b).pte)
+
+ #define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
+@@ -326,7 +320,7 @@ static inline pte_t ptep_get_and_clear_f
+ #define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
+ static inline int pte_user(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
+ static inline int pte_read(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
+-static inline int pte_exec(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
++static inline int pte_exec(pte_t pte) { return !(__pte_val(pte) & _PAGE_NX); }
+ static inline int pte_dirty(pte_t pte) { return __pte_val(pte) & _PAGE_DIRTY; }
+ static inline int pte_young(pte_t pte) { return __pte_val(pte) & _PAGE_ACCESSED; }
+ static inline int pte_write(pte_t pte) { return __pte_val(pte) & _PAGE_RW; }
+@@ -339,29 +333,12 @@ static inline pte_t pte_mkclean(pte_t pt
+ static inline pte_t pte_mkold(pte_t pte) { __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+ static inline pte_t pte_wrprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_RW; return pte; }
+ static inline pte_t pte_mkread(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
+-static inline pte_t pte_mkexec(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkexec(pte_t pte) { __pte_val(pte) &= ~_PAGE_NX; return pte; }
+ static inline pte_t pte_mkdirty(pte_t pte) { __pte_val(pte) |= _PAGE_DIRTY; return pte; }
+ static inline pte_t pte_mkyoung(pte_t pte) { __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+ static inline pte_t pte_mkwrite(pte_t pte) { __pte_val(pte) |= _PAGE_RW; return pte; }
+ static inline pte_t pte_mkhuge(pte_t pte) { __pte_val(pte) |= _PAGE_PSE; return pte; }
+-
+-#define ptep_test_and_clear_dirty(vma, addr, ptep) \
+-({ \
+- pte_t __pte = *(ptep); \
+- int __ret = pte_dirty(__pte); \
+- if (__ret) \
+- set_pte_at((vma)->vm_mm, addr, ptep, pte_mkclean(__pte)); \
+- __ret; \
+-})
+-
+-#define ptep_test_and_clear_young(vma, addr, ptep) \
+-({ \
+- pte_t __pte = *(ptep); \
+- int __ret = pte_young(__pte); \
+- if (__ret) \
+- set_pte_at((vma)->vm_mm, addr, ptep, pte_mkold(__pte)); \
+- __ret; \
+-})
++static inline pte_t pte_clrhuge(pte_t pte) { __pte_val(pte) &= ~_PAGE_PSE; return pte; }
+
+ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+@@ -389,7 +366,8 @@ static inline int pmd_large(pmd_t pte) {
+ * Level 4 access.
+ * Never use these in the common code.
+ */
+-#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
++#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
++#define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT))
+ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+ #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
+ #define pgd_offset_k(address) (pgd_t *)(init_level4_pgt + pgd_index(address))
+@@ -398,16 +376,18 @@ static inline int pmd_large(pmd_t pte) {
+
+ /* PUD - Level3 access */
+ /* to find an entry in a page-table-directory. */
++#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
++#define pud_page(pud) (pfn_to_page(pud_val(pud) >> PAGE_SHIFT))
+ #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+-#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
++#define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
+ #define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
+
+ /* PMD - Level 2 access */
+-#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
++#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
+ #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+
+ #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+-#define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
++#define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \
+ pmd_index(address))
+ #define pmd_none(x) (!pmd_val(x))
+ #if CONFIG_XEN_COMPAT <= 0x030002
+@@ -438,6 +418,7 @@ static inline pte_t mk_pte_phys(unsigned
+ {
+ unsigned long pteval;
+ pteval = physpage | pgprot_val(pgprot);
++ pteval &= __supported_pte_mask;
+ return __pte(pteval);
+ }
+
+@@ -459,7 +440,7 @@ static inline pte_t pte_modify(pte_t pte
+
+ #define pte_index(address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+-#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
++#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
+ pte_index(address))
+
+ /* x86-64 always has all page tables mapped. */
+@@ -500,6 +481,40 @@ static inline pte_t pte_modify(pte_t pte
+ ptep_establish(vma, address, ptep, entry); \
+ } while (0)
+
++
++/*
++ * i386 says: We don't actually have these, but we want to advertise
++ * them so that we can encompass the flush here.
++ */
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
++
++#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
++#define ptep_clear_flush_dirty(vma, address, ptep) \
++({ \
++ pte_t __pte = *(ptep); \
++ int __dirty = pte_dirty(__pte); \
++ __pte = pte_mkclean(__pte); \
++ if ((vma)->vm_mm->context.pinned) \
++ ptep_set_access_flags(vma, address, ptep, __pte, __dirty); \
++ else if (__dirty) \
++ set_pte(ptep, __pte); \
++ __dirty; \
++})
++
++#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
++#define ptep_clear_flush_young(vma, address, ptep) \
++({ \
++ pte_t __pte = *(ptep); \
++ int __young = pte_young(__pte); \
++ __pte = pte_mkold(__pte); \
++ if ((vma)->vm_mm->context.pinned) \
++ ptep_set_access_flags(vma, address, ptep, __pte, __young); \
++ else if (__young) \
++ set_pte(ptep, __pte); \
++ __young; \
++})
++
+ /* Encode and de-code a swap entry */
+ #define __swp_type(x) (((x).val >> 1) & 0x3f)
+ #define __swp_offset(x) ((x).val >> 8)
+@@ -560,8 +575,6 @@ int touch_pte_range(struct mm_struct *mm
+ #define kc_offset_to_vaddr(o) \
+ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
+
+-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+ #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+ #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+ #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/processor.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/processor.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/processor.h 2007-10-22 13:53:08.000000000 +0200
+@@ -488,6 +488,8 @@ static inline void __mwait(unsigned long
+ : :"a" (eax), "c" (ecx));
+ }
+
++extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
++
+ #define stack_current() \
+ ({ \
+ struct thread_info *ti; \
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/ptrace.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/ptrace.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/ptrace.h 2007-10-22 13:53:08.000000000 +0200
+@@ -1,40 +1,9 @@
+ #ifndef _X86_64_PTRACE_H
+ #define _X86_64_PTRACE_H
+
+-#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
+-#define R15 0
+-#define R14 8
+-#define R13 16
+-#define R12 24
+-#define RBP 32
+-#define RBX 40
+-/* arguments: interrupts/non tracing syscalls only save upto here*/
+-#define R11 48
+-#define R10 56
+-#define R9 64
+-#define R8 72
+-#define RAX 80
+-#define RCX 88
+-#define RDX 96
+-#define RSI 104
+-#define RDI 112
+-#define ORIG_RAX 120 /* = ERROR */
+-/* end of arguments */
+-/* cpu exception frame or undefined in case of fast syscall. */
+-#define RIP 128
+-#define CS 136
+-#define EFLAGS 144
+-#define RSP 152
+-#define SS 160
+-#define ARGOFFSET R11
+-#endif /* __ASSEMBLY__ */
++#include <asm/ptrace-abi.h>
+
+-/* top of stack page */
+-#define FRAME_SIZE 168
+-
+-#define PTRACE_OLDSETOPTIONS 21
+-
+-#ifndef __ASSEMBLY__
++#ifndef __ASSEMBLY__
+
+ struct pt_regs {
+ unsigned long r15;
+@@ -45,7 +14,7 @@ struct pt_regs {
+ unsigned long rbx;
+ /* arguments: non interrupts/non tracing syscalls only save upto here*/
+ unsigned long r11;
+- unsigned long r10;
++ unsigned long r10;
+ unsigned long r9;
+ unsigned long r8;
+ unsigned long rax;
+@@ -54,41 +23,25 @@ struct pt_regs {
+ unsigned long rsi;
+ unsigned long rdi;
+ unsigned long orig_rax;
+-/* end of arguments */
++/* end of arguments */
+ /* cpu exception frame or undefined */
+ unsigned long rip;
+ unsigned long cs;
+- unsigned long eflags;
+- unsigned long rsp;
++ unsigned long eflags;
++ unsigned long rsp;
+ unsigned long ss;
+-/* top of stack page */
++/* top of stack page */
+ };
+
+ #endif
+
+-/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+-#define PTRACE_GETREGS 12
+-#define PTRACE_SETREGS 13
+-#define PTRACE_GETFPREGS 14
+-#define PTRACE_SETFPREGS 15
+-#define PTRACE_GETFPXREGS 18
+-#define PTRACE_SETFPXREGS 19
+-
+-/* only useful for access 32bit programs */
+-#define PTRACE_GET_THREAD_AREA 25
+-#define PTRACE_SET_THREAD_AREA 26
+-
+-#define PTRACE_ARCH_PRCTL 30 /* arch_prctl for child */
+-
+ #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+ #define user_mode(regs) (!!((regs)->cs & 3))
+ #define user_mode_vm(regs) user_mode(regs)
+ #define instruction_pointer(regs) ((regs)->rip)
+-#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
++#define regs_return_value(regs) ((regs)->rax)
++
+ extern unsigned long profile_pc(struct pt_regs *regs);
+-#else
+-#define profile_pc(regs) instruction_pointer(regs)
+-#endif
+
+ #include <linux/compiler.h>
+
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/smp.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/smp.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/smp.h 2007-10-22 13:53:08.000000000 +0200
+@@ -4,15 +4,12 @@
+ /*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
+-#ifndef __ASSEMBLY__
+ #include <linux/threads.h>
+ #include <linux/cpumask.h>
+ #include <linux/bitops.h>
+ extern int disable_apic;
+-#endif
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+-#ifndef __ASSEMBLY__
+ #include <asm/fixmap.h>
+ #include <asm/mpspec.h>
+ #ifdef CONFIG_X86_IO_APIC
+@@ -21,10 +18,8 @@ extern int disable_apic;
+ #include <asm/apic.h>
+ #include <asm/thread_info.h>
+ #endif
+-#endif
+
+ #ifdef CONFIG_SMP
+-#ifndef ASSEMBLY
+
+ #include <asm/pda.h>
+
+@@ -41,14 +36,11 @@ extern cpumask_t cpu_initialized;
+
+ extern void smp_alloc_memory(void);
+ extern volatile unsigned long smp_invalidate_needed;
+-extern int pic_mode;
+ extern void lock_ipi_call_lock(void);
+ extern void unlock_ipi_call_lock(void);
+ extern int smp_num_siblings;
+ extern void smp_send_reschedule(int cpu);
+ void smp_stop_cpu(void);
+-extern int smp_call_function_single(int cpuid, void (*func) (void *info),
+- void *info, int retry, int wait);
+
+ extern cpumask_t cpu_sibling_map[NR_CPUS];
+ extern cpumask_t cpu_core_map[NR_CPUS];
+@@ -77,20 +69,16 @@ static inline int hard_smp_processor_id(
+ }
+ #endif
+
+-extern int safe_smp_processor_id(void);
+ extern int __cpu_disable(void);
+ extern void __cpu_die(unsigned int cpu);
+ extern void prefill_possible_map(void);
+ extern unsigned num_processors;
+ extern unsigned disabled_cpus;
+
+-#endif /* !ASSEMBLY */
+-
+ #define NO_PROC_ID 0xFF /* No processor magic marker */
+
+ #endif
+
+-#ifndef ASSEMBLY
+ /*
+ * Some lowlevel functions might want to know about
+ * the real APIC ID <-> CPU # mapping.
+@@ -114,11 +102,8 @@ static inline int cpu_present_to_apicid(
+ }
+ #endif
+
+-#endif /* !ASSEMBLY */
+-
+ #ifndef CONFIG_SMP
+ #define stack_smp_processor_id() 0
+-#define safe_smp_processor_id() 0
+ #define cpu_logical_map(x) (x)
+ #else
+ #include <asm/thread_info.h>
+@@ -130,7 +115,6 @@ static inline int cpu_present_to_apicid(
+ })
+ #endif
+
+-#ifndef __ASSEMBLY__
+ #ifdef CONFIG_X86_LOCAL_APIC
+ static __inline int logical_smp_processor_id(void)
+ {
+@@ -138,13 +122,18 @@ static __inline int logical_smp_processo
+ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+ }
+ #endif
+-#endif
+
+ #ifdef CONFIG_SMP
+ #define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
+ #else
+ #define cpu_physical_id(cpu) boot_cpu_id
+-#endif
+-
++static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
++ void *info, int retry, int wait)
++{
++ /* Disable interrupts here? */
++ func(info);
++ return 0;
++}
++#endif /* !CONFIG_SMP */
+ #endif
+
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/system.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/system.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/system.h 2007-10-22 13:53:08.000000000 +0200
+@@ -24,12 +24,13 @@
+ #define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
+
+ /* frame pointer must be last for get_wchan */
+-#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t"
++#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
++#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
+
+ #define __EXTRA_CLOBBER \
+ ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
+
++/* Save restore flags to clear handle leaking NT */
+ #define switch_to(prev,next,last) \
+ asm volatile(SAVE_CONTEXT \
+ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
+Index: 10.3-2007-11-26/include/asm-x86_64/thread_info.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/thread_info.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/thread_info.h 2007-10-22 13:53:08.000000000 +0200
+@@ -147,7 +147,11 @@ static inline struct thread_info *stack_
+ #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
+
+ /* flags to check in __switch_to() */
++#ifndef CONFIG_XEN
+ #define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP)
++#else
++#define _TIF_WORK_CTXSW _TIF_DEBUG
++#endif
+
+ #define PREEMPT_ACTIVE 0x10000000
+
+Index: 10.3-2007-11-26/include/linux/skbuff.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/linux/skbuff.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/linux/skbuff.h 2007-10-22 13:53:08.000000000 +0200
+@@ -1729,5 +1729,11 @@ static inline void skb_forward_csum(stru
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
++#ifdef CONFIG_XEN
++int skb_checksum_setup(struct sk_buff *skb);
++#else
++static inline int skb_checksum_setup(struct sk_buff *skb) { return 0; }
++#endif
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_SKBUFF_H */
+Index: 10.3-2007-11-26/include/xen/evtchn.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/xen/evtchn.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/xen/evtchn.h 2007-10-22 13:53:08.000000000 +0200
+@@ -54,34 +54,34 @@
+ */
+ int bind_caller_port_to_irqhandler(
+ unsigned int caller_port,
+- irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id);
+ int bind_listening_port_to_irqhandler(
+ unsigned int remote_domain,
+- irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id);
+ int bind_interdomain_evtchn_to_irqhandler(
+ unsigned int remote_domain,
+ unsigned int remote_port,
+- irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id);
+ int bind_virq_to_irqhandler(
+ unsigned int virq,
+ unsigned int cpu,
+- irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id);
+ int bind_ipi_to_irqhandler(
+ unsigned int ipi,
+ unsigned int cpu,
+- irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id);
+Index: 10.3-2007-11-26/include/xen/xencons.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/xen/xencons.h 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/include/xen/xencons.h 2007-10-22 13:53:08.000000000 +0200
+@@ -8,7 +8,7 @@ void xencons_force_flush(void);
+ void xencons_resume(void);
+
+ /* Interrupt work hooks. Receive data, or kick data out. */
+-void xencons_rx(char *buf, unsigned len, struct pt_regs *regs);
++void xencons_rx(char *buf, unsigned len);
+ void xencons_tx(void);
+
+ int xencons_ring_init(void);
+Index: 10.3-2007-11-26/net/core/dev.c
+===================================================================
+--- 10.3-2007-11-26.orig/net/core/dev.c 2007-12-06 17:27:35.000000000 +0100
++++ 10.3-2007-11-26/net/core/dev.c 2007-10-22 13:53:08.000000000 +0200
+@@ -1487,15 +1487,13 @@ inline int skb_checksum_setup(struct sk_
+ }
+ if ((skb->h.raw + skb->csum + 2) > skb->tail)
+ goto out;
+- skb->ip_summed = CHECKSUM_HW;
++ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->proto_csum_blank = 0;
+ }
+ return 0;
+ out:
+ return -EPROTO;
+ }
+-#else
+-inline int skb_checksum_setup(struct sk_buff *skb) { return 0; }
+ #endif
+
+
+@@ -1928,7 +1926,7 @@ int netif_receive_skb(struct sk_buff *sk
+ case CHECKSUM_UNNECESSARY:
+ skb->proto_data_valid = 1;
+ break;
+- case CHECKSUM_HW:
++ case CHECKSUM_PARTIAL:
+ /* XXX Implement me. */
+ default:
+ skb->proto_data_valid = 0;
diff --git a/trunk/2.6.22/20045_xen3-patch-2.6.20.patch1 b/trunk/2.6.22/20045_xen3-patch-2.6.20.patch1
new file mode 100644
index 0000000..18d83c9
--- /dev/null
+++ b/trunk/2.6.22/20045_xen3-patch-2.6.20.patch1
@@ -0,0 +1,7592 @@
+From: www.kernel.org
+Subject: Linux 2.6.20
+Patch-mainline: 2.6.20
+
+Automatically created from "patches.kernel.org/patch-2.6.20" by xen-port-patches.py
+
+Acked-by: jbeulich@novell.com
+
+Index: 10.3-2007-11-26/arch/i386/Kconfig
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/Kconfig 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/Kconfig 2007-10-22 13:53:25.000000000 +0200
+@@ -876,7 +876,7 @@ config PHYSICAL_START
+
+ config RELOCATABLE
+ bool "Build a relocatable kernel(EXPERIMENTAL)"
+- depends on EXPERIMENTAL
++ depends on EXPERIMENTAL && !X86_XEN
+ help
+ This builds a kernel image that retains relocation information
+ so it can be loaded someplace besides the default 1MB.
+Index: 10.3-2007-11-26/arch/i386/kernel/acpi/boot-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/acpi/boot-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/acpi/boot-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -335,7 +335,7 @@ acpi_parse_ioapic(acpi_table_entry_heade
+ /*
+ * Parse Interrupt Source Override for the ACPI SCI
+ */
+-static void acpi_sci_ioapic_setup(u32 bus_irq, u32 gsi, u16 polarity, u16 trigger)
++static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
+ {
+ if (trigger == 0) /* compatible SCI trigger is level */
+ trigger = 3;
+@@ -355,13 +355,13 @@ static void acpi_sci_ioapic_setup(u32 bu
+ * If GSI is < 16, this will update its flags,
+ * else it will create a new mp_irqs[] entry.
+ */
+- mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
++ mp_override_legacy_irq(gsi, polarity, trigger, gsi);
+
+ /*
+ * stash over-ride to indicate we've been here
+ * and for later update of acpi_fadt
+ */
+- acpi_sci_override_gsi = bus_irq;
++ acpi_sci_override_gsi = gsi;
+ return;
+ }
+
+@@ -379,7 +379,7 @@ acpi_parse_int_src_ovr(acpi_table_entry_
+ acpi_table_print_madt_entry(header);
+
+ if (intsrc->bus_irq == acpi_fadt.sci_int) {
+- acpi_sci_ioapic_setup(intsrc->bus_irq, intsrc->global_irq,
++ acpi_sci_ioapic_setup(intsrc->global_irq,
+ intsrc->flags.polarity,
+ intsrc->flags.trigger);
+ return 0;
+@@ -883,7 +883,7 @@ static int __init acpi_parse_madt_ioapic
+ * pretend we got one so we can set the SCI flags.
+ */
+ if (!acpi_sci_override_gsi)
+- acpi_sci_ioapic_setup(acpi_fadt.sci_int, acpi_fadt.sci_int, 0, 0);
++ acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
+
+ /* Fill in identity legacy mapings where no override */
+ mp_config_acpi_legacy_irqs();
+@@ -1330,3 +1330,25 @@ static int __init setup_acpi_sci(char *s
+ return 0;
+ }
+ early_param("acpi_sci", setup_acpi_sci);
++
++int __acpi_acquire_global_lock(unsigned int *lock)
++{
++ unsigned int old, new, val;
++ do {
++ old = *lock;
++ new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
++ val = cmpxchg(lock, old, new);
++ } while (unlikely (val != old));
++ return (new < 3) ? -1 : 0;
++}
++
++int __acpi_release_global_lock(unsigned int *lock)
++{
++ unsigned int old, new, val;
++ do {
++ old = *lock;
++ new = old & ~0x3;
++ val = cmpxchg(lock, old, new);
++ } while (unlikely (val != old));
++ return old & 0x1;
++}
+Index: 10.3-2007-11-26/arch/i386/kernel/asm-offsets.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/asm-offsets.c 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/asm-offsets.c 2007-10-22 13:53:25.000000000 +0200
+@@ -16,6 +16,9 @@
+ #include <asm/processor.h>
+ #include <asm/thread_info.h>
+ #include <asm/elf.h>
++#ifdef CONFIG_XEN
++#include <xen/interface/xen.h>
++#endif
+
+ #define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+@@ -55,6 +58,7 @@ void foo(void)
+ OFFSET(TI_exec_domain, thread_info, exec_domain);
+ OFFSET(TI_flags, thread_info, flags);
+ OFFSET(TI_status, thread_info, status);
++ OFFSET(TI_cpu, thread_info, cpu);
+ OFFSET(TI_preempt_count, thread_info, preempt_count);
+ OFFSET(TI_addr_limit, thread_info, addr_limit);
+ OFFSET(TI_restart_block, thread_info, restart_block);
+@@ -111,6 +115,11 @@ void foo(void)
+
+ OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+
++#ifdef CONFIG_XEN
++ BLANK();
++ OFFSET(XEN_START_mfn_list, start_info, mfn_list);
++#endif
++
+ #ifdef CONFIG_PARAVIRT
+ BLANK();
+ OFFSET(PARAVIRT_enabled, paravirt_ops, paravirt_enabled);
+Index: 10.3-2007-11-26/arch/i386/kernel/cpu/common-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/cpu/common-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/cpu/common-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -22,6 +22,7 @@
+ #define phys_pkg_id(a,b) a
+ #endif
+ #endif
++#include <asm/pda.h>
+ #include <asm/hypervisor.h>
+
+ #include "cpu.h"
+@@ -29,10 +30,8 @@
+ DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
+ EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
+
+-#ifndef CONFIG_XEN
+-DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
+-EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
+-#endif
++struct i386_pda *_cpu_pda[NR_CPUS] __read_mostly;
++EXPORT_SYMBOL(_cpu_pda);
+
+ static int cachesize_override __cpuinitdata = -1;
+ static int disable_x86_fxsr __cpuinitdata;
+@@ -60,7 +59,7 @@ static struct cpu_dev __cpuinitdata defa
+ .c_init = default_init,
+ .c_vendor = "Unknown",
+ };
+-static struct cpu_dev * this_cpu = &default_cpu;
++static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu;
+
+ static int __init cachesize_setup(char *str)
+ {
+@@ -242,29 +241,14 @@ static int __cpuinit have_cpuid_p(void)
+ return flag_is_changeable_p(X86_EFLAGS_ID);
+ }
+
+-/* Do minimum CPU detection early.
+- Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
+- The others are not touched to avoid unwanted side effects.
+-
+- WARNING: this function is only called on the BP. Don't add code here
+- that is supposed to run on all CPUs. */
+-static void __init early_cpu_detect(void)
++void __init cpu_detect(struct cpuinfo_x86 *c)
+ {
+- struct cpuinfo_x86 *c = &boot_cpu_data;
+-
+- c->x86_cache_alignment = 32;
+-
+- if (!have_cpuid_p())
+- return;
+-
+ /* Get vendor name */
+ cpuid(0x00000000, &c->cpuid_level,
+ (int *)&c->x86_vendor_id[0],
+ (int *)&c->x86_vendor_id[8],
+ (int *)&c->x86_vendor_id[4]);
+
+- get_cpu_vendor(c, 1);
+-
+ c->x86 = 4;
+ if (c->cpuid_level >= 0x00000001) {
+ u32 junk, tfms, cap0, misc;
+@@ -281,6 +265,26 @@ static void __init early_cpu_detect(void
+ }
+ }
+
++/* Do minimum CPU detection early.
++ Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
++ The others are not touched to avoid unwanted side effects.
++
++ WARNING: this function is only called on the BP. Don't add code here
++ that is supposed to run on all CPUs. */
++static void __init early_cpu_detect(void)
++{
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++
++ c->x86_cache_alignment = 32;
++
++ if (!have_cpuid_p())
++ return;
++
++ cpu_detect(c);
++
++ get_cpu_vendor(c, 1);
++}
++
+ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
+ {
+ u32 tfms, xlvl;
+@@ -315,6 +319,8 @@ static void __cpuinit generic_identify(s
+ #else
+ c->apicid = (ebx >> 24) & 0xFF;
+ #endif
++ if (c->x86_capability[0] & (1<<19))
++ c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
+ } else {
+ /* Have CPUID level 0 only - unheard of */
+ c->x86 = 4;
+@@ -379,6 +385,7 @@ void __cpuinit identify_cpu(struct cpuin
+ c->x86_vendor_id[0] = '\0'; /* Unset */
+ c->x86_model_id[0] = '\0'; /* Unset */
+ c->x86_max_cores = 1;
++ c->x86_clflush_size = 32;
+ memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+ if (!have_cpuid_p()) {
+@@ -599,61 +606,23 @@ void __init early_cpu_init(void)
+ #endif
+ }
+
+-void __cpuinit cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
++/* Make sure %gs is initialized properly in idle threads */
++struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
+ {
+- unsigned long frames[16];
+- unsigned long va;
+- int f;
+-
+- for (va = gdt_descr->address, f = 0;
+- va < gdt_descr->address + gdt_descr->size;
+- va += PAGE_SIZE, f++) {
+- frames[f] = virt_to_mfn(va);
+- make_lowmem_page_readonly(
+- (void *)va, XENFEAT_writable_descriptor_tables);
+- }
+- if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
+- BUG();
++ memset(regs, 0, sizeof(struct pt_regs));
++ regs->xgs = __KERNEL_PDA;
++ return regs;
+ }
+
+-/*
+- * cpu_init() initializes state that is per-CPU. Some data is already
+- * initialized (naturally) in the bootstrap process, such as the GDT
+- * and IDT. We reload them nevertheless, this function acts as a
+- * 'CPU state barrier', nothing should get across.
+- */
+-void __cpuinit cpu_init(void)
++static __cpuinit int alloc_gdt(int cpu)
+ {
+- int cpu = smp_processor_id();
+-#ifndef CONFIG_X86_NO_TSS
+- struct tss_struct * t = &per_cpu(init_tss, cpu);
+-#endif
+- struct thread_struct *thread = &current->thread;
+- struct desc_struct *gdt;
+ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++ struct desc_struct *gdt;
++ struct i386_pda *pda;
+
+- if (cpu_test_and_set(cpu, cpu_initialized)) {
+- printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
+- for (;;) local_irq_enable();
+- }
+- printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+-
+- if (cpu_has_vme || cpu_has_de)
+- clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+- if (tsc_disable && cpu_has_tsc) {
+- printk(KERN_NOTICE "Disabling TSC...\n");
+- /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
+- clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
+- set_in_cr4(X86_CR4_TSD);
+- }
++ gdt = (struct desc_struct *)cpu_gdt_descr->address;
++ pda = cpu_pda(cpu);
+
+-#ifndef CONFIG_XEN
+- /* The CPU hotplug case */
+- if (cpu_gdt_descr->address) {
+- gdt = (struct desc_struct *)cpu_gdt_descr->address;
+- memset(gdt, 0, PAGE_SIZE);
+- goto old_gdt;
+- }
+ /*
+ * This is a horrible hack to allocate the GDT. The problem
+ * is that cpu_init() is called really early for the boot CPU
+@@ -661,54 +630,141 @@ void __cpuinit cpu_init(void)
+ * CPUs, when bootmem will have gone away
+ */
+ if (NODE_DATA(0)->bdata->node_bootmem_map) {
+- gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
+- /* alloc_bootmem_pages panics on failure, so no check */
++ BUG_ON(gdt != NULL || pda != NULL);
++
++ gdt = alloc_bootmem_pages(PAGE_SIZE);
++ pda = alloc_bootmem(sizeof(*pda));
++ /* alloc_bootmem(_pages) panics on failure, so no check */
++
+ memset(gdt, 0, PAGE_SIZE);
++ memset(pda, 0, sizeof(*pda));
+ } else {
+- gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
+- if (unlikely(!gdt)) {
+- printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
+- for (;;)
+- local_irq_enable();
++ /* GDT and PDA might already have been allocated if
++ this is a CPU hotplug re-insertion. */
++ if (gdt == NULL)
++ gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
++
++ if (pda == NULL)
++ pda = kmalloc_node(sizeof(*pda), GFP_KERNEL, cpu_to_node(cpu));
++
++ if (unlikely(!gdt || !pda)) {
++ free_pages((unsigned long)gdt, 0);
++ kfree(pda);
++ return 0;
+ }
+ }
+-old_gdt:
++
++ cpu_gdt_descr->address = (unsigned long)gdt;
++ cpu_pda(cpu) = pda;
++
++ return 1;
++}
++
++/* Initial PDA used by boot CPU */
++struct i386_pda boot_pda = {
++ ._pda = &boot_pda,
++ .cpu_number = 0,
++ .pcurrent = &init_task,
++};
++
++static inline void set_kernel_gs(void)
++{
++ /* Set %gs for this CPU's PDA. Memory clobber is to create a
++ barrier with respect to any PDA operations, so the compiler
++ doesn't move any before here. */
++ asm volatile ("mov %0, %%gs" : : "r" (__KERNEL_PDA) : "memory");
++}
++
++/* Initialize the CPU's GDT and PDA. The boot CPU does this for
++ itself, but secondaries find this done for them. */
++__cpuinit int init_gdt(int cpu, struct task_struct *idle)
++{
++ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++ struct desc_struct *gdt;
++ struct i386_pda *pda;
++
++ /* For non-boot CPUs, the GDT and PDA should already have been
++ allocated. */
++ if (!alloc_gdt(cpu)) {
++ printk(KERN_CRIT "CPU%d failed to allocate GDT or PDA\n", cpu);
++ return 0;
++ }
++
++ gdt = (struct desc_struct *)cpu_gdt_descr->address;
++ pda = cpu_pda(cpu);
++
++ BUG_ON(gdt == NULL || pda == NULL);
++
+ /*
+ * Initialize the per-CPU GDT with the boot GDT,
+ * and set up the GDT descriptor:
+ */
+ memcpy(gdt, cpu_gdt_table, GDT_SIZE);
++ cpu_gdt_descr->size = GDT_SIZE - 1;
+
+- /* Set up GDT entry for 16bit stack */
+- *(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
+- ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
+- ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
+- (CPU_16BIT_STACK_SIZE - 1);
++ pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a,
++ (u32 *)&gdt[GDT_ENTRY_PDA].b,
++ (unsigned long)pda, sizeof(*pda) - 1,
++ 0x80 | DESCTYPE_S | 0x2, 0); /* present read-write data segment */
++
++ memset(pda, 0, sizeof(*pda));
++ pda->_pda = pda;
++ pda->cpu_number = cpu;
++ pda->pcurrent = idle;
+
+- cpu_gdt_descr->size = GDT_SIZE - 1;
+- cpu_gdt_descr->address = (unsigned long)gdt;
+-#else
+- if (cpu == 0 && cpu_gdt_descr->address == 0) {
+- gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
+- /* alloc_bootmem_pages panics on failure, so no check */
+- memset(gdt, 0, PAGE_SIZE);
++ return 1;
++}
+
+- memcpy(gdt, cpu_gdt_table, GDT_SIZE);
+-
+- cpu_gdt_descr->size = GDT_SIZE;
+- cpu_gdt_descr->address = (unsigned long)gdt;
++void __cpuinit cpu_set_gdt(int cpu)
++{
++ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++ unsigned long va, frames[16];
++ int f;
++
++ for (va = cpu_gdt_descr->address, f = 0;
++ va < cpu_gdt_descr->address + cpu_gdt_descr->size;
++ va += PAGE_SIZE, f++) {
++ frames[f] = virt_to_mfn(va);
++ make_lowmem_page_readonly(
++ (void *)va, XENFEAT_writable_descriptor_tables);
+ }
++ BUG_ON(HYPERVISOR_set_gdt(frames, cpu_gdt_descr->size / 8));
++
++ set_kernel_gs();
++}
++
++/* Common CPU init for both boot and secondary CPUs */
++static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
++{
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct * t = &per_cpu(init_tss, cpu);
+ #endif
++ struct thread_struct *thread = &curr->thread;
++
++ if (cpu_test_and_set(cpu, cpu_initialized)) {
++ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
++ for (;;) local_irq_enable();
++ }
+
+- cpu_gdt_init(cpu_gdt_descr);
++ printk(KERN_INFO "Initializing CPU#%d\n", cpu);
++
++ if (cpu_has_vme || cpu_has_de)
++ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
++ if (tsc_disable && cpu_has_tsc) {
++ printk(KERN_NOTICE "Disabling TSC...\n");
++ /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
++ clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
++ set_in_cr4(X86_CR4_TSD);
++ }
+
+ /*
+ * Set up and load the per-CPU TSS and LDT
+ */
+ atomic_inc(&init_mm.mm_count);
+- current->active_mm = &init_mm;
+- BUG_ON(current->mm);
+- enter_lazy_tlb(&init_mm, current);
++ curr->active_mm = &init_mm;
++ if (curr->mm)
++ BUG();
++ enter_lazy_tlb(&init_mm, curr);
+
+ load_esp0(t, thread);
+
+@@ -719,8 +775,8 @@ old_gdt:
+ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
+ #endif
+
+- /* Clear %fs and %gs. */
+- asm volatile ("movl %0, %%fs; movl %0, %%gs" : : "r" (0));
++ /* Clear %fs. */
++ asm volatile ("mov %0, %%fs" : : "r" (0));
+
+ /* Clear all 6 debug registers: */
+ set_debugreg(0, 0);
+@@ -738,6 +794,38 @@ old_gdt:
+ mxcsr_feature_mask_init();
+ }
+
++/* Entrypoint to initialize secondary CPU */
++void __cpuinit secondary_cpu_init(void)
++{
++ int cpu = smp_processor_id();
++ struct task_struct *curr = current;
++
++ _cpu_init(cpu, curr);
++}
++
++/*
++ * cpu_init() initializes state that is per-CPU. Some data is already
++ * initialized (naturally) in the bootstrap process, such as the GDT
++ * and IDT. We reload them nevertheless, this function acts as a
++ * 'CPU state barrier', nothing should get across.
++ */
++void __cpuinit cpu_init(void)
++{
++ int cpu = smp_processor_id();
++ struct task_struct *curr = current;
++
++ /* Set up the real GDT and PDA, so we can transition from the
++ boot versions. */
++ if (!init_gdt(cpu, curr)) {
++ /* failed to allocate something; not much we can do... */
++ for (;;)
++ local_irq_enable();
++ }
++
++ cpu_set_gdt(cpu);
++ _cpu_init(cpu, curr);
++}
++
+ #ifdef CONFIG_HOTPLUG_CPU
+ void __cpuinit cpu_uninit(void)
+ {
+Index: 10.3-2007-11-26/arch/i386/kernel/cpu/mtrr/main-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/cpu/mtrr/main-xen.c 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/cpu/mtrr/main-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -12,7 +12,7 @@
+ static DEFINE_MUTEX(mtrr_mutex);
+
+ void generic_get_mtrr(unsigned int reg, unsigned long *base,
+- unsigned int *size, mtrr_type * type)
++ unsigned long *size, mtrr_type * type)
+ {
+ struct xen_platform_op op;
+
+@@ -114,8 +114,7 @@ int mtrr_del_page(int reg, unsigned long
+ {
+ unsigned i;
+ mtrr_type ltype;
+- unsigned long lbase;
+- unsigned int lsize;
++ unsigned long lbase, lsize;
+ int error = -EINVAL;
+ struct xen_platform_op op;
+
+Index: 10.3-2007-11-26/arch/i386/kernel/e820-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ 10.3-2007-11-26/arch/i386/kernel/e820-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -0,0 +1,972 @@
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/bootmem.h>
++#include <linux/ioport.h>
++#include <linux/string.h>
++#include <linux/kexec.h>
++#include <linux/module.h>
++#include <linux/mm.h>
++#include <linux/efi.h>
++#include <linux/pfn.h>
++#include <linux/uaccess.h>
++
++#include <asm/pgtable.h>
++#include <asm/page.h>
++#include <asm/e820.h>
++#include <xen/interface/memory.h>
++
++#ifdef CONFIG_EFI
++int efi_enabled = 0;
++EXPORT_SYMBOL(efi_enabled);
++#endif
++
++struct e820map e820;
++struct change_member {
++ struct e820entry *pbios; /* pointer to original bios entry */
++ unsigned long long addr; /* address for this change point */
++};
++static struct change_member change_point_list[2*E820MAX] __initdata;
++static struct change_member *change_point[2*E820MAX] __initdata;
++static struct e820entry *overlap_list[E820MAX] __initdata;
++static struct e820entry new_bios[E820MAX] __initdata;
++/* For PCI or other memory-mapped resources */
++unsigned long pci_mem_start = 0x10000000;
++#ifdef CONFIG_PCI
++EXPORT_SYMBOL(pci_mem_start);
++#endif
++extern int user_defined_memmap;
++struct resource data_resource = {
++ .name = "Kernel data",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++struct resource code_resource = {
++ .name = "Kernel code",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++static struct resource system_rom_resource = {
++ .name = "System ROM",
++ .start = 0xf0000,
++ .end = 0xfffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource extension_rom_resource = {
++ .name = "Extension ROM",
++ .start = 0xe0000,
++ .end = 0xeffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource adapter_rom_resources[] = { {
++ .name = "Adapter ROM",
++ .start = 0xc8000,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++} };
++
++static struct resource video_rom_resource = {
++ .name = "Video ROM",
++ .start = 0xc0000,
++ .end = 0xc7fff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource video_ram_resource = {
++ .name = "Video RAM area",
++ .start = 0xa0000,
++ .end = 0xbffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++static struct resource standard_io_resources[] = { {
++ .name = "dma1",
++ .start = 0x0000,
++ .end = 0x001f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "pic1",
++ .start = 0x0020,
++ .end = 0x0021,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "timer0",
++ .start = 0x0040,
++ .end = 0x0043,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "timer1",
++ .start = 0x0050,
++ .end = 0x0053,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "keyboard",
++ .start = 0x0060,
++ .end = 0x006f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "dma page reg",
++ .start = 0x0080,
++ .end = 0x008f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "pic2",
++ .start = 0x00a0,
++ .end = 0x00a1,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "dma2",
++ .start = 0x00c0,
++ .end = 0x00df,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "fpu",
++ .start = 0x00f0,
++ .end = 0x00ff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++} };
++
++static int romsignature(const unsigned char *x)
++{
++ unsigned short sig;
++ int ret = 0;
++ if (probe_kernel_address((const unsigned short *)x, sig) == 0)
++ ret = (sig == 0xaa55);
++ return ret;
++}
++
++static int __init romchecksum(unsigned char *rom, unsigned long length)
++{
++ unsigned char *p, sum = 0;
++
++ for (p = rom; p < rom + length; p++)
++ sum += *p;
++ return sum == 0;
++}
++
++static void __init probe_roms(void)
++{
++ unsigned long start, length, upper;
++ unsigned char *rom;
++ int i;
++
++#ifdef CONFIG_XEN
++ /* Nothing to do if not running in dom0. */
++ if (!is_initial_xendomain())
++ return;
++#endif
++
++ /* video rom */
++ upper = adapter_rom_resources[0].start;
++ for (start = video_rom_resource.start; start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ video_rom_resource.start = start;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = rom[2] * 512;
++
++ /* if checksum okay, trust length byte */
++ if (length && romchecksum(rom, length))
++ video_rom_resource.end = start + length - 1;
++
++ request_resource(&iomem_resource, &video_rom_resource);
++ break;
++ }
++
++ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
++ if (start < upper)
++ start = upper;
++
++ /* system rom */
++ request_resource(&iomem_resource, &system_rom_resource);
++ upper = system_rom_resource.start;
++
++ /* check for extension rom (ignore length byte!) */
++ rom = isa_bus_to_virt((unsigned long)extension_rom_resource.start);
++ if (romsignature(rom)) {
++ length = extension_rom_resource.end - extension_rom_resource.start + 1;
++ if (romchecksum(rom, length)) {
++ request_resource(&iomem_resource, &extension_rom_resource);
++ upper = extension_rom_resource.start;
++ }
++ }
++
++ /* check for adapter roms on 2k boundaries */
++ for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = rom[2] * 512;
++
++ /* but accept any length that fits if checksum okay */
++ if (!length || start + length > upper || !romchecksum(rom, length))
++ continue;
++
++ adapter_rom_resources[i].start = start;
++ adapter_rom_resources[i].end = start + length - 1;
++ request_resource(&iomem_resource, &adapter_rom_resources[i]);
++
++ start = adapter_rom_resources[i++].end & ~2047UL;
++ }
++}
++
++#ifdef CONFIG_XEN
++static struct e820map machine_e820 __initdata;
++#define e820 machine_e820
++#endif
++
++/*
++ * Request address space for all standard RAM and ROM resources
++ * and also for regions reported as reserved by the e820.
++ */
++static void __init
++legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
++{
++ int i;
++
++ probe_roms();
++ for (i = 0; i < e820.nr_map; i++) {
++ struct resource *res;
++#ifndef CONFIG_RESOURCES_64BIT
++ if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
++ continue;
++#endif
++ res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
++ switch (e820.map[i].type) {
++ case E820_RAM: res->name = "System RAM"; break;
++ case E820_ACPI: res->name = "ACPI Tables"; break;
++ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
++ default: res->name = "reserved";
++ }
++ res->start = e820.map[i].addr;
++ res->end = res->start + e820.map[i].size - 1;
++ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++ if (request_resource(&iomem_resource, res)) {
++ kfree(res);
++ continue;
++ }
++ if (e820.map[i].type == E820_RAM) {
++ /*
++ * We don't know which RAM region contains kernel data,
++ * so we try it repeatedly and let the resource manager
++ * test it.
++ */
++#ifndef CONFIG_XEN
++ request_resource(res, code_resource);
++ request_resource(res, data_resource);
++#endif
++#ifdef CONFIG_KEXEC
++ request_resource(res, &crashk_res);
++#ifdef CONFIG_XEN
++ xen_machine_kexec_register_resources(res);
++#endif
++#endif
++ }
++ }
++}
++
++#undef e820
++
++/*
++ * Request address space for all standard resources
++ *
++ * This is called just before pcibios_init(), which is also a
++ * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
++ */
++static int __init request_standard_resources(void)
++{
++ int i;
++
++ /* Nothing to do if not running in dom0. */
++ if (!is_initial_xendomain())
++ return 0;
++
++ printk("Setting up standard PCI resources\n");
++ if (efi_enabled)
++ efi_initialize_iomem_resources(&code_resource, &data_resource);
++ else
++ legacy_init_iomem_resources(&code_resource, &data_resource);
++
++ /* EFI systems may still have VGA */
++ request_resource(&iomem_resource, &video_ram_resource);
++
++ /* request I/O space for devices used on all i[345]86 PCs */
++ for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
++ request_resource(&ioport_resource, &standard_io_resources[i]);
++ return 0;
++}
++
++subsys_initcall(request_standard_resources);
++
++void __init add_memory_region(unsigned long long start,
++ unsigned long long size, int type)
++{
++ int x;
++
++ if (!efi_enabled) {
++ x = e820.nr_map;
++
++ if (x == E820MAX) {
++ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
++ return;
++ }
++
++ e820.map[x].addr = start;
++ e820.map[x].size = size;
++ e820.map[x].type = type;
++ e820.nr_map++;
++ }
++} /* add_memory_region */
++
++/*
++ * Sanitize the BIOS e820 map.
++ *
++ * Some e820 responses include overlapping entries. The following
++ * replaces the original e820 map with a new one, removing overlaps.
++ *
++ */
++int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
++{
++ struct change_member *change_tmp;
++ unsigned long current_type, last_type;
++ unsigned long long last_addr;
++ int chgidx, still_changing;
++ int overlap_entries;
++ int new_bios_entry;
++ int old_nr, new_nr, chg_nr;
++ int i;
++
++ /*
++ Visually we're performing the following (1,2,3,4 = memory types)...
++
++ Sample memory map (w/overlaps):
++ ____22__________________
++ ______________________4_
++ ____1111________________
++ _44_____________________
++ 11111111________________
++ ____________________33__
++ ___________44___________
++ __________33333_________
++ ______________22________
++ ___________________2222_
++ _________111111111______
++ _____________________11_
++ _________________4______
++
++ Sanitized equivalent (no overlap):
++ 1_______________________
++ _44_____________________
++ ___1____________________
++ ____22__________________
++ ______11________________
++ _________1______________
++ __________3_____________
++ ___________44___________
++ _____________33_________
++ _______________2________
++ ________________1_______
++ _________________4______
++ ___________________2____
++ ____________________33__
++ ______________________4_
++ */
++ printk("sanitize start\n");
++ /* if there's only one memory region, don't bother */
++ if (*pnr_map < 2) {
++ printk("sanitize bail 0\n");
++ return -1;
++ }
++
++ old_nr = *pnr_map;
++
++ /* bail out if we find any unreasonable addresses in bios map */
++ for (i=0; i<old_nr; i++)
++ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr) {
++ printk("sanitize bail 1\n");
++ return -1;
++ }
++
++ /* create pointers for initial change-point information (for sorting) */
++ for (i=0; i < 2*old_nr; i++)
++ change_point[i] = &change_point_list[i];
++
++ /* record all known change-points (starting and ending addresses),
++ omitting those that are for empty memory regions */
++ chgidx = 0;
++ for (i=0; i < old_nr; i++) {
++ if (biosmap[i].size != 0) {
++ change_point[chgidx]->addr = biosmap[i].addr;
++ change_point[chgidx++]->pbios = &biosmap[i];
++ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
++ change_point[chgidx++]->pbios = &biosmap[i];
++ }
++ }
++ chg_nr = chgidx; /* true number of change-points */
++
++ /* sort change-point list by memory addresses (low -> high) */
++ still_changing = 1;
++ while (still_changing) {
++ still_changing = 0;
++ for (i=1; i < chg_nr; i++) {
++ /* if <current_addr> > <last_addr>, swap */
++ /* or, if current=<start_addr> & last=<end_addr>, swap */
++ if ((change_point[i]->addr < change_point[i-1]->addr) ||
++ ((change_point[i]->addr == change_point[i-1]->addr) &&
++ (change_point[i]->addr == change_point[i]->pbios->addr) &&
++ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
++ )
++ {
++ change_tmp = change_point[i];
++ change_point[i] = change_point[i-1];
++ change_point[i-1] = change_tmp;
++ still_changing=1;
++ }
++ }
++ }
++
++ /* create a new bios memory map, removing overlaps */
++ overlap_entries=0; /* number of entries in the overlap table */
++ new_bios_entry=0; /* index for creating new bios map entries */
++ last_type = 0; /* start with undefined memory type */
++ last_addr = 0; /* start with 0 as last starting address */
++ /* loop through change-points, determining affect on the new bios map */
++ for (chgidx=0; chgidx < chg_nr; chgidx++)
++ {
++ /* keep track of all overlapping bios entries */
++ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
++ {
++ /* add map entry to overlap list (> 1 entry implies an overlap) */
++ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
++ }
++ else
++ {
++ /* remove entry from list (order independent, so swap with last) */
++ for (i=0; i<overlap_entries; i++)
++ {
++ if (overlap_list[i] == change_point[chgidx]->pbios)
++ overlap_list[i] = overlap_list[overlap_entries-1];
++ }
++ overlap_entries--;
++ }
++ /* if there are overlapping entries, decide which "type" to use */
++ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
++ current_type = 0;
++ for (i=0; i<overlap_entries; i++)
++ if (overlap_list[i]->type > current_type)
++ current_type = overlap_list[i]->type;
++ /* continue building up new bios map based on this information */
++ if (current_type != last_type) {
++ if (last_type != 0) {
++ new_bios[new_bios_entry].size =
++ change_point[chgidx]->addr - last_addr;
++ /* move forward only if the new size was non-zero */
++ if (new_bios[new_bios_entry].size != 0)
++ if (++new_bios_entry >= E820MAX)
++ break; /* no more space left for new bios entries */
++ }
++ if (current_type != 0) {
++ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
++ new_bios[new_bios_entry].type = current_type;
++ last_addr=change_point[chgidx]->addr;
++ }
++ last_type = current_type;
++ }
++ }
++ new_nr = new_bios_entry; /* retain count for new bios entries */
++
++ /* copy new bios mapping into original location */
++ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
++ *pnr_map = new_nr;
++
++ printk("sanitize end\n");
++ return 0;
++}
++
++/*
++ * Copy the BIOS e820 map into a safe place.
++ *
++ * Sanity-check it while we're at it..
++ *
++ * If we're lucky and live on a modern system, the setup code
++ * will have given us a memory map that we can use to properly
++ * set up memory. If we aren't, we'll fake a memory map.
++ *
++ * We check to see that the memory map contains at least 2 elements
++ * before we'll use it, because the detection code in setup.S may
++ * not be perfect and most every PC known to man has two memory
++ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
++ * thinkpad 560x, for example, does not cooperate with the memory
++ * detection code.)
++ */
++int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
++{
++#ifndef CONFIG_XEN
++ /* Only one memory region (or negative)? Ignore it */
++ if (nr_map < 2)
++ return -1;
++#else
++ BUG_ON(nr_map < 1);
++#endif
++
++ do {
++ unsigned long long start = biosmap->addr;
++ unsigned long long size = biosmap->size;
++ unsigned long long end = start + size;
++ unsigned long type = biosmap->type;
++ printk("copy_e820_map() start: %016Lx size: %016Lx end: %016Lx type: %ld\n", start, size, end, type);
++
++ /* Overflow in 64 bits? Ignore the memory map. */
++ if (start > end)
++ return -1;
++
++#ifndef CONFIG_XEN
++ /*
++ * Some BIOSes claim RAM in the 640k - 1M region.
++ * Not right. Fix it up.
++ */
++ if (type == E820_RAM) {
++ printk("copy_e820_map() type is E820_RAM\n");
++ if (start < 0x100000ULL && end > 0xA0000ULL) {
++ printk("copy_e820_map() lies in range...\n");
++ if (start < 0xA0000ULL) {
++ printk("copy_e820_map() start < 0xA0000ULL\n");
++ add_memory_region(start, 0xA0000ULL-start, type);
++ }
++ if (end <= 0x100000ULL) {
++ printk("copy_e820_map() end <= 0x100000ULL\n");
++ continue;
++ }
++ start = 0x100000ULL;
++ size = end - start;
++ }
++ }
++#endif
++ add_memory_region(start, size, type);
++ } while (biosmap++,--nr_map);
++ return 0;
++}
++
++/*
++ * Callback for efi_memory_walk.
++ */
++static int __init
++efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
++{
++ unsigned long *max_pfn = arg, pfn;
++
++ if (start < end) {
++ pfn = PFN_UP(end -1);
++ if (pfn > *max_pfn)
++ *max_pfn = pfn;
++ }
++ return 0;
++}
++
++static int __init
++efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
++{
++ memory_present(0, PFN_UP(start), PFN_DOWN(end));
++ return 0;
++}
++
++/*
++ * Find the highest page frame number we have available
++ */
++void __init find_max_pfn(void)
++{
++ int i;
++
++ max_pfn = 0;
++ if (efi_enabled) {
++ efi_memmap_walk(efi_find_max_pfn, &max_pfn);
++ efi_memmap_walk(efi_memory_present_wrapper, NULL);
++ return;
++ }
++
++ for (i = 0; i < e820.nr_map; i++) {
++ unsigned long start, end;
++ /* RAM? */
++ if (e820.map[i].type != E820_RAM)
++ continue;
++ start = PFN_UP(e820.map[i].addr);
++ end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
++ if (start >= end)
++ continue;
++ if (end > max_pfn)
++ max_pfn = end;
++ memory_present(0, start, end);
++ }
++}
++
++/*
++ * Free all available memory for boot time allocation. Used
++ * as a callback function by efi_memory_walk()
++ */
++
++static int __init
++free_available_memory(unsigned long start, unsigned long end, void *arg)
++{
++ /* check max_low_pfn */
++ if (start >= (max_low_pfn << PAGE_SHIFT))
++ return 0;
++ if (end >= (max_low_pfn << PAGE_SHIFT))
++ end = max_low_pfn << PAGE_SHIFT;
++ if (start < end)
++ free_bootmem(start, end - start);
++
++ return 0;
++}
++/*
++ * Register fully available low RAM pages with the bootmem allocator.
++ */
++void __init register_bootmem_low_pages(unsigned long max_low_pfn)
++{
++ int i;
++
++ if (efi_enabled) {
++ efi_memmap_walk(free_available_memory, NULL);
++ return;
++ }
++ for (i = 0; i < e820.nr_map; i++) {
++ unsigned long curr_pfn, last_pfn, size;
++ /*
++ * Reserve usable low memory
++ */
++ if (e820.map[i].type != E820_RAM)
++ continue;
++ /*
++ * We are rounding up the start address of usable memory:
++ */
++ curr_pfn = PFN_UP(e820.map[i].addr);
++ if (curr_pfn >= max_low_pfn)
++ continue;
++ /*
++ * ... and at the end of the usable range downwards:
++ */
++ last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
++
++#ifdef CONFIG_XEN
++ /*
++ * Truncate to the number of actual pages currently
++ * present.
++ */
++ if (last_pfn > xen_start_info->nr_pages)
++ last_pfn = xen_start_info->nr_pages;
++#endif
++
++ if (last_pfn > max_low_pfn)
++ last_pfn = max_low_pfn;
++
++ /*
++ * .. finally, did all the rounding and playing
++ * around just make the area go away?
++ */
++ if (last_pfn <= curr_pfn)
++ continue;
++
++ size = last_pfn - curr_pfn;
++ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
++ }
++}
++
++void __init e820_register_memory(void)
++{
++ unsigned long gapstart, gapsize, round;
++ unsigned long long last;
++ int i;
++
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain()) {
++ struct xen_memory_map memmap;
++
++ memmap.nr_entries = E820MAX;
++ set_xen_guest_handle(memmap.buffer, machine_e820.map);
++
++ if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
++ BUG();
++ machine_e820.nr_map = memmap.nr_entries;
++ }
++ else
++ machine_e820 = e820;
++#define e820 machine_e820
++#endif
++
++ /*
++ * Search for the bigest gap in the low 32 bits of the e820
++ * memory space.
++ */
++ last = 0x100000000ull;
++ gapstart = 0x10000000;
++ gapsize = 0x400000;
++ i = e820.nr_map;
++ while (--i >= 0) {
++ unsigned long long start = e820.map[i].addr;
++ unsigned long long end = start + e820.map[i].size;
++
++ /*
++ * Since "last" is at most 4GB, we know we'll
++ * fit in 32 bits if this condition is true
++ */
++ if (last > end) {
++ unsigned long gap = last - end;
++
++ if (gap > gapsize) {
++ gapsize = gap;
++ gapstart = end;
++ }
++ }
++ if (start < last)
++ last = start;
++ }
++
++ /*
++ * See how much we want to round up: start off with
++ * rounding to the next 1MB area.
++ */
++ round = 0x100000;
++ while ((gapsize >> 4) > round)
++ round += round;
++ /* Fun with two's complement */
++ pci_mem_start = (gapstart + round) & -round;
++
++ printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
++ pci_mem_start, gapstart, gapsize);
++}
++
++#undef e820
++
++void __init print_memory_map(char *who)
++{
++ int i;
++
++ for (i = 0; i < e820.nr_map; i++) {
++ printk(" %s: %016Lx - %016Lx ", who,
++ e820.map[i].addr,
++ e820.map[i].addr + e820.map[i].size);
++ switch (e820.map[i].type) {
++ case E820_RAM: printk("(usable)\n");
++ break;
++ case E820_RESERVED:
++ printk("(reserved)\n");
++ break;
++ case E820_ACPI:
++ printk("(ACPI data)\n");
++ break;
++ case E820_NVS:
++ printk("(ACPI NVS)\n");
++ break;
++ default: printk("type %lu\n", e820.map[i].type);
++ break;
++ }
++ }
++}
++
++static __init __always_inline void efi_limit_regions(unsigned long long size)
++{
++ unsigned long long current_addr = 0;
++ efi_memory_desc_t *md, *next_md;
++ void *p, *p1;
++ int i, j;
++
++ j = 0;
++ p1 = memmap.map;
++ for (p = p1, i = 0; p < memmap.map_end; p += memmap.desc_size, i++) {
++ md = p;
++ next_md = p1;
++ current_addr = md->phys_addr +
++ PFN_PHYS(md->num_pages);
++ if (is_available_memory(md)) {
++ if (md->phys_addr >= size) continue;
++ memcpy(next_md, md, memmap.desc_size);
++ if (current_addr >= size) {
++ next_md->num_pages -=
++ PFN_UP(current_addr-size);
++ }
++ p1 += memmap.desc_size;
++ next_md = p1;
++ j++;
++ } else if ((md->attribute & EFI_MEMORY_RUNTIME) ==
++ EFI_MEMORY_RUNTIME) {
++ /* In order to make runtime services
++ * available we have to include runtime
++ * memory regions in memory map */
++ memcpy(next_md, md, memmap.desc_size);
++ p1 += memmap.desc_size;
++ next_md = p1;
++ j++;
++ }
++ }
++ memmap.nr_map = j;
++ memmap.map_end = memmap.map +
++ (memmap.nr_map * memmap.desc_size);
++}
++
++void __init limit_regions(unsigned long long size)
++{
++ unsigned long long current_addr = 0;
++ int i;
++
++ print_memory_map("limit_regions start");
++ if (efi_enabled) {
++ efi_limit_regions(size);
++ return;
++ }
++ for (i = 0; i < e820.nr_map; i++) {
++ current_addr = e820.map[i].addr + e820.map[i].size;
++ if (current_addr < size)
++ continue;
++
++ if (e820.map[i].type != E820_RAM)
++ continue;
++
++ if (e820.map[i].addr >= size) {
++ /*
++ * This region starts past the end of the
++ * requested size, skip it completely.
++ */
++ e820.nr_map = i;
++ } else {
++ e820.nr_map = i + 1;
++ e820.map[i].size -= current_addr - size;
++ }
++ print_memory_map("limit_regions endfor");
++ return;
++ }
++#ifdef CONFIG_XEN
++ if (current_addr < size) {
++ /*
++ * The e820 map finished before our requested size so
++ * extend the final entry to the requested address.
++ */
++ --i;
++ if (e820.map[i].type == E820_RAM)
++ e820.map[i].size -= current_addr - size;
++ else
++ add_memory_region(current_addr, size - current_addr, E820_RAM);
++ }
++#endif
++ print_memory_map("limit_regions endfunc");
++}
++
++ /*
++ * This function checks if the entire range <start,end> is mapped with type.
++ *
++ * Note: this function only works correct if the e820 table is sorted and
++ * not-overlapping, which is the case
++ */
++int __init
++e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
++{
++ u64 start = s;
++ u64 end = e;
++ int i;
++
++#ifndef CONFIG_XEN
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++#else
++ if (!is_initial_xendomain())
++ return 0;
++ for (i = 0; i < machine_e820.nr_map; ++i) {
++ const struct e820entry *ei = &machine_e820.map[i];
++#endif
++
++ if (type && ei->type != type)
++ continue;
++ /* is the region (part) in overlap with the current region ?*/
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++ /* if the region is at the beginning of <start,end> we move
++ * start to the end of the region since it's ok until there
++ */
++ if (ei->addr <= start)
++ start = ei->addr + ei->size;
++ /* if start is now at or beyond end, we're done, full
++ * coverage */
++ if (start >= end)
++ return 1; /* we're done */
++ }
++ return 0;
++}
++
++static int __init parse_memmap(char *arg)
++{
++ if (!arg)
++ return -EINVAL;
++
++ if (strcmp(arg, "exactmap") == 0) {
++#ifdef CONFIG_CRASH_DUMP
++ /* If we are doing a crash dump, we
++ * still need to know the real mem
++ * size before original memory map is
++ * reset.
++ */
++ find_max_pfn();
++ saved_max_pfn = max_pfn;
++#endif
++ e820.nr_map = 0;
++ user_defined_memmap = 1;
++ } else {
++ /* If the user specifies memory size, we
++ * limit the BIOS-provided memory map to
++ * that size. exactmap can be used to specify
++ * the exact map. mem=number can be used to
++ * trim the existing memory map.
++ */
++ unsigned long long start_at, mem_size;
++
++ mem_size = memparse(arg, &arg);
++ if (*arg == '@') {
++ start_at = memparse(arg+1, &arg);
++ add_memory_region(start_at, mem_size, E820_RAM);
++ } else if (*arg == '#') {
++ start_at = memparse(arg+1, &arg);
++ add_memory_region(start_at, mem_size, E820_ACPI);
++ } else if (*arg == '$') {
++ start_at = memparse(arg+1, &arg);
++ add_memory_region(start_at, mem_size, E820_RESERVED);
++ } else {
++ limit_regions(mem_size);
++ user_defined_memmap = 1;
++ }
++ }
++ return 0;
++}
++early_param("memmap", parse_memmap);
+Index: 10.3-2007-11-26/arch/i386/kernel/entry-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/entry-xen.S 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/entry-xen.S 2007-10-22 13:53:25.000000000 +0200
+@@ -30,12 +30,13 @@
+ * 18(%esp) - %eax
+ * 1C(%esp) - %ds
+ * 20(%esp) - %es
+- * 24(%esp) - orig_eax
+- * 28(%esp) - %eip
+- * 2C(%esp) - %cs
+- * 30(%esp) - %eflags
+- * 34(%esp) - %oldesp
+- * 38(%esp) - %oldss
++ * 24(%esp) - %gs
++ * 28(%esp) - orig_eax
++ * 2C(%esp) - %eip
++ * 30(%esp) - %cs
++ * 34(%esp) - %eflags
++ * 38(%esp) - %oldesp
++ * 3C(%esp) - %oldss
+ *
+ * "current" is in register %ebx during any slow entries.
+ */
+@@ -48,27 +49,25 @@
+ #include <asm/smp.h>
+ #include <asm/page.h>
+ #include <asm/desc.h>
++#include <asm/percpu.h>
+ #include <asm/dwarf2.h>
+ #include "irq_vectors.h"
+ #include <xen/interface/xen.h>
+
+-#define nr_syscalls ((syscall_table_size)/4)
++/*
++ * We use macros for low-level operations which need to be overridden
++ * for paravirtualization. The following will never clobber any registers:
++ * INTERRUPT_RETURN (aka. "iret")
++ * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
++ * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
++ *
++ * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
++ * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
++ * Allowing a register to be clobbered can shrink the paravirt replacement
++ * enough to patch inline, increasing performance.
++ */
+
+-EBX = 0x00
+-ECX = 0x04
+-EDX = 0x08
+-ESI = 0x0C
+-EDI = 0x10
+-EBP = 0x14
+-EAX = 0x18
+-DS = 0x1C
+-ES = 0x20
+-ORIG_EAX = 0x24
+-EIP = 0x28
+-CS = 0x2C
+-EFLAGS = 0x30
+-OLDESP = 0x34
+-OLDSS = 0x38
++#define nr_syscalls ((syscall_table_size)/4)
+
+ CF_MASK = 0x00000001
+ TF_MASK = 0x00000100
+@@ -79,61 +78,16 @@ VM_MASK = 0x00020000
+ /* Pseudo-eflags. */
+ NMI_MASK = 0x80000000
+
+-#ifndef CONFIG_XEN
+-/* These are replaces for paravirtualization */
+-#define DISABLE_INTERRUPTS cli
+-#define ENABLE_INTERRUPTS sti
+-#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
+-#define INTERRUPT_RETURN iret
+-#define GET_CR0_INTO_EAX movl %cr0, %eax
+-#else
+-/* Offsets into shared_info_t. */
+-#define evtchn_upcall_pending /* 0 */
+-#define evtchn_upcall_mask 1
+-
+-#define sizeof_vcpu_shift 6
+-
+-#ifdef CONFIG_SMP
+-#define GET_VCPU_INFO movl TI_cpu(%ebp),%esi ; \
+- shl $sizeof_vcpu_shift,%esi ; \
+- addl HYPERVISOR_shared_info,%esi
+-#else
+-#define GET_VCPU_INFO movl HYPERVISOR_shared_info,%esi
+-#endif
+-
+-#define __DISABLE_INTERRUPTS movb $1,evtchn_upcall_mask(%esi)
+-#define __ENABLE_INTERRUPTS movb $0,evtchn_upcall_mask(%esi)
+-#define __TEST_PENDING testb $0xFF,evtchn_upcall_pending(%esi)
+-#define DISABLE_INTERRUPTS GET_VCPU_INFO ; \
+- __DISABLE_INTERRUPTS
+-#define ENABLE_INTERRUPTS GET_VCPU_INFO ; \
+- __ENABLE_INTERRUPTS
+-#define ENABLE_INTERRUPTS_SYSEXIT __ENABLE_INTERRUPTS ; \
+-sysexit_scrit: /**** START OF SYSEXIT CRITICAL REGION ****/ ; \
+- __TEST_PENDING ; \
+- jnz 14f # process more events if necessary... ; \
+- movl ESI(%esp), %esi ; \
+- sysexit ; \
+-14: __DISABLE_INTERRUPTS ; \
+- TRACE_IRQS_OFF ; \
+-sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/ ; \
+- push %esp ; \
+- call evtchn_do_upcall ; \
+- add $4,%esp ; \
+- jmp ret_from_intr
+-#define INTERRUPT_RETURN iret
+-#endif
+-
+ #ifdef CONFIG_PREEMPT
+-#define preempt_stop DISABLE_INTERRUPTS; TRACE_IRQS_OFF
++#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
+ #else
+-#define preempt_stop
++#define preempt_stop(clobbers)
+ #define resume_kernel restore_nocheck
+ #endif
+
+ .macro TRACE_IRQS_IRET
+ #ifdef CONFIG_TRACE_IRQFLAGS
+- testl $IF_MASK,EFLAGS(%esp) # interrupts off?
++ testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off?
+ jz 1f
+ TRACE_IRQS_ON
+ 1:
+@@ -148,6 +102,9 @@ sysexit_ecrit: /**** END OF SYSEXIT CRIT
+
+ #define SAVE_ALL \
+ cld; \
++ pushl %gs; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET gs, 0;*/\
+ pushl %es; \
+ CFI_ADJUST_CFA_OFFSET 4;\
+ /*CFI_REL_OFFSET es, 0;*/\
+@@ -177,7 +134,9 @@ sysexit_ecrit: /**** END OF SYSEXIT CRIT
+ CFI_REL_OFFSET ebx, 0;\
+ movl $(__USER_DS), %edx; \
+ movl %edx, %ds; \
+- movl %edx, %es;
++ movl %edx, %es; \
++ movl $(__KERNEL_PDA), %edx; \
++ movl %edx, %gs
+
+ #define RESTORE_INT_REGS \
+ popl %ebx; \
+@@ -210,17 +169,22 @@ sysexit_ecrit: /**** END OF SYSEXIT CRIT
+ 2: popl %es; \
+ CFI_ADJUST_CFA_OFFSET -4;\
+ /*CFI_RESTORE es;*/\
+-.section .fixup,"ax"; \
+-3: movl $0,(%esp); \
+- jmp 1b; \
++3: popl %gs; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE gs;*/\
++.pushsection .fixup,"ax"; \
+ 4: movl $0,(%esp); \
++ jmp 1b; \
++5: movl $0,(%esp); \
+ jmp 2b; \
+-.previous; \
++6: movl $0,(%esp); \
++ jmp 3b; \
+ .section __ex_table,"a";\
+ .align 4; \
+- .long 1b,3b; \
+- .long 2b,4b; \
+-.previous
++ .long 1b,4b; \
++ .long 2b,5b; \
++ .long 3b,6b; \
++.popsection
+
+ #define RING0_INT_FRAME \
+ CFI_STARTPROC simple;\
+@@ -239,18 +203,18 @@ sysexit_ecrit: /**** END OF SYSEXIT CRIT
+ #define RING0_PTREGS_FRAME \
+ CFI_STARTPROC simple;\
+ CFI_SIGNAL_FRAME;\
+- CFI_DEF_CFA esp, OLDESP-EBX;\
+- /*CFI_OFFSET cs, CS-OLDESP;*/\
+- CFI_OFFSET eip, EIP-OLDESP;\
+- /*CFI_OFFSET es, ES-OLDESP;*/\
+- /*CFI_OFFSET ds, DS-OLDESP;*/\
+- CFI_OFFSET eax, EAX-OLDESP;\
+- CFI_OFFSET ebp, EBP-OLDESP;\
+- CFI_OFFSET edi, EDI-OLDESP;\
+- CFI_OFFSET esi, ESI-OLDESP;\
+- CFI_OFFSET edx, EDX-OLDESP;\
+- CFI_OFFSET ecx, ECX-OLDESP;\
+- CFI_OFFSET ebx, EBX-OLDESP
++ CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
++ /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
++ CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
++ /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
++ /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
++ CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
++ CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
++ CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
++ CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
++ CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
++ CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
++ CFI_OFFSET ebx, PT_EBX-PT_OLDESP
+
+ ENTRY(ret_from_fork)
+ CFI_STARTPROC
+@@ -278,17 +242,18 @@ ENTRY(ret_from_fork)
+ ALIGN
+ RING0_PTREGS_FRAME
+ ret_from_exception:
+- preempt_stop
++ preempt_stop(CLBR_ANY)
+ ret_from_intr:
+ GET_THREAD_INFO(%ebp)
+ check_userspace:
+- movl EFLAGS(%esp), %eax # mix EFLAGS and CS
+- movb CS(%esp), %al
++ movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
++ movb PT_CS(%esp), %al
+ andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
+ cmpl $USER_RPL, %eax
+ jb resume_kernel # not returning to v8086 or userspace
++
+ ENTRY(resume_userspace)
+- DISABLE_INTERRUPTS # make sure we don't miss an interrupt
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
+ # setting need_resched or sigpending
+ # between sampling and the iret
+ movl TI_flags(%ebp), %ecx
+@@ -299,14 +264,14 @@ ENTRY(resume_userspace)
+
+ #ifdef CONFIG_PREEMPT
+ ENTRY(resume_kernel)
+- DISABLE_INTERRUPTS
++ DISABLE_INTERRUPTS(CLBR_ANY)
+ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
+ jnz restore_nocheck
+ need_resched:
+ movl TI_flags(%ebp), %ecx # need_resched set ?
+ testb $_TIF_NEED_RESCHED, %cl
+ jz restore_all
+- testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ?
++ testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off (exception path) ?
+ jz restore_all
+ call preempt_schedule_irq
+ jmp need_resched
+@@ -328,7 +293,7 @@ sysenter_past_esp:
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs and here we enable it straight after entry:
+ */
+- ENABLE_INTERRUPTS
++ ENABLE_INTERRUPTS(CLBR_NONE)
+ pushl $(__USER_DS)
+ CFI_ADJUST_CFA_OFFSET 4
+ /*CFI_REL_OFFSET ss, 0*/
+@@ -340,12 +305,16 @@ sysenter_past_esp:
+ pushl $(__USER_CS)
+ CFI_ADJUST_CFA_OFFSET 4
+ /*CFI_REL_OFFSET cs, 0*/
++#ifndef CONFIG_COMPAT_VDSO
+ /*
+ * Push current_thread_info()->sysenter_return to the stack.
+ * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+ * pushed above; +8 corresponds to copy_thread's esp0 setting.
+ */
+ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++#else
++ pushl $SYSENTER_RETURN
++#endif
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET eip, 0
+
+@@ -372,20 +341,27 @@ sysenter_past_esp:
+ cmpl $(nr_syscalls), %eax
+ jae syscall_badsys
+ call *sys_call_table(,%eax,4)
+- movl %eax,EAX(%esp)
+- DISABLE_INTERRUPTS
++ movl %eax,PT_EAX(%esp)
++ DISABLE_INTERRUPTS(CLBR_ECX|CLBR_EDX)
+ TRACE_IRQS_OFF
+ movl TI_flags(%ebp), %ecx
+ testw $_TIF_ALLWORK_MASK, %cx
+ jne syscall_exit_work
+ /* if something modifies registers it must also disable sysexit */
+- movl EIP(%esp), %edx
+- movl OLDESP(%esp), %ecx
++ movl PT_EIP(%esp), %edx
++ movl PT_OLDESP(%esp), %ecx
+ xorl %ebp,%ebp
+ TRACE_IRQS_ON
++1: mov PT_GS(%esp), %gs
+ ENABLE_INTERRUPTS_SYSEXIT
+ CFI_ENDPROC
+-
++.pushsection .fixup,"ax"
++2: movl $0,PT_GS(%esp)
++ jmp 1b
++.section __ex_table,"a"
++ .align 4
++ .long 1b,2b
++.popsection
+
+ # system call handler stub
+ ENTRY(system_call)
+@@ -394,7 +370,7 @@ ENTRY(system_call)
+ CFI_ADJUST_CFA_OFFSET 4
+ SAVE_ALL
+ GET_THREAD_INFO(%ebp)
+- testl $TF_MASK,EFLAGS(%esp)
++ testl $TF_MASK,PT_EFLAGS(%esp)
+ jz no_singlestep
+ orl $_TIF_SINGLESTEP,TI_flags(%ebp)
+ no_singlestep:
+@@ -406,9 +382,9 @@ no_singlestep:
+ jae syscall_badsys
+ syscall_call:
+ call *sys_call_table(,%eax,4)
+- movl %eax,EAX(%esp) # store the return value
++ movl %eax,PT_EAX(%esp) # store the return value
+ syscall_exit:
+- DISABLE_INTERRUPTS # make sure we don't miss an interrupt
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
+ # setting need_resched or sigpending
+ # between sampling and the iret
+ TRACE_IRQS_OFF
+@@ -418,12 +394,12 @@ syscall_exit:
+
+ restore_all:
+ #ifndef CONFIG_XEN
+- movl EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
+- # Warning: OLDSS(%esp) contains the wrong/random values if we
++ movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
++ # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
+ # are returning to the kernel.
+ # See comments in process.c:copy_thread() for details.
+- movb OLDSS(%esp), %ah
+- movb CS(%esp), %al
++ movb PT_OLDSS(%esp), %ah
++ movb PT_CS(%esp), %al
+ andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
+ cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
+ CFI_REMEMBER_STATE
+@@ -431,7 +407,7 @@ restore_all:
+ restore_nocheck:
+ #else
+ restore_nocheck:
+- movl EFLAGS(%esp), %eax
++ movl PT_EFLAGS(%esp), %eax
+ testl $(VM_MASK|NMI_MASK), %eax
+ CFI_REMEMBER_STATE
+ jnz hypervisor_iret
+@@ -445,13 +421,13 @@ restore_nocheck:
+ TRACE_IRQS_IRET
+ restore_nocheck_notrace:
+ RESTORE_REGS
+- addl $4, %esp
++ addl $4, %esp # skip orig_eax/error_code
+ CFI_ADJUST_CFA_OFFSET -4
+ 1: INTERRUPT_RETURN
+ .section .fixup,"ax"
+ iret_exc:
+ #ifndef CONFIG_XEN
+- ENABLE_INTERRUPTS
++ ENABLE_INTERRUPTS(CLBR_NONE)
+ #endif
+ pushl $0 # no error code
+ pushl $do_iret_error
+@@ -465,33 +441,42 @@ iret_exc:
+ CFI_RESTORE_STATE
+ #ifndef CONFIG_XEN
+ ldt_ss:
+- larl OLDSS(%esp), %eax
++ larl PT_OLDSS(%esp), %eax
+ jnz restore_nocheck
+ testl $0x00400000, %eax # returning to 32bit stack?
+ jnz restore_nocheck # allright, normal return
++
++#ifdef CONFIG_PARAVIRT
++ /*
++ * The kernel can't run on a non-flat stack if paravirt mode
++ * is active. Rather than try to fixup the high bits of
++ * ESP, bypass this code entirely. This may break DOSemu
++ * and/or Wine support in a paravirt VM, although the option
++ * is still available to implement the setting of the high
++ * 16-bits in the INTERRUPT_RETURN paravirt-op.
++ */
++ cmpl $0, paravirt_ops+PARAVIRT_enabled
++ jne restore_nocheck
++#endif
++
+ /* If returning to userspace with 16bit stack,
+ * try to fix the higher word of ESP, as the CPU
+ * won't restore it.
+ * This is an "official" bug of all the x86-compatible
+ * CPUs, which we can try to work around to make
+ * dosemu and wine happy. */
+- subl $8, %esp # reserve space for switch16 pointer
+- CFI_ADJUST_CFA_OFFSET 8
+- DISABLE_INTERRUPTS
++ movl PT_OLDESP(%esp), %eax
++ movl %esp, %edx
++ call patch_espfix_desc
++ pushl $__ESPFIX_SS
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ DISABLE_INTERRUPTS(CLBR_EAX)
+ TRACE_IRQS_OFF
+- movl %esp, %eax
+- /* Set up the 16bit stack frame with switch32 pointer on top,
+- * and a switch16 pointer on top of the current frame. */
+- call setup_x86_bogus_stack
+- CFI_ADJUST_CFA_OFFSET -8 # frame has moved
+- TRACE_IRQS_IRET
+- RESTORE_REGS
+- lss 20+4(%esp), %esp # switch to 16bit stack
+-1: INTERRUPT_RETURN
+-.section __ex_table,"a"
+- .align 4
+- .long 1b,iret_exc
+-.previous
++ lss (%esp), %esp
++ CFI_ADJUST_CFA_OFFSET -8
++ jmp restore_nocheck
+ #else
+ ALIGN
+ restore_all_enable_events:
+@@ -515,7 +500,7 @@ ecrit: /**** END OF CRITICAL REGION ***
+
+ CFI_RESTORE_STATE
+ hypervisor_iret:
+- andl $~NMI_MASK, EFLAGS(%esp)
++ andl $~NMI_MASK, PT_EFLAGS(%esp)
+ RESTORE_REGS
+ addl $4, %esp
+ CFI_ADJUST_CFA_OFFSET -4
+@@ -531,7 +516,7 @@ work_pending:
+ jz work_notifysig
+ work_resched:
+ call schedule
+- DISABLE_INTERRUPTS # make sure we don't miss an interrupt
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
+ # setting need_resched or sigpending
+ # between sampling and the iret
+ TRACE_IRQS_OFF
+@@ -544,7 +529,8 @@ work_resched:
+
+ work_notifysig: # deal with pending signals and
+ # notify-resume requests
+- testl $VM_MASK, EFLAGS(%esp)
++#ifdef CONFIG_VM86
++ testl $VM_MASK, PT_EFLAGS(%esp)
+ movl %esp, %eax
+ jne work_notifysig_v86 # returning to kernel-space or
+ # vm86-space
+@@ -554,29 +540,30 @@ work_notifysig: # deal with pending s
+
+ ALIGN
+ work_notifysig_v86:
+-#ifdef CONFIG_VM86
+ pushl %ecx # save ti_flags for do_notify_resume
+ CFI_ADJUST_CFA_OFFSET 4
+ call save_v86_state # %eax contains pt_regs pointer
+ popl %ecx
+ CFI_ADJUST_CFA_OFFSET -4
+ movl %eax, %esp
++#else
++ movl %esp, %eax
++#endif
+ xorl %edx, %edx
+ call do_notify_resume
+ jmp resume_userspace_sig
+-#endif
+
+ # perform syscall exit tracing
+ ALIGN
+ syscall_trace_entry:
+- movl $-ENOSYS,EAX(%esp)
++ movl $-ENOSYS,PT_EAX(%esp)
+ movl %esp, %eax
+ xorl %edx,%edx
+ call do_syscall_trace
+ cmpl $0, %eax
+ jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU,
+ # so must skip actual syscall
+- movl ORIG_EAX(%esp), %eax
++ movl PT_ORIG_EAX(%esp), %eax
+ cmpl $(nr_syscalls), %eax
+ jnae syscall_call
+ jmp syscall_exit
+@@ -587,7 +574,7 @@ syscall_exit_work:
+ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
+ jz work_pending
+ TRACE_IRQS_ON
+- ENABLE_INTERRUPTS # could let do_syscall_trace() call
++ ENABLE_INTERRUPTS(CLBR_ANY) # could let do_syscall_trace() call
+ # schedule() instead
+ movl %esp, %eax
+ movl $1, %edx
+@@ -601,40 +588,39 @@ syscall_fault:
+ CFI_ADJUST_CFA_OFFSET 4
+ SAVE_ALL
+ GET_THREAD_INFO(%ebp)
+- movl $-EFAULT,EAX(%esp)
++ movl $-EFAULT,PT_EAX(%esp)
+ jmp resume_userspace
+
+ syscall_badsys:
+- movl $-ENOSYS,EAX(%esp)
++ movl $-ENOSYS,PT_EAX(%esp)
+ jmp resume_userspace
+ CFI_ENDPROC
+
+ #ifndef CONFIG_XEN
+ #define FIXUP_ESPFIX_STACK \
+- movl %esp, %eax; \
+- /* switch to 32bit stack using the pointer on top of 16bit stack */ \
+- lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
+- /* copy data from 16bit stack to 32bit stack */ \
+- call fixup_x86_bogus_stack; \
+- /* put ESP to the proper location */ \
+- movl %eax, %esp;
+-#define UNWIND_ESPFIX_STACK \
++ /* since we are on a wrong stack, we cant make it a C code :( */ \
++ movl %gs:PDA_cpu, %ebx; \
++ PER_CPU(cpu_gdt_descr, %ebx); \
++ movl GDS_address(%ebx), %ebx; \
++ GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
++ addl %esp, %eax; \
++ pushl $__KERNEL_DS; \
++ CFI_ADJUST_CFA_OFFSET 4; \
+ pushl %eax; \
+ CFI_ADJUST_CFA_OFFSET 4; \
++ lss (%esp), %esp; \
++ CFI_ADJUST_CFA_OFFSET -8;
++#define UNWIND_ESPFIX_STACK \
+ movl %ss, %eax; \
+- /* see if on 16bit stack */ \
++ /* see if on espfix stack */ \
+ cmpw $__ESPFIX_SS, %ax; \
+- je 28f; \
+-27: popl %eax; \
+- CFI_ADJUST_CFA_OFFSET -4; \
+-.section .fixup,"ax"; \
+-28: movl $__KERNEL_DS, %eax; \
++ jne 27f; \
++ movl $__KERNEL_DS, %eax; \
+ movl %eax, %ds; \
+ movl %eax, %es; \
+- /* switch to 32bit stack */ \
++ /* switch to normal stack */ \
+ FIXUP_ESPFIX_STACK; \
+- jmp 27b; \
+-.previous
++27:;
+
+ /*
+ * Build the entry stubs and pointer table with
+@@ -698,13 +684,16 @@ KPROBE_ENTRY(page_fault)
+ CFI_ADJUST_CFA_OFFSET 4
+ ALIGN
+ error_code:
++ /* the function address is in %gs's slot on the stack */
++ pushl %es
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET es, 0*/
+ pushl %ds
+ CFI_ADJUST_CFA_OFFSET 4
+ /*CFI_REL_OFFSET ds, 0*/
+ pushl %eax
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET eax, 0
+- xorl %eax, %eax
+ pushl %ebp
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ebp, 0
+@@ -717,7 +706,6 @@ error_code:
+ pushl %edx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET edx, 0
+- decl %eax # eax = -1
+ pushl %ecx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ecx, 0
+@@ -725,18 +713,20 @@ error_code:
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ebx, 0
+ cld
+- pushl %es
++ pushl %gs
+ CFI_ADJUST_CFA_OFFSET 4
+- /*CFI_REL_OFFSET es, 0*/
++ /*CFI_REL_OFFSET gs, 0*/
++ movl $(__KERNEL_PDA), %ecx
++ movl %ecx, %gs
+ UNWIND_ESPFIX_STACK
+ popl %ecx
+ CFI_ADJUST_CFA_OFFSET -4
+ /*CFI_REGISTER es, ecx*/
+- movl ES(%esp), %edi # get the function address
+- movl ORIG_EAX(%esp), %edx # get the error code
+- movl %eax, ORIG_EAX(%esp)
+- movl %ecx, ES(%esp)
+- /*CFI_REL_OFFSET es, ES*/
++ movl PT_GS(%esp), %edi # get the function address
++ movl PT_ORIG_EAX(%esp), %edx # get the error code
++ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
++ mov %ecx, PT_GS(%esp)
++ /*CFI_REL_OFFSET gs, ES*/
+ movl $(__USER_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
+@@ -768,7 +758,7 @@ ENTRY(hypervisor_callback)
+ pushl %eax
+ CFI_ADJUST_CFA_OFFSET 4
+ SAVE_ALL
+- movl EIP(%esp),%eax
++ movl PT_EIP(%esp),%eax
+ cmpl $scrit,%eax
+ jb 11f
+ cmpl $ecrit,%eax
+@@ -777,7 +767,7 @@ ENTRY(hypervisor_callback)
+ jb 11f
+ cmpl $sysexit_ecrit,%eax
+ ja 11f
+- addl $OLDESP,%esp # Remove eflags...ebx from stack frame.
++ addl $PT_OLDESP,%esp # Remove eflags...ebx from stack frame.
+ 11: push %esp
+ CFI_ADJUST_CFA_OFFSET 4
+ call evtchn_do_upcall
+@@ -799,7 +789,7 @@ critical_region_fixup:
+ jne 15f
+ xorl %ecx,%ecx
+ 15: leal (%esp,%ecx),%esi # %esi points at end of src region
+- leal OLDESP(%esp),%edi # %edi points at end of dst region
++ leal PT_OLDESP(%esp),%edi # %edi points at end of dst region
+ shrl $2,%ecx # convert words to bytes
+ je 17f # skip loop if nothing to copy
+ 16: subl $4,%esi # pre-decrementing copy loop
+@@ -823,8 +813,9 @@ critical_fixup_table:
+ .byte 0x18 # pop %eax
+ .byte 0x1c # pop %ds
+ .byte 0x20 # pop %es
+- .byte 0x24,0x24,0x24 # add $4,%esp
+- .byte 0x28 # iret
++ .byte 0x24,0x24 # pop %gs
++ .byte 0x28,0x28,0x28 # add $4,%esp
++ .byte 0x2c # iret
+ .byte 0xff,0xff,0xff,0xff # movb $1,1(%esi)
+ .byte 0x00,0x00 # jmp 11b
+ .previous
+@@ -915,7 +906,7 @@ ENTRY(device_not_available)
+ jmp ret_from_exception
+ device_available_emulate:
+ #endif
+- preempt_stop
++ preempt_stop(CLBR_ANY)
+ call math_state_restore
+ jmp ret_from_exception
+ CFI_ENDPROC
+@@ -985,7 +976,7 @@ KPROBE_ENTRY(nmi)
+ cmpw $__ESPFIX_SS, %ax
+ popl %eax
+ CFI_ADJUST_CFA_OFFSET -4
+- je nmi_16bit_stack
++ je nmi_espfix_stack
+ cmpl $sysenter_entry,(%esp)
+ je nmi_stack_fixup
+ pushl %eax
+@@ -1028,7 +1019,7 @@ nmi_debug_stack_check:
+ FIX_STACK(24,nmi_stack_correct, 1)
+ jmp nmi_stack_correct
+
+-nmi_16bit_stack:
++nmi_espfix_stack:
+ /* We have a RING0_INT_FRAME here.
+ *
+ * create the pointer to lss back
+@@ -1037,7 +1028,6 @@ nmi_16bit_stack:
+ CFI_ADJUST_CFA_OFFSET 4
+ pushl %esp
+ CFI_ADJUST_CFA_OFFSET 4
+- movzwl %sp, %esp
+ addw $4, (%esp)
+ /* copy the iret frame of 12 bytes */
+ .rept 3
+@@ -1048,11 +1038,11 @@ nmi_16bit_stack:
+ CFI_ADJUST_CFA_OFFSET 4
+ SAVE_ALL
+ FIXUP_ESPFIX_STACK # %eax == %esp
+- CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved
+ xorl %edx,%edx # zero error code
+ call do_nmi
+ RESTORE_REGS
+- lss 12+4(%esp), %esp # back to 16bit stack
++ lss 12+4(%esp), %esp # back to espfix stack
++ CFI_ADJUST_CFA_OFFSET -24
+ 1: INTERRUPT_RETURN
+ CFI_ENDPROC
+ .section __ex_table,"a"
+@@ -1068,12 +1058,25 @@ KPROBE_ENTRY(nmi)
+ xorl %edx,%edx # zero error code
+ movl %esp,%eax # pt_regs pointer
+ call do_nmi
+- orl $NMI_MASK, EFLAGS(%esp)
++ orl $NMI_MASK, PT_EFLAGS(%esp)
+ jmp restore_all
+ CFI_ENDPROC
+ #endif
+ KPROBE_END(nmi)
+
++#ifdef CONFIG_PARAVIRT
++ENTRY(native_iret)
++1: iret
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++
++ENTRY(native_irq_enable_sysexit)
++ sti
++ sysexit
++#endif
++
+ KPROBE_ENTRY(int3)
+ RING0_INT_FRAME
+ pushl $-1 # mark this as an int
+@@ -1189,37 +1192,6 @@ ENTRY(spurious_interrupt_bug)
+ CFI_ENDPROC
+ #endif /* !CONFIG_XEN */
+
+-#ifdef CONFIG_STACK_UNWIND
+-ENTRY(arch_unwind_init_running)
+- CFI_STARTPROC
+- movl 4(%esp), %edx
+- movl (%esp), %ecx
+- leal 4(%esp), %eax
+- movl %ebx, EBX(%edx)
+- xorl %ebx, %ebx
+- movl %ebx, ECX(%edx)
+- movl %ebx, EDX(%edx)
+- movl %esi, ESI(%edx)
+- movl %edi, EDI(%edx)
+- movl %ebp, EBP(%edx)
+- movl %ebx, EAX(%edx)
+- movl $__USER_DS, DS(%edx)
+- movl $__USER_DS, ES(%edx)
+- movl %ebx, ORIG_EAX(%edx)
+- movl %ecx, EIP(%edx)
+- movl 12(%esp), %ecx
+- movl $__KERNEL_CS, CS(%edx)
+- movl %ebx, EFLAGS(%edx)
+- movl %eax, OLDESP(%edx)
+- movl 8(%esp), %eax
+- movl %ecx, 8(%esp)
+- movl EBX(%edx), %ebx
+- movl $__KERNEL_DS, OLDSS(%edx)
+- jmpl *%eax
+- CFI_ENDPROC
+-ENDPROC(arch_unwind_init_running)
+-#endif
+-
+ ENTRY(fixup_4gb_segment)
+ RING0_EC_FRAME
+ pushl $do_fixup_4gb_segment
+Index: 10.3-2007-11-26/arch/i386/kernel/head-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/head-xen.S 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/head-xen.S 2007-10-22 13:53:25.000000000 +0200
+@@ -9,6 +9,7 @@
+ #include <asm/cache.h>
+ #include <asm/thread_info.h>
+ #include <asm/asm-offsets.h>
++#include <asm/boot.h>
+ #include <asm/dwarf2.h>
+ #include <xen/interface/xen.h>
+ #include <xen/interface/elfnote.h>
+@@ -35,6 +36,8 @@ ENTRY(startup_32)
+ /* Set up the stack pointer */
+ movl $(init_thread_union+THREAD_SIZE),%esp
+
++ call setup_pda
++
+ /* get vendor info */
+ xorl %eax,%eax # call CPUID with 0 -> return vendor ID
+ XEN_CPUID
+@@ -57,14 +60,58 @@ ENTRY(startup_32)
+
+ movb $1,X86_HARD_MATH
+
+- xorl %eax,%eax # Clear FS/GS and LDT
++ xorl %eax,%eax # Clear FS
+ movl %eax,%fs
+- movl %eax,%gs
++
++ movl $(__KERNEL_PDA),%eax
++ mov %eax,%gs
++
+ cld # gcc2 wants the direction flag cleared at all times
+
+ pushl $0 # fake return address for unwinder
+ jmp start_kernel
+
++/*
++ * Point the GDT at this CPU's PDA. This will be
++ * cpu_gdt_table and boot_pda.
++ */
++setup_pda:
++ /* get the PDA pointer */
++ movl $boot_pda, %eax
++
++ /* slot the PDA address into the GDT */
++ mov $cpu_gdt_table, %ecx
++ mov %ax, (__KERNEL_PDA+0+2)(%ecx) /* base & 0x0000ffff */
++ shr $16, %eax
++ mov %al, (__KERNEL_PDA+4+0)(%ecx) /* base & 0x00ff0000 */
++ mov %ah, (__KERNEL_PDA+4+3)(%ecx) /* base & 0xff000000 */
++
++ # %esi still points to start_info, and no registers
++ # need to be preserved.
++
++ movl XEN_START_mfn_list(%esi), %ebx
++ movl $(cpu_gdt_table - __PAGE_OFFSET), %eax
++ shrl $PAGE_SHIFT, %eax
++ movl (%ebx,%eax,4), %ecx
++ pushl %ecx # frame number for set_gdt below
++
++ xorl %esi, %esi
++ xorl %edx, %edx
++ shldl $PAGE_SHIFT, %ecx, %edx
++ shll $PAGE_SHIFT, %ecx
++ orl $0x61, %ecx
++ movl $cpu_gdt_table, %ebx
++ movl $__HYPERVISOR_update_va_mapping, %eax
++ int $0x82
++
++ movl $(PAGE_SIZE_asm / 8), %ecx
++ movl %esp, %ebx
++ movl $__HYPERVISOR_set_gdt, %eax
++ int $0x82
++
++ popl %ecx
++ ret
++
+ #define HYPERCALL_PAGE_OFFSET 0x1000
+ .org HYPERCALL_PAGE_OFFSET
+ ENTRY(hypercall_page)
+@@ -93,7 +140,8 @@ ENTRY(empty_zero_page)
+ /*
+ * The Global Descriptor Table contains 28 quadwords, per-CPU.
+ */
+- .align L1_CACHE_BYTES
++ .section .data.page_aligned, "aw"
++ .align PAGE_SIZE_asm
+ ENTRY(cpu_gdt_table)
+ .quad 0x0000000000000000 /* NULL descriptor */
+ .quad 0x0000000000000000 /* 0x0b reserved */
+@@ -135,12 +183,13 @@ ENTRY(cpu_gdt_table)
+ .quad 0x0000000000000000 /* 0xc0 APM CS 16 code (16 bit) */
+ .quad 0x0000000000000000 /* 0xc8 APM DS data */
+
+- .quad 0x0000000000000000 /* 0xd0 - ESPFIX 16-bit SS */
+- .quad 0x0000000000000000 /* 0xd8 - unused */
++ .quad 0x0000000000000000 /* 0xd0 - ESPFIX SS */
++ .quad 0x00cf92000000ffff /* 0xd8 - PDA */
+ .quad 0x0000000000000000 /* 0xe0 - unused */
+ .quad 0x0000000000000000 /* 0xe8 - unused */
+ .quad 0x0000000000000000 /* 0xf0 - unused */
+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
++ .align PAGE_SIZE_asm
+
+ #if CONFIG_XEN_COMPAT <= 0x030002
+ /*
+@@ -165,9 +214,9 @@ ENTRY(cpu_gdt_table)
+ .ascii ",ELF_PADDR_OFFSET=0x"
+ utoa __PAGE_OFFSET
+ .ascii ",VIRT_ENTRY=0x"
+- utoa (__PAGE_OFFSET + __PHYSICAL_START + VIRT_ENTRY_OFFSET)
++ utoa (__PAGE_OFFSET + LOAD_PHYSICAL_ADDR + VIRT_ENTRY_OFFSET)
+ .ascii ",HYPERCALL_PAGE=0x"
+- utoa ((__PHYSICAL_START+HYPERCALL_PAGE_OFFSET)>>PAGE_SHIFT)
++ utoa ((LOAD_PHYSICAL_ADDR+HYPERCALL_PAGE_OFFSET)>>PAGE_SHIFT)
+ .ascii ",FEATURES=writable_page_tables"
+ .ascii "|writable_descriptor_tables"
+ .ascii "|auto_translated_physmap"
+Index: 10.3-2007-11-26/arch/i386/kernel/io_apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/io_apic-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/io_apic-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -34,6 +34,7 @@
+ #include <linux/pci.h>
+ #include <linux/msi.h>
+ #include <linux/htirq.h>
++#include <linux/freezer.h>
+
+ #include <asm/io.h>
+ #include <asm/smp.h>
+@@ -194,14 +195,20 @@ static struct IO_APIC_route_entry ioapic
+ * the interrupt, and we need to make sure the entry is fully populated
+ * before that happens.
+ */
+-static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
++static void
++__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+ {
+- unsigned long flags;
+ union entry_union eu;
+ eu.entry = e;
+- spin_lock_irqsave(&ioapic_lock, flags);
+ io_apic_write(apic, 0x11 + 2*pin, eu.w2);
+ io_apic_write(apic, 0x10 + 2*pin, eu.w1);
++}
++
++static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __ioapic_write_entry(apic, pin, e);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+
+@@ -883,8 +890,7 @@ static int __init find_isa_irq_pin(int i
+
+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
+ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
+- mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
+- mp_bus_id_to_type[lbus] == MP_BUS_NEC98
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA
+ ) &&
+ (mp_irqs[i].mpc_irqtype == type) &&
+ (mp_irqs[i].mpc_srcbusirq == irq))
+@@ -903,8 +909,7 @@ static int __init find_isa_irq_apic(int
+
+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
+ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
+- mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
+- mp_bus_id_to_type[lbus] == MP_BUS_NEC98
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA
+ ) &&
+ (mp_irqs[i].mpc_irqtype == type) &&
+ (mp_irqs[i].mpc_srcbusirq == irq))
+@@ -1036,12 +1041,6 @@ static int EISA_ELCR(unsigned int irq)
+ #define default_MCA_trigger(idx) (1)
+ #define default_MCA_polarity(idx) (0)
+
+-/* NEC98 interrupts are always polarity zero edge triggered,
+- * when listed as conforming in the MP table. */
+-
+-#define default_NEC98_trigger(idx) (0)
+-#define default_NEC98_polarity(idx) (0)
+-
+ static int __init MPBIOS_polarity(int idx)
+ {
+ int bus = mp_irqs[idx].mpc_srcbus;
+@@ -1076,11 +1075,6 @@ static int __init MPBIOS_polarity(int id
+ polarity = default_MCA_polarity(idx);
+ break;
+ }
+- case MP_BUS_NEC98: /* NEC 98 pin */
+- {
+- polarity = default_NEC98_polarity(idx);
+- break;
+- }
+ default:
+ {
+ printk(KERN_WARNING "broken BIOS!!\n");
+@@ -1150,11 +1144,6 @@ static int MPBIOS_trigger(int idx)
+ trigger = default_MCA_trigger(idx);
+ break;
+ }
+- case MP_BUS_NEC98: /* NEC 98 pin */
+- {
+- trigger = default_NEC98_trigger(idx);
+- break;
+- }
+ default:
+ {
+ printk(KERN_WARNING "broken BIOS!!\n");
+@@ -1216,7 +1205,6 @@ static int pin_2_irq(int idx, int apic,
+ case MP_BUS_ISA: /* ISA pin */
+ case MP_BUS_EISA:
+ case MP_BUS_MCA:
+- case MP_BUS_NEC98:
+ {
+ irq = mp_irqs[idx].mpc_srcbusirq;
+ break;
+@@ -1284,7 +1272,7 @@ static inline int IO_APIC_irq_trigger(in
+ }
+
+ /* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
+-u8 irq_vector[NR_IRQ_VECTORS] __read_mostly; /* = { FIRST_DEVICE_VECTOR , 0 }; */
++static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly; /* = { FIRST_DEVICE_VECTOR , 0 }; */
+
+ static int __assign_irq_vector(int irq)
+ {
+@@ -1407,8 +1395,8 @@ static void __init setup_IO_APIC_irqs(vo
+ if (!apic && (irq < 16))
+ disable_8259A_irq(irq);
+ }
+- ioapic_write_entry(apic, pin, entry);
+ spin_lock_irqsave(&ioapic_lock, flags);
++ __ioapic_write_entry(apic, pin, entry);
+ set_native_irq_info(irq, TARGET_CPUS);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+@@ -1981,6 +1969,15 @@ static void __init setup_ioapic_ids_from
+ #endif
+
+ #ifndef CONFIG_XEN
++static int no_timer_check __initdata;
++
++static int __init notimercheck(char *s)
++{
++ no_timer_check = 1;
++ return 1;
++}
++__setup("no_timer_check", notimercheck);
++
+ /*
+ * There is a nasty bug in some older SMP boards, their mptable lies
+ * about the timer IRQ. We do the following to work around the situation:
+@@ -1989,10 +1986,13 @@ static void __init setup_ioapic_ids_from
+ * - if this function detects that timer IRQs are defunct, then we fall
+ * back to ISA timer IRQs
+ */
+-static int __init timer_irq_works(void)
++int __init timer_irq_works(void)
+ {
+ unsigned long t1 = jiffies;
+
++ if (no_timer_check)
++ return 1;
++
+ local_irq_enable();
+ /* Let ten ticks pass... */
+ mdelay((10 * 1000) / HZ);
+@@ -2219,9 +2219,15 @@ static inline void unlock_ExtINT_logic(v
+ unsigned char save_control, save_freq_select;
+
+ pin = find_isa_irq_pin(8, mp_INT);
++ if (pin == -1) {
++ WARN_ON_ONCE(1);
++ return;
++ }
+ apic = find_isa_irq_apic(8, mp_INT);
+- if (pin == -1)
++ if (apic == -1) {
++ WARN_ON_ONCE(1);
+ return;
++ }
+
+ entry0 = ioapic_read_entry(apic, pin);
+ clear_IO_APIC_pin(apic, pin);
+@@ -2266,7 +2272,7 @@ int timer_uses_ioapic_pin_0;
+ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
+ * fanatically on his truly buggy board.
+ */
+-static inline void check_timer(void)
++static inline void __init check_timer(void)
+ {
+ int apic1, pin1, apic2, pin2;
+ int vector;
+@@ -2550,7 +2556,7 @@ device_initcall(ioapic_init_sysfs);
+ int create_irq(void)
+ {
+ /* Allocate an unused irq */
+- int irq, new, vector;
++ int irq, new, vector = 0;
+ unsigned long flags;
+
+ irq = -ENOSPC;
+@@ -2932,8 +2938,8 @@ int io_apic_set_pci_routing (int ioapic,
+ if (!ioapic && (irq < 16))
+ disable_8259A_irq(irq);
+
+- ioapic_write_entry(ioapic, pin, entry);
+ spin_lock_irqsave(&ioapic_lock, flags);
++ __ioapic_write_entry(ioapic, pin, entry);
+ set_native_irq_info(irq, TARGET_CPUS);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+Index: 10.3-2007-11-26/arch/i386/kernel/ldt-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/ldt-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/ldt-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -177,16 +177,14 @@ static int read_default_ldt(void __user
+ {
+ int err;
+ unsigned long size;
+- void *address;
+
+ err = 0;
+- address = &default_ldt[0];
+ size = 5*sizeof(struct desc_struct);
+ if (size > bytecount)
+ size = bytecount;
+
+ err = size;
+- if (copy_to_user(ptr, address, size))
++ if (clear_user(ptr, size))
+ err = -EFAULT;
+
+ return err;
+Index: 10.3-2007-11-26/arch/i386/kernel/microcode-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/microcode-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/microcode-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -1,7 +1,7 @@
+ /*
+ * Intel CPU Microcode Update Driver for Linux
+ *
+- * Copyright (C) 2000-2004 Tigran Aivazian
++ * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+ * 2006 Shaohua Li <shaohua.li@intel.com>
+ *
+ * This driver allows to upgrade microcode on Intel processors
+@@ -43,7 +43,7 @@
+ #include <asm/processor.h>
+
+ MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
+-MODULE_AUTHOR("Tigran Aivazian <tigran@veritas.com>");
++MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
+ MODULE_LICENSE("GPL");
+
+ static int verbose;
+@@ -195,7 +195,7 @@ static int __init microcode_init (void)
+ request_microcode();
+
+ printk(KERN_INFO
+- "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@veritas.com>\n");
++ "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@aivazian.fsnet.co.uk>\n");
+ return 0;
+ }
+
+Index: 10.3-2007-11-26/arch/i386/kernel/mpparse-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/mpparse-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/mpparse-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -36,7 +36,7 @@
+
+ /* Have we found an MP table */
+ int smp_found_config;
+-unsigned int __initdata maxcpus = NR_CPUS;
++unsigned int __cpuinitdata maxcpus = NR_CPUS;
+
+ /*
+ * Various Linux-internal data structures created from the
+@@ -102,10 +102,10 @@ static int __init mpf_checksum(unsigned
+ */
+
+ static int mpc_record;
+-static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
++static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinitdata;
+
+ #ifndef CONFIG_XEN
+-static void __devinit MP_processor_info (struct mpc_config_processor *m)
++static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
+ {
+ int ver, apicid;
+ physid_mask_t phys_cpu;
+@@ -221,7 +221,7 @@ static void __devinit MP_processor_info
+ bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
+ }
+ #else
+-void __init MP_processor_info (struct mpc_config_processor *m)
++static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
+ {
+ num_processors++;
+ }
+@@ -256,8 +256,6 @@ static void __init MP_bus_info (struct m
+ mp_current_pci_id++;
+ } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
+- } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
+- mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
+ } else {
+ printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
+ }
+@@ -842,7 +840,7 @@ void __init mp_register_lapic_address(u6
+ #endif
+ }
+
+-void __devinit mp_register_lapic (u8 id, u8 enabled)
++void __cpuinit mp_register_lapic (u8 id, u8 enabled)
+ {
+ struct mpc_config_processor processor;
+ int boot_cpu = 0;
+Index: 10.3-2007-11-26/arch/i386/kernel/pci-dma-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/pci-dma-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/pci-dma-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -249,7 +249,7 @@ EXPORT_SYMBOL(dma_free_coherent);
+ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+ dma_addr_t device_addr, size_t size, int flags)
+ {
+- void __iomem *mem_base;
++ void __iomem *mem_base = NULL;
+ int pages = size >> PAGE_SHIFT;
+ int bitmap_size = (pages + 31)/32;
+
+@@ -266,14 +266,12 @@ int dma_declare_coherent_memory(struct d
+ if (!mem_base)
+ goto out;
+
+- dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
++ dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+ if (!dev->dma_mem)
+ goto out;
+- memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
+- dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
++ dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!dev->dma_mem->bitmap)
+ goto free1_out;
+- memset(dev->dma_mem->bitmap, 0, bitmap_size);
+
+ dev->dma_mem->virt_base = mem_base;
+ dev->dma_mem->device_base = device_addr;
+@@ -288,6 +286,8 @@ int dma_declare_coherent_memory(struct d
+ free1_out:
+ kfree(dev->dma_mem->bitmap);
+ out:
++ if (mem_base)
++ iounmap(mem_base);
+ return 0;
+ }
+ EXPORT_SYMBOL(dma_declare_coherent_memory);
+Index: 10.3-2007-11-26/arch/i386/kernel/process-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/process-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/process-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -60,6 +60,7 @@
+
+ #include <asm/tlbflush.h>
+ #include <asm/cpu.h>
++#include <asm/pda.h>
+
+ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+
+@@ -104,28 +105,24 @@ EXPORT_SYMBOL(enable_hlt);
+ */
+ static void poll_idle (void)
+ {
+- local_irq_enable();
+-
+- asm volatile(
+- "2:"
+- "testl %0, %1;"
+- "rep; nop;"
+- "je 2b;"
+- : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
++ cpu_relax();
+ }
+
+ static void xen_idle(void)
+ {
+- local_irq_disable();
++ current_thread_info()->status &= ~TS_POLLING;
++ /*
++ * TS_POLLING-cleared state must be visible before we
++ * test NEED_RESCHED:
++ */
++ smp_mb();
+
+- if (need_resched())
++ local_irq_disable();
++ if (!need_resched())
++ safe_halt(); /* enables interrupts racelessly */
++ else
+ local_irq_enable();
+- else {
+- current_thread_info()->status &= ~TS_POLLING;
+- smp_mb__after_clear_bit();
+- safe_halt();
+- current_thread_info()->status |= TS_POLLING;
+- }
++ current_thread_info()->status |= TS_POLLING;
+ }
+ #ifdef CONFIG_APM_MODULE
+ EXPORT_SYMBOL(default_idle);
+@@ -250,8 +247,8 @@ void show_regs(struct pt_regs * regs)
+ regs->eax,regs->ebx,regs->ecx,regs->edx);
+ printk("ESI: %08lx EDI: %08lx EBP: %08lx",
+ regs->esi, regs->edi, regs->ebp);
+- printk(" DS: %04x ES: %04x\n",
+- 0xffff & regs->xds,0xffff & regs->xes);
++ printk(" DS: %04x ES: %04x GS: %04x\n",
++ 0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xgs);
+
+ cr0 = read_cr0();
+ cr2 = read_cr2();
+@@ -282,6 +279,7 @@ int kernel_thread(int (*fn)(void *), voi
+
+ regs.xds = __USER_DS;
+ regs.xes = __USER_DS;
++ regs.xgs = __KERNEL_PDA;
+ regs.orig_eax = -1;
+ regs.eip = (unsigned long) kernel_thread_helper;
+ regs.xcs = __KERNEL_CS | get_kernel_rpl();
+@@ -358,7 +356,6 @@ int copy_thread(int nr, unsigned long cl
+ p->thread.eip = (unsigned long) ret_from_fork;
+
+ savesegment(fs,p->thread.fs);
+- savesegment(gs,p->thread.gs);
+
+ tsk = current;
+ if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
+@@ -437,7 +434,7 @@ void dump_thread(struct pt_regs * regs,
+ dump->regs.ds = regs->xds;
+ dump->regs.es = regs->xes;
+ savesegment(fs,dump->regs.fs);
+- savesegment(gs,dump->regs.gs);
++ dump->regs.gs = regs->xgs;
+ dump->regs.orig_eax = regs->orig_eax;
+ dump->regs.eip = regs->eip;
+ dump->regs.cs = regs->xcs;
+@@ -611,17 +608,19 @@ struct task_struct fastcall * __switch_t
+
+ (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
+
++ /* we're going to use this soon, after a few expensive things */
++ if (next_p->fpu_counter > 5)
++ prefetch(&next->i387.fxsave);
++
+ /*
+- * Restore %fs and %gs if needed.
++ * Restore %fs if needed.
+ *
+- * Glibc normally makes %fs be zero, and %gs is one of
+- * the TLS segments.
++ * Glibc normally makes %fs be zero.
+ */
+ if (unlikely(next->fs))
+ loadsegment(fs, next->fs);
+
+- if (next->gs)
+- loadsegment(gs, next->gs);
++ write_pda(pcurrent, next_p);
+
+ /*
+ * Now maybe handle debug registers
+@@ -631,6 +630,13 @@ struct task_struct fastcall * __switch_t
+
+ disable_tsc(prev_p, next_p);
+
++ /* If the task has used fpu the last 5 timeslices, just do a full
++ * restore of the math state immediately to avoid the trap; the
++ * chances of needing FPU soon are obviously high now
++ */
++ if (next_p->fpu_counter > 5)
++ math_state_restore();
++
+ return prev_p;
+ }
+
+Index: 10.3-2007-11-26/arch/i386/kernel/quirks-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/quirks-xen.c 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/quirks-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -3,10 +3,12 @@
+ */
+ #include <linux/pci.h>
+ #include <linux/irq.h>
++#include <asm/pci-direct.h>
++#include <asm/genapic.h>
++#include <asm/cpu.h>
+
+ #if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI)
+-
+-static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
++static void __devinit verify_quirk_intel_irqbalance(struct pci_dev *dev)
+ {
+ u8 config, rev;
+ u32 word;
+@@ -14,14 +16,12 @@ static void __devinit quirk_intel_irqbal
+ /* BIOS may enable hardware IRQ balancing for
+ * E7520/E7320/E7525(revision ID 0x9 and below)
+ * based platforms.
+- * Disable SW irqbalance/affinity on those platforms.
++ * For those platforms, make sure that the genapic is set to 'flat'
+ */
+ pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
+ if (rev > 0x9)
+ return;
+
+- printk(KERN_INFO "Intel E7520/7320/7525 detected.");
+-
+ /* enable access to config space*/
+ pci_read_config_byte(dev, 0xf4, &config);
+ pci_write_config_byte(dev, 0xf4, config|0x2);
+@@ -30,6 +30,46 @@ static void __devinit quirk_intel_irqbal
+ raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
+
+ if (!(word & (1 << 13))) {
++#ifndef CONFIG_XEN
++#ifdef CONFIG_X86_64
++ if (genapic != &apic_flat)
++ panic("APIC mode must be flat on this system\n");
++#elif defined(CONFIG_X86_GENERICARCH)
++ if (genapic != &apic_default)
++ panic("APIC mode must be default(flat) on this system. Use apic=default\n");
++#endif
++#endif
++ }
++
++ /* put back the original value for config space*/
++ if (!(config & 0x2))
++ pci_write_config_byte(dev, 0xf4, config);
++}
++
++void __init quirk_intel_irqbalance(void)
++{
++ u8 config, rev;
++ u32 word;
++
++ /* BIOS may enable hardware IRQ balancing for
++ * E7520/E7320/E7525(revision ID 0x9 and below)
++ * based platforms.
++ * Disable SW irqbalance/affinity on those platforms.
++ */
++ rev = read_pci_config_byte(0, 0, 0, PCI_CLASS_REVISION);
++ if (rev > 0x9)
++ return;
++
++ printk(KERN_INFO "Intel E7520/7320/7525 detected.");
++
++ /* enable access to config space */
++ config = read_pci_config_byte(0, 0, 0, 0xf4);
++ write_pci_config_byte(0, 0, 0, 0xf4, config|0x2);
++
++ /* read xTPR register */
++ word = read_pci_config_16(0, 0, 0x40, 0x4c);
++
++ if (!(word & (1 << 13))) {
+ struct xen_platform_op op;
+ printk(KERN_INFO "Disabling irq balancing and affinity\n");
+ op.cmd = XENPF_platform_quirk;
+@@ -37,11 +77,12 @@ static void __devinit quirk_intel_irqbal
+ (void)HYPERVISOR_platform_op(&op);
+ }
+
+- /* put back the original value for config space*/
++ /* put back the original value for config space */
+ if (!(config & 0x2))
+- pci_write_config_byte(dev, 0xf4, config);
++ write_pci_config_byte(0, 0, 0, 0xf4, config);
+ }
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, verify_quirk_intel_irqbalance);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, verify_quirk_intel_irqbalance);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, verify_quirk_intel_irqbalance);
++
+ #endif
+Index: 10.3-2007-11-26/arch/i386/kernel/setup-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/setup-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/setup-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -76,9 +76,6 @@
+ #include <xen/interface/kexec.h>
+ #endif
+
+-/* Forward Declaration. */
+-void __init find_max_pfn(void);
+-
+ static int xen_panic_event(struct notifier_block *, unsigned long, void *);
+ static struct notifier_block xen_panic_block = {
+ xen_panic_event, NULL, 0 /* try to go last */
+@@ -92,14 +89,11 @@ int disable_pse __devinitdata = 0;
+ /*
+ * Machine setup..
+ */
+-
+-#ifdef CONFIG_EFI
+-int efi_enabled = 0;
+-EXPORT_SYMBOL(efi_enabled);
+-#endif
++extern struct resource code_resource;
++extern struct resource data_resource;
+
+ /* cpu data as detected by the assembly code in head.S */
+-struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
++struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
+ /* common cpu data for all cpus */
+ struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
+ EXPORT_SYMBOL(boot_cpu_data);
+@@ -115,12 +109,6 @@ unsigned int machine_submodel_id;
+ unsigned int BIOS_revision;
+ unsigned int mca_pentium_flag;
+
+-/* For PCI or other memory-mapped resources */
+-unsigned long pci_mem_start = 0x10000000;
+-#ifdef CONFIG_PCI
+-EXPORT_SYMBOL(pci_mem_start);
+-#endif
+-
+ /* Boot loader ID as an integer, for the benefit of proc_dointvec */
+ int bootloader_type;
+
+@@ -153,10 +141,6 @@ struct ist_info ist_info;
+ defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
+ EXPORT_SYMBOL(ist_info);
+ #endif
+-struct e820map e820;
+-#ifdef CONFIG_XEN
+-struct e820map machine_e820;
+-#endif
+
+ extern void early_cpu_init(void);
+ extern int root_mountflags;
+@@ -171,209 +155,6 @@ static char command_line[COMMAND_LINE_SI
+
+ unsigned char __initdata boot_params[PARAM_SIZE];
+
+-static struct resource data_resource = {
+- .name = "Kernel data",
+- .start = 0,
+- .end = 0,
+- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+-};
+-
+-static struct resource code_resource = {
+- .name = "Kernel code",
+- .start = 0,
+- .end = 0,
+- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+-};
+-
+-static struct resource system_rom_resource = {
+- .name = "System ROM",
+- .start = 0xf0000,
+- .end = 0xfffff,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-};
+-
+-static struct resource extension_rom_resource = {
+- .name = "Extension ROM",
+- .start = 0xe0000,
+- .end = 0xeffff,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-};
+-
+-static struct resource adapter_rom_resources[] = { {
+- .name = "Adapter ROM",
+- .start = 0xc8000,
+- .end = 0,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-}, {
+- .name = "Adapter ROM",
+- .start = 0,
+- .end = 0,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-}, {
+- .name = "Adapter ROM",
+- .start = 0,
+- .end = 0,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-}, {
+- .name = "Adapter ROM",
+- .start = 0,
+- .end = 0,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-}, {
+- .name = "Adapter ROM",
+- .start = 0,
+- .end = 0,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-}, {
+- .name = "Adapter ROM",
+- .start = 0,
+- .end = 0,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-} };
+-
+-static struct resource video_rom_resource = {
+- .name = "Video ROM",
+- .start = 0xc0000,
+- .end = 0xc7fff,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-};
+-
+-static struct resource video_ram_resource = {
+- .name = "Video RAM area",
+- .start = 0xa0000,
+- .end = 0xbffff,
+- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+-};
+-
+-static struct resource standard_io_resources[] = { {
+- .name = "dma1",
+- .start = 0x0000,
+- .end = 0x001f,
+- .flags = IORESOURCE_BUSY | IORESOURCE_IO
+-}, {
+- .name = "pic1",
+- .start = 0x0020,
+- .end = 0x0021,
+- .flags = IORESOURCE_BUSY | IORESOURCE_IO
+-}, {
+- .name = "timer0",
+- .start = 0x0040,
+- .end = 0x0043,
+- .flags = IORESOURCE_BUSY | IORESOURCE_IO
+-}, {
+- .name = "timer1",
+- .start = 0x0050,
+- .end = 0x0053,
+- .flags = IORESOURCE_BUSY | IORESOURCE_IO
+-}, {
+- .name = "keyboard",
+- .start = 0x0060,
+- .end = 0x006f,
+- .flags = IORESOURCE_BUSY | IORESOURCE_IO
+-}, {
+- .name = "dma page reg",
+- .start = 0x0080,
+- .end = 0x008f,
+- .flags = IORESOURCE_BUSY | IORESOURCE_IO
+-}, {
+- .name = "pic2",
+- .start = 0x00a0,
+- .end = 0x00a1,
+- .flags = IORESOURCE_BUSY | IORESOURCE_IO
+-}, {
+- .name = "dma2",
+- .start = 0x00c0,
+- .end = 0x00df,
+- .flags = IORESOURCE_BUSY | IORESOURCE_IO
+-}, {
+- .name = "fpu",
+- .start = 0x00f0,
+- .end = 0x00ff,
+- .flags = IORESOURCE_BUSY | IORESOURCE_IO
+-} };
+-
+-#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
+-
+-static int __init romchecksum(unsigned char *rom, unsigned long length)
+-{
+- unsigned char *p, sum = 0;
+-
+- for (p = rom; p < rom + length; p++)
+- sum += *p;
+- return sum == 0;
+-}
+-
+-static void __init probe_roms(void)
+-{
+- unsigned long start, length, upper;
+- unsigned char *rom;
+- int i;
+-
+-#ifdef CONFIG_XEN
+- /* Nothing to do if not running in dom0. */
+- if (!is_initial_xendomain())
+- return;
+-#endif
+-
+- /* video rom */
+- upper = adapter_rom_resources[0].start;
+- for (start = video_rom_resource.start; start < upper; start += 2048) {
+- rom = isa_bus_to_virt(start);
+- if (!romsignature(rom))
+- continue;
+-
+- video_rom_resource.start = start;
+-
+- /* 0 < length <= 0x7f * 512, historically */
+- length = rom[2] * 512;
+-
+- /* if checksum okay, trust length byte */
+- if (length && romchecksum(rom, length))
+- video_rom_resource.end = start + length - 1;
+-
+- request_resource(&iomem_resource, &video_rom_resource);
+- break;
+- }
+-
+- start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
+- if (start < upper)
+- start = upper;
+-
+- /* system rom */
+- request_resource(&iomem_resource, &system_rom_resource);
+- upper = system_rom_resource.start;
+-
+- /* check for extension rom (ignore length byte!) */
+- rom = isa_bus_to_virt(extension_rom_resource.start);
+- if (romsignature(rom)) {
+- length = extension_rom_resource.end - extension_rom_resource.start + 1;
+- if (romchecksum(rom, length)) {
+- request_resource(&iomem_resource, &extension_rom_resource);
+- upper = extension_rom_resource.start;
+- }
+- }
+-
+- /* check for adapter roms on 2k boundaries */
+- for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; start += 2048) {
+- rom = isa_bus_to_virt(start);
+- if (!romsignature(rom))
+- continue;
+-
+- /* 0 < length <= 0x7f * 512, historically */
+- length = rom[2] * 512;
+-
+- /* but accept any length that fits if checksum okay */
+- if (!length || start + length > upper || !romchecksum(rom, length))
+- continue;
+-
+- adapter_rom_resources[i].start = start;
+- adapter_rom_resources[i].end = start + length - 1;
+- request_resource(&iomem_resource, &adapter_rom_resources[i]);
+-
+- start = adapter_rom_resources[i++].end & ~2047UL;
+- }
+-}
+-
+ /*
+ * Point at the empty zero page to start with. We map the real shared_info
+ * page as soon as fixmap is up and running.
+@@ -389,338 +170,6 @@ EXPORT_SYMBOL(phys_to_machine_mapping);
+ start_info_t *xen_start_info;
+ EXPORT_SYMBOL(xen_start_info);
+
+-void __init add_memory_region(unsigned long long start,
+- unsigned long long size, int type)
+-{
+- int x;
+-
+- if (!efi_enabled) {
+- x = e820.nr_map;
+-
+- if (x == E820MAX) {
+- printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+- return;
+- }
+-
+- e820.map[x].addr = start;
+- e820.map[x].size = size;
+- e820.map[x].type = type;
+- e820.nr_map++;
+- }
+-} /* add_memory_region */
+-
+-static void __init limit_regions(unsigned long long size)
+-{
+- unsigned long long current_addr = 0;
+- int i;
+-
+- if (efi_enabled) {
+- efi_memory_desc_t *md;
+- void *p;
+-
+- for (p = memmap.map, i = 0; p < memmap.map_end;
+- p += memmap.desc_size, i++) {
+- md = p;
+- current_addr = md->phys_addr + (md->num_pages << 12);
+- if (md->type == EFI_CONVENTIONAL_MEMORY) {
+- if (current_addr >= size) {
+- md->num_pages -=
+- (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
+- memmap.nr_map = i + 1;
+- return;
+- }
+- }
+- }
+- }
+- for (i = 0; i < e820.nr_map; i++) {
+- current_addr = e820.map[i].addr + e820.map[i].size;
+- if (current_addr < size)
+- continue;
+-
+- if (e820.map[i].type != E820_RAM)
+- continue;
+-
+- if (e820.map[i].addr >= size) {
+- /*
+- * This region starts past the end of the
+- * requested size, skip it completely.
+- */
+- e820.nr_map = i;
+- } else {
+- e820.nr_map = i + 1;
+- e820.map[i].size -= current_addr - size;
+- }
+- return;
+- }
+-#ifdef CONFIG_XEN
+- if (i==e820.nr_map && current_addr < size) {
+- /*
+- * The e820 map finished before our requested size so
+- * extend the final entry to the requested address.
+- */
+- --i;
+- if (e820.map[i].type == E820_RAM)
+- e820.map[i].size -= current_addr - size;
+- else
+- add_memory_region(current_addr, size - current_addr, E820_RAM);
+- }
+-#endif
+-}
+-
+-#define E820_DEBUG 1
+-
+-static void __init print_memory_map(char *who)
+-{
+- int i;
+-
+- for (i = 0; i < e820.nr_map; i++) {
+- printk(" %s: %016Lx - %016Lx ", who,
+- e820.map[i].addr,
+- e820.map[i].addr + e820.map[i].size);
+- switch (e820.map[i].type) {
+- case E820_RAM: printk("(usable)\n");
+- break;
+- case E820_RESERVED:
+- printk("(reserved)\n");
+- break;
+- case E820_ACPI:
+- printk("(ACPI data)\n");
+- break;
+- case E820_NVS:
+- printk("(ACPI NVS)\n");
+- break;
+- default: printk("type %lu\n", e820.map[i].type);
+- break;
+- }
+- }
+-}
+-
+-/*
+- * Sanitize the BIOS e820 map.
+- *
+- * Some e820 responses include overlapping entries. The following
+- * replaces the original e820 map with a new one, removing overlaps.
+- *
+- */
+-struct change_member {
+- struct e820entry *pbios; /* pointer to original bios entry */
+- unsigned long long addr; /* address for this change point */
+-};
+-static struct change_member change_point_list[2*E820MAX] __initdata;
+-static struct change_member *change_point[2*E820MAX] __initdata;
+-static struct e820entry *overlap_list[E820MAX] __initdata;
+-static struct e820entry new_bios[E820MAX] __initdata;
+-
+-int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
+-{
+- struct change_member *change_tmp;
+- unsigned long current_type, last_type;
+- unsigned long long last_addr;
+- int chgidx, still_changing;
+- int overlap_entries;
+- int new_bios_entry;
+- int old_nr, new_nr, chg_nr;
+- int i;
+-
+- /*
+- Visually we're performing the following (1,2,3,4 = memory types)...
+-
+- Sample memory map (w/overlaps):
+- ____22__________________
+- ______________________4_
+- ____1111________________
+- _44_____________________
+- 11111111________________
+- ____________________33__
+- ___________44___________
+- __________33333_________
+- ______________22________
+- ___________________2222_
+- _________111111111______
+- _____________________11_
+- _________________4______
+-
+- Sanitized equivalent (no overlap):
+- 1_______________________
+- _44_____________________
+- ___1____________________
+- ____22__________________
+- ______11________________
+- _________1______________
+- __________3_____________
+- ___________44___________
+- _____________33_________
+- _______________2________
+- ________________1_______
+- _________________4______
+- ___________________2____
+- ____________________33__
+- ______________________4_
+- */
+-
+- /* if there's only one memory region, don't bother */
+- if (*pnr_map < 2)
+- return -1;
+-
+- old_nr = *pnr_map;
+-
+- /* bail out if we find any unreasonable addresses in bios map */
+- for (i=0; i<old_nr; i++)
+- if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
+- return -1;
+-
+- /* create pointers for initial change-point information (for sorting) */
+- for (i=0; i < 2*old_nr; i++)
+- change_point[i] = &change_point_list[i];
+-
+- /* record all known change-points (starting and ending addresses),
+- omitting those that are for empty memory regions */
+- chgidx = 0;
+- for (i=0; i < old_nr; i++) {
+- if (biosmap[i].size != 0) {
+- change_point[chgidx]->addr = biosmap[i].addr;
+- change_point[chgidx++]->pbios = &biosmap[i];
+- change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
+- change_point[chgidx++]->pbios = &biosmap[i];
+- }
+- }
+- chg_nr = chgidx; /* true number of change-points */
+-
+- /* sort change-point list by memory addresses (low -> high) */
+- still_changing = 1;
+- while (still_changing) {
+- still_changing = 0;
+- for (i=1; i < chg_nr; i++) {
+- /* if <current_addr> > <last_addr>, swap */
+- /* or, if current=<start_addr> & last=<end_addr>, swap */
+- if ((change_point[i]->addr < change_point[i-1]->addr) ||
+- ((change_point[i]->addr == change_point[i-1]->addr) &&
+- (change_point[i]->addr == change_point[i]->pbios->addr) &&
+- (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
+- )
+- {
+- change_tmp = change_point[i];
+- change_point[i] = change_point[i-1];
+- change_point[i-1] = change_tmp;
+- still_changing=1;
+- }
+- }
+- }
+-
+- /* create a new bios memory map, removing overlaps */
+- overlap_entries=0; /* number of entries in the overlap table */
+- new_bios_entry=0; /* index for creating new bios map entries */
+- last_type = 0; /* start with undefined memory type */
+- last_addr = 0; /* start with 0 as last starting address */
+- /* loop through change-points, determining affect on the new bios map */
+- for (chgidx=0; chgidx < chg_nr; chgidx++)
+- {
+- /* keep track of all overlapping bios entries */
+- if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
+- {
+- /* add map entry to overlap list (> 1 entry implies an overlap) */
+- overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
+- }
+- else
+- {
+- /* remove entry from list (order independent, so swap with last) */
+- for (i=0; i<overlap_entries; i++)
+- {
+- if (overlap_list[i] == change_point[chgidx]->pbios)
+- overlap_list[i] = overlap_list[overlap_entries-1];
+- }
+- overlap_entries--;
+- }
+- /* if there are overlapping entries, decide which "type" to use */
+- /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
+- current_type = 0;
+- for (i=0; i<overlap_entries; i++)
+- if (overlap_list[i]->type > current_type)
+- current_type = overlap_list[i]->type;
+- /* continue building up new bios map based on this information */
+- if (current_type != last_type) {
+- if (last_type != 0) {
+- new_bios[new_bios_entry].size =
+- change_point[chgidx]->addr - last_addr;
+- /* move forward only if the new size was non-zero */
+- if (new_bios[new_bios_entry].size != 0)
+- if (++new_bios_entry >= E820MAX)
+- break; /* no more space left for new bios entries */
+- }
+- if (current_type != 0) {
+- new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
+- new_bios[new_bios_entry].type = current_type;
+- last_addr=change_point[chgidx]->addr;
+- }
+- last_type = current_type;
+- }
+- }
+- new_nr = new_bios_entry; /* retain count for new bios entries */
+-
+- /* copy new bios mapping into original location */
+- memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
+- *pnr_map = new_nr;
+-
+- return 0;
+-}
+-
+-/*
+- * Copy the BIOS e820 map into a safe place.
+- *
+- * Sanity-check it while we're at it..
+- *
+- * If we're lucky and live on a modern system, the setup code
+- * will have given us a memory map that we can use to properly
+- * set up memory. If we aren't, we'll fake a memory map.
+- *
+- * We check to see that the memory map contains at least 2 elements
+- * before we'll use it, because the detection code in setup.S may
+- * not be perfect and most every PC known to man has two memory
+- * regions: one from 0 to 640k, and one from 1mb up. (The IBM
+- * thinkpad 560x, for example, does not cooperate with the memory
+- * detection code.)
+- */
+-int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+-{
+-#ifndef CONFIG_XEN
+- /* Only one memory region (or negative)? Ignore it */
+- if (nr_map < 2)
+- return -1;
+-#else
+- BUG_ON(nr_map < 1);
+-#endif
+-
+- do {
+- unsigned long long start = biosmap->addr;
+- unsigned long long size = biosmap->size;
+- unsigned long long end = start + size;
+- unsigned long type = biosmap->type;
+-
+- /* Overflow in 64 bits? Ignore the memory map. */
+- if (start > end)
+- return -1;
+-
+-#ifndef CONFIG_XEN
+- /*
+- * Some BIOSes claim RAM in the 640k - 1M region.
+- * Not right. Fix it up.
+- */
+- if (type == E820_RAM) {
+- if (start < 0x100000ULL && end > 0xA0000ULL) {
+- if (start < 0xA0000ULL)
+- add_memory_region(start, 0xA0000ULL-start, type);
+- if (end <= 0x100000ULL)
+- continue;
+- start = 0x100000ULL;
+- size = end - start;
+- }
+- }
+-#endif
+- add_memory_region(start, size, type);
+- } while (biosmap++,--nr_map);
+- return 0;
+-}
+-
+ #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
+ struct edd edd;
+ #ifdef CONFIG_EDD_MODULE
+@@ -746,7 +195,7 @@ static inline void copy_edd(void)
+ }
+ #endif
+
+-static int __initdata user_defined_memmap = 0;
++int __initdata user_defined_memmap = 0;
+
+ /*
+ * "mem=nopentium" disables the 4MB page tables.
+@@ -783,51 +232,6 @@ static int __init parse_mem(char *arg)
+ }
+ early_param("mem", parse_mem);
+
+-static int __init parse_memmap(char *arg)
+-{
+- if (!arg)
+- return -EINVAL;
+-
+- if (strcmp(arg, "exactmap") == 0) {
+-#ifdef CONFIG_CRASH_DUMP
+- /* If we are doing a crash dump, we
+- * still need to know the real mem
+- * size before original memory map is
+- * reset.
+- */
+- find_max_pfn();
+- saved_max_pfn = max_pfn;
+-#endif
+- e820.nr_map = 0;
+- user_defined_memmap = 1;
+- } else {
+- /* If the user specifies memory size, we
+- * limit the BIOS-provided memory map to
+- * that size. exactmap can be used to specify
+- * the exact map. mem=number can be used to
+- * trim the existing memory map.
+- */
+- unsigned long long start_at, mem_size;
+-
+- mem_size = memparse(arg, &arg);
+- if (*arg == '@') {
+- start_at = memparse(arg+1, &arg);
+- add_memory_region(start_at, mem_size, E820_RAM);
+- } else if (*arg == '#') {
+- start_at = memparse(arg+1, &arg);
+- add_memory_region(start_at, mem_size, E820_ACPI);
+- } else if (*arg == '$') {
+- start_at = memparse(arg+1, &arg);
+- add_memory_region(start_at, mem_size, E820_RESERVED);
+- } else {
+- limit_regions(mem_size);
+- user_defined_memmap = 1;
+- }
+- }
+- return 0;
+-}
+-early_param("memmap", parse_memmap);
+-
+ #ifdef CONFIG_PROC_VMCORE
+ /* elfcorehdr= specifies the location of elf core header
+ * stored by the crashed kernel.
+@@ -894,98 +298,6 @@ early_param("reservetop", parse_reservet
+ #endif
+
+ /*
+- * Callback for efi_memory_walk.
+- */
+-static int __init
+-efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
+-{
+- unsigned long *max_pfn = arg, pfn;
+-
+- if (start < end) {
+- pfn = PFN_UP(end -1);
+- if (pfn > *max_pfn)
+- *max_pfn = pfn;
+- }
+- return 0;
+-}
+-
+-static int __init
+-efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
+-{
+- memory_present(0, PFN_UP(start), PFN_DOWN(end));
+- return 0;
+-}
+-
+- /*
+- * This function checks if the entire range <start,end> is mapped with type.
+- *
+- * Note: this function only works correct if the e820 table is sorted and
+- * not-overlapping, which is the case
+- */
+-int __init
+-e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
+-{
+- u64 start = s;
+- u64 end = e;
+- int i;
+-
+-#ifndef CONFIG_XEN
+- for (i = 0; i < e820.nr_map; i++) {
+- struct e820entry *ei = &e820.map[i];
+-#else
+- if (!is_initial_xendomain())
+- return 0;
+- for (i = 0; i < machine_e820.nr_map; ++i) {
+- const struct e820entry *ei = &machine_e820.map[i];
+-#endif
+- if (type && ei->type != type)
+- continue;
+- /* is the region (part) in overlap with the current region ?*/
+- if (ei->addr >= end || ei->addr + ei->size <= start)
+- continue;
+- /* if the region is at the beginning of <start,end> we move
+- * start to the end of the region since it's ok until there
+- */
+- if (ei->addr <= start)
+- start = ei->addr + ei->size;
+- /* if start is now at or beyond end, we're done, full
+- * coverage */
+- if (start >= end)
+- return 1; /* we're done */
+- }
+- return 0;
+-}
+-
+-/*
+- * Find the highest page frame number we have available
+- */
+-void __init find_max_pfn(void)
+-{
+- int i;
+-
+- max_pfn = 0;
+- if (efi_enabled) {
+- efi_memmap_walk(efi_find_max_pfn, &max_pfn);
+- efi_memmap_walk(efi_memory_present_wrapper, NULL);
+- return;
+- }
+-
+- for (i = 0; i < e820.nr_map; i++) {
+- unsigned long start, end;
+- /* RAM? */
+- if (e820.map[i].type != E820_RAM)
+- continue;
+- start = PFN_UP(e820.map[i].addr);
+- end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+- if (start >= end)
+- continue;
+- if (end > max_pfn)
+- max_pfn = end;
+- memory_present(0, start, end);
+- }
+-}
+-
+-/*
+ * Determine low and high memory ranges:
+ */
+ unsigned long __init find_max_low_pfn(void)
+@@ -1044,77 +356,6 @@ unsigned long __init find_max_low_pfn(vo
+ return max_low_pfn;
+ }
+
+-/*
+- * Free all available memory for boot time allocation. Used
+- * as a callback function by efi_memory_walk()
+- */
+-
+-static int __init
+-free_available_memory(unsigned long start, unsigned long end, void *arg)
+-{
+- /* check max_low_pfn */
+- if (start >= (max_low_pfn << PAGE_SHIFT))
+- return 0;
+- if (end >= (max_low_pfn << PAGE_SHIFT))
+- end = max_low_pfn << PAGE_SHIFT;
+- if (start < end)
+- free_bootmem(start, end - start);
+-
+- return 0;
+-}
+-/*
+- * Register fully available low RAM pages with the bootmem allocator.
+- */
+-static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
+-{
+- int i;
+-
+- if (efi_enabled) {
+- efi_memmap_walk(free_available_memory, NULL);
+- return;
+- }
+- for (i = 0; i < e820.nr_map; i++) {
+- unsigned long curr_pfn, last_pfn, size;
+- /*
+- * Reserve usable low memory
+- */
+- if (e820.map[i].type != E820_RAM)
+- continue;
+- /*
+- * We are rounding up the start address of usable memory:
+- */
+- curr_pfn = PFN_UP(e820.map[i].addr);
+- if (curr_pfn >= max_low_pfn)
+- continue;
+- /*
+- * ... and at the end of the usable range downwards:
+- */
+- last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+-
+-#ifdef CONFIG_XEN
+- /*
+- * Truncate to the number of actual pages currently
+- * present.
+- */
+- if (last_pfn > xen_start_info->nr_pages)
+- last_pfn = xen_start_info->nr_pages;
+-#endif
+-
+- if (last_pfn > max_low_pfn)
+- last_pfn = max_low_pfn;
+-
+- /*
+- * .. finally, did all the rounding and playing
+- * around just make the area go away?
+- */
+- if (last_pfn <= curr_pfn)
+- continue;
+-
+- size = last_pfn - curr_pfn;
+- free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
+- }
+-}
+-
+ #ifndef CONFIG_XEN
+ /*
+ * workaround for Dell systems that neglect to reserve EBDA
+@@ -1211,8 +452,8 @@ void __init setup_bootmem_allocator(void
+ * the (very unlikely) case of us accidentally initializing the
+ * bootmem allocator with an invalid RAM area.
+ */
+- reserve_bootmem(__PHYSICAL_START, (PFN_PHYS(min_low_pfn) +
+- bootmap_size + PAGE_SIZE-1) - (__PHYSICAL_START));
++ reserve_bootmem(__pa_symbol(_text), (PFN_PHYS(min_low_pfn) +
++ bootmap_size + PAGE_SIZE-1) - __pa_symbol(_text));
+
+ #ifndef CONFIG_XEN
+ /*
+@@ -1298,170 +539,6 @@ void __init remapped_pgdat_init(void)
+ }
+ }
+
+-/*
+- * Request address space for all standard RAM and ROM resources
+- * and also for regions reported as reserved by the e820.
+- */
+-static void __init
+-legacy_init_iomem_resources(struct e820entry *e820, int nr_map,
+- struct resource *code_resource,
+- struct resource *data_resource)
+-{
+- int i;
+-
+- probe_roms();
+-
+- for (i = 0; i < nr_map; i++) {
+- struct resource *res;
+-#ifndef CONFIG_RESOURCES_64BIT
+- if (e820[i].addr + e820[i].size > 0x100000000ULL)
+- continue;
+-#endif
+- res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
+- switch (e820[i].type) {
+- case E820_RAM: res->name = "System RAM"; break;
+- case E820_ACPI: res->name = "ACPI Tables"; break;
+- case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
+- default: res->name = "reserved";
+- }
+- res->start = e820[i].addr;
+- res->end = res->start + e820[i].size - 1;
+- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+- if (request_resource(&iomem_resource, res)) {
+- kfree(res);
+- continue;
+- }
+- if (e820[i].type == E820_RAM) {
+- /*
+- * We don't know which RAM region contains kernel data,
+- * so we try it repeatedly and let the resource manager
+- * test it.
+- */
+-#ifndef CONFIG_XEN
+- request_resource(res, code_resource);
+- request_resource(res, data_resource);
+-#endif
+-#ifdef CONFIG_KEXEC
+- if (crashk_res.start != crashk_res.end)
+- request_resource(res, &crashk_res);
+-#ifdef CONFIG_XEN
+- xen_machine_kexec_register_resources(res);
+-#endif
+-#endif
+- }
+- }
+-}
+-
+-/*
+- * Locate a unused range of the physical address space below 4G which
+- * can be used for PCI mappings.
+- */
+-static void __init
+-e820_setup_gap(struct e820entry *e820, int nr_map)
+-{
+- unsigned long gapstart, gapsize, round;
+- unsigned long long last;
+- int i;
+-
+- /*
+- * Search for the bigest gap in the low 32 bits of the e820
+- * memory space.
+- */
+- last = 0x100000000ull;
+- gapstart = 0x10000000;
+- gapsize = 0x400000;
+- i = nr_map;
+- while (--i >= 0) {
+- unsigned long long start = e820[i].addr;
+- unsigned long long end = start + e820[i].size;
+-
+- /*
+- * Since "last" is at most 4GB, we know we'll
+- * fit in 32 bits if this condition is true
+- */
+- if (last > end) {
+- unsigned long gap = last - end;
+-
+- if (gap > gapsize) {
+- gapsize = gap;
+- gapstart = end;
+- }
+- }
+- if (start < last)
+- last = start;
+- }
+-
+- /*
+- * See how much we want to round up: start off with
+- * rounding to the next 1MB area.
+- */
+- round = 0x100000;
+- while ((gapsize >> 4) > round)
+- round += round;
+- /* Fun with two's complement */
+- pci_mem_start = (gapstart + round) & -round;
+-
+- printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
+- pci_mem_start, gapstart, gapsize);
+-}
+-
+-/*
+- * Request address space for all standard resources
+- *
+- * This is called just before pcibios_init(), which is also a
+- * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
+- */
+-static int __init request_standard_resources(void)
+-{
+- int i;
+-
+- /* Nothing to do if not running in dom0. */
+- if (!is_initial_xendomain())
+- return 0;
+-
+- printk("Setting up standard PCI resources\n");
+-#ifdef CONFIG_XEN
+- legacy_init_iomem_resources(machine_e820.map, machine_e820.nr_map,
+- &code_resource, &data_resource);
+-#else
+- if (efi_enabled)
+- efi_initialize_iomem_resources(&code_resource, &data_resource);
+- else
+- legacy_init_iomem_resources(e820.map, e820.nr_map,
+- &code_resource, &data_resource);
+-#endif
+-
+- /* EFI systems may still have VGA */
+- request_resource(&iomem_resource, &video_ram_resource);
+-
+- /* request I/O space for devices used on all i[345]86 PCs */
+- for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
+- request_resource(&ioport_resource, &standard_io_resources[i]);
+- return 0;
+-}
+-
+-subsys_initcall(request_standard_resources);
+-
+-static void __init register_memory(void)
+-{
+-#ifdef CONFIG_XEN
+- if (is_initial_xendomain()) {
+- struct xen_memory_map memmap;
+-
+- memmap.nr_entries = E820MAX;
+- set_xen_guest_handle(memmap.buffer, machine_e820.map);
+-
+- if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
+- BUG();
+-
+- machine_e820.nr_map = memmap.nr_entries;
+- e820_setup_gap(machine_e820.map, machine_e820.nr_map);
+- }
+- else
+-#endif
+- e820_setup_gap(e820.map, e820.nr_map);
+-}
+-
+ #ifdef CONFIG_MCA
+ static void set_mca_bus(int x)
+ {
+@@ -1471,6 +548,12 @@ static void set_mca_bus(int x)
+ static void set_mca_bus(int x) { }
+ #endif
+
++/* Overridden in paravirt.c if CONFIG_PARAVIRT */
++char * __init __attribute__((weak)) memory_setup(void)
++{
++ return machine_specific_memory_setup();
++}
++
+ /*
+ * Determine if we were loaded by an EFI loader. If so, then we have also been
+ * passed the efi memmap, systab, etc., so we should use these data structures
+@@ -1568,7 +651,7 @@ void __init setup_arch(char **cmdline_p)
+ efi_init();
+ else {
+ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
+- print_memory_map(machine_specific_memory_setup());
++ print_memory_map(memory_setup());
+ }
+
+ copy_edd();
+@@ -1716,7 +799,7 @@ void __init setup_arch(char **cmdline_p)
+ get_smp_config();
+ #endif
+
+- register_memory();
++ e820_register_memory();
+
+ if (is_initial_xendomain()) {
+ #ifdef CONFIG_VT
+Index: 10.3-2007-11-26/arch/i386/kernel/smp-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/smp-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/smp-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -677,6 +677,10 @@ int smp_call_function_single(int cpu, vo
+ put_cpu();
+ return -EBUSY;
+ }
++
++ /* Can deadlock when called with interrupts disabled */
++ WARN_ON(irqs_disabled());
++
+ spin_lock_bh(&call_lock);
+ __smp_call_function_single(cpu, func, info, nonatomic, wait);
+ spin_unlock_bh(&call_lock);
+Index: 10.3-2007-11-26/arch/i386/kernel/time-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/time-xen.c 2007-12-06 17:31:58.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/time-xen.c 2007-12-06 17:32:10.000000000 +0100
+@@ -60,6 +60,7 @@
+ #include <asm/uaccess.h>
+ #include <asm/processor.h>
+ #include <asm/timer.h>
++#include <asm/time.h>
+ #include <asm/sections.h>
+
+ #include "mach_time.h"
+@@ -125,11 +126,11 @@ static DEFINE_PER_CPU(struct vcpu_runsta
+ /* Must be signed, as it's compared with s64 quantities which can be -ve. */
+ #define NS_PER_TICK (1000000000LL/HZ)
+
+-static void __clock_was_set(void *unused)
++static void __clock_was_set(struct work_struct *unused)
+ {
+ clock_was_set();
+ }
+-static DECLARE_WORK(clock_was_set_work, __clock_was_set, NULL);
++static DECLARE_WORK(clock_was_set_work, __clock_was_set);
+
+ static inline void __normalize_time(time_t *sec, s64 *nsec)
+ {
+@@ -525,10 +526,7 @@ static int set_rtc_mmss(unsigned long no
+ /* gets recalled with irq locally disabled */
+ /* XXX - does irqsave resolve this? -johnstul */
+ spin_lock_irqsave(&rtc_lock, flags);
+- if (efi_enabled)
+- retval = efi_set_rtc_mmss(nowtime);
+- else
+- retval = mach_set_rtc_mmss(nowtime);
++ retval = set_wallclock(nowtime);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ return retval;
+@@ -860,10 +858,7 @@ unsigned long get_cmos_time(void)
+
+ spin_lock_irqsave(&rtc_lock, flags);
+
+- if (efi_enabled)
+- retval = efi_get_time();
+- else
+- retval = mach_get_cmos_time();
++ retval = get_wallclock();
+
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+@@ -1007,7 +1002,7 @@ static void __init hpet_time_init(void)
+ printk("Using HPET for base-timer\n");
+ }
+
+- time_init_hook();
++ do_time_init();
+ }
+ #endif
+
+Index: 10.3-2007-11-26/arch/i386/kernel/traps-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/traps-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/traps-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -29,6 +29,8 @@
+ #include <linux/kexec.h>
+ #include <linux/unwind.h>
+ #include <linux/uaccess.h>
++#include <linux/nmi.h>
++#include <linux/bug.h>
+
+ #ifdef CONFIG_EISA
+ #include <linux/ioport.h>
+@@ -61,9 +63,6 @@ int panic_on_unrecovered_nmi;
+
+ asmlinkage int system_call(void);
+
+-struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
+- { 0, 0 }, { 0, 0 } };
+-
+ /* Do we ignore FPU interrupts ? */
+ char ignore_fpu_irq = 0;
+
+@@ -100,12 +99,7 @@ asmlinkage void fixup_4gb_segment(void);
+ #endif
+ asmlinkage void machine_check(void);
+
+-static int kstack_depth_to_print = 24;
+-#ifdef CONFIG_STACK_UNWIND
+-static int call_trace = 1;
+-#else
+-#define call_trace (-1)
+-#endif
++int kstack_depth_to_print = 24;
+ ATOMIC_NOTIFIER_HEAD(i386die_chain);
+
+ int register_die_notifier(struct notifier_block *nb)
+@@ -159,25 +153,7 @@ static inline unsigned long print_contex
+ return ebp;
+ }
+
+-struct ops_and_data {
+- struct stacktrace_ops *ops;
+- void *data;
+-};
+-
+-static asmlinkage int
+-dump_trace_unwind(struct unwind_frame_info *info, void *data)
+-{
+- struct ops_and_data *oad = (struct ops_and_data *)data;
+- int n = 0;
+-
+- while (unwind(info) == 0 && UNW_PC(info)) {
+- n++;
+- oad->ops->address(oad->data, UNW_PC(info));
+- if (arch_unw_user_mode(info))
+- break;
+- }
+- return n;
+-}
++#define MSG(msg) ops->warning(data, msg)
+
+ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *stack,
+@@ -188,39 +164,6 @@ void dump_trace(struct task_struct *task
+ if (!task)
+ task = current;
+
+- if (call_trace >= 0) {
+- int unw_ret = 0;
+- struct unwind_frame_info info;
+- struct ops_and_data oad = { .ops = ops, .data = data };
+-
+- if (regs) {
+- if (unwind_init_frame_info(&info, task, regs) == 0)
+- unw_ret = dump_trace_unwind(&info, &oad);
+- } else if (task == current)
+- unw_ret = unwind_init_running(&info, dump_trace_unwind, &oad);
+- else {
+- if (unwind_init_blocked(&info, task) == 0)
+- unw_ret = dump_trace_unwind(&info, &oad);
+- }
+- if (unw_ret > 0) {
+- if (call_trace == 1 && !arch_unw_user_mode(&info)) {
+- ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
+- UNW_PC(&info));
+- if (UNW_SP(&info) >= PAGE_OFFSET) {
+- ops->warning(data, "Leftover inexact backtrace:\n");
+- stack = (void *)UNW_SP(&info);
+- if (!stack)
+- return;
+- ebp = UNW_FP(&info);
+- } else
+- ops->warning(data, "Full inexact backtrace again:\n");
+- } else if (call_trace >= 1)
+- return;
+- else
+- ops->warning(data, "Full inexact backtrace again:\n");
+- } else
+- ops->warning(data, "Inexact backtrace:\n");
+- }
+ if (!stack) {
+ unsigned long dummy;
+ stack = &dummy;
+@@ -253,6 +196,7 @@ void dump_trace(struct task_struct *task
+ stack = (unsigned long*)context->previous_esp;
+ if (!stack)
+ break;
++ touch_nmi_watchdog();
+ }
+ }
+ EXPORT_SYMBOL(dump_trace);
+@@ -385,7 +329,7 @@ void show_registers(struct pt_regs *regs
+ * time of the fault..
+ */
+ if (in_kernel) {
+- u8 __user *eip;
++ u8 *eip;
+ int code_bytes = 64;
+ unsigned char c;
+
+@@ -394,18 +338,20 @@ void show_registers(struct pt_regs *regs
+
+ printk(KERN_EMERG "Code: ");
+
+- eip = (u8 __user *)regs->eip - 43;
+- if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
++ eip = (u8 *)regs->eip - 43;
++ if (eip < (u8 *)PAGE_OFFSET ||
++ probe_kernel_address(eip, c)) {
+ /* try starting at EIP */
+- eip = (u8 __user *)regs->eip;
++ eip = (u8 *)regs->eip;
+ code_bytes = 32;
+ }
+ for (i = 0; i < code_bytes; i++, eip++) {
+- if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
++ if (eip < (u8 *)PAGE_OFFSET ||
++ probe_kernel_address(eip, c)) {
+ printk(" Bad EIP value.");
+ break;
+ }
+- if (eip == (u8 __user *)regs->eip)
++ if (eip == (u8 *)regs->eip)
+ printk("<%02x> ", c);
+ else
+ printk("%02x ", c);
+@@ -414,43 +360,22 @@ void show_registers(struct pt_regs *regs
+ printk("\n");
+ }
+
+-static void handle_BUG(struct pt_regs *regs)
++int is_valid_bugaddr(unsigned long eip)
+ {
+- unsigned long eip = regs->eip;
+ unsigned short ud2;
+
+ if (eip < PAGE_OFFSET)
+- return;
+- if (probe_kernel_address((unsigned short __user *)eip, ud2))
+- return;
+- if (ud2 != 0x0b0f)
+- return;
++ return 0;
++ if (probe_kernel_address((unsigned short *)eip, ud2))
++ return 0;
+
+- printk(KERN_EMERG "------------[ cut here ]------------\n");
+-
+-#ifdef CONFIG_DEBUG_BUGVERBOSE
+- do {
+- unsigned short line;
+- char *file;
+- char c;
+-
+- if (probe_kernel_address((unsigned short __user *)(eip + 2),
+- line))
+- break;
+- if (__get_user(file, (char * __user *)(eip + 4)) ||
+- (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
+- file = "<bad filename>";
+-
+- printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
+- return;
+- } while (0);
+-#endif
+- printk(KERN_EMERG "Kernel BUG at [verbose debug info unavailable]\n");
++ return ud2 == 0x0b0f;
+ }
+
+-/* This is gone through when something in the kernel
+- * has done something bad and is about to be terminated.
+-*/
++/*
++ * This is gone through when something in the kernel has done something bad and
++ * is about to be terminated.
++ */
+ void die(const char * str, struct pt_regs * regs, long err)
+ {
+ static struct {
+@@ -458,7 +383,7 @@ void die(const char * str, struct pt_reg
+ u32 lock_owner;
+ int lock_owner_depth;
+ } die = {
+- .lock = SPIN_LOCK_UNLOCKED,
++ .lock = __SPIN_LOCK_UNLOCKED(die.lock),
+ .lock_owner = -1,
+ .lock_owner_depth = 0
+ };
+@@ -482,7 +407,8 @@ void die(const char * str, struct pt_reg
+ unsigned long esp;
+ unsigned short ss;
+
+- handle_BUG(regs);
++ report_bug(regs->eip);
++
+ printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+ #ifdef CONFIG_PREEMPT
+ printk(KERN_EMERG "PREEMPT ");
+@@ -682,8 +608,7 @@ mem_parity_error(unsigned char reason, s
+ {
+ printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
+ "CPU %d.\n", reason, smp_processor_id());
+- printk(KERN_EMERG "You probably have a hardware problem with your RAM "
+- "chips\n");
++ printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
+ if (panic_on_unrecovered_nmi)
+ panic("NMI: Not continuing");
+
+@@ -741,7 +666,6 @@ void __kprobes die_nmi(struct pt_regs *r
+ printk(" on CPU%d, eip %08lx, registers:\n",
+ smp_processor_id(), regs->eip);
+ show_registers(regs);
+- printk(KERN_EMERG "console shuts up ...\n");
+ console_silent();
+ spin_unlock(&nmi_print_lock);
+ bust_spinlocks(0);
+@@ -1057,49 +981,24 @@ fastcall void do_spurious_interrupt_bug(
+ #endif
+ }
+
+-fastcall void setup_x86_bogus_stack(unsigned char * stk)
++fastcall unsigned long patch_espfix_desc(unsigned long uesp,
++ unsigned long kesp)
+ {
+- unsigned long *switch16_ptr, *switch32_ptr;
+- struct pt_regs *regs;
+- unsigned long stack_top, stack_bot;
+- unsigned short iret_frame16_off;
+- int cpu = smp_processor_id();
+- /* reserve the space on 32bit stack for the magic switch16 pointer */
+- memmove(stk, stk + 8, sizeof(struct pt_regs));
+- switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
+- regs = (struct pt_regs *)stk;
+- /* now the switch32 on 16bit stack */
+- stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
+- stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
+- switch32_ptr = (unsigned long *)(stack_top - 8);
+- iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
+- /* copy iret frame on 16bit stack */
+- memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
+- /* fill in the switch pointers */
+- switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
+- switch16_ptr[1] = __ESPFIX_SS;
+- switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
+- 8 - CPU_16BIT_STACK_SIZE;
+- switch32_ptr[1] = __KERNEL_DS;
+-}
+-
+-fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
+-{
+- unsigned long *switch32_ptr;
+- unsigned char *stack16, *stack32;
+- unsigned long stack_top, stack_bot;
+- int len;
+ int cpu = smp_processor_id();
+- stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
+- stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
+- switch32_ptr = (unsigned long *)(stack_top - 8);
+- /* copy the data from 16bit stack to 32bit stack */
+- len = CPU_16BIT_STACK_SIZE - 8 - sp;
+- stack16 = (unsigned char *)(stack_bot + sp);
+- stack32 = (unsigned char *)
+- (switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
+- memcpy(stack32, stack16, len);
+- return stack32;
++ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++ struct desc_struct *gdt = (struct desc_struct *)cpu_gdt_descr->address;
++ unsigned long base = (kesp - uesp) & -THREAD_SIZE;
++ unsigned long new_kesp = kesp - base;
++ unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
++ __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
++ /* Set up base for espfix segment */
++ desc &= 0x00f0ff0000000000ULL;
++ desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
++ ((((__u64)base) << 32) & 0xff00000000000000ULL) |
++ ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
++ (lim_pages & 0xffff);
++ *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
++ return new_kesp;
+ }
+ #endif
+
+@@ -1113,7 +1012,7 @@ fastcall unsigned char * fixup_x86_bogus
+ * Must be called with kernel preemption disabled (in this case,
+ * local interrupts are disabled at the call-site in entry.S).
+ */
+-asmlinkage void math_state_restore(struct pt_regs regs)
++asmlinkage void math_state_restore(void)
+ {
+ struct thread_info *thread = current_thread_info();
+ struct task_struct *tsk = thread->task;
+@@ -1123,6 +1022,7 @@ asmlinkage void math_state_restore(struc
+ init_fpu(tsk);
+ restore_fpu(tsk);
+ thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
++ tsk->fpu_counter++;
+ }
+
+ #ifndef CONFIG_MATH_EMULATION
+@@ -1230,19 +1130,3 @@ static int __init kstack_setup(char *s)
+ return 1;
+ }
+ __setup("kstack=", kstack_setup);
+-
+-#ifdef CONFIG_STACK_UNWIND
+-static int __init call_trace_setup(char *s)
+-{
+- if (strcmp(s, "old") == 0)
+- call_trace = -1;
+- else if (strcmp(s, "both") == 0)
+- call_trace = 0;
+- else if (strcmp(s, "newfallback") == 0)
+- call_trace = 1;
+- else if (strcmp(s, "new") == 2)
+- call_trace = 2;
+- return 1;
+-}
+-__setup("call_trace=", call_trace_setup);
+-#endif
+Index: 10.3-2007-11-26/arch/i386/kernel/vmlinux.lds.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/vmlinux.lds.S 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/vmlinux.lds.S 2007-10-22 13:53:25.000000000 +0200
+@@ -35,6 +35,12 @@ PHDRS {
+ SECTIONS
+ {
+ . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
++
++#if defined(CONFIG_XEN) && CONFIG_XEN_COMPAT <= 0x030002
++#undef LOAD_OFFSET
++#define LOAD_OFFSET 0
++#endif
++
+ phys_startup_32 = startup_32 - LOAD_OFFSET;
+
+ .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
+Index: 10.3-2007-11-26/arch/i386/mm/fault-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/fault-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/mm/fault-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -22,9 +22,9 @@
+ #include <linux/highmem.h>
+ #include <linux/module.h>
+ #include <linux/kprobes.h>
++#include <linux/uaccess.h>
+
+ #include <asm/system.h>
+-#include <asm/uaccess.h>
+ #include <asm/desc.h>
+ #include <asm/kdebug.h>
+ #include <asm/segment.h>
+@@ -167,7 +167,7 @@ static inline unsigned long get_segment_
+ static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
+ {
+ unsigned long limit;
+- unsigned long instr = get_segment_eip (regs, &limit);
++ unsigned char *instr = (unsigned char *)get_segment_eip (regs, &limit);
+ int scan_more = 1;
+ int prefetch = 0;
+ int i;
+@@ -177,9 +177,9 @@ static int __is_prefetch(struct pt_regs
+ unsigned char instr_hi;
+ unsigned char instr_lo;
+
+- if (instr > limit)
++ if (instr > (unsigned char *)limit)
+ break;
+- if (__get_user(opcode, (unsigned char __user *) instr))
++ if (probe_kernel_address(instr, opcode))
+ break;
+
+ instr_hi = opcode & 0xf0;
+@@ -204,9 +204,9 @@ static int __is_prefetch(struct pt_regs
+ case 0x00:
+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
+ scan_more = 0;
+- if (instr > limit)
++ if (instr > (unsigned char *)limit)
+ break;
+- if (__get_user(opcode, (unsigned char __user *) instr))
++ if (probe_kernel_address(instr, opcode))
+ break;
+ prefetch = (instr_lo == 0xF) &&
+ (opcode == 0x0D || opcode == 0x18);
+Index: 10.3-2007-11-26/arch/i386/mm/highmem-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/highmem-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/mm/highmem-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -32,7 +32,7 @@ static void *__kmap_atomic(struct page *
+ unsigned long vaddr;
+
+ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+- inc_preempt_count();
++ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+@@ -63,26 +63,22 @@ void kunmap_atomic(void *kvaddr, enum km
+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
+- dec_preempt_count();
+- preempt_check_resched();
+- return;
+- }
+-
+- if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
+- BUG();
+-#endif
+ /*
+ * Force other mappings to Oops if they'll try to access this pte
+ * without first remap it. Keeping stale mappings around is a bad idea
+ * also, in case the page changes cacheability attributes or becomes
+ * a protected page in a hypervisor.
+ */
+- kpte_clear_flush(kmap_pte-idx, vaddr);
++ if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
++ kpte_clear_flush(kmap_pte-idx, vaddr);
++ else {
++#ifdef CONFIG_DEBUG_HIGHMEM
++ BUG_ON(vaddr < PAGE_OFFSET);
++ BUG_ON(vaddr >= (unsigned long)high_memory);
++#endif
++ }
+
+- dec_preempt_count();
+- preempt_check_resched();
++ pagefault_enable();
+ }
+
+ /* This is the same as kmap_atomic() but can map memory that doesn't
+@@ -93,7 +89,7 @@ void *kmap_atomic_pfn(unsigned long pfn,
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+
+- inc_preempt_count();
++ pagefault_disable();
+
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+Index: 10.3-2007-11-26/arch/i386/mm/init-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/init-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/mm/init-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -235,8 +235,6 @@ static inline int page_kills_ppro(unsign
+
+ #endif
+
+-extern int is_available_memory(efi_memory_desc_t *);
+-
+ int page_is_ram(unsigned long pagenr)
+ {
+ int i;
+@@ -329,7 +327,7 @@ void __init add_one_highpage_init(struct
+ SetPageReserved(page);
+ }
+
+-static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
++static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
+ {
+ free_new_highpage(page, pfn);
+ totalram_pages++;
+@@ -346,7 +344,7 @@ static int add_one_highpage_hotplug(stru
+ * has been added dynamically that would be
+ * onlined here is in HIGHMEM
+ */
+-void online_page(struct page *page)
++void __meminit online_page(struct page *page)
+ {
+ ClearPageReserved(page);
+ add_one_highpage_hotplug(page, page_to_pfn(page));
+@@ -746,16 +744,10 @@ void __init mem_init(void)
+ set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
+ }
+
+-/*
+- * this is for the non-NUMA, single node SMP system case.
+- * Specifically, in the case of x86, we will always add
+- * memory to the highmem for now.
+- */
+ #ifdef CONFIG_MEMORY_HOTPLUG
+-#ifndef CONFIG_NEED_MULTIPLE_NODES
+ int arch_add_memory(int nid, u64 start, u64 size)
+ {
+- struct pglist_data *pgdata = &contig_page_data;
++ struct pglist_data *pgdata = NODE_DATA(nid);
+ struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+@@ -767,11 +759,11 @@ int remove_memory(u64 start, u64 size)
+ {
+ return -EINVAL;
+ }
+-#endif
++EXPORT_SYMBOL_GPL(remove_memory);
+ #endif
+
+-kmem_cache_t *pgd_cache;
+-kmem_cache_t *pmd_cache;
++struct kmem_cache *pgd_cache;
++struct kmem_cache *pmd_cache;
+
+ void __init pgtable_cache_init(void)
+ {
+Index: 10.3-2007-11-26/arch/i386/mm/pgtable-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/pgtable-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/mm/pgtable-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -277,7 +277,7 @@ void pte_free(struct page *pte)
+ __free_page(pte);
+ }
+
+-void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
++void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
+ {
+ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
+ }
+@@ -317,7 +317,7 @@ static inline void pgd_list_del(pgd_t *p
+ set_page_private(next, (unsigned long)pprev);
+ }
+
+-void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
++void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
+ {
+ unsigned long flags;
+
+@@ -338,7 +338,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
+ }
+
+ /* never called when PTRS_PER_PMD > 1 */
+-void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
++void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
+ {
+ unsigned long flags; /* can be called from interrupt context */
+
+Index: 10.3-2007-11-26/arch/i386/pci/irq-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/pci/irq-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/pci/irq-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -547,6 +547,12 @@ static __init int intel_router_probe(str
+ case PCI_DEVICE_ID_INTEL_ICH8_2:
+ case PCI_DEVICE_ID_INTEL_ICH8_3:
+ case PCI_DEVICE_ID_INTEL_ICH8_4:
++ case PCI_DEVICE_ID_INTEL_ICH9_0:
++ case PCI_DEVICE_ID_INTEL_ICH9_1:
++ case PCI_DEVICE_ID_INTEL_ICH9_2:
++ case PCI_DEVICE_ID_INTEL_ICH9_3:
++ case PCI_DEVICE_ID_INTEL_ICH9_4:
++ case PCI_DEVICE_ID_INTEL_ICH9_5:
+ r->name = "PIIX/ICH";
+ r->get = pirq_piix_get;
+ r->set = pirq_piix_set;
+@@ -762,7 +768,7 @@ static void __init pirq_find_router(stru
+ DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for %04x:%04x\n",
+ rt->rtr_vendor, rt->rtr_device);
+
+- pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn);
++ pirq_router_dev = pci_get_bus_and_slot(rt->rtr_bus, rt->rtr_devfn);
+ if (!pirq_router_dev) {
+ DBG(KERN_DEBUG "PCI: Interrupt router not found at "
+ "%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
+@@ -782,6 +788,8 @@ static void __init pirq_find_router(stru
+ pirq_router_dev->vendor,
+ pirq_router_dev->device,
+ pci_name(pirq_router_dev));
++
++ /* The device remains referenced for the kernel lifetime */
+ }
+
+ static struct irq_info *pirq_get_info(struct pci_dev *dev)
+Index: 10.3-2007-11-26/arch/x86_64/ia32/syscall32-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/ia32/syscall32-xen.c 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/ia32/syscall32-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -54,7 +54,7 @@ int syscall32_setup_pages(struct linux_b
+ struct mm_struct *mm = current->mm;
+ int ret;
+
+- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++ vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ if (!vma)
+ return -ENOMEM;
+
+@@ -64,6 +64,13 @@ int syscall32_setup_pages(struct linux_b
+ vma->vm_end = VSYSCALL32_END;
+ /* MAYWRITE to allow gdb to COW and set breakpoints */
+ vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
++ /*
++ * Make sure the vDSO gets into every core dump.
++ * Dumping its contents makes post-mortem fully interpretable later
++ * without matching up the same kernel and hardware config to see
++ * what PC values meant.
++ */
++ vma->vm_flags |= VM_ALWAYSDUMP;
+ vma->vm_flags |= mm->def_flags;
+ vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+ vma->vm_ops = &syscall32_vm_ops;
+@@ -80,6 +87,14 @@ int syscall32_setup_pages(struct linux_b
+ return 0;
+ }
+
++const char *arch_vma_name(struct vm_area_struct *vma)
++{
++ if (vma->vm_start == VSYSCALL32_BASE &&
++ vma->vm_mm && vma->vm_mm->task_size == IA32_PAGE_OFFSET)
++ return "[vdso]";
++ return NULL;
++}
++
+ static int __init init_syscall32(void)
+ {
+ syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
+Index: 10.3-2007-11-26/arch/x86_64/kernel/entry-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/entry-xen.S 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/entry-xen.S 2007-10-22 13:53:25.000000000 +0200
+@@ -268,7 +268,6 @@ ENTRY(system_call)
+ XEN_UNBLOCK_EVENTS(%r11)
+ GET_THREAD_INFO(%rcx)
+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
+- CFI_REMEMBER_STATE
+ jnz tracesys
+ cmpq $__NR_syscall_max,%rax
+ ja badsys
+@@ -279,7 +278,6 @@ ENTRY(system_call)
+ * Syscall return path ending with SYSRET (fast path)
+ * Has incomplete stack frame and undefined top of stack.
+ */
+- .globl ret_from_sys_call
+ ret_from_sys_call:
+ movl $_TIF_ALLWORK_MASK,%edi
+ /* edi: flagmask */
+@@ -289,8 +287,8 @@ sysret_check:
+ TRACE_IRQS_OFF
+ movl threadinfo_flags(%rcx),%edx
+ andl %edi,%edx
+- CFI_REMEMBER_STATE
+ jnz sysret_careful
++ CFI_REMEMBER_STATE
+ /*
+ * sysretq will re-enable interrupts:
+ */
+@@ -299,10 +297,10 @@ sysret_check:
+ RESTORE_ARGS 0,8,0
+ HYPERVISOR_IRET VGCF_IN_SYSCALL
+
++ CFI_RESTORE_STATE
+ /* Handle reschedules */
+ /* edx: work, edi: workmask */
+ sysret_careful:
+- CFI_RESTORE_STATE
+ bt $TIF_NEED_RESCHED,%edx
+ jnc sysret_signal
+ TRACE_IRQS_ON
+@@ -341,7 +339,6 @@ badsys:
+
+ /* Do syscall tracing */
+ tracesys:
+- CFI_RESTORE_STATE
+ SAVE_REST
+ movq $-ENOSYS,RAX(%rsp)
+ FIXUP_TOP_OF_STACK %rdi
+@@ -357,32 +354,13 @@ tracesys:
+ call *sys_call_table(,%rax,8)
+ 1: movq %rax,RAX-ARGOFFSET(%rsp)
+ /* Use IRET because user could have changed frame */
+- jmp int_ret_from_sys_call
+- CFI_ENDPROC
+-END(system_call)
+
+ /*
+ * Syscall return path ending with IRET.
+ * Has correct top of stack, but partial stack frame.
+- */
+-ENTRY(int_ret_from_sys_call)
+- CFI_STARTPROC simple
+- CFI_SIGNAL_FRAME
+- CFI_DEF_CFA rsp,SS+8-ARGOFFSET
+- /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
+- CFI_REL_OFFSET rsp,RSP-ARGOFFSET
+- /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
+- /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
+- CFI_REL_OFFSET rip,RIP-ARGOFFSET
+- CFI_REL_OFFSET rdx,RDX-ARGOFFSET
+- CFI_REL_OFFSET rcx,RCX-ARGOFFSET
+- CFI_REL_OFFSET rax,RAX-ARGOFFSET
+- CFI_REL_OFFSET rdi,RDI-ARGOFFSET
+- CFI_REL_OFFSET rsi,RSI-ARGOFFSET
+- CFI_REL_OFFSET r8,R8-ARGOFFSET
+- CFI_REL_OFFSET r9,R9-ARGOFFSET
+- CFI_REL_OFFSET r10,R10-ARGOFFSET
+- CFI_REL_OFFSET r11,R11-ARGOFFSET
++ */
++ .globl int_ret_from_sys_call
++int_ret_from_sys_call:
+ XEN_BLOCK_EVENTS(%rsi)
+ TRACE_IRQS_OFF
+ testb $3,CS-ARGOFFSET(%rsp)
+@@ -435,8 +413,6 @@ int_very_careful:
+ popq %rdi
+ CFI_ADJUST_CFA_OFFSET -8
+ andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
+- XEN_BLOCK_EVENTS(%rsi)
+- TRACE_IRQS_OFF
+ jmp int_restore_rest
+
+ int_signal:
+@@ -452,7 +428,7 @@ int_restore_rest:
+ TRACE_IRQS_OFF
+ jmp int_with_check
+ CFI_ENDPROC
+-END(int_ret_from_sys_call)
++END(system_call)
+
+ /*
+ * Certain special system calls that need to save a complete full stack frame.
+@@ -1282,36 +1258,3 @@ ENTRY(call_softirq)
+ ret
+ CFI_ENDPROC
+ ENDPROC(call_softirq)
+-
+-#ifdef CONFIG_STACK_UNWIND
+-ENTRY(arch_unwind_init_running)
+- CFI_STARTPROC
+- movq %r15, R15(%rdi)
+- movq %r14, R14(%rdi)
+- xchgq %rsi, %rdx
+- movq %r13, R13(%rdi)
+- movq %r12, R12(%rdi)
+- xorl %eax, %eax
+- movq %rbp, RBP(%rdi)
+- movq %rbx, RBX(%rdi)
+- movq (%rsp), %rcx
+- movq %rax, R11(%rdi)
+- movq %rax, R10(%rdi)
+- movq %rax, R9(%rdi)
+- movq %rax, R8(%rdi)
+- movq %rax, RAX(%rdi)
+- movq %rax, RCX(%rdi)
+- movq %rax, RDX(%rdi)
+- movq %rax, RSI(%rdi)
+- movq %rax, RDI(%rdi)
+- movq %rax, ORIG_RAX(%rdi)
+- movq %rcx, RIP(%rdi)
+- leaq 8(%rsp), %rcx
+- movq $__KERNEL_CS, CS(%rdi)
+- movq %rax, EFLAGS(%rdi)
+- movq %rcx, RSP(%rdi)
+- movq $__KERNEL_DS, SS(%rdi)
+- jmpq *%rdx
+- CFI_ENDPROC
+-ENDPROC(arch_unwind_init_running)
+-#endif
+Index: 10.3-2007-11-26/arch/x86_64/kernel/genapic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/genapic-xen.c 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/genapic-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -34,6 +34,7 @@ extern struct genapic apic_physflat;
+
+ #ifndef CONFIG_XEN
+ struct genapic *genapic = &apic_flat;
++struct genapic *genapic_force;
+ #else
+ extern struct genapic apic_xen;
+ struct genapic *genapic = &apic_xen;
+@@ -52,6 +53,13 @@ void __init clustered_apic_check(void)
+ u8 cluster_cnt[NUM_APIC_CLUSTERS];
+ int max_apic = 0;
+
++ /* genapic selection can be forced because of certain quirks.
++ */
++ if (genapic_force) {
++ genapic = genapic_force;
++ goto print;
++ }
++
+ #if defined(CONFIG_ACPI)
+ /*
+ * Some x86_64 machines use physical APIC mode regardless of how many
+Index: 10.3-2007-11-26/arch/x86_64/kernel/head64-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/head64-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/head64-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -101,7 +101,10 @@ void __init x86_64_start_kernel(char * r
+ machine_to_phys_order++;
+
+ #if 0
+- for (i = 0; i < 256; i++)
++ /* clear bss before set_intr_gate with early_idt_handler */
++ clear_bss();
++
++ for (i = 0; i < IDT_ENTRIES; i++)
+ set_intr_gate(i, early_idt_handler);
+ asm volatile("lidt %0" :: "m" (idt_descr));
+ #endif
+Index: 10.3-2007-11-26/arch/x86_64/kernel/io_apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/io_apic-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/io_apic-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -199,14 +199,20 @@ static struct IO_APIC_route_entry ioapic
+ * the interrupt, and we need to make sure the entry is fully populated
+ * before that happens.
+ */
+-static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
++static void
++__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+ {
+- unsigned long flags;
+ union entry_union eu;
+ eu.entry = e;
+- spin_lock_irqsave(&ioapic_lock, flags);
+ io_apic_write(apic, 0x11 + 2*pin, eu.w2);
+ io_apic_write(apic, 0x10 + 2*pin, eu.w1);
++}
++
++static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __ioapic_write_entry(apic, pin, e);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+
+@@ -714,6 +720,22 @@ static int assign_irq_vector(int irq, cp
+ }
+
+ #ifndef CONFIG_XEN
++static void __clear_irq_vector(int irq)
++{
++ cpumask_t mask;
++ int cpu, vector;
++
++ BUG_ON(!irq_vector[irq]);
++
++ vector = irq_vector[irq];
++ cpus_and(mask, irq_domain[irq], cpu_online_map);
++ for_each_cpu_mask(cpu, mask)
++ per_cpu(vector_irq, cpu)[vector] = -1;
++
++ irq_vector[irq] = 0;
++ irq_domain[irq] = CPU_MASK_NONE;
++}
++
+ void __setup_vector_irq(int cpu)
+ {
+ /* Initialize vector_irq on a new cpu */
+@@ -761,26 +783,65 @@ static void ioapic_register_intr(int irq
+ #define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
+ #endif /* !CONFIG_XEN */
+
+-static void __init setup_IO_APIC_irqs(void)
++static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq)
+ {
+ struct IO_APIC_route_entry entry;
+- int apic, pin, idx, irq, first_notcon = 1, vector;
++ int vector;
+ unsigned long flags;
+
+- apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
+
+- for (apic = 0; apic < nr_ioapics; apic++) {
+- for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ /*
++ * add it to the IO-APIC irq-routing table:
++ */
++ memset(&entry,0,sizeof(entry));
+
+- /*
+- * add it to the IO-APIC irq-routing table:
+- */
+- memset(&entry,0,sizeof(entry));
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* enable IRQ */
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
+
+- entry.delivery_mode = INT_DELIVERY_MODE;
+- entry.dest_mode = INT_DEST_MODE;
+- entry.mask = 0; /* enable IRQ */
++ entry.trigger = irq_trigger(idx);
++ entry.polarity = irq_polarity(idx);
++
++ if (irq_trigger(idx)) {
++ entry.trigger = 1;
++ entry.mask = 1;
+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ }
++
++ if (/* !apic && */ !IO_APIC_IRQ(irq))
++ return;
++
++ if (IO_APIC_IRQ(irq)) {
++ cpumask_t mask;
++ vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
++ if (vector < 0)
++ return;
++
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
++ entry.vector = vector;
++
++ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
++ if (!apic && (irq < 16))
++ disable_8259A_irq(irq);
++ }
++
++ ioapic_write_entry(apic, pin, entry);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ set_native_irq_info(irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++}
++
++static void __init setup_IO_APIC_irqs(void)
++{
++ int apic, pin, idx, irq, first_notcon = 1;
++
++ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+
+ idx = find_irq_entry(apic,pin,mp_INT);
+ if (idx == -1) {
+@@ -792,39 +853,11 @@ static void __init setup_IO_APIC_irqs(vo
+ continue;
+ }
+
+- entry.trigger = irq_trigger(idx);
+- entry.polarity = irq_polarity(idx);
+-
+- if (irq_trigger(idx)) {
+- entry.trigger = 1;
+- entry.mask = 1;
+- entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
+- }
+-
+ irq = pin_2_irq(idx, apic, pin);
+ add_pin_to_irq(irq, apic, pin);
+
+- if (/* !apic && */ !IO_APIC_IRQ(irq))
+- continue;
+-
+- if (IO_APIC_IRQ(irq)) {
+- cpumask_t mask;
+- vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
+- if (vector < 0)
+- continue;
+-
+- entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
+- entry.vector = vector;
++ setup_IO_APIC_irq(apic, pin, idx, irq);
+
+- ioapic_register_intr(irq, vector, IOAPIC_AUTO);
+- if (!apic && (irq < 16))
+- disable_8259A_irq(irq);
+- }
+- ioapic_write_entry(apic, pin, entry);
+-
+- spin_lock_irqsave(&ioapic_lock, flags);
+- set_native_irq_info(irq, TARGET_CPUS);
+- spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+ }
+
+@@ -1826,7 +1859,7 @@ void destroy_irq(unsigned int irq)
+ dynamic_irq_cleanup(irq);
+
+ spin_lock_irqsave(&vector_lock, flags);
+- irq_vector[irq] = 0;
++ __clear_irq_vector(irq);
+ spin_unlock_irqrestore(&vector_lock, flags);
+ }
+ #endif
+@@ -2130,7 +2163,15 @@ void __init setup_ioapic_dest(void)
+ if (irq_entry == -1)
+ continue;
+ irq = pin_2_irq(irq_entry, ioapic, pin);
+- set_ioapic_affinity_irq(irq, TARGET_CPUS);
++
++ /* setup_IO_APIC_irqs could fail to get vector for some device
++ * when you have too many devices, because at that time only boot
++ * cpu is online.
++ */
++ if(!irq_vector[irq])
++ setup_IO_APIC_irq(ioapic, pin, irq_entry, irq);
++ else
++ set_ioapic_affinity_irq(irq, TARGET_CPUS);
+ }
+
+ }
+Index: 10.3-2007-11-26/arch/x86_64/kernel/irq-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/irq-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/irq-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -120,7 +120,7 @@ asmlinkage unsigned int do_IRQ(struct pt
+
+ if (likely(irq < NR_IRQS))
+ generic_handle_irq(irq);
+- else
++ else if (printk_ratelimit())
+ printk(KERN_EMERG "%s: %d.%d No irq handler for irq\n",
+ __func__, smp_processor_id(), irq);
+
+Index: 10.3-2007-11-26/arch/x86_64/kernel/mpparse-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/mpparse-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/mpparse-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -35,8 +35,6 @@
+ int smp_found_config;
+ unsigned int __initdata maxcpus = NR_CPUS;
+
+-int acpi_found_madt;
+-
+ /*
+ * Various Linux-internal data structures created from the
+ * MP-table.
+Index: 10.3-2007-11-26/arch/x86_64/kernel/process-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/process-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/process-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -119,29 +119,23 @@ void exit_idle(void)
+ static void poll_idle (void)
+ {
+ local_irq_enable();
+-
+- asm volatile(
+- "2:"
+- "testl %0,%1;"
+- "rep; nop;"
+- "je 2b;"
+- : :
+- "i" (_TIF_NEED_RESCHED),
+- "m" (current_thread_info()->flags));
++ cpu_relax();
+ }
+
+ static void xen_idle(void)
+ {
++ current_thread_info()->status &= ~TS_POLLING;
++ /*
++ * TS_POLLING-cleared state must be visible before we
++ * test NEED_RESCHED:
++ */
++ smp_mb();
+ local_irq_disable();
+-
+- if (need_resched())
+- local_irq_enable();
+- else {
+- current_thread_info()->status &= ~TS_POLLING;
+- smp_mb__after_clear_bit();
++ if (!need_resched())
+ safe_halt();
+- current_thread_info()->status |= TS_POLLING;
+- }
++ else
++ local_irq_enable();
++ current_thread_info()->status |= TS_POLLING;
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -181,6 +175,12 @@ void cpu_idle (void)
+ idle = xen_idle; /* no alternatives */
+ if (cpu_is_offline(smp_processor_id()))
+ play_dead();
++ /*
++ * Idle routines should keep interrupts disabled
++ * from here on, until they go to idle.
++ * Otherwise, idle callbacks can misfire.
++ */
++ local_irq_disable();
+ enter_idle();
+ idle();
+ /* In many cases the interrupt that ended idle
+Index: 10.3-2007-11-26/arch/x86_64/kernel/setup-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/setup-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/setup-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -597,8 +597,7 @@ void __init setup_arch(char **cmdline_p)
+ if (LOADER_TYPE && INITRD_START) {
+ if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
+ reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
+- initrd_start =
+- INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
++ initrd_start = INITRD_START + PAGE_OFFSET;
+ initrd_end = initrd_start+INITRD_SIZE;
+ }
+ else {
+@@ -994,11 +993,8 @@ static void __cpuinit init_amd(struct cp
+ /* Fix cpuid4 emulation for more */
+ num_cache_leaves = 3;
+
+- /* When there is only one core no need to synchronize RDTSC */
+- if (num_possible_cpus() == 1)
+- set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+- else
+- clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
++ /* RDTSC can be speculated around */
++ clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+ }
+
+ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+@@ -1097,6 +1093,15 @@ static void __cpuinit init_intel(struct
+ set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
+ }
+
++ if (cpu_has_ds) {
++ unsigned int l1, l2;
++ rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
++ if (!(l1 & (1<<11)))
++ set_bit(X86_FEATURE_BTS, c->x86_capability);
++ if (!(l1 & (1<<12)))
++ set_bit(X86_FEATURE_PEBS, c->x86_capability);
++ }
++
+ n = c->extended_cpuid_level;
+ if (n >= 0x80000008) {
+ unsigned eax = cpuid_eax(0x80000008);
+@@ -1116,7 +1121,10 @@ static void __cpuinit init_intel(struct
+ set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+ if (c->x86 == 6)
+ set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
+- set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
++ if (c->x86 == 15)
++ set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
++ else
++ clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+ c->x86_max_cores = intel_num_cpu_cores(c);
+
+ srat_detect_node();
+Index: 10.3-2007-11-26/arch/x86_64/kernel/smp-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/smp-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/smp-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -397,12 +397,17 @@ int smp_call_function_single (int cpu, v
+ put_cpu();
+ return 0;
+ }
++
++ /* Can deadlock when called with interrupts disabled */
++ WARN_ON(irqs_disabled());
++
+ spin_lock_bh(&call_lock);
+ __smp_call_function_single(cpu, func, info, nonatomic, wait);
+ spin_unlock_bh(&call_lock);
+ put_cpu();
+ return 0;
+ }
++EXPORT_SYMBOL(smp_call_function_single);
+
+ /*
+ * this function sends a 'generic call function' IPI to all other CPUs
+Index: 10.3-2007-11-26/arch/x86_64/kernel/traps-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/traps-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/traps-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -30,9 +30,10 @@
+ #include <linux/kprobes.h>
+ #include <linux/kexec.h>
+ #include <linux/unwind.h>
++#include <linux/uaccess.h>
++#include <linux/bug.h>
+
+ #include <asm/system.h>
+-#include <asm/uaccess.h>
+ #include <asm/io.h>
+ #include <asm/atomic.h>
+ #include <asm/debugreg.h>
+@@ -108,12 +109,7 @@ static inline void preempt_conditional_c
+ preempt_enable_no_resched();
+ }
+
+-static int kstack_depth_to_print = 12;
+-#ifdef CONFIG_STACK_UNWIND
+-static int call_trace = 1;
+-#else
+-#define call_trace (-1)
+-#endif
++int kstack_depth_to_print = 12;
+
+ #ifdef CONFIG_KALLSYMS
+ void printk_address(unsigned long address)
+@@ -218,24 +214,7 @@ static unsigned long *in_exception_stack
+ return NULL;
+ }
+
+-struct ops_and_data {
+- struct stacktrace_ops *ops;
+- void *data;
+-};
+-
+-static int dump_trace_unwind(struct unwind_frame_info *info, void *context)
+-{
+- struct ops_and_data *oad = (struct ops_and_data *)context;
+- int n = 0;
+-
+- while (unwind(info) == 0 && UNW_PC(info)) {
+- n++;
+- oad->ops->address(oad->data, UNW_PC(info));
+- if (arch_unw_user_mode(info))
+- break;
+- }
+- return n;
+-}
++#define MSG(txt) ops->warning(data, txt)
+
+ /*
+ * x86-64 can have upto three kernel stacks:
+@@ -250,61 +229,24 @@ static inline int valid_stack_ptr(struct
+ return p > t && p < t + THREAD_SIZE - 3;
+ }
+
+-void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack,
++void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
++ unsigned long *stack,
+ struct stacktrace_ops *ops, void *data)
+ {
+- const unsigned cpu = smp_processor_id();
+- unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
++ const unsigned cpu = get_cpu();
++ unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr;
+ unsigned used = 0;
+ struct thread_info *tinfo;
+
+ if (!tsk)
+ tsk = current;
+
+- if (call_trace >= 0) {
+- int unw_ret = 0;
+- struct unwind_frame_info info;
+- struct ops_and_data oad = { .ops = ops, .data = data };
+-
+- if (regs) {
+- if (unwind_init_frame_info(&info, tsk, regs) == 0)
+- unw_ret = dump_trace_unwind(&info, &oad);
+- } else if (tsk == current)
+- unw_ret = unwind_init_running(&info, dump_trace_unwind, &oad);
+- else {
+- if (unwind_init_blocked(&info, tsk) == 0)
+- unw_ret = dump_trace_unwind(&info, &oad);
+- }
+- if (unw_ret > 0) {
+- if (call_trace == 1 && !arch_unw_user_mode(&info)) {
+- ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
+- UNW_PC(&info));
+- if ((long)UNW_SP(&info) < 0) {
+- ops->warning(data, "Leftover inexact backtrace:\n");
+- stack = (unsigned long *)UNW_SP(&info);
+- if (!stack)
+- return;
+- } else
+- ops->warning(data, "Full inexact backtrace again:\n");
+- } else if (call_trace >= 1)
+- return;
+- else
+- ops->warning(data, "Full inexact backtrace again:\n");
+- } else
+- ops->warning(data, "Inexact backtrace:\n");
+- }
+ if (!stack) {
+ unsigned long dummy;
+ stack = &dummy;
+ if (tsk && tsk != current)
+ stack = (unsigned long *)tsk->thread.rsp;
+ }
+- /*
+- * Align the stack pointer on word boundary, later loops
+- * rely on that (and corruption / debug info bugs can cause
+- * unaligned values here):
+- */
+- stack = (unsigned long *)((unsigned long)stack & ~(sizeof(long)-1));
+
+ /*
+ * Print function call entries within a stack. 'cond' is the
+@@ -314,9 +256,9 @@ void dump_trace(struct task_struct *tsk,
+ #define HANDLE_STACK(cond) \
+ do while (cond) { \
+ unsigned long addr = *stack++; \
+- if (oops_in_progress ? \
+- __kernel_text_address(addr) : \
+- kernel_text_address(addr)) { \
++ /* Use unlocked access here because except for NMIs \
++ we should be already protected against module unloads */ \
++ if (__kernel_text_address(addr)) { \
+ /* \
+ * If the address is either in the text segment of the \
+ * kernel, or in the region which contains vmalloc'ed \
+@@ -379,9 +321,10 @@ void dump_trace(struct task_struct *tsk,
+ /*
+ * This handles the process stack:
+ */
+- tinfo = current_thread_info();
++ tinfo = task_thread_info(tsk);
+ HANDLE_STACK (valid_stack_ptr(tinfo, stack));
+ #undef HANDLE_STACK
++ put_cpu();
+ }
+ EXPORT_SYMBOL(dump_trace);
+
+@@ -518,30 +461,15 @@ bad:
+ printk("\n");
+ }
+
+-void handle_BUG(struct pt_regs *regs)
+-{
+- struct bug_frame f;
+- long len;
+- const char *prefix = "";
++int is_valid_bugaddr(unsigned long rip)
++{
++ unsigned short ud2;
+
+- if (user_mode(regs))
+- return;
+- if (__copy_from_user(&f, (const void __user *) regs->rip,
+- sizeof(struct bug_frame)))
+- return;
+- if (f.filename >= 0 ||
+- f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
+- return;
+- len = __strnlen_user((char *)(long)f.filename, PATH_MAX) - 1;
+- if (len < 0 || len >= PATH_MAX)
+- f.filename = (int)(long)"unmapped filename";
+- else if (len > 50) {
+- f.filename += len - 50;
+- prefix = "...";
+- }
+- printk("----------- [cut here ] --------- [please bite here ] ---------\n");
+- printk(KERN_ALERT "Kernel BUG at %s%.50s:%d\n", prefix, (char *)(long)f.filename, f.line);
+-}
++ if (__copy_from_user(&ud2, (const void __user *) rip, sizeof(ud2)))
++ return 0;
++
++ return ud2 == 0x0b0f;
++}
+
+ #ifdef CONFIG_BUG
+ void out_of_line_bug(void)
+@@ -621,7 +549,9 @@ void die(const char * str, struct pt_reg
+ {
+ unsigned long flags = oops_begin();
+
+- handle_BUG(regs);
++ if (!user_mode(regs))
++ report_bug(regs->rip);
++
+ __die(str, regs, err);
+ oops_end(flags);
+ do_exit(SIGSEGV);
+@@ -790,8 +720,7 @@ mem_parity_error(unsigned char reason, s
+ {
+ printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
+ reason);
+- printk(KERN_EMERG "You probably have a hardware problem with your "
+- "RAM chips\n");
++ printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
+
+ if (panic_on_unrecovered_nmi)
+ panic("NMI: Not continuing");
+@@ -1229,21 +1158,3 @@ static int __init kstack_setup(char *s)
+ return 0;
+ }
+ early_param("kstack", kstack_setup);
+-
+-#ifdef CONFIG_STACK_UNWIND
+-static int __init call_trace_setup(char *s)
+-{
+- if (!s)
+- return -EINVAL;
+- if (strcmp(s, "old") == 0)
+- call_trace = -1;
+- else if (strcmp(s, "both") == 0)
+- call_trace = 0;
+- else if (strcmp(s, "newfallback") == 0)
+- call_trace = 1;
+- else if (strcmp(s, "new") == 0)
+- call_trace = 2;
+- return 0;
+-}
+-early_param("call_trace", call_trace_setup);
+-#endif
+Index: 10.3-2007-11-26/arch/x86_64/kernel/vsyscall-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/vsyscall-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/vsyscall-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -42,6 +42,7 @@
+ #include <asm/topology.h>
+
+ #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
++#define __syscall_clobber "r11","rcx","memory"
+
+ int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
+ seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
+@@ -224,8 +225,7 @@ out:
+
+ static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
+ void __user *oldval, size_t __user *oldlenp,
+- void __user *newval, size_t newlen,
+- void **context)
++ void __user *newval, size_t newlen)
+ {
+ return -ENOSYS;
+ }
+@@ -274,7 +274,6 @@ static void __cpuinit cpu_vsyscall_init(
+ vsyscall_set_cpu(raw_smp_processor_id());
+ }
+
+-#ifdef CONFIG_HOTPLUG_CPU
+ static int __cpuinit
+ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
+ {
+@@ -283,13 +282,13 @@ cpu_vsyscall_notifier(struct notifier_bl
+ smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
+ return NOTIFY_DONE;
+ }
+-#endif
+
+ static void __init map_vsyscall(void)
+ {
+ extern char __vsyscall_0;
+ unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
+
++ /* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
+ }
+
+Index: 10.3-2007-11-26/arch/x86_64/mm/fault-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/mm/fault-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/mm/fault-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -23,9 +23,9 @@
+ #include <linux/compiler.h>
+ #include <linux/module.h>
+ #include <linux/kprobes.h>
++#include <linux/uaccess.h>
+
+ #include <asm/system.h>
+-#include <asm/uaccess.h>
+ #include <asm/pgalloc.h>
+ #include <asm/smp.h>
+ #include <asm/tlbflush.h>
+@@ -96,7 +96,7 @@ void bust_spinlocks(int yes)
+ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
+ unsigned long error_code)
+ {
+- unsigned char __user *instr;
++ unsigned char *instr;
+ int scan_more = 1;
+ int prefetch = 0;
+ unsigned char *max_instr;
+@@ -116,7 +116,7 @@ static noinline int is_prefetch(struct p
+ unsigned char instr_hi;
+ unsigned char instr_lo;
+
+- if (__get_user(opcode, (char __user *)instr))
++ if (probe_kernel_address(instr, opcode))
+ break;
+
+ instr_hi = opcode & 0xf0;
+@@ -154,7 +154,7 @@ static noinline int is_prefetch(struct p
+ case 0x00:
+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
+ scan_more = 0;
+- if (__get_user(opcode, (char __user *)instr))
++ if (probe_kernel_address(instr, opcode))
+ break;
+ prefetch = (instr_lo == 0xF) &&
+ (opcode == 0x0D || opcode == 0x18);
+@@ -170,7 +170,7 @@ static noinline int is_prefetch(struct p
+ static int bad_address(void *p)
+ {
+ unsigned long dummy;
+- return __get_user(dummy, (unsigned long __user *)p);
++ return probe_kernel_address((unsigned long *)p, dummy);
+ }
+
+ void dump_pagetable(unsigned long address)
+Index: 10.3-2007-11-26/arch/x86_64/mm/init-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/mm/init-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/mm/init-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -1155,14 +1155,15 @@ static __init int x8664_sysctl_init(void
+ __initcall(x8664_sysctl_init);
+ #endif
+
+-/* A pseudo VMAs to allow ptrace access for the vsyscall page. This only
++/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
+ covers the 64bit vsyscall page now. 32bit has a real VMA now and does
+ not need special handling anymore. */
+
+ static struct vm_area_struct gate_vma = {
+ .vm_start = VSYSCALL_START,
+- .vm_end = VSYSCALL_END,
+- .vm_page_prot = PAGE_READONLY
++ .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
++ .vm_page_prot = PAGE_READONLY_EXEC,
++ .vm_flags = VM_READ | VM_EXEC
+ };
+
+ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+Index: 10.3-2007-11-26/arch/x86_64/mm/pageattr-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/mm/pageattr-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/mm/pageattr-xen.c 2007-10-22 13:53:25.000000000 +0200
+@@ -255,34 +255,40 @@ static struct page *split_large_page(uns
+ return base;
+ }
+
+-
+-static void flush_kernel_map(void *address)
++static void cache_flush_page(void *adr)
+ {
+- if (0 && address && cpu_has_clflush) {
+- /* is this worth it? */
+- int i;
+- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+- asm volatile("clflush (%0)" :: "r" (address + i));
+- } else
+- asm volatile("wbinvd":::"memory");
+- if (address)
+- __flush_tlb_one(address);
+- else
+- __flush_tlb_all();
++ int i;
++ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
++ asm volatile("clflush (%0)" :: "r" (adr + i));
+ }
+
++static void flush_kernel_map(void *arg)
++{
++ struct list_head *l = (struct list_head *)arg;
++ struct page *pg;
+
+-static inline void flush_map(unsigned long address)
++ /* When clflush is available always use it because it is
++ much cheaper than WBINVD */
++ if (!cpu_has_clflush)
++ asm volatile("wbinvd" ::: "memory");
++ list_for_each_entry(pg, l, lru) {
++ void *adr = page_address(pg);
++ if (cpu_has_clflush)
++ cache_flush_page(adr);
++ __flush_tlb_one(adr);
++ }
++}
++
++static inline void flush_map(struct list_head *l)
+ {
+- on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
++ on_each_cpu(flush_kernel_map, l, 1, 1);
+ }
+
+-static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
++static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
+
+ static inline void save_page(struct page *fpage)
+ {
+- fpage->lru.next = (struct list_head *)deferred_pages;
+- deferred_pages = fpage;
++ list_add(&fpage->lru, &deferred_pages);
+ }
+
+ /*
+@@ -412,18 +418,18 @@ int change_page_attr(struct page *page,
+
+ void global_flush_tlb(void)
+ {
+- struct page *dpage;
++ struct page *pg, *next;
++ struct list_head l;
+
+ down_read(&init_mm.mmap_sem);
+- dpage = xchg(&deferred_pages, NULL);
++ list_replace_init(&deferred_pages, &l);
+ up_read(&init_mm.mmap_sem);
+
+- flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
+- while (dpage) {
+- struct page *tmp = dpage;
+- dpage = (struct page *)dpage->lru.next;
+- ClearPagePrivate(tmp);
+- __free_page(tmp);
++ flush_map(&l);
++
++ list_for_each_entry_safe(pg, next, &l, lru) {
++ ClearPagePrivate(pg);
++ __free_page(pg);
+ }
+ }
+
+Index: 10.3-2007-11-26/drivers/kvm/Kconfig
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/kvm/Kconfig 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/drivers/kvm/Kconfig 2007-10-22 13:53:25.000000000 +0200
+@@ -6,7 +6,7 @@ menu "Virtualization"
+
+ config KVM
+ tristate "Kernel-based Virtual Machine (KVM) support"
+- depends on X86 && EXPERIMENTAL
++ depends on X86 && EXPERIMENTAL && !XEN
+ ---help---
+ Support hosting fully virtualized guest machines using hardware
+ virtualization extensions. You will need a fairly recent
+Index: 10.3-2007-11-26/drivers/xen/Kconfig
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/Kconfig 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/Kconfig 2007-10-22 13:53:25.000000000 +0200
+@@ -256,6 +256,9 @@ config NO_IDLE_HZ
+ bool
+ default y
+
++config PM
++ def_bool y
++
+ config XEN_SMPBOOT
+ bool
+ default y
+Index: 10.3-2007-11-26/drivers/xen/balloon/balloon.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/balloon/balloon.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/balloon/balloon.c 2007-10-22 13:53:25.000000000 +0200
+@@ -95,8 +95,8 @@ extern unsigned long totalhigh_pages;
+ static LIST_HEAD(ballooned_pages);
+
+ /* Main work function, always executed in process context. */
+-static void balloon_process(void *unused);
+-static DECLARE_WORK(balloon_worker, balloon_process, NULL);
++static void balloon_process(struct work_struct *unused);
++static DECLARE_WORK(balloon_worker, balloon_process);
+ static struct timer_list balloon_timer;
+
+ /* When ballooning out (allocating memory to return to Xen) we don't really
+@@ -339,7 +339,7 @@ static int decrease_reservation(unsigned
+ * by the balloon lock), or with changes to the Xen hard limit, but we will
+ * recover from these in time.
+ */
+-static void balloon_process(void *unused)
++static void balloon_process(struct work_struct *unused)
+ {
+ int need_sleep = 0;
+ long credit;
+Index: 10.3-2007-11-26/drivers/xen/blkback/blkback.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/blkback/blkback.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/blkback/blkback.c 2007-10-22 13:53:25.000000000 +0200
+@@ -37,6 +37,7 @@
+
+ #include <linux/spinlock.h>
+ #include <linux/kthread.h>
++#include <linux/freezer.h>
+ #include <linux/list.h>
+ #include <xen/balloon.h>
+ #include <asm/hypervisor.h>
+Index: 10.3-2007-11-26/drivers/xen/blkback/interface.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/blkback/interface.c 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/blkback/interface.c 2007-10-22 13:53:25.000000000 +0200
+@@ -34,7 +34,7 @@
+ #include <xen/evtchn.h>
+ #include <linux/kthread.h>
+
+-static kmem_cache_t *blkif_cachep;
++static struct kmem_cache *blkif_cachep;
+
+ blkif_t *blkif_alloc(domid_t domid)
+ {
+Index: 10.3-2007-11-26/drivers/xen/blkfront/blkfront.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/blkfront/blkfront.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/blkfront/blkfront.c 2007-10-22 13:53:25.000000000 +0200
+@@ -70,7 +70,7 @@ static int setup_blkring(struct xenbus_d
+ static void kick_pending_request_queues(struct blkfront_info *);
+
+ static irqreturn_t blkif_int(int irq, void *dev_id);
+-static void blkif_restart_queue(void *arg);
++static void blkif_restart_queue(struct work_struct *arg);
+ static void blkif_recover(struct blkfront_info *);
+ static void blkif_completion(struct blk_shadow *);
+ static void blkif_free(struct blkfront_info *, int);
+@@ -105,7 +105,7 @@ static int blkfront_probe(struct xenbus_
+ info->xbdev = dev;
+ info->vdevice = vdevice;
+ info->connected = BLKIF_STATE_DISCONNECTED;
+- INIT_WORK(&info->work, blkif_restart_queue, (void *)info);
++ INIT_WORK(&info->work, blkif_restart_queue);
+
+ for (i = 0; i < BLK_RING_SIZE; i++)
+ info->shadow[i].req.id = i+1;
+@@ -443,9 +443,9 @@ static void kick_pending_request_queues(
+ }
+ }
+
+-static void blkif_restart_queue(void *arg)
++static void blkif_restart_queue(struct work_struct *arg)
+ {
+- struct blkfront_info *info = (struct blkfront_info *)arg;
++ struct blkfront_info *info = container_of(arg, struct blkfront_info, work);
+ spin_lock_irq(&blkif_io_lock);
+ if (info->connected == BLKIF_STATE_CONNECTED)
+ kick_pending_request_queues(info);
+Index: 10.3-2007-11-26/drivers/xen/blktap/blktap.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/blktap/blktap.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/blktap/blktap.c 2007-10-22 13:53:25.000000000 +0200
+@@ -40,6 +40,7 @@
+
+ #include <linux/spinlock.h>
+ #include <linux/kthread.h>
++#include <linux/freezer.h>
+ #include <linux/list.h>
+ #include <asm/hypervisor.h>
+ #include "common.h"
+Index: 10.3-2007-11-26/drivers/xen/blktap/interface.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/blktap/interface.c 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/blktap/interface.c 2007-10-22 13:53:25.000000000 +0200
+@@ -34,7 +34,7 @@
+ #include "common.h"
+ #include <xen/evtchn.h>
+
+-static kmem_cache_t *blkif_cachep;
++static struct kmem_cache *blkif_cachep;
+
+ blkif_t *tap_alloc_blkif(domid_t domid)
+ {
+Index: 10.3-2007-11-26/drivers/xen/char/mem.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/char/mem.c 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/char/mem.c 2007-10-22 13:53:25.000000000 +0200
+@@ -170,7 +170,7 @@ static loff_t memory_lseek(struct file *
+ {
+ loff_t ret;
+
+- mutex_lock(&file->f_dentry->d_inode->i_mutex);
++ mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
+ switch (orig) {
+ case 0:
+ file->f_pos = offset;
+@@ -185,7 +185,7 @@ static loff_t memory_lseek(struct file *
+ default:
+ ret = -EINVAL;
+ }
+- mutex_unlock(&file->f_dentry->d_inode->i_mutex);
++ mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
+ return ret;
+ }
+
+Index: 10.3-2007-11-26/drivers/xen/console/console.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/console/console.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/console/console.c 2007-10-22 13:53:25.000000000 +0200
+@@ -80,11 +80,6 @@ static int xc_num = -1;
+ #define XEN_XVC_MAJOR 204
+ #define XEN_XVC_MINOR 191
+
+-#ifdef CONFIG_MAGIC_SYSRQ
+-static unsigned long sysrq_requested;
+-extern int sysrq_enabled;
+-#endif
+-
+ void xencons_early_setup(void)
+ {
+ extern int console_use_vt;
+@@ -329,8 +324,8 @@ void dom0_init_screen_info(const struct
+ #define DUMMY_TTY(_tty) ((xc_mode == XC_TTY) && \
+ ((_tty)->index != (xc_num - 1)))
+
+-static struct termios *xencons_termios[MAX_NR_CONSOLES];
+-static struct termios *xencons_termios_locked[MAX_NR_CONSOLES];
++static struct ktermios *xencons_termios[MAX_NR_CONSOLES];
++static struct ktermios *xencons_termios_locked[MAX_NR_CONSOLES];
+ static struct tty_struct *xencons_tty;
+ static int xencons_priv_irq;
+ static char x_char;
+@@ -346,7 +341,9 @@ void xencons_rx(char *buf, unsigned len)
+
+ for (i = 0; i < len; i++) {
+ #ifdef CONFIG_MAGIC_SYSRQ
+- if (sysrq_enabled) {
++ if (sysrq_on()) {
++ static unsigned long sysrq_requested;
++
+ if (buf[i] == '\x0f') { /* ^O */
+ if (!sysrq_requested) {
+ sysrq_requested = jiffies;
+Index: 10.3-2007-11-26/drivers/xen/core/reboot.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/core/reboot.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/core/reboot.c 2007-10-22 13:53:25.000000000 +0200
+@@ -31,8 +31,8 @@ static int shutting_down = SHUTDOWN_INVA
+ /* Can we leave APs online when we suspend? */
+ static int fast_suspend;
+
+-static void __shutdown_handler(void *unused);
+-static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
++static void __shutdown_handler(struct work_struct *unused);
++static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler);
+
+ int __xen_suspend(int fast_suspend);
+
+@@ -86,7 +86,7 @@ static int kthread_create_on_cpu(int (*f
+ return 0;
+ }
+
+-static void __shutdown_handler(void *unused)
++static void __shutdown_handler(struct work_struct *unused)
+ {
+ int err;
+
+@@ -148,7 +148,7 @@ static void shutdown_handler(struct xenb
+ }
+
+ if (shutting_down != SHUTDOWN_INVALID)
+- schedule_work(&shutdown_work);
++ schedule_delayed_work(&shutdown_work, 0);
+
+ kfree(str);
+ }
+Index: 10.3-2007-11-26/drivers/xen/core/smpboot.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/core/smpboot.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/core/smpboot.c 2007-10-22 13:53:25.000000000 +0200
+@@ -171,7 +171,12 @@ static void xen_smp_intr_exit(unsigned i
+
+ void cpu_bringup(void)
+ {
++#ifdef __i386__
++ cpu_set_gdt(current_thread_info()->cpu);
++ secondary_cpu_init();
++#else
+ cpu_init();
++#endif
+ touch_softlockup_watchdog();
+ preempt_disable();
+ local_irq_enable();
+@@ -289,11 +294,12 @@ void __init smp_prepare_cpus(unsigned in
+ if (cpu == 0)
+ continue;
+
++ idle = fork_idle(cpu);
++ if (IS_ERR(idle))
++ panic("failed fork for CPU %d", cpu);
++
+ #ifdef __x86_64__
+ gdt_descr = &cpu_gdt_descr[cpu];
+-#else
+- gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+-#endif
+ gdt_descr->address = get_zeroed_page(GFP_KERNEL);
+ if (unlikely(!gdt_descr->address)) {
+ printk(KERN_CRIT "CPU%d failed to allocate GDT\n",
+@@ -302,6 +308,11 @@ void __init smp_prepare_cpus(unsigned in
+ }
+ gdt_descr->size = GDT_SIZE;
+ memcpy((void *)gdt_descr->address, cpu_gdt_table, GDT_SIZE);
++#else
++ if (unlikely(!init_gdt(cpu, idle)))
++ continue;
++ gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++#endif
+ make_page_readonly(
+ (void *)gdt_descr->address,
+ XENFEAT_writable_descriptor_tables);
+@@ -312,10 +323,6 @@ void __init smp_prepare_cpus(unsigned in
+ cpu_2_logical_apicid[cpu] = cpu;
+ x86_cpu_to_apicid[cpu] = cpu;
+
+- idle = fork_idle(cpu);
+- if (IS_ERR(idle))
+- panic("failed fork for CPU %d", cpu);
+-
+ #ifdef __x86_64__
+ cpu_pda(cpu)->pcurrent = idle;
+ cpu_pda(cpu)->cpunumber = cpu;
+Index: 10.3-2007-11-26/drivers/xen/fbfront/xenfb.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/fbfront/xenfb.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/fbfront/xenfb.c 2007-10-22 13:53:25.000000000 +0200
+@@ -25,6 +25,7 @@
+ #include <linux/vmalloc.h>
+ #include <linux/mm.h>
+ #include <linux/mutex.h>
++#include <linux/freezer.h>
+ #include <asm/hypervisor.h>
+ #include <xen/evtchn.h>
+ #include <xen/interface/io/fbif.h>
+Index: 10.3-2007-11-26/drivers/xen/netback/loopback.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/netback/loopback.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/netback/loopback.c 2007-10-22 13:53:25.000000000 +0200
+@@ -54,6 +54,7 @@
+ #include <net/dst.h>
+ #include <net/xfrm.h> /* secpath_reset() */
+ #include <asm/hypervisor.h> /* is_initial_xendomain() */
++#include <../net/core/kmap_skb.h> /* k{,un}map_skb_frag() */
+
+ static int nloopbacks = -1;
+ module_param(nloopbacks, int, 0);
+Index: 10.3-2007-11-26/drivers/xen/pciback/conf_space_header.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/pciback/conf_space_header.c 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/pciback/conf_space_header.c 2007-10-22 13:53:25.000000000 +0200
+@@ -22,14 +22,14 @@ static int command_write(struct pci_dev
+ {
+ int err;
+
+- if (!dev->is_enabled && is_enable_cmd(value)) {
++ if (!atomic_read(&dev->enable_cnt) && is_enable_cmd(value)) {
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG "pciback: %s: enable\n",
+ pci_name(dev));
+ err = pci_enable_device(dev);
+ if (err)
+ return err;
+- } else if (dev->is_enabled && !is_enable_cmd(value)) {
++ } else if (atomic_read(&dev->enable_cnt) && !is_enable_cmd(value)) {
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG "pciback: %s: disable\n",
+ pci_name(dev));
+Index: 10.3-2007-11-26/drivers/xen/pciback/pciback.h
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/pciback/pciback.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/pciback/pciback.h 2007-10-22 13:53:25.000000000 +0200
+@@ -84,7 +84,7 @@ void pciback_release_devices(struct pcib
+
+ /* Handles events from front-end */
+ irqreturn_t pciback_handle_event(int irq, void *dev_id);
+-void pciback_do_op(void *data);
++void pciback_do_op(struct work_struct *work);
+
+ int pciback_xenbus_register(void);
+ void pciback_xenbus_unregister(void);
+Index: 10.3-2007-11-26/drivers/xen/pciback/pciback_ops.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/pciback/pciback_ops.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/pciback/pciback_ops.c 2007-10-22 13:53:25.000000000 +0200
+@@ -25,7 +25,7 @@ void pciback_reset_device(struct pci_dev
+
+ pci_write_config_word(dev, PCI_COMMAND, 0);
+
+- dev->is_enabled = 0;
++ atomic_set(&dev->enable_cnt, 0);
+ dev->is_busmaster = 0;
+ } else {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+@@ -51,9 +51,9 @@ static inline void test_and_schedule_op(
+ * context because some of the pci_* functions can sleep (mostly due to ACPI
+ * use of semaphores). This function is intended to be called from a work
+ * queue in process context taking a struct pciback_device as a parameter */
+-void pciback_do_op(void *data)
++void pciback_do_op(struct work_struct *work)
+ {
+- struct pciback_device *pdev = data;
++ struct pciback_device *pdev = container_of(work, struct pciback_device, op_work);
+ struct pci_dev *dev;
+ struct xen_pci_op *op = &pdev->sh_info->op;
+
+Index: 10.3-2007-11-26/drivers/xen/pciback/xenbus.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/pciback/xenbus.c 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/pciback/xenbus.c 2007-10-22 13:53:25.000000000 +0200
+@@ -32,7 +32,7 @@ static struct pciback_device *alloc_pdev
+ pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
+ pdev->be_watching = 0;
+
+- INIT_WORK(&pdev->op_work, pciback_do_op, pdev);
++ INIT_WORK(&pdev->op_work, pciback_do_op);
+
+ if (pciback_init_devices(pdev)) {
+ kfree(pdev);
+@@ -53,7 +53,6 @@ static void free_pdev(struct pciback_dev
+
+ /* If the driver domain started an op, make sure we complete it or
+ * delete it before releasing the shared memory */
+- cancel_delayed_work(&pdev->op_work);
+ flush_scheduled_work();
+
+ if (pdev->sh_info)
+Index: 10.3-2007-11-26/drivers/xen/tpmback/interface.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/tpmback/interface.c 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/tpmback/interface.c 2007-10-22 13:53:25.000000000 +0200
+@@ -15,7 +15,7 @@
+ #include <xen/balloon.h>
+ #include <xen/gnttab.h>
+
+-static kmem_cache_t *tpmif_cachep;
++static struct kmem_cache *tpmif_cachep;
+ int num_frontends = 0;
+
+ LIST_HEAD(tpmif_list);
+Index: 10.3-2007-11-26/drivers/xen/xenbus/xenbus_comms.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/xenbus/xenbus_comms.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/xenbus/xenbus_comms.c 2007-10-22 13:53:25.000000000 +0200
+@@ -48,9 +48,9 @@
+
+ static int xenbus_irq;
+
+-extern void xenbus_probe(void *);
++extern void xenbus_probe(struct work_struct *);
+ extern int xenstored_ready;
+-static DECLARE_WORK(probe_work, xenbus_probe, NULL);
++static DECLARE_WORK(probe_work, xenbus_probe);
+
+ static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
+
+Index: 10.3-2007-11-26/drivers/xen/xenbus/xenbus_probe.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/xenbus/xenbus_probe.c 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/xenbus/xenbus_probe.c 2007-10-22 13:53:25.000000000 +0200
+@@ -834,7 +834,7 @@ void unregister_xenstore_notifier(struct
+ EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
+
+
+-void xenbus_probe(void *unused)
++void xenbus_probe(struct work_struct *unused)
+ {
+ BUG_ON((xenstored_ready <= 0));
+
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/desc.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/desc.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/desc.h 2007-10-22 13:53:25.000000000 +0200
+@@ -4,8 +4,6 @@
+ #include <asm/ldt.h>
+ #include <asm/segment.h>
+
+-#define CPU_16BIT_STACK_SIZE 1024
+-
+ #ifndef __ASSEMBLY__
+
+ #include <linux/preempt.h>
+@@ -15,8 +13,6 @@
+
+ extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
+
+-DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
+-
+ struct Xgt_desc_struct {
+ unsigned short size;
+ unsigned long address __attribute__((packed));
+@@ -32,11 +28,6 @@ static inline struct desc_struct *get_cp
+ return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
+ }
+
+-/*
+- * This is the ldt that every process will get unless we need
+- * something other than this.
+- */
+-extern struct desc_struct default_ldt[];
+ extern struct desc_struct idt_table[];
+ extern void set_intr_gate(unsigned int irq, void * addr);
+
+@@ -63,8 +54,8 @@ static inline void pack_gate(__u32 *a, _
+ #define DESCTYPE_DPL3 0x60 /* DPL-3 */
+ #define DESCTYPE_S 0x10 /* !system */
+
++#ifndef CONFIG_XEN
+ #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
+-#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
+
+ #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
+ #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
+@@ -75,6 +66,7 @@ static inline void pack_gate(__u32 *a, _
+ #define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
+ #define store_tr(tr) __asm__ ("str %0":"=m" (tr))
+ #define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
++#endif
+
+ #if TLS_SIZE != 24
+ # error update this code.
+@@ -88,22 +80,43 @@ static inline void load_TLS(struct threa
+ }
+
+ #ifndef CONFIG_XEN
++#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
++#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
++#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
++
+ static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
+ {
+ __u32 *lp = (__u32 *)((char *)dt + entry*8);
+ *lp = entry_a;
+ *(lp+1) = entry_b;
+ }
+-
+-#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+-#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
++#define set_ldt native_set_ldt
+ #else
+ extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
+ extern int write_gdt_entry(void *gdt, int entry, __u32 entry_a, __u32 entry_b);
++#define set_ldt(addr, entries) xen_set_ldt((unsigned long)(addr), entries)
++#endif
++
++#ifndef CONFIG_XEN
++static inline fastcall void native_set_ldt(const void *addr,
++ unsigned int entries)
++{
++ if (likely(entries == 0))
++ __asm__ __volatile__("lldt %w0"::"q" (0));
++ else {
++ unsigned cpu = smp_processor_id();
++ __u32 a, b;
++
++ pack_descriptor(&a, &b, (unsigned long)addr,
++ entries * sizeof(struct desc_struct) - 1,
++ DESCTYPE_LDT, 0);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
++ __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
++ }
++}
+ #endif
+-#ifndef CONFIG_X86_NO_IDT
+-#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+
++#ifndef CONFIG_X86_NO_IDT
+ static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
+ {
+ __u32 a, b;
+@@ -123,14 +136,6 @@ static inline void __set_tss_desc(unsign
+ }
+ #endif
+
+-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries)
+-{
+- __u32 a, b;
+- pack_descriptor(&a, &b, (unsigned long)addr,
+- entries * sizeof(struct desc_struct) - 1,
+- DESCTYPE_LDT, 0);
+- write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
+-}
+
+ #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+
+@@ -161,36 +166,22 @@ static inline void set_ldt_desc(unsigned
+
+ static inline void clear_LDT(void)
+ {
+- int cpu = get_cpu();
+-
+- /*
+- * NB. We load the default_ldt for lcall7/27 handling on demand, as
+- * it slows down context switching. Noone uses it anyway.
+- */
+- cpu = cpu; /* XXX avoid compiler warning */
+- xen_set_ldt(0UL, 0);
+- put_cpu();
++ set_ldt(NULL, 0);
+ }
+
+ /*
+ * load one particular LDT into the current CPU
+ */
+-static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
++static inline void load_LDT_nolock(mm_context_t *pc)
+ {
+- void *segments = pc->ldt;
+- int count = pc->size;
+-
+- if (likely(!count))
+- segments = NULL;
+-
+- xen_set_ldt((unsigned long)segments, count);
++ set_ldt(pc->ldt, pc->size);
+ }
+
+ static inline void load_LDT(mm_context_t *pc)
+ {
+- int cpu = get_cpu();
+- load_LDT_nolock(pc, cpu);
+- put_cpu();
++ preempt_disable();
++ load_LDT_nolock(pc);
++ preempt_enable();
+ }
+
+ static inline unsigned long get_desc_base(unsigned long *desc)
+@@ -202,6 +193,29 @@ static inline unsigned long get_desc_bas
+ return base;
+ }
+
++#else /* __ASSEMBLY__ */
++
++/*
++ * GET_DESC_BASE reads the descriptor base of the specified segment.
++ *
++ * Args:
++ * idx - descriptor index
++ * gdt - GDT pointer
++ * base - 32bit register to which the base will be written
++ * lo_w - lo word of the "base" register
++ * lo_b - lo byte of the "base" register
++ * hi_b - hi byte of the low word of the "base" register
++ *
++ * Example:
++ * GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
++ * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
++ */
++#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
++ movb idx*8+4(gdt), lo_b; \
++ movb idx*8+7(gdt), hi_b; \
++ shll $16, base; \
++ movw idx*8+2(gdt), lo_w;
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/dma-mapping.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/dma-mapping.h 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/dma-mapping.h 2007-10-22 13:53:25.000000000 +0200
+@@ -133,10 +133,10 @@ dma_get_cache_alignment(void)
+ return (1 << INTERNODE_CACHE_SHIFT);
+ }
+
+-#define dma_is_consistent(d) (1)
++#define dma_is_consistent(d, h) (1)
+
+ static inline void
+-dma_cache_sync(void *vaddr, size_t size,
++dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+ {
+ flush_write_buffers();
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/fixmap.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/fixmap.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/fixmap.h 2007-10-22 13:53:25.000000000 +0200
+@@ -13,13 +13,16 @@
+ #ifndef _ASM_FIXMAP_H
+ #define _ASM_FIXMAP_H
+
+-
+ /* used by vmalloc.c, vsyscall.lds.S.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap.
+ */
+ extern unsigned long __FIXADDR_TOP;
++#ifdef CONFIG_COMPAT_VDSO
++#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
++#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
++#endif
+
+ #ifndef __ASSEMBLY__
+ #include <linux/kernel.h>
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/hypervisor.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/hypervisor.h 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/hypervisor.h 2007-10-22 13:53:25.000000000 +0200
+@@ -45,13 +45,6 @@
+ #include <xen/interface/nmi.h>
+ #include <asm/ptrace.h>
+ #include <asm/page.h>
+-#if defined(__i386__)
+-# ifdef CONFIG_X86_PAE
+-# include <asm-generic/pgtable-nopud.h>
+-# else
+-# include <asm-generic/pgtable-nopmd.h>
+-# endif
+-#endif
+
+ extern shared_info_t *HYPERVISOR_shared_info;
+
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/io.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/io.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/io.h 2007-10-22 13:53:25.000000000 +0200
+@@ -270,11 +270,7 @@ static inline void flush_write_buffers(v
+
+ #endif /* __KERNEL__ */
+
+-#ifdef SLOW_IO_BY_JUMPING
+-#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
+-#else
+ #define __SLOW_DOWN_IO "outb %%al,$0x80;"
+-#endif
+
+ static inline void slow_down_io(void) {
+ __asm__ __volatile__(
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/irqflags.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/irqflags.h 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/irqflags.h 2007-10-22 13:53:25.000000000 +0200
+@@ -22,9 +22,6 @@
+
+ #define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
+
+-#define raw_local_save_flags(flags) \
+- do { (flags) = __raw_local_save_flags(); } while (0)
+-
+ #define raw_local_irq_restore(x) \
+ do { \
+ vcpu_info_t *_vcpu; \
+@@ -66,18 +63,6 @@ void raw_safe_halt(void);
+ */
+ void halt(void);
+
+-static inline int raw_irqs_disabled_flags(unsigned long flags)
+-{
+- return (flags != 0);
+-}
+-
+-#define raw_irqs_disabled() \
+-({ \
+- unsigned long flags = __raw_local_save_flags(); \
+- \
+- raw_irqs_disabled_flags(flags); \
+-})
+-
+ /*
+ * For spinlocks, etc:
+ */
+@@ -90,9 +75,62 @@ static inline int raw_irqs_disabled_flag
+ flags; \
+ })
+
++#else
++/* Offsets into shared_info_t. */
++#define evtchn_upcall_pending /* 0 */
++#define evtchn_upcall_mask 1
++
++#define sizeof_vcpu_shift 6
++
++#ifdef CONFIG_SMP
++#define GET_VCPU_INFO movl TI_cpu(%ebp),%esi ; \
++ shl $sizeof_vcpu_shift,%esi ; \
++ addl HYPERVISOR_shared_info,%esi
++#else
++#define GET_VCPU_INFO movl HYPERVISOR_shared_info,%esi
++#endif
++
++#define __DISABLE_INTERRUPTS movb $1,evtchn_upcall_mask(%esi)
++#define __ENABLE_INTERRUPTS movb $0,evtchn_upcall_mask(%esi)
++#define __TEST_PENDING testb $0xFF,evtchn_upcall_pending(%esi)
++#define DISABLE_INTERRUPTS(clb) GET_VCPU_INFO ; \
++ __DISABLE_INTERRUPTS
++#define ENABLE_INTERRUPTS(clb) GET_VCPU_INFO ; \
++ __ENABLE_INTERRUPTS
++#define ENABLE_INTERRUPTS_SYSEXIT __ENABLE_INTERRUPTS ; \
++sysexit_scrit: /**** START OF SYSEXIT CRITICAL REGION ****/ ; \
++ __TEST_PENDING ; \
++ jnz 14f /* process more events if necessary... */ ; \
++ movl PT_ESI(%esp), %esi ; \
++ sysexit ; \
++14: __DISABLE_INTERRUPTS ; \
++ TRACE_IRQS_OFF ; \
++sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/ ; \
++ push %esp ; \
++ call evtchn_do_upcall ; \
++ add $4,%esp ; \
++ jmp ret_from_intr
++#define INTERRUPT_RETURN iret
++#endif /* __ASSEMBLY__ */
++
++#ifndef __ASSEMBLY__
++#define raw_local_save_flags(flags) \
++ do { (flags) = __raw_local_save_flags(); } while (0)
++
+ #define raw_local_irq_save(flags) \
+ do { (flags) = __raw_local_irq_save(); } while (0)
+
++static inline int raw_irqs_disabled_flags(unsigned long flags)
++{
++ return (flags != 0);
++}
++
++#define raw_irqs_disabled() \
++({ \
++ unsigned long flags = __raw_local_save_flags(); \
++ \
++ raw_irqs_disabled_flags(flags); \
++})
+ #endif /* __ASSEMBLY__ */
+
+ /*
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/mmu_context.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/mmu_context.h 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/mmu_context.h 2007-10-22 13:53:25.000000000 +0200
+@@ -27,14 +27,13 @@ static inline void enter_lazy_tlb(struct
+ static inline void __prepare_arch_switch(void)
+ {
+ /*
+- * Save away %fs and %gs. No need to save %es and %ds, as those
+- * are always kernel segments while inside the kernel. Must
+- * happen before reload of cr3/ldt (i.e., not in __switch_to).
++ * Save away %fs. No need to save %gs, as it was saved on the
++ * stack on entry. No need to save %es and %ds, as those are
++ * always kernel segments while inside the kernel.
+ */
+- asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
+- : "=m" (current->thread.fs),
+- "=m" (current->thread.gs));
+- asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
++ asm volatile ( "mov %%fs,%0"
++ : "=m" (current->thread.fs));
++ asm volatile ( "movl %0,%%fs"
+ : : "r" (0) );
+ }
+
+@@ -89,14 +88,14 @@ static inline void switch_mm(struct mm_s
+ * tlb flush IPI delivery. We must reload %cr3.
+ */
+ load_cr3(next->pgd);
+- load_LDT_nolock(&next->context, cpu);
++ load_LDT_nolock(&next->context);
+ }
+ }
+ #endif
+ }
+
+-#define deactivate_mm(tsk, mm) \
+- asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
++#define deactivate_mm(tsk, mm) \
++ asm("movl %0,%%fs": :"r" (0));
+
+ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+ {
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/page.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/page.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/page.h 2007-10-22 13:53:25.000000000 +0200
+@@ -108,6 +108,7 @@ static inline unsigned long long pgd_val
+ return ret;
+ }
+ #define HPAGE_SHIFT 21
++#include <asm-generic/pgtable-nopud.h>
+ #else
+ typedef struct { unsigned long pte_low; } pte_t;
+ typedef struct { unsigned long pgd; } pgd_t;
+@@ -134,6 +135,7 @@ static inline unsigned long pgd_val(pgd_
+ return ret;
+ }
+ #define HPAGE_SHIFT 22
++#include <asm-generic/pgtable-nopmd.h>
+ #endif
+ #define PTE_MASK PHYSICAL_PAGE_MASK
+
+@@ -182,22 +184,17 @@ extern int page_is_ram(unsigned long pag
+
+ #ifdef __ASSEMBLY__
+ #define __PAGE_OFFSET CONFIG_PAGE_OFFSET
+-#define __PHYSICAL_START CONFIG_PHYSICAL_START
+ #else
+ #define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET)
+-#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
+-#endif
+-#define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START)
+-
+-#if CONFIG_XEN_COMPAT <= 0x030002
+-#undef LOAD_OFFSET
+-#define LOAD_OFFSET 0
+ #endif
+
+ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+ #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
+ #define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
+ #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
++/* __pa_symbol should be used for C visible symbols.
++ This seems to be the official gcc blessed way to do such arithmetic. */
++#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0))
+ #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+ #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+ #ifdef CONFIG_FLATMEM
+@@ -215,7 +212,9 @@ extern int page_is_ram(unsigned long pag
+ #include <asm-generic/memory_model.h>
+ #include <asm-generic/page.h>
+
++#ifndef CONFIG_COMPAT_VDSO
+ #define __HAVE_ARCH_GATE_AREA 1
++#endif
+ #endif /* __KERNEL__ */
+
+ #endif /* _I386_PAGE_H */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/param.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/param.h 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/param.h 2007-10-22 13:53:25.000000000 +0200
+@@ -18,6 +18,5 @@
+ #endif
+
+ #define MAXHOSTNAMELEN 64 /* max length of hostname */
+-#define COMMAND_LINE_SIZE 256
+
+ #endif
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable-2level.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/pgtable-2level.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable-2level.h 2007-10-22 13:54:57.000000000 +0200
+@@ -1,8 +1,6 @@
+ #ifndef _I386_PGTABLE_2LEVEL_H
+ #define _I386_PGTABLE_2LEVEL_H
+
+-#include <asm-generic/pgtable-nopmd.h>
+-
+ #define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
+ #define pgd_ERROR(e) \
+@@ -21,26 +19,14 @@
+ set_pte((ptep), (pteval)); \
+ } while (0)
+
+-#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
+-
+ #define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
+
++#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
++
+ #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+ #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+
+-#define pte_none(x) (!(x).pte_low)
+-
+-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+-{
+- pte_t pte = *ptep;
+- if (!pte_none(pte)) {
+- if ((mm != &init_mm) ||
+- HYPERVISOR_update_va_mapping(addr, __pte(0), 0))
+- pte = __pte_ma(xchg(&ptep->pte_low, 0));
+- }
+- return pte;
+-}
++#define raw_ptep_get_and_clear(xp, pte) __pte_ma(xchg(&(xp)->pte_low, 0))
+
+ #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
+ #define ptep_clear_flush(vma, addr, ptep) \
+@@ -65,6 +51,7 @@ static inline pte_t ptep_get_and_clear(s
+ mfn_to_local_pfn(__pte_mfn(_pte)) : __pte_mfn(_pte))
+
+ #define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
++#define pte_none(x) (!(x).pte_low)
+
+ #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+ #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable-3level.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/pgtable-3level.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable-3level.h 2007-10-22 13:58:00.000000000 +0200
+@@ -1,8 +1,6 @@
+ #ifndef _I386_PGTABLE_3LEVEL_H
+ #define _I386_PGTABLE_3LEVEL_H
+
+-#include <asm-generic/pgtable-nopud.h>
+-
+ /*
+ * Intel Physical Address Extension (PAE) Mode - three-level page
+ * tables on PPro+ CPUs.
+@@ -72,6 +70,23 @@ static inline void set_pte(pte_t *ptep,
+ xen_l3_entry_update((pudptr), (pudval))
+
+ /*
++ * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
++ * entry, so clear the bottom half first and enforce ordering with a compiler
++ * barrier.
++ */
++static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ if ((mm != current->mm && mm != &init_mm)
++ || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
++ ptep->pte_low = 0;
++ smp_wmb();
++ ptep->pte_high = 0;
++ }
++}
++
++#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
++
++/*
+ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
+ * the TLB via cr3 if the top-level pgd is changed...
+ * We do not let the generic code free and clear pgd entries due to
+@@ -90,45 +105,16 @@ static inline void pud_clear (pud_t * pu
+ #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
+ pmd_index(address))
+
+-static inline int pte_none(pte_t pte)
+-{
+- return !(pte.pte_low | pte.pte_high);
+-}
+-
+-/*
+- * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
+- * entry, so clear the bottom half first and enforce ordering with a compiler
+- * barrier.
+- */
+-static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++static inline pte_t raw_ptep_get_and_clear(pte_t *ptep, pte_t res)
+ {
+- if ((mm != current->mm && mm != &init_mm)
+- || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
+- ptep->pte_low = 0;
+- smp_wmb();
++ uint64_t val = pte_val_ma(res);
++ if (__cmpxchg64(ptep, val, 0) != val) {
++ /* xchg acts as a barrier before the setting of the high bits */
++ res.pte_low = xchg(&ptep->pte_low, 0);
++ res.pte_high = ptep->pte_high;
+ ptep->pte_high = 0;
+ }
+-}
+-
+-#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+-
+-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+-{
+- pte_t pte = *ptep;
+- if (!pte_none(pte)) {
+- if ((mm != &init_mm) ||
+- HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
+- uint64_t val = pte_val_ma(pte);
+- if (__cmpxchg64(ptep, val, 0) != val) {
+- /* xchg acts as a barrier before the setting of the high bits */
+- pte.pte_low = xchg(&ptep->pte_low, 0);
+- pte.pte_high = ptep->pte_high;
+- ptep->pte_high = 0;
+- }
+- }
+- }
+- return pte;
++ return res;
+ }
+
+ #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
+@@ -157,6 +143,11 @@ static inline int pte_same(pte_t a, pte_
+
+ #define pte_page(x) pfn_to_page(pte_pfn(x))
+
++static inline int pte_none(pte_t pte)
++{
++ return !(pte.pte_low | pte.pte_high);
++}
++
+ #define __pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \
+ ((_pte).pte_high << (32-PAGE_SHIFT)))
+ #define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/pgtable.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable.h 2007-10-22 14:08:14.000000000 +0200
+@@ -38,14 +38,14 @@ struct vm_area_struct;
+ #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+ extern unsigned long empty_zero_page[1024];
+ extern pgd_t *swapper_pg_dir;
+-extern kmem_cache_t *pgd_cache;
+-extern kmem_cache_t *pmd_cache;
++extern struct kmem_cache *pgd_cache;
++extern struct kmem_cache *pmd_cache;
+ extern spinlock_t pgd_lock;
+ extern struct page *pgd_list;
+
+-void pmd_ctor(void *, kmem_cache_t *, unsigned long);
+-void pgd_ctor(void *, kmem_cache_t *, unsigned long);
+-void pgd_dtor(void *, kmem_cache_t *, unsigned long);
++void pmd_ctor(void *, struct kmem_cache *, unsigned long);
++void pgd_ctor(void *, struct kmem_cache *, unsigned long);
++void pgd_dtor(void *, struct kmem_cache *, unsigned long);
+ void pgtable_cache_init(void);
+ void paging_init(void);
+
+@@ -272,7 +272,6 @@ static inline pte_t pte_mkhuge(pte_t pte
+ #define pte_update(mm, addr, ptep) do { } while (0)
+ #define pte_update_defer(mm, addr, ptep) do { } while (0)
+
+-
+ /*
+ * We only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+@@ -338,6 +337,19 @@ do { \
+ __young; \
+ })
+
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
++static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ pte_t pte = *ptep;
++ if (!pte_none(pte)
++ && (mm != &init_mm
++ || HYPERVISOR_update_va_mapping(addr, __pte(0), 0))) {
++ pte = raw_ptep_get_and_clear(ptep, pte);
++ pte_update(mm, addr, ptep);
++ }
++ return pte;
++}
++
+ #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+ #define ptep_get_and_clear_full(mm, addr, ptep, full) \
+ ((full) ? ({ \
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/processor.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/processor.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/processor.h 2007-10-22 13:53:25.000000000 +0200
+@@ -20,6 +20,7 @@
+ #include <linux/threads.h>
+ #include <asm/percpu.h>
+ #include <linux/cpumask.h>
++#include <linux/init.h>
+ #include <xen/interface/physdev.h>
+
+ /* flag for disabling the tsc */
+@@ -73,6 +74,7 @@ struct cpuinfo_x86 {
+ #endif
+ unsigned char x86_max_cores; /* cpuid returned max cores value */
+ unsigned char apicid;
++ unsigned short x86_clflush_size;
+ #ifdef CONFIG_SMP
+ unsigned char booted_cores; /* number of cores as seen by OS */
+ __u8 phys_proc_id; /* Physical processor id. */
+@@ -114,6 +116,8 @@ extern struct cpuinfo_x86 cpu_data[];
+ extern int cpu_llc_id[NR_CPUS];
+ extern char ignore_fpu_irq;
+
++void __init cpu_detect(struct cpuinfo_x86 *c);
++
+ extern void identify_cpu(struct cpuinfo_x86 *);
+ extern void print_cpu_info(struct cpuinfo_x86 *);
+ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+@@ -146,8 +150,8 @@ static inline void detect_ht(struct cpui
+ #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+ #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+
+-static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
+- unsigned int *ecx, unsigned int *edx)
++static inline fastcall void xen_cpuid(unsigned int *eax, unsigned int *ebx,
++ unsigned int *ecx, unsigned int *edx)
+ {
+ /* ecx is often an input as well as an output. */
+ __asm__(XEN_CPUID
+@@ -158,59 +162,6 @@ static inline void __cpuid(unsigned int
+ : "0" (*eax), "2" (*ecx));
+ }
+
+-/*
+- * Generic CPUID function
+- * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
+- * resulting in stale register contents being returned.
+- */
+-static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
+-{
+- *eax = op;
+- *ecx = 0;
+- __cpuid(eax, ebx, ecx, edx);
+-}
+-
+-/* Some CPUID calls want 'count' to be placed in ecx */
+-static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
+- int *edx)
+-{
+- *eax = op;
+- *ecx = count;
+- __cpuid(eax, ebx, ecx, edx);
+-}
+-
+-/*
+- * CPUID functions returning a single datum
+- */
+-static inline unsigned int cpuid_eax(unsigned int op)
+-{
+- unsigned int eax, ebx, ecx, edx;
+-
+- cpuid(op, &eax, &ebx, &ecx, &edx);
+- return eax;
+-}
+-static inline unsigned int cpuid_ebx(unsigned int op)
+-{
+- unsigned int eax, ebx, ecx, edx;
+-
+- cpuid(op, &eax, &ebx, &ecx, &edx);
+- return ebx;
+-}
+-static inline unsigned int cpuid_ecx(unsigned int op)
+-{
+- unsigned int eax, ebx, ecx, edx;
+-
+- cpuid(op, &eax, &ebx, &ecx, &edx);
+- return ecx;
+-}
+-static inline unsigned int cpuid_edx(unsigned int op)
+-{
+- unsigned int eax, ebx, ecx, edx;
+-
+- cpuid(op, &eax, &ebx, &ecx, &edx);
+- return edx;
+-}
+-
+ #define load_cr3(pgdir) write_cr3(__pa(pgdir))
+
+ /*
+@@ -480,9 +431,9 @@ struct thread_struct {
+ .vm86_info = NULL, \
+ .sysenter_cs = __KERNEL_CS, \
+ .io_bitmap_ptr = NULL, \
++ .gs = __KERNEL_PDA, \
+ }
+
+-#ifndef CONFIG_X86_NO_TSS
+ /*
+ * Note that the .io_bitmap member must be extra-big. This is because
+ * the CPU will access an additional byte beyond the end of the IO
+@@ -497,24 +448,9 @@ struct thread_struct {
+ .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
+ }
+
+-static inline void __load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+-{
+- tss->esp0 = thread->esp0;
+- /* This can only happen when SEP is enabled, no need to test "SEP"arately */
+- if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+- tss->ss1 = thread->sysenter_cs;
+- wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+- }
+-}
+-#define load_esp0(tss, thread) \
+- __load_esp0(tss, thread)
+-#else
+-#define load_esp0(tss, thread) \
+- HYPERVISOR_stack_switch(__KERNEL_DS, (thread)->esp0)
+-#endif
+-
+ #define start_thread(regs, new_eip, new_esp) do { \
+- __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
++ __asm__("movl %0,%%fs": :"r" (0)); \
++ regs->xgs = 0; \
+ set_fs(USER_DS); \
+ regs->xds = __USER_DS; \
+ regs->xes = __USER_DS; \
+@@ -524,26 +460,6 @@ static inline void __load_esp0(struct ts
+ regs->esp = new_esp; \
+ } while (0)
+
+-/*
+- * These special macros can be used to get or set a debugging register
+- */
+-#define get_debugreg(var, register) \
+- (var) = HYPERVISOR_get_debugreg((register))
+-#define set_debugreg(value, register) \
+- HYPERVISOR_set_debugreg((register), (value))
+-
+-/*
+- * Set IOPL bits in EFLAGS from given mask
+- */
+-static inline void set_iopl_mask(unsigned mask)
+-{
+- struct physdev_set_iopl set_iopl;
+-
+- /* Force the change at ring 0. */
+- set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
+- HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
+-}
+-
+ /* Forward declaration, a strange C thing */
+ struct task_struct;
+ struct mm_struct;
+@@ -635,6 +551,103 @@ static inline void rep_nop(void)
+
+ #define cpu_relax() rep_nop()
+
++#define paravirt_enabled() 0
++#define __cpuid xen_cpuid
++
++#ifndef CONFIG_X86_NO_TSS
++static inline void __load_esp0(struct tss_struct *tss, struct thread_struct *thread)
++{
++ tss->esp0 = thread->esp0;
++ /* This can only happen when SEP is enabled, no need to test "SEP"arately */
++ if (unlikely(tss->ss1 != thread->sysenter_cs)) {
++ tss->ss1 = thread->sysenter_cs;
++ wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
++ }
++}
++#define load_esp0(tss, thread) \
++ __load_esp0(tss, thread)
++#else
++#define load_esp0(tss, thread) \
++ HYPERVISOR_stack_switch(__KERNEL_DS, (thread)->esp0)
++#endif
++
++
++/*
++ * These special macros can be used to get or set a debugging register
++ */
++#define get_debugreg(var, register) \
++ (var) = HYPERVISOR_get_debugreg((register))
++#define set_debugreg(value, register) \
++ HYPERVISOR_set_debugreg((register), (value))
++
++#define set_iopl_mask xen_set_iopl_mask
++
++/*
++ * Set IOPL bits in EFLAGS from given mask
++ */
++static inline void xen_set_iopl_mask(unsigned mask)
++{
++ struct physdev_set_iopl set_iopl;
++
++ /* Force the change at ring 0. */
++ set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
++ HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
++}
++
++
++/*
++ * Generic CPUID function
++ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
++ * resulting in stale register contents being returned.
++ */
++static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
++{
++ *eax = op;
++ *ecx = 0;
++ __cpuid(eax, ebx, ecx, edx);
++}
++
++/* Some CPUID calls want 'count' to be placed in ecx */
++static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
++ int *edx)
++{
++ *eax = op;
++ *ecx = count;
++ __cpuid(eax, ebx, ecx, edx);
++}
++
++/*
++ * CPUID functions returning a single datum
++ */
++static inline unsigned int cpuid_eax(unsigned int op)
++{
++ unsigned int eax, ebx, ecx, edx;
++
++ cpuid(op, &eax, &ebx, &ecx, &edx);
++ return eax;
++}
++static inline unsigned int cpuid_ebx(unsigned int op)
++{
++ unsigned int eax, ebx, ecx, edx;
++
++ cpuid(op, &eax, &ebx, &ecx, &edx);
++ return ebx;
++}
++static inline unsigned int cpuid_ecx(unsigned int op)
++{
++ unsigned int eax, ebx, ecx, edx;
++
++ cpuid(op, &eax, &ebx, &ecx, &edx);
++ return ecx;
++}
++static inline unsigned int cpuid_edx(unsigned int op)
++{
++ unsigned int eax, ebx, ecx, edx;
++
++ cpuid(op, &eax, &ebx, &ecx, &edx);
++ return edx;
++}
++
+ /* generic versions from gas */
+ #define GENERIC_NOP1 ".byte 0x90\n"
+ #define GENERIC_NOP2 ".byte 0x89,0xf6\n"
+@@ -734,4 +747,8 @@ extern unsigned long boot_option_idle_ov
+ extern void enable_sep_cpu(void);
+ extern int sysenter_setup(void);
+
++extern int init_gdt(int cpu, struct task_struct *idle);
++extern void cpu_set_gdt(int);
++extern void secondary_cpu_init(void);
++
+ #endif /* __ASM_I386_PROCESSOR_H */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/ptrace.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/ptrace.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/ptrace.h 2007-10-22 13:53:25.000000000 +0200
+@@ -16,6 +16,8 @@ struct pt_regs {
+ long eax;
+ int xds;
+ int xes;
++ /* int xfs; */
++ int xgs;
+ long orig_eax;
+ long eip;
+ int xcs;
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/segment.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/segment.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/segment.h 2007-10-22 13:53:25.000000000 +0200
+@@ -39,7 +39,7 @@
+ * 25 - APM BIOS support
+ *
+ * 26 - ESPFIX small SS
+- * 27 - unused
++ * 27 - PDA [ per-cpu private data area ]
+ * 28 - unused
+ * 29 - unused
+ * 30 - unused
+@@ -74,6 +74,9 @@
+ #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
+ #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
+
++#define GDT_ENTRY_PDA (GDT_ENTRY_KERNEL_BASE + 15)
++#define __KERNEL_PDA (GDT_ENTRY_PDA * 8)
++
+ #define GDT_ENTRY_DOUBLEFAULT_TSS 31
+
+ /*
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/setup.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/setup.h 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/setup.h 2007-10-22 13:53:25.000000000 +0200
+@@ -6,6 +6,8 @@
+ #ifndef _i386_SETUP_H
+ #define _i386_SETUP_H
+
++#define COMMAND_LINE_SIZE 256
++
+ #ifdef __KERNEL__
+ #include <linux/pfn.h>
+
+@@ -14,10 +16,8 @@
+ */
+ #define MAXMEM_PFN PFN_DOWN(MAXMEM)
+ #define MAX_NONPAE_PFN (1 << 20)
+-#endif
+
+ #define PARAM_SIZE 4096
+-#define COMMAND_LINE_SIZE 256
+
+ #define OLD_CL_MAGIC_ADDR 0x90020
+ #define OLD_CL_MAGIC 0xA33F
+@@ -70,6 +70,7 @@ extern unsigned char boot_params[PARAM_S
+ struct e820entry;
+
+ char * __init machine_specific_memory_setup(void);
++char *memory_setup(void);
+
+ int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
+ int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
+@@ -78,4 +79,6 @@ void __init add_memory_region(unsigned l
+
+ #endif /* __ASSEMBLY__ */
+
++#endif /* __KERNEL__ */
++
+ #endif /* _i386_SETUP_H */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/smp.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/smp.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/smp.h 2007-10-22 13:53:25.000000000 +0200
+@@ -8,6 +8,7 @@
+ #include <linux/kernel.h>
+ #include <linux/threads.h>
+ #include <linux/cpumask.h>
++#include <asm/pda.h>
+ #endif
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+@@ -56,7 +57,7 @@ extern void cpu_uninit(void);
+ * from the initial startup. We map APIC_BASE very early in page_setup(),
+ * so this is correct in the x86 case.
+ */
+-#define raw_smp_processor_id() (current_thread_info()->cpu)
++#define raw_smp_processor_id() (read_pda(cpu_number))
+
+ extern cpumask_t cpu_possible_map;
+ #define cpu_callin_map cpu_possible_map
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/spinlock.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/spinlock.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/spinlock.h 2007-10-22 13:53:25.000000000 +0200
+@@ -9,6 +9,8 @@
+
+ #define CLI_STRING "#cli"
+ #define STI_STRING "#sti"
++#define CLI_STI_CLOBBERS
++#define CLI_STI_INPUT_ARGS
+
+ /*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+@@ -53,25 +55,28 @@ static inline void __raw_spin_lock_flags
+ {
+ asm volatile(
+ "\n1:\t"
+- LOCK_PREFIX "decb %0\n\t"
++ LOCK_PREFIX "decb %[slock]\n\t"
+ "jns 5f\n"
+ "2:\t"
+- "testl $0x200, %1\n\t"
++ "testl $0x200, %[flags]\n\t"
+ "jz 4f\n\t"
+ STI_STRING "\n"
+ "3:\t"
+ "rep;nop\n\t"
+- "cmpb $0, %0\n\t"
++ "cmpb $0, %[slock]\n\t"
+ "jle 3b\n\t"
+ CLI_STRING "\n\t"
+ "jmp 1b\n"
+ "4:\t"
+ "rep;nop\n\t"
+- "cmpb $0, %0\n\t"
++ "cmpb $0, %[slock]\n\t"
+ "jg 1b\n\t"
+ "jmp 4b\n"
+ "5:\n\t"
+- : "+m" (lock->slock) : "r" (flags) : "memory");
++ : [slock] "+m" (lock->slock)
++ : [flags] "r" (flags)
++ CLI_STI_INPUT_ARGS
++ : "memory" CLI_STI_CLOBBERS);
+ }
+ #endif
+
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/system.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/system.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/system.h 2007-10-22 13:53:25.000000000 +0200
+@@ -139,17 +139,17 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t"
+ #define write_cr4(x) \
+ __asm__ __volatile__("movl %0,%%cr4": :"r" (x))
+
+-/*
+- * Clear and set 'TS' bit respectively
+- */
++#define wbinvd() \
++ __asm__ __volatile__ ("wbinvd": : :"memory")
++
++/* Clear the 'TS' bit */
+ #define clts() (HYPERVISOR_fpu_taskswitch(0))
++
++/* Set the 'TS' bit */
+ #define stts() (HYPERVISOR_fpu_taskswitch(1))
+
+ #endif /* __KERNEL__ */
+
+-#define wbinvd() \
+- __asm__ __volatile__ ("wbinvd": : :"memory")
+-
+ static inline unsigned long get_limit(unsigned long segment)
+ {
+ unsigned long __limit;
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/desc.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/desc.h 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/desc.h 2007-10-22 13:53:25.000000000 +0200
+@@ -9,62 +9,11 @@
+
+ #include <linux/string.h>
+ #include <linux/smp.h>
++#include <asm/desc_defs.h>
+
+ #include <asm/segment.h>
+ #include <asm/mmu.h>
+
+-// 8 byte segment descriptor
+-struct desc_struct {
+- u16 limit0;
+- u16 base0;
+- unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
+- unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
+-} __attribute__((packed));
+-
+-struct n_desc_struct {
+- unsigned int a,b;
+-};
+-
+-enum {
+- GATE_INTERRUPT = 0xE,
+- GATE_TRAP = 0xF,
+- GATE_CALL = 0xC,
+-};
+-
+-// 16byte gate
+-struct gate_struct {
+- u16 offset_low;
+- u16 segment;
+- unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
+- u16 offset_middle;
+- u32 offset_high;
+- u32 zero1;
+-} __attribute__((packed));
+-
+-#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
+-#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
+-#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
+-
+-enum {
+- DESC_TSS = 0x9,
+- DESC_LDT = 0x2,
+-};
+-
+-// LDT or TSS descriptor in the GDT. 16 bytes.
+-struct ldttss_desc {
+- u16 limit0;
+- u16 base0;
+- unsigned base1 : 8, type : 5, dpl : 2, p : 1;
+- unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+- u32 base3;
+- u32 zero1;
+-} __attribute__((packed));
+-
+-struct desc_ptr {
+- unsigned short size;
+- unsigned long address;
+-} __attribute__((packed)) ;
+-
+ extern struct desc_ptr idt_descr, cpu_gdt_descr[NR_CPUS];
+
+ extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/dma-mapping.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/dma-mapping.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/dma-mapping.h 2007-10-22 13:53:25.000000000 +0200
+@@ -64,6 +64,9 @@ static inline int dma_mapping_error(dma_
+ return (dma_addr == bad_dma_address);
+ }
+
++#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
++#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
++
+ extern void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+ extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+@@ -181,12 +184,13 @@ static inline int dma_get_cache_alignmen
+ return boot_cpu_data.x86_clflush_size;
+ }
+
+-#define dma_is_consistent(h) 1
++#define dma_is_consistent(d, h) 1
+
+ extern int dma_set_mask(struct device *dev, u64 mask);
+
+ static inline void
+-dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
++dma_cache_sync(struct device *dev, void *vaddr, size_t size,
++ enum dma_data_direction dir)
+ {
+ flush_write_buffers();
+ }
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/msr.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/msr.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/msr.h 2007-10-22 13:53:25.000000000 +0200
+@@ -169,8 +169,8 @@ static inline unsigned int cpuid_edx(uns
+ #define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
+ #define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */
+ #define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
+-#define MSR_FS_BASE 0xc0000100 /* 64bit GS base */
+-#define MSR_GS_BASE 0xc0000101 /* 64bit FS base */
++#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
++#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
+ #define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */
+ /* EFER bits: */
+ #define _EFER_SCE 0 /* SYSCALL/SYSRET */
+@@ -189,6 +189,7 @@ static inline unsigned int cpuid_edx(uns
+
+ #define MSR_IA32_PERFCTR0 0xc1
+ #define MSR_IA32_PERFCTR1 0xc2
++#define MSR_FSB_FREQ 0xcd
+
+ #define MSR_MTRRcap 0x0fe
+ #define MSR_IA32_BBL_CR_CTL 0x119
+@@ -210,6 +211,10 @@ static inline unsigned int cpuid_edx(uns
+ #define MSR_IA32_LASTINTFROMIP 0x1dd
+ #define MSR_IA32_LASTINTTOIP 0x1de
+
++#define MSR_IA32_PEBS_ENABLE 0x3f1
++#define MSR_IA32_DS_AREA 0x600
++#define MSR_IA32_PERF_CAPABILITIES 0x345
++
+ #define MSR_MTRRfix64K_00000 0x250
+ #define MSR_MTRRfix16K_80000 0x258
+ #define MSR_MTRRfix16K_A0000 0x259
+@@ -307,6 +312,9 @@ static inline unsigned int cpuid_edx(uns
+ #define MSR_IA32_PERF_STATUS 0x198
+ #define MSR_IA32_PERF_CTL 0x199
+
++#define MSR_IA32_MPERF 0xE7
++#define MSR_IA32_APERF 0xE8
++
+ #define MSR_IA32_THERM_CONTROL 0x19a
+ #define MSR_IA32_THERM_INTERRUPT 0x19b
+ #define MSR_IA32_THERM_STATUS 0x19c
+@@ -407,4 +415,13 @@ static inline unsigned int cpuid_edx(uns
+ #define MSR_P4_U2L_ESCR0 0x3b0
+ #define MSR_P4_U2L_ESCR1 0x3b1
+
++/* Intel Core-based CPU performance counters */
++#define MSR_CORE_PERF_FIXED_CTR0 0x309
++#define MSR_CORE_PERF_FIXED_CTR1 0x30a
++#define MSR_CORE_PERF_FIXED_CTR2 0x30b
++#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
++#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
++#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
++#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
++
+ #endif
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/nmi.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/nmi.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/nmi.h 2007-10-22 13:53:25.000000000 +0200
+@@ -93,4 +93,7 @@ extern int proc_nmi_enabled(struct ctl_t
+
+ extern int unknown_nmi_panic;
+
++void __trigger_all_cpu_backtrace(void);
++#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
++
+ #endif /* ASM_NMI_H */
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/pgtable.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/pgtable.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/pgtable.h 2007-10-22 13:53:25.000000000 +0200
+@@ -230,19 +230,18 @@ extern unsigned int __kernel_page_user;
+
+ static inline unsigned long pgd_bad(pgd_t pgd)
+ {
+- unsigned long val = pgd_val(pgd);
+- val &= ~PTE_MASK;
+- val &= ~(_PAGE_USER | _PAGE_DIRTY);
+- return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
++ return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+ }
+
+-static inline unsigned long pud_bad(pud_t pud)
+-{
+- unsigned long val = pud_val(pud);
+- val &= ~PTE_MASK;
+- val &= ~(_PAGE_USER | _PAGE_DIRTY);
+- return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
+-}
++static inline unsigned long pud_bad(pud_t pud)
++{
++ return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
++}
++
++static inline unsigned long pmd_bad(pmd_t pmd)
++{
++ return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
++}
+
+ #define set_pte_at(_mm,addr,ptep,pteval) do { \
+ if (((_mm) != current->mm && (_mm) != &init_mm) || \
+@@ -398,8 +397,6 @@ static inline int pmd_large(pmd_t pte) {
+ #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+ #endif
+ #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+-#define pmd_bad(x) ((pmd_val(x) & ~(PTE_MASK | _PAGE_USER | _PAGE_PRESENT)) \
+- != (_KERNPG_TABLE & ~(_PAGE_USER | _PAGE_PRESENT)))
+ #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
+ #define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
+
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/processor.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/processor.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/processor.h 2007-10-22 13:53:25.000000000 +0200
+@@ -488,6 +488,14 @@ static inline void __mwait(unsigned long
+ : :"a" (eax), "c" (ecx));
+ }
+
++static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
++{
++ /* "mwait %eax,%ecx;" */
++ asm volatile(
++ "sti; .byte 0x0f,0x01,0xc9;"
++ : :"a" (eax), "c" (ecx));
++}
++
+ extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
+
+ #define stack_current() \
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/smp.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/smp.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/smp.h 2007-10-22 13:53:25.000000000 +0200
+@@ -88,11 +88,6 @@ extern u8 x86_cpu_to_log_apicid[NR_CPUS]
+ extern u8 bios_cpu_apicid[];
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+-{
+- return cpus_addr(cpumask)[0];
+-}
+-
+ static inline int cpu_present_to_apicid(int mps_cpu)
+ {
+ if (mps_cpu < NR_CPUS)
+@@ -127,13 +122,6 @@ static __inline int logical_smp_processo
+ #define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
+ #else
+ #define cpu_physical_id(cpu) boot_cpu_id
+-static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
+- void *info, int retry, int wait)
+-{
+- /* Disable interrupts here? */
+- func(info);
+- return 0;
+-}
+ #endif /* !CONFIG_SMP */
+ #endif
+
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/time.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/time.h 2007-10-22 13:53:25.000000000 +0200
+@@ -0,0 +1 @@
++#include <asm-i386/time.h>
+Index: 10.3-2007-11-26/kernel/kexec.c
+===================================================================
+--- 10.3-2007-11-26.orig/kernel/kexec.c 2007-12-06 17:27:33.000000000 +0100
++++ 10.3-2007-11-26/kernel/kexec.c 2007-10-22 13:53:25.000000000 +0200
+@@ -344,7 +344,7 @@ static struct page *kimage_alloc_pages(g
+ if (limit == ~0UL)
+ address_bits = BITS_PER_LONG;
+ else
+- address_bits = long_log2(limit);
++ address_bits = ilog2(limit);
+
+ if (xen_create_contiguous_region((unsigned long)page_address(pages),
+ order, address_bits) < 0) {
+Index: 10.3-2007-11-26/net/core/dev.c
+===================================================================
+--- 10.3-2007-11-26.orig/net/core/dev.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/net/core/dev.c 2007-10-22 13:53:25.000000000 +0200
+@@ -1473,10 +1473,10 @@ inline int skb_checksum_setup(struct sk_
+ goto out;
+ switch (skb->nh.iph->protocol) {
+ case IPPROTO_TCP:
+- skb->csum = offsetof(struct tcphdr, check);
++ skb->csum_offset = offsetof(struct tcphdr, check);
+ break;
+ case IPPROTO_UDP:
+- skb->csum = offsetof(struct udphdr, check);
++ skb->csum_offset = offsetof(struct udphdr, check);
+ break;
+ default:
+ if (net_ratelimit())
+@@ -1485,7 +1485,7 @@ inline int skb_checksum_setup(struct sk_
+ " %d packet", skb->nh.iph->protocol);
+ goto out;
+ }
+- if ((skb->h.raw + skb->csum + 2) > skb->tail)
++ if ((skb->h.raw + skb->csum_offset + 2) > skb->tail)
+ goto out;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->proto_csum_blank = 0;
diff --git a/trunk/2.6.22/20046_xen3-patch-2.6.21.patch1 b/trunk/2.6.22/20046_xen3-patch-2.6.21.patch1
new file mode 100644
index 0000000..7211fb9
--- /dev/null
+++ b/trunk/2.6.22/20046_xen3-patch-2.6.21.patch1
@@ -0,0 +1,5107 @@
+From: www.kernel.org
+Subject: Linux 2.6.21
+Patch-mainline: 2.6.21
+
+Automatically created from "patches.kernel.org/patch-2.6.21" by xen-port-patches.py
+
+Acked-by: jbeulich@novell.com
+
+Index: 10.3-2007-11-26/arch/i386/Kconfig
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/Kconfig 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/Kconfig 2007-10-22 13:58:46.000000000 +0200
+@@ -21,15 +21,17 @@ config GENERIC_TIME
+ config CLOCKSOURCE_WATCHDOG
+ bool
+ default y
++ depends on !X86_XEN
+
+ config GENERIC_CLOCKEVENTS
+ bool
+ default y
++ depends on !X86_XEN
+
+ config GENERIC_CLOCKEVENTS_BROADCAST
+ bool
+ default y
+- depends on X86_LOCAL_APIC
++ depends on X86_LOCAL_APIC && !X86_XEN
+
+ config LOCKDEP_SUPPORT
+ bool
+Index: 10.3-2007-11-26/arch/i386/kernel/Makefile
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/Makefile 2007-09-03 09:52:56.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/Makefile 2007-10-22 13:58:46.000000000 +0200
+@@ -98,7 +98,7 @@ include $(srctree)/scripts/Makefile.xen
+
+ obj-y += fixup.o
+ microcode-$(subst m,y,$(CONFIG_MICROCODE)) := microcode-xen.o
+-n-obj-xen := i8253.o i8259.o reboot.o smpboot.o trampoline.o tsc.o
++n-obj-xen := i8253.o i8259.o reboot.o smpboot.o trampoline.o tsc.o tsc_sync.o
+
+ obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
+ obj-y := $(call cherrypickxen, $(obj-y))
+Index: 10.3-2007-11-26/arch/i386/kernel/acpi/boot-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/acpi/boot-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/acpi/boot-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -25,6 +25,7 @@
+
+ #include <linux/init.h>
+ #include <linux/acpi.h>
++#include <linux/acpi_pmtmr.h>
+ #include <linux/efi.h>
+ #include <linux/cpumask.h>
+ #include <linux/module.h>
+@@ -66,7 +67,7 @@ static inline int acpi_madt_oem_check(ch
+
+ #define BAD_MADT_ENTRY(entry, end) ( \
+ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
+- ((acpi_table_entry_header *)entry)->length < sizeof(*entry))
++ ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
+
+ #define PREFIX "ACPI: "
+
+@@ -79,7 +80,7 @@ int acpi_ioapic;
+ int acpi_strict;
+ EXPORT_SYMBOL(acpi_strict);
+
+-acpi_interrupt_flags acpi_sci_flags __initdata;
++u8 acpi_sci_flags __initdata;
+ int acpi_sci_override_gsi __initdata;
+ int acpi_skip_timer_override __initdata;
+ int acpi_use_timer_override __initdata;
+@@ -92,11 +93,6 @@ static u64 acpi_lapic_addr __initdata =
+ #warning ACPI uses CMPXCHG, i486 and later hardware
+ #endif
+
+-#define MAX_MADT_ENTRIES 256
+-u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
+- {[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
+-EXPORT_SYMBOL(x86_acpiid_to_apicid);
+-
+ /* --------------------------------------------------------------------------
+ Boot-time Configuration
+ -------------------------------------------------------------------------- */
+@@ -168,30 +164,26 @@ char *__acpi_map_table(unsigned long phy
+
+ #ifdef CONFIG_PCI_MMCONFIG
+ /* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
+-struct acpi_table_mcfg_config *pci_mmcfg_config;
++struct acpi_mcfg_allocation *pci_mmcfg_config;
+ int pci_mmcfg_config_num;
+
+-int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
++int __init acpi_parse_mcfg(struct acpi_table_header *header)
+ {
+ struct acpi_table_mcfg *mcfg;
+ unsigned long i;
+ int config_size;
+
+- if (!phys_addr || !size)
++ if (!header)
+ return -EINVAL;
+
+- mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
+- if (!mcfg) {
+- printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
+- return -ENODEV;
+- }
++ mcfg = (struct acpi_table_mcfg *)header;
+
+ /* how many config structures do we have */
+ pci_mmcfg_config_num = 0;
+- i = size - sizeof(struct acpi_table_mcfg);
+- while (i >= sizeof(struct acpi_table_mcfg_config)) {
++ i = header->length - sizeof(struct acpi_table_mcfg);
++ while (i >= sizeof(struct acpi_mcfg_allocation)) {
+ ++pci_mmcfg_config_num;
+- i -= sizeof(struct acpi_table_mcfg_config);
++ i -= sizeof(struct acpi_mcfg_allocation);
+ };
+ if (pci_mmcfg_config_num == 0) {
+ printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
+@@ -206,9 +198,9 @@ int __init acpi_parse_mcfg(unsigned long
+ return -ENOMEM;
+ }
+
+- memcpy(pci_mmcfg_config, &mcfg->config, config_size);
++ memcpy(pci_mmcfg_config, &mcfg[1], config_size);
+ for (i = 0; i < pci_mmcfg_config_num; ++i) {
+- if (mcfg->config[i].base_reserved) {
++ if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
+ printk(KERN_ERR PREFIX
+ "MMCONFIG not in low 4GB of memory\n");
+ kfree(pci_mmcfg_config);
+@@ -222,24 +214,24 @@ int __init acpi_parse_mcfg(unsigned long
+ #endif /* CONFIG_PCI_MMCONFIG */
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+-static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
++static int __init acpi_parse_madt(struct acpi_table_header *table)
+ {
+ struct acpi_table_madt *madt = NULL;
+
+- if (!phys_addr || !size || !cpu_has_apic)
++ if (!cpu_has_apic)
+ return -EINVAL;
+
+- madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
++ madt = (struct acpi_table_madt *)table;
+ if (!madt) {
+ printk(KERN_WARNING PREFIX "Unable to map MADT\n");
+ return -ENODEV;
+ }
+
+- if (madt->lapic_address) {
+- acpi_lapic_addr = (u64) madt->lapic_address;
++ if (madt->address) {
++ acpi_lapic_addr = (u64) madt->address;
+
+ printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
+- madt->lapic_address);
++ madt->address);
+ }
+
+ acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
+@@ -248,21 +240,17 @@ static int __init acpi_parse_madt(unsign
+ }
+
+ static int __init
+-acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
++acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
+ {
+- struct acpi_table_lapic *processor = NULL;
++ struct acpi_madt_local_apic *processor = NULL;
+
+- processor = (struct acpi_table_lapic *)header;
++ processor = (struct acpi_madt_local_apic *)header;
+
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(header);
+
+- /* Record local apic id only when enabled */
+- if (processor->flags.enabled)
+- x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
+-
+ /*
+ * We need to register disabled CPU as well to permit
+ * counting disabled CPUs. This allows us to size
+@@ -271,18 +259,18 @@ acpi_parse_lapic(acpi_table_entry_header
+ * when we use CPU hotplug.
+ */
+ mp_register_lapic(processor->id, /* APIC ID */
+- processor->flags.enabled); /* Enabled? */
++ processor->lapic_flags & ACPI_MADT_ENABLED); /* Enabled? */
+
+ return 0;
+ }
+
+ static int __init
+-acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
++acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
+ const unsigned long end)
+ {
+- struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
++ struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
+
+- lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
++ lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;
+
+ if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
+ return -EINVAL;
+@@ -293,11 +281,11 @@ acpi_parse_lapic_addr_ovr(acpi_table_ent
+ }
+
+ static int __init
+-acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
++acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
+ {
+- struct acpi_table_lapic_nmi *lapic_nmi = NULL;
++ struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
+
+- lapic_nmi = (struct acpi_table_lapic_nmi *)header;
++ lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;
+
+ if (BAD_MADT_ENTRY(lapic_nmi, end))
+ return -EINVAL;
+@@ -315,11 +303,11 @@ acpi_parse_lapic_nmi(acpi_table_entry_he
+ #ifdef CONFIG_X86_IO_APIC
+
+ static int __init
+-acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
++acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
+ {
+- struct acpi_table_ioapic *ioapic = NULL;
++ struct acpi_madt_io_apic *ioapic = NULL;
+
+- ioapic = (struct acpi_table_ioapic *)header;
++ ioapic = (struct acpi_madt_io_apic *)header;
+
+ if (BAD_MADT_ENTRY(ioapic, end))
+ return -EINVAL;
+@@ -344,11 +332,11 @@ static void __init acpi_sci_ioapic_setup
+ polarity = 3;
+
+ /* Command-line over-ride via acpi_sci= */
+- if (acpi_sci_flags.trigger)
+- trigger = acpi_sci_flags.trigger;
++ if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
++ trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
+
+- if (acpi_sci_flags.polarity)
+- polarity = acpi_sci_flags.polarity;
++ if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
++ polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
+
+ /*
+ * mp_config_acpi_legacy_irqs() already setup IRQs < 16
+@@ -359,51 +347,52 @@ static void __init acpi_sci_ioapic_setup
+
+ /*
+ * stash over-ride to indicate we've been here
+- * and for later update of acpi_fadt
++ * and for later update of acpi_gbl_FADT
+ */
+ acpi_sci_override_gsi = gsi;
+ return;
+ }
+
+ static int __init
+-acpi_parse_int_src_ovr(acpi_table_entry_header * header,
++acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
+ const unsigned long end)
+ {
+- struct acpi_table_int_src_ovr *intsrc = NULL;
++ struct acpi_madt_interrupt_override *intsrc = NULL;
+
+- intsrc = (struct acpi_table_int_src_ovr *)header;
++ intsrc = (struct acpi_madt_interrupt_override *)header;
+
+ if (BAD_MADT_ENTRY(intsrc, end))
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(header);
+
+- if (intsrc->bus_irq == acpi_fadt.sci_int) {
++ if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
+ acpi_sci_ioapic_setup(intsrc->global_irq,
+- intsrc->flags.polarity,
+- intsrc->flags.trigger);
++ intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
++ (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
+ return 0;
+ }
+
+ if (acpi_skip_timer_override &&
+- intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
++ intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+ printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+ return 0;
+ }
+
+- mp_override_legacy_irq(intsrc->bus_irq,
+- intsrc->flags.polarity,
+- intsrc->flags.trigger, intsrc->global_irq);
++ mp_override_legacy_irq(intsrc->source_irq,
++ intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
++ (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
++ intsrc->global_irq);
+
+ return 0;
+ }
+
+ static int __init
+-acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
++acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
+ {
+- struct acpi_table_nmi_src *nmi_src = NULL;
++ struct acpi_madt_nmi_source *nmi_src = NULL;
+
+- nmi_src = (struct acpi_table_nmi_src *)header;
++ nmi_src = (struct acpi_madt_nmi_source *)header;
+
+ if (BAD_MADT_ENTRY(nmi_src, end))
+ return -EINVAL;
+@@ -419,7 +408,7 @@ acpi_parse_nmi_src(acpi_table_entry_head
+
+ /*
+ * acpi_pic_sci_set_trigger()
+- *
++ *
+ * use ELCR to set PIC-mode trigger type for SCI
+ *
+ * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
+@@ -513,7 +502,7 @@ int acpi_map_lsapic(acpi_handle handle,
+ {
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+- struct acpi_table_lapic *lapic;
++ struct acpi_madt_local_apic *lapic;
+ cpumask_t tmp_map, new_map;
+ u8 physid;
+ int cpu;
+@@ -531,10 +520,10 @@ int acpi_map_lsapic(acpi_handle handle,
+ return -EINVAL;
+ }
+
+- lapic = (struct acpi_table_lapic *)obj->buffer.pointer;
++ lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
+
+- if ((lapic->header.type != ACPI_MADT_LAPIC) ||
+- (!lapic->flags.enabled)) {
++ if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
++ !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
+ kfree(buffer.pointer);
+ return -EINVAL;
+ }
+@@ -546,7 +535,7 @@ int acpi_map_lsapic(acpi_handle handle,
+ buffer.pointer = NULL;
+
+ tmp_map = cpu_present_map;
+- mp_register_lapic(physid, lapic->flags.enabled);
++ mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
+
+ /*
+ * If mp_register_lapic successfully generates a new logical cpu
+@@ -568,14 +557,6 @@ EXPORT_SYMBOL(acpi_map_lsapic);
+
+ int acpi_unmap_lsapic(int cpu)
+ {
+- int i;
+-
+- for_each_possible_cpu(i) {
+- if (x86_acpiid_to_apicid[i] == x86_cpu_to_apicid[cpu]) {
+- x86_acpiid_to_apicid[i] = -1;
+- break;
+- }
+- }
+ x86_cpu_to_apicid[cpu] = -1;
+ cpu_clear(cpu, cpu_present_map);
+ num_processors--;
+@@ -622,42 +603,37 @@ acpi_scan_rsdp(unsigned long start, unsi
+ return 0;
+ }
+
+-static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
++static int __init acpi_parse_sbf(struct acpi_table_header *table)
+ {
+- struct acpi_table_sbf *sb;
+-
+- if (!phys_addr || !size)
+- return -EINVAL;
++ struct acpi_table_boot *sb;
+
+- sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
++ sb = (struct acpi_table_boot *)table;
+ if (!sb) {
+ printk(KERN_WARNING PREFIX "Unable to map SBF\n");
+ return -ENODEV;
+ }
+
+- sbf_port = sb->sbf_cmos; /* Save CMOS port */
++ sbf_port = sb->cmos_index; /* Save CMOS port */
+
+ return 0;
+ }
+
+ #ifdef CONFIG_HPET_TIMER
++#include <asm/hpet.h>
+
+-static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
++static int __init acpi_parse_hpet(struct acpi_table_header *table)
+ {
+ struct acpi_table_hpet *hpet_tbl;
+ struct resource *hpet_res;
+ resource_size_t res_start;
+
+- if (!phys || !size)
+- return -EINVAL;
+-
+- hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
++ hpet_tbl = (struct acpi_table_hpet *)table;
+ if (!hpet_tbl) {
+ printk(KERN_WARNING PREFIX "Unable to map HPET\n");
+ return -ENODEV;
+ }
+
+- if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
++ if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
+ printk(KERN_WARNING PREFIX "HPET timers must be located in "
+ "memory.\n");
+ return -1;
+@@ -670,29 +646,15 @@ static int __init acpi_parse_hpet(unsign
+ hpet_res->name = (void *)&hpet_res[1];
+ hpet_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE,
+- "HPET %u", hpet_tbl->number);
++ "HPET %u", hpet_tbl->sequence);
+ hpet_res->end = (1 * 1024) - 1;
+ }
+
+-#ifdef CONFIG_X86_64
+- vxtime.hpet_address = hpet_tbl->addr.addrl |
+- ((long)hpet_tbl->addr.addrh << 32);
+-
++ hpet_address = hpet_tbl->address.address;
+ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
+- hpet_tbl->id, vxtime.hpet_address);
+-
+- res_start = vxtime.hpet_address;
+-#else /* X86 */
+- {
+- extern unsigned long hpet_address;
+-
+- hpet_address = hpet_tbl->addr.addrl;
+- printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
+- hpet_tbl->id, hpet_address);
++ hpet_tbl->id, hpet_address);
+
+- res_start = hpet_address;
+- }
+-#endif /* X86 */
++ res_start = hpet_address;
+
+ if (hpet_res) {
+ hpet_res->start = res_start;
+@@ -706,46 +668,28 @@ static int __init acpi_parse_hpet(unsign
+ #define acpi_parse_hpet NULL
+ #endif
+
+-#ifdef CONFIG_X86_PM_TIMER
+-extern u32 pmtmr_ioport;
+-#endif
+-
+-static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
++static int __init acpi_parse_fadt(struct acpi_table_header *table)
+ {
+- struct fadt_descriptor *fadt = NULL;
+-
+- fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
+- if (!fadt) {
+- printk(KERN_WARNING PREFIX "Unable to map FADT\n");
+- return 0;
+- }
+- /* initialize sci_int early for INT_SRC_OVR MADT parsing */
+- acpi_fadt.sci_int = fadt->sci_int;
+
+- /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
+- acpi_fadt.revision = fadt->revision;
+- acpi_fadt.force_apic_physical_destination_mode =
+- fadt->force_apic_physical_destination_mode;
+-
+-#if defined(CONFIG_X86_PM_TIMER) && !defined(CONFIG_XEN)
++#ifdef CONFIG_X86_PM_TIMER
+ /* detect the location of the ACPI PM Timer */
+- if (fadt->revision >= FADT2_REVISION_ID) {
++ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
+ /* FADT rev. 2 */
+- if (fadt->xpm_tmr_blk.address_space_id !=
++ if (acpi_gbl_FADT.xpm_timer_block.space_id !=
+ ACPI_ADR_SPACE_SYSTEM_IO)
+ return 0;
+
+- pmtmr_ioport = fadt->xpm_tmr_blk.address;
++ pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
+ /*
+ * "X" fields are optional extensions to the original V1.0
+ * fields, so we must selectively expand V1.0 fields if the
+ * corresponding X field is zero.
+ */
+ if (!pmtmr_ioport)
+- pmtmr_ioport = fadt->V1_pm_tmr_blk;
++ pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
+ } else {
+ /* FADT rev. 1 */
+- pmtmr_ioport = fadt->V1_pm_tmr_blk;
++ pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
+ }
+ if (pmtmr_ioport)
+ printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
+@@ -787,13 +731,13 @@ static int __init acpi_parse_madt_lapic_
+ if (!cpu_has_apic)
+ return -ENODEV;
+
+- /*
++ /*
+ * Note that the LAPIC address is obtained from the MADT (32-bit value)
+ * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
+ */
+
+ count =
+- acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
++ acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
+ acpi_parse_lapic_addr_ovr, 0);
+ if (count < 0) {
+ printk(KERN_ERR PREFIX
+@@ -803,7 +747,7 @@ static int __init acpi_parse_madt_lapic_
+
+ mp_register_lapic_address(acpi_lapic_addr);
+
+- count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
++ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic,
+ MAX_APICS);
+ if (!count) {
+ printk(KERN_ERR PREFIX "No LAPIC entries present\n");
+@@ -816,7 +760,7 @@ static int __init acpi_parse_madt_lapic_
+ }
+
+ count =
+- acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
++ acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0);
+ if (count < 0) {
+ printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+@@ -845,7 +789,7 @@ static int __init acpi_parse_madt_ioapic
+ return -ENODEV;
+ }
+
+- if (!cpu_has_apic)
++ if (!cpu_has_apic)
+ return -ENODEV;
+
+ /*
+@@ -858,7 +802,7 @@ static int __init acpi_parse_madt_ioapic
+ }
+
+ count =
+- acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic,
++ acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
+ MAX_IO_APICS);
+ if (!count) {
+ printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
+@@ -869,7 +813,7 @@ static int __init acpi_parse_madt_ioapic
+ }
+
+ count =
+- acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr,
++ acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
+ NR_IRQ_VECTORS);
+ if (count < 0) {
+ printk(KERN_ERR PREFIX
+@@ -883,13 +827,13 @@ static int __init acpi_parse_madt_ioapic
+ * pretend we got one so we can set the SCI flags.
+ */
+ if (!acpi_sci_override_gsi)
+- acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
++ acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
+
+ /* Fill in identity legacy mapings where no override */
+ mp_config_acpi_legacy_irqs();
+
+ count =
+- acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src,
++ acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
+ NR_IRQ_VECTORS);
+ if (count < 0) {
+ printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
+@@ -909,10 +853,9 @@ static inline int acpi_parse_madt_ioapic
+ static void __init acpi_process_madt(void)
+ {
+ #ifdef CONFIG_X86_LOCAL_APIC
+- int count, error;
++ int error;
+
+- count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
+- if (count >= 1) {
++ if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
+
+ /*
+ * Parse MADT LAPIC entries
+@@ -1132,7 +1075,28 @@ static struct dmi_system_id __initdata a
+ "ASUS A7V ACPI BIOS Revision 1007"),
+ },
+ },
+-
++ {
++ /*
++ * Latest BIOS for IBM 600E (1.16) has bad pcinum
++ * for LPC bridge, which is needed for the PCI
++ * interrupt links to work. DSDT fix is in bug 5966.
++ * 2645, 2646 model numbers are shared with 600/600E/600X
++ */
++ .callback = disable_acpi_irq,
++ .ident = "IBM Thinkpad 600 Series 2645",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_BOARD_NAME, "2645"),
++ },
++ },
++ {
++ .callback = disable_acpi_irq,
++ .ident = "IBM Thinkpad 600 Series 2646",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_BOARD_NAME, "2646"),
++ },
++ },
+ /*
+ * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
+ */
+@@ -1198,7 +1162,7 @@ int __init acpi_boot_table_init(void)
+ if (acpi_disabled && !acpi_ht)
+ return 1;
+
+- /*
++ /*
+ * Initialize the ACPI boot-time table parser.
+ */
+ error = acpi_table_init();
+@@ -1207,7 +1171,7 @@ int __init acpi_boot_table_init(void)
+ return error;
+ }
+
+- acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
++ acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
+
+ /*
+ * blacklist may disable ACPI entirely
+@@ -1235,19 +1199,19 @@ int __init acpi_boot_init(void)
+ if (acpi_disabled && !acpi_ht)
+ return 1;
+
+- acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
++ acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
+
+ /*
+ * set sci_int and PM timer address
+ */
+- acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
++ acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
+
+ /*
+ * Process the Multiple APIC Description Table (MADT), if present
+ */
+ acpi_process_madt();
+
+- acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
++ acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
+
+ return 0;
+ }
+@@ -1318,13 +1282,17 @@ static int __init setup_acpi_sci(char *s
+ if (!s)
+ return -EINVAL;
+ if (!strcmp(s, "edge"))
+- acpi_sci_flags.trigger = 1;
++ acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE |
++ (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
+ else if (!strcmp(s, "level"))
+- acpi_sci_flags.trigger = 3;
++ acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
++ (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
+ else if (!strcmp(s, "high"))
+- acpi_sci_flags.polarity = 1;
++ acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
++ (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
+ else if (!strcmp(s, "low"))
+- acpi_sci_flags.polarity = 3;
++ acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
++ (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
+ else
+ return -EINVAL;
+ return 0;
+Index: 10.3-2007-11-26/arch/i386/kernel/apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/apic-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/apic-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -25,6 +25,8 @@
+ #include <linux/kernel_stat.h>
+ #include <linux/sysdev.h>
+ #include <linux/cpu.h>
++#include <linux/clockchips.h>
++#include <linux/acpi_pmtmr.h>
+ #include <linux/module.h>
+
+ #include <asm/atomic.h>
+@@ -56,83 +58,26 @@ static cpumask_t timer_bcast_ipi;
+ */
+
+ /*
+- * Debug level
++ * Debug level, exported for io_apic.c
+ */
+ int apic_verbosity;
+
+ #ifndef CONFIG_XEN
+ static int modern_apic(void)
+ {
+- unsigned int lvr, version;
+ /* AMD systems use old APIC versions, so check the CPU */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+- boot_cpu_data.x86 >= 0xf)
++ boot_cpu_data.x86 >= 0xf)
+ return 1;
+- lvr = apic_read(APIC_LVR);
+- version = GET_APIC_VERSION(lvr);
+- return version >= 0x14;
++ return lapic_get_version() >= 0x14;
+ }
+ #endif /* !CONFIG_XEN */
+
+-/*
+- * 'what should we do if we get a hw irq event on an illegal vector'.
+- * each architecture has to answer this themselves.
+- */
+-void ack_bad_irq(unsigned int irq)
+-{
+- printk("unexpected IRQ trap at vector %02x\n", irq);
+- /*
+- * Currently unexpected vectors happen only on SMP and APIC.
+- * We _must_ ack these because every local APIC has only N
+- * irq slots per priority level, and a 'hanging, unacked' IRQ
+- * holds up an irq slot - in excessive cases (when multiple
+- * unexpected vectors occur) that might lock up the APIC
+- * completely.
+- * But only ack when the APIC is enabled -AK
+- */
+- if (cpu_has_apic)
+- ack_APIC_irq();
+-}
+-
+ int get_physical_broadcast(void)
+ {
+ return 0xff;
+ }
+
+-#ifndef CONFIG_XEN
+-#ifndef CONFIG_SMP
+-static void up_apic_timer_interrupt_call(void)
+-{
+- int cpu = smp_processor_id();
+-
+- /*
+- * the NMI deadlock-detector uses this.
+- */
+- per_cpu(irq_stat, cpu).apic_timer_irqs++;
+-
+- smp_local_timer_interrupt();
+-}
+-#endif
+-
+-void smp_send_timer_broadcast_ipi(void)
+-{
+- cpumask_t mask;
+-
+- cpus_and(mask, cpu_online_map, timer_bcast_ipi);
+- if (!cpus_empty(mask)) {
+-#ifdef CONFIG_SMP
+- send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+-#else
+- /*
+- * We can directly call the apic timer interrupt handler
+- * in UP case. Minus all irq related functions
+- */
+- up_apic_timer_interrupt_call();
+-#endif
+- }
+-}
+-#endif
+-
+ int setup_profiling_timer(unsigned int multiplier)
+ {
+ return -EINVAL;
+Index: 10.3-2007-11-26/arch/i386/kernel/cpu/common-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/cpu/common-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/cpu/common-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -610,7 +610,7 @@ void __init early_cpu_init(void)
+ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
+ {
+ memset(regs, 0, sizeof(struct pt_regs));
+- regs->xgs = __KERNEL_PDA;
++ regs->xfs = __KERNEL_PDA;
+ return regs;
+ }
+
+@@ -667,12 +667,12 @@ struct i386_pda boot_pda = {
+ .pcurrent = &init_task,
+ };
+
+-static inline void set_kernel_gs(void)
++static inline void set_kernel_fs(void)
+ {
+- /* Set %gs for this CPU's PDA. Memory clobber is to create a
++ /* Set %fs for this CPU's PDA. Memory clobber is to create a
+ barrier with respect to any PDA operations, so the compiler
+ doesn't move any before here. */
+- asm volatile ("mov %0, %%gs" : : "r" (__KERNEL_PDA) : "memory");
++ asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_PDA) : "memory");
+ }
+
+ /* Initialize the CPU's GDT and PDA. The boot CPU does this for
+@@ -730,7 +730,7 @@ void __cpuinit cpu_set_gdt(int cpu)
+ }
+ BUG_ON(HYPERVISOR_set_gdt(frames, cpu_gdt_descr->size / 8));
+
+- set_kernel_gs();
++ set_kernel_fs();
+ }
+
+ /* Common CPU init for both boot and secondary CPUs */
+@@ -775,8 +775,8 @@ static void __cpuinit _cpu_init(int cpu,
+ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
+ #endif
+
+- /* Clear %fs. */
+- asm volatile ("mov %0, %%fs" : : "r" (0));
++ /* Clear %gs. */
++ asm volatile ("mov %0, %%gs" : : "r" (0));
+
+ /* Clear all 6 debug registers: */
+ set_debugreg(0, 0);
+Index: 10.3-2007-11-26/arch/i386/kernel/e820-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/e820-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/e820-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -14,6 +14,7 @@
+ #include <asm/pgtable.h>
+ #include <asm/page.h>
+ #include <asm/e820.h>
++#include <asm/setup.h>
+ #include <xen/interface/memory.h>
+
+ #ifdef CONFIG_EFI
+@@ -157,21 +158,22 @@ static struct resource standard_io_resou
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+ } };
+
+-static int romsignature(const unsigned char *x)
++#define ROMSIGNATURE 0xaa55
++
++static int __init romsignature(const unsigned char *rom)
+ {
+ unsigned short sig;
+- int ret = 0;
+- if (probe_kernel_address((const unsigned short *)x, sig) == 0)
+- ret = (sig == 0xaa55);
+- return ret;
++
++ return probe_kernel_address((const unsigned short *)rom, sig) == 0 &&
++ sig == ROMSIGNATURE;
+ }
+
+ static int __init romchecksum(unsigned char *rom, unsigned long length)
+ {
+- unsigned char *p, sum = 0;
++ unsigned char sum;
+
+- for (p = rom; p < rom + length; p++)
+- sum += *p;
++ for (sum = 0; length; length--)
++ sum += *rom++;
+ return sum == 0;
+ }
+
+Index: 10.3-2007-11-26/arch/i386/kernel/entry-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/entry-xen.S 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/entry-xen.S 2007-10-22 13:58:46.000000000 +0200
+@@ -30,7 +30,7 @@
+ * 18(%esp) - %eax
+ * 1C(%esp) - %ds
+ * 20(%esp) - %es
+- * 24(%esp) - %gs
++ * 24(%esp) - %fs
+ * 28(%esp) - orig_eax
+ * 2C(%esp) - %eip
+ * 30(%esp) - %cs
+@@ -102,9 +102,9 @@ NMI_MASK = 0x80000000
+
+ #define SAVE_ALL \
+ cld; \
+- pushl %gs; \
++ pushl %fs; \
+ CFI_ADJUST_CFA_OFFSET 4;\
+- /*CFI_REL_OFFSET gs, 0;*/\
++ /*CFI_REL_OFFSET fs, 0;*/\
+ pushl %es; \
+ CFI_ADJUST_CFA_OFFSET 4;\
+ /*CFI_REL_OFFSET es, 0;*/\
+@@ -136,7 +136,7 @@ NMI_MASK = 0x80000000
+ movl %edx, %ds; \
+ movl %edx, %es; \
+ movl $(__KERNEL_PDA), %edx; \
+- movl %edx, %gs
++ movl %edx, %fs
+
+ #define RESTORE_INT_REGS \
+ popl %ebx; \
+@@ -169,9 +169,9 @@ NMI_MASK = 0x80000000
+ 2: popl %es; \
+ CFI_ADJUST_CFA_OFFSET -4;\
+ /*CFI_RESTORE es;*/\
+-3: popl %gs; \
++3: popl %fs; \
+ CFI_ADJUST_CFA_OFFSET -4;\
+- /*CFI_RESTORE gs;*/\
++ /*CFI_RESTORE fs;*/\
+ .pushsection .fixup,"ax"; \
+ 4: movl $0,(%esp); \
+ jmp 1b; \
+@@ -230,6 +230,7 @@ ENTRY(ret_from_fork)
+ CFI_ADJUST_CFA_OFFSET -4
+ jmp syscall_exit
+ CFI_ENDPROC
++END(ret_from_fork)
+
+ /*
+ * Return to user mode is not as complex as all this looks,
+@@ -261,6 +262,7 @@ ENTRY(resume_userspace)
+ # int/exception return?
+ jne work_pending
+ jmp restore_all
++END(ret_from_exception)
+
+ #ifdef CONFIG_PREEMPT
+ ENTRY(resume_kernel)
+@@ -275,6 +277,7 @@ need_resched:
+ jz restore_all
+ call preempt_schedule_irq
+ jmp need_resched
++END(resume_kernel)
+ #endif
+ CFI_ENDPROC
+
+@@ -352,16 +355,17 @@ sysenter_past_esp:
+ movl PT_OLDESP(%esp), %ecx
+ xorl %ebp,%ebp
+ TRACE_IRQS_ON
+-1: mov PT_GS(%esp), %gs
++1: mov PT_FS(%esp), %fs
+ ENABLE_INTERRUPTS_SYSEXIT
+ CFI_ENDPROC
+ .pushsection .fixup,"ax"
+-2: movl $0,PT_GS(%esp)
++2: movl $0,PT_FS(%esp)
+ jmp 1b
+ .section __ex_table,"a"
+ .align 4
+ .long 1b,2b
+ .popsection
++ENDPROC(sysenter_entry)
+
+ # system call handler stub
+ ENTRY(system_call)
+@@ -507,6 +511,7 @@ hypervisor_iret:
+ jmp hypercall_page + (__HYPERVISOR_iret * 32)
+ #endif
+ CFI_ENDPROC
++ENDPROC(system_call)
+
+ # perform work that needs to be done immediately before resumption
+ ALIGN
+@@ -552,6 +557,7 @@ work_notifysig_v86:
+ xorl %edx, %edx
+ call do_notify_resume
+ jmp resume_userspace_sig
++END(work_pending)
+
+ # perform syscall exit tracing
+ ALIGN
+@@ -567,6 +573,7 @@ syscall_trace_entry:
+ cmpl $(nr_syscalls), %eax
+ jnae syscall_call
+ jmp syscall_exit
++END(syscall_trace_entry)
+
+ # perform syscall exit tracing
+ ALIGN
+@@ -580,6 +587,7 @@ syscall_exit_work:
+ movl $1, %edx
+ call do_syscall_trace
+ jmp resume_userspace
++END(syscall_exit_work)
+ CFI_ENDPROC
+
+ RING0_INT_FRAME # can't unwind into user space anyway
+@@ -590,16 +598,18 @@ syscall_fault:
+ GET_THREAD_INFO(%ebp)
+ movl $-EFAULT,PT_EAX(%esp)
+ jmp resume_userspace
++END(syscall_fault)
+
+ syscall_badsys:
+ movl $-ENOSYS,PT_EAX(%esp)
+ jmp resume_userspace
++END(syscall_badsys)
+ CFI_ENDPROC
+
+ #ifndef CONFIG_XEN
+ #define FIXUP_ESPFIX_STACK \
+ /* since we are on a wrong stack, we cant make it a C code :( */ \
+- movl %gs:PDA_cpu, %ebx; \
++ movl %fs:PDA_cpu, %ebx; \
+ PER_CPU(cpu_gdt_descr, %ebx); \
+ movl GDS_address(%ebx), %ebx; \
+ GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
+@@ -630,9 +640,9 @@ syscall_badsys:
+ ENTRY(interrupt)
+ .text
+
+-vector=0
+ ENTRY(irq_entries_start)
+ RING0_INT_FRAME
++vector=0
+ .rept NR_IRQS
+ ALIGN
+ .if vector
+@@ -641,11 +651,16 @@ ENTRY(irq_entries_start)
+ 1: pushl $~(vector)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp common_interrupt
+-.data
++ .previous
+ .long 1b
+-.text
++ .text
+ vector=vector+1
+ .endr
++END(irq_entries_start)
++
++.previous
++END(interrupt)
++.previous
+
+ /*
+ * the CPU automatically disables interrupts when executing an IRQ vector,
+@@ -658,6 +673,7 @@ common_interrupt:
+ movl %esp,%eax
+ call do_IRQ
+ jmp ret_from_intr
++ENDPROC(common_interrupt)
+ CFI_ENDPROC
+
+ #define BUILD_INTERRUPT(name, nr) \
+@@ -670,10 +686,16 @@ ENTRY(name) \
+ movl %esp,%eax; \
+ call smp_/**/name; \
+ jmp ret_from_intr; \
+- CFI_ENDPROC
++ CFI_ENDPROC; \
++ENDPROC(name)
+
+ /* The include is where all of the SMP etc. interrupts come from */
+ #include "entry_arch.h"
++
++/* This alternate entry is needed because we hijack the apic LVTT */
++#if defined(CONFIG_VMI) && defined(CONFIG_X86_LOCAL_APIC)
++BUILD_INTERRUPT(apic_vmi_timer_interrupt,LOCAL_TIMER_VECTOR)
++#endif
+ #else
+ #define UNWIND_ESPFIX_STACK
+ #endif
+@@ -684,7 +706,7 @@ KPROBE_ENTRY(page_fault)
+ CFI_ADJUST_CFA_OFFSET 4
+ ALIGN
+ error_code:
+- /* the function address is in %gs's slot on the stack */
++ /* the function address is in %fs's slot on the stack */
+ pushl %es
+ CFI_ADJUST_CFA_OFFSET 4
+ /*CFI_REL_OFFSET es, 0*/
+@@ -713,20 +735,20 @@ error_code:
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ebx, 0
+ cld
+- pushl %gs
++ pushl %fs
+ CFI_ADJUST_CFA_OFFSET 4
+- /*CFI_REL_OFFSET gs, 0*/
++ /*CFI_REL_OFFSET fs, 0*/
+ movl $(__KERNEL_PDA), %ecx
+- movl %ecx, %gs
++ movl %ecx, %fs
+ UNWIND_ESPFIX_STACK
+ popl %ecx
+ CFI_ADJUST_CFA_OFFSET -4
+ /*CFI_REGISTER es, ecx*/
+- movl PT_GS(%esp), %edi # get the function address
++ movl PT_FS(%esp), %edi # get the function address
+ movl PT_ORIG_EAX(%esp), %edx # get the error code
+ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
+- mov %ecx, PT_GS(%esp)
+- /*CFI_REL_OFFSET gs, ES*/
++ mov %ecx, PT_FS(%esp)
++ /*CFI_REL_OFFSET fs, ES*/
+ movl $(__USER_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
+@@ -813,7 +835,7 @@ critical_fixup_table:
+ .byte 0x18 # pop %eax
+ .byte 0x1c # pop %ds
+ .byte 0x20 # pop %es
+- .byte 0x24,0x24 # pop %gs
++ .byte 0x24,0x24 # pop %fs
+ .byte 0x28,0x28,0x28 # add $4,%esp
+ .byte 0x2c # iret
+ .byte 0xff,0xff,0xff,0xff # movb $1,1(%esi)
+@@ -879,6 +901,7 @@ ENTRY(coprocessor_error)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
++END(coprocessor_error)
+
+ ENTRY(simd_coprocessor_error)
+ RING0_INT_FRAME
+@@ -888,6 +911,7 @@ ENTRY(simd_coprocessor_error)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
++END(simd_coprocessor_error)
+
+ ENTRY(device_not_available)
+ RING0_INT_FRAME
+@@ -910,6 +934,7 @@ device_available_emulate:
+ call math_state_restore
+ jmp ret_from_exception
+ CFI_ENDPROC
++END(device_not_available)
+
+ #ifndef CONFIG_XEN
+ /*
+@@ -1071,10 +1096,12 @@ ENTRY(native_iret)
+ .align 4
+ .long 1b,iret_exc
+ .previous
++END(native_iret)
+
+ ENTRY(native_irq_enable_sysexit)
+ sti
+ sysexit
++END(native_irq_enable_sysexit)
+ #endif
+
+ KPROBE_ENTRY(int3)
+@@ -1097,6 +1124,7 @@ ENTRY(overflow)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
++END(overflow)
+
+ ENTRY(bounds)
+ RING0_INT_FRAME
+@@ -1106,6 +1134,7 @@ ENTRY(bounds)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
++END(bounds)
+
+ ENTRY(invalid_op)
+ RING0_INT_FRAME
+@@ -1115,6 +1144,7 @@ ENTRY(invalid_op)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
++END(invalid_op)
+
+ ENTRY(coprocessor_segment_overrun)
+ RING0_INT_FRAME
+@@ -1124,6 +1154,7 @@ ENTRY(coprocessor_segment_overrun)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
++END(coprocessor_segment_overrun)
+
+ ENTRY(invalid_TSS)
+ RING0_EC_FRAME
+@@ -1131,6 +1162,7 @@ ENTRY(invalid_TSS)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
++END(invalid_TSS)
+
+ ENTRY(segment_not_present)
+ RING0_EC_FRAME
+@@ -1138,6 +1170,7 @@ ENTRY(segment_not_present)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
++END(segment_not_present)
+
+ ENTRY(stack_segment)
+ RING0_EC_FRAME
+@@ -1145,6 +1178,7 @@ ENTRY(stack_segment)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
++END(stack_segment)
+
+ KPROBE_ENTRY(general_protection)
+ RING0_EC_FRAME
+@@ -1160,6 +1194,7 @@ ENTRY(alignment_check)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
++END(alignment_check)
+
+ ENTRY(divide_error)
+ RING0_INT_FRAME
+@@ -1169,6 +1204,7 @@ ENTRY(divide_error)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
++END(divide_error)
+
+ #ifdef CONFIG_X86_MCE
+ ENTRY(machine_check)
+@@ -1179,6 +1215,7 @@ ENTRY(machine_check)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
++END(machine_check)
+ #endif
+
+ #ifndef CONFIG_XEN
+@@ -1198,6 +1235,7 @@ ENTRY(fixup_4gb_segment)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
++END(spurious_interrupt_bug)
+
+ ENTRY(kernel_thread_helper)
+ pushl $0 # fake return address for unwinder
+Index: 10.3-2007-11-26/arch/i386/kernel/head-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/head-xen.S 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/head-xen.S 2007-10-22 13:58:46.000000000 +0200
+@@ -27,6 +27,7 @@
+ #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
+ #define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
+
++.section .text.head,"ax",@progbits
+ #define VIRT_ENTRY_OFFSET 0x0
+ .org VIRT_ENTRY_OFFSET
+ ENTRY(startup_32)
+@@ -60,11 +61,11 @@ ENTRY(startup_32)
+
+ movb $1,X86_HARD_MATH
+
+- xorl %eax,%eax # Clear FS
+- movl %eax,%fs
++ xorl %eax,%eax # Clear GS
++ movl %eax,%gs
+
+ movl $(__KERNEL_PDA),%eax
+- mov %eax,%gs
++ mov %eax,%fs
+
+ cld # gcc2 wants the direction flag cleared at all times
+
+@@ -75,7 +76,7 @@ ENTRY(startup_32)
+ * Point the GDT at this CPU's PDA. This will be
+ * cpu_gdt_table and boot_pda.
+ */
+-setup_pda:
++ENTRY(setup_pda)
+ /* get the PDA pointer */
+ movl $boot_pda, %eax
+
+Index: 10.3-2007-11-26/arch/i386/kernel/io_apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/io_apic-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/io_apic-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -164,7 +164,7 @@ static inline void io_apic_write(unsigne
+ */
+ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
+ {
+- volatile struct io_apic *io_apic = io_apic_base(apic);
++ volatile struct io_apic __iomem *io_apic = io_apic_base(apic);
+ if (sis_apic_bug)
+ writel(reg, &io_apic->index);
+ writel(value, &io_apic->data);
+@@ -387,7 +387,7 @@ static void set_ioapic_affinity_irq(unsi
+ break;
+ entry = irq_2_pin + entry->next;
+ }
+- set_native_irq_info(irq, cpumask);
++ irq_desc[irq].affinity = cpumask;
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+
+@@ -526,8 +526,8 @@ static void do_irq_balance(void)
+ package_index = CPU_TO_PACKAGEINDEX(i);
+ for (j = 0; j < NR_IRQS; j++) {
+ unsigned long value_now, delta;
+- /* Is this an active IRQ? */
+- if (!irq_desc[j].action)
++ /* Is this an active IRQ or balancing disabled ? */
++ if (!irq_desc[j].action || irq_balancing_disabled(j))
+ continue;
+ if ( package_index == i )
+ IRQ_DELTA(package_index,j) = 0;
+@@ -780,7 +780,7 @@ failed:
+ return 0;
+ }
+
+-int __init irqbalance_disable(char *str)
++int __devinit irqbalance_disable(char *str)
+ {
+ irqbalance_disabled = 1;
+ return 1;
+@@ -1319,11 +1319,9 @@ static void ioapic_register_intr(int irq
+ trigger == IOAPIC_LEVEL)
+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
+ handle_fasteoi_irq, "fasteoi");
+- else {
+- irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
++ else
+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
+ handle_edge_irq, "edge");
+- }
+ set_intr_gate(vector, interrupt[irq]);
+ }
+ #else
+@@ -1397,7 +1395,6 @@ static void __init setup_IO_APIC_irqs(vo
+ }
+ spin_lock_irqsave(&ioapic_lock, flags);
+ __ioapic_write_entry(apic, pin, entry);
+- set_native_irq_info(irq, TARGET_CPUS);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+ }
+@@ -1630,7 +1627,7 @@ void /*__init*/ print_local_APIC(void *
+ v = apic_read(APIC_LVR);
+ printk(KERN_INFO "... APIC VERSION: %08x\n", v);
+ ver = GET_APIC_VERSION(v);
+- maxlvt = get_maxlvt();
++ maxlvt = lapic_get_maxlvt();
+
+ v = apic_read(APIC_TASKPRI);
+ printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
+@@ -1969,7 +1966,7 @@ static void __init setup_ioapic_ids_from
+ #endif
+
+ #ifndef CONFIG_XEN
+-static int no_timer_check __initdata;
++int no_timer_check __initdata;
+
+ static int __init notimercheck(char *s)
+ {
+@@ -2362,7 +2359,7 @@ static inline void __init check_timer(vo
+
+ disable_8259A_irq(0);
+ set_irq_chip_and_handler_name(0, &lapic_chip, handle_fasteoi_irq,
+- "fasteio");
++ "fasteoi");
+ apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
+ enable_8259A_irq(0);
+
+@@ -2655,7 +2652,7 @@ static void set_msi_irq_affinity(unsigne
+ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+ write_msi_msg(irq, &msg);
+- set_native_irq_info(irq, mask);
++ irq_desc[irq].affinity = mask;
+ }
+ #endif /* CONFIG_SMP */
+
+@@ -2674,25 +2671,32 @@ static struct irq_chip msi_chip = {
+ .retrigger = ioapic_retrigger_irq,
+ };
+
+-int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
++int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
+ {
+ struct msi_msg msg;
+- int ret;
++ int irq, ret;
++ irq = create_irq();
++ if (irq < 0)
++ return irq;
++
++ set_irq_msi(irq, desc);
+ ret = msi_compose_msg(dev, irq, &msg);
+- if (ret < 0)
++ if (ret < 0) {
++ destroy_irq(irq);
+ return ret;
++ }
+
+ write_msi_msg(irq, &msg);
+
+ set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
+ "edge");
+
+- return 0;
++ return irq;
+ }
+
+ void arch_teardown_msi_irq(unsigned int irq)
+ {
+- return;
++ destroy_irq(irq);
+ }
+
+ #endif /* CONFIG_PCI_MSI */
+@@ -2732,7 +2736,7 @@ static void set_ht_irq_affinity(unsigned
+ dest = cpu_mask_to_apicid(mask);
+
+ target_ht_irq(irq, dest);
+- set_native_irq_info(irq, mask);
++ irq_desc[irq].affinity = mask;
+ }
+ #endif
+
+@@ -2940,7 +2944,6 @@ int io_apic_set_pci_routing (int ioapic,
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ __ioapic_write_entry(ioapic, pin, entry);
+- set_native_irq_info(irq, TARGET_CPUS);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ return 0;
+Index: 10.3-2007-11-26/arch/i386/kernel/irq-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/irq-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/irq-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -10,7 +10,6 @@
+ * io_apic.c.)
+ */
+
+-#include <asm/uaccess.h>
+ #include <linux/module.h>
+ #include <linux/seq_file.h>
+ #include <linux/interrupt.h>
+@@ -19,19 +18,34 @@
+ #include <linux/cpu.h>
+ #include <linux/delay.h>
+
++#include <asm/apic.h>
++#include <asm/uaccess.h>
++
+ DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
+ EXPORT_PER_CPU_SYMBOL(irq_stat);
+
+-#ifndef CONFIG_X86_LOCAL_APIC
+ /*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves.
+ */
+ void ack_bad_irq(unsigned int irq)
+ {
+- printk("unexpected IRQ trap at vector %02x\n", irq);
+-}
++ printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
++
++#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
++ /*
++ * Currently unexpected vectors happen only on SMP and APIC.
++ * We _must_ ack these because every local APIC has only N
++ * irq slots per priority level, and a 'hanging, unacked' IRQ
++ * holds up an irq slot - in excessive cases (when multiple
++ * unexpected vectors occur) that might lock up the APIC
++ * completely.
++ * But only ack when the APIC is enabled -AK
++ */
++ if (cpu_has_apic)
++ ack_APIC_irq();
+ #endif
++}
+
+ #ifdef CONFIG_4KSTACKS
+ /*
+Index: 10.3-2007-11-26/arch/i386/kernel/microcode-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/microcode-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/microcode-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -108,7 +108,7 @@ static ssize_t microcode_write (struct f
+ return ret;
+ }
+
+-static struct file_operations microcode_fops = {
++static const struct file_operations microcode_fops = {
+ .owner = THIS_MODULE,
+ .write = microcode_write,
+ .open = microcode_open,
+Index: 10.3-2007-11-26/arch/i386/kernel/mpparse-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/mpparse-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/mpparse-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -1079,7 +1079,7 @@ int mp_register_gsi(u32 gsi, int trigger
+ static int gsi_to_irq[MAX_GSI_NUM];
+
+ /* Don't set up the ACPI SCI because it's already set up */
+- if (acpi_fadt.sci_int == gsi)
++ if (acpi_gbl_FADT.sci_interrupt == gsi)
+ return gsi;
+
+ ioapic = mp_find_ioapic(gsi);
+@@ -1136,7 +1136,7 @@ int mp_register_gsi(u32 gsi, int trigger
+ /*
+ * Don't assign IRQ used by ACPI SCI
+ */
+- if (gsi == acpi_fadt.sci_int)
++ if (gsi == acpi_gbl_FADT.sci_interrupt)
+ gsi = pci_irq++;
+ gsi_to_irq[irq] = gsi;
+ } else {
+Index: 10.3-2007-11-26/arch/i386/kernel/pci-dma-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/pci-dma-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/pci-dma-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -284,7 +284,7 @@ int dma_declare_coherent_memory(struct d
+ return DMA_MEMORY_IO;
+
+ free1_out:
+- kfree(dev->dma_mem->bitmap);
++ kfree(dev->dma_mem);
+ out:
+ if (mem_base)
+ iounmap(mem_base);
+Index: 10.3-2007-11-26/arch/i386/kernel/pcspeaker.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/pcspeaker.c 2007-12-06 17:27:31.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/pcspeaker.c 2007-10-22 13:58:46.000000000 +0200
+@@ -7,6 +7,11 @@ static __init int add_pcspkr(void)
+ struct platform_device *pd;
+ int ret;
+
++#ifdef CONFIG_XEN
++ if (!is_initial_xendomain())
++ return 0;
++#endif
++
+ pd = platform_device_alloc("pcspkr", -1);
+ if (!pd)
+ return -ENOMEM;
+Index: 10.3-2007-11-26/arch/i386/kernel/process-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/process-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/process-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -38,6 +38,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/random.h>
+ #include <linux/personality.h>
++#include <linux/tick.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -160,6 +161,7 @@ void cpu_idle(void)
+
+ /* endless idle loop with no priority at all */
+ while (1) {
++ tick_nohz_stop_sched_tick();
+ while (!need_resched()) {
+ void (*idle)(void);
+
+@@ -175,6 +177,7 @@ void cpu_idle(void)
+ __get_cpu_var(irq_stat).idle_timestamp = jiffies;
+ idle();
+ }
++ tick_nohz_restart_sched_tick();
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+@@ -247,8 +250,8 @@ void show_regs(struct pt_regs * regs)
+ regs->eax,regs->ebx,regs->ecx,regs->edx);
+ printk("ESI: %08lx EDI: %08lx EBP: %08lx",
+ regs->esi, regs->edi, regs->ebp);
+- printk(" DS: %04x ES: %04x GS: %04x\n",
+- 0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xgs);
++ printk(" DS: %04x ES: %04x FS: %04x\n",
++ 0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xfs);
+
+ cr0 = read_cr0();
+ cr2 = read_cr2();
+@@ -279,7 +282,7 @@ int kernel_thread(int (*fn)(void *), voi
+
+ regs.xds = __USER_DS;
+ regs.xes = __USER_DS;
+- regs.xgs = __KERNEL_PDA;
++ regs.xfs = __KERNEL_PDA;
+ regs.orig_eax = -1;
+ regs.eip = (unsigned long) kernel_thread_helper;
+ regs.xcs = __KERNEL_CS | get_kernel_rpl();
+@@ -355,7 +358,7 @@ int copy_thread(int nr, unsigned long cl
+
+ p->thread.eip = (unsigned long) ret_from_fork;
+
+- savesegment(fs,p->thread.fs);
++ savesegment(gs,p->thread.gs);
+
+ tsk = current;
+ if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
+@@ -433,8 +436,8 @@ void dump_thread(struct pt_regs * regs,
+ dump->regs.eax = regs->eax;
+ dump->regs.ds = regs->xds;
+ dump->regs.es = regs->xes;
+- savesegment(fs,dump->regs.fs);
+- dump->regs.gs = regs->xgs;
++ dump->regs.fs = regs->xfs;
++ savesegment(gs,dump->regs.gs);
+ dump->regs.orig_eax = regs->orig_eax;
+ dump->regs.eip = regs->eip;
+ dump->regs.cs = regs->xcs;
+@@ -613,16 +616,6 @@ struct task_struct fastcall * __switch_t
+ prefetch(&next->i387.fxsave);
+
+ /*
+- * Restore %fs if needed.
+- *
+- * Glibc normally makes %fs be zero.
+- */
+- if (unlikely(next->fs))
+- loadsegment(fs, next->fs);
+-
+- write_pda(pcurrent, next_p);
+-
+- /*
+ * Now maybe handle debug registers
+ */
+ if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
+@@ -630,6 +623,15 @@ struct task_struct fastcall * __switch_t
+
+ disable_tsc(prev_p, next_p);
+
++ /*
++ * Leave lazy mode, flushing any hypercalls made here.
++ * This must be done before restoring TLS segments so
++ * the GDT and LDT are properly updated, and must be
++ * done before math_state_restore, so the TS bit is up
++ * to date.
++ */
++ arch_leave_lazy_cpu_mode();
++
+ /* If the task has used fpu the last 5 timeslices, just do a full
+ * restore of the math state immediately to avoid the trap; the
+ * chances of needing FPU soon are obviously high now
+@@ -637,6 +639,14 @@ struct task_struct fastcall * __switch_t
+ if (next_p->fpu_counter > 5)
+ math_state_restore();
+
++ /*
++ * Restore %gs if needed (which is common)
++ */
++ if (prev->gs | next->gs)
++ loadsegment(gs, next->gs);
++
++ write_pda(pcurrent, next_p);
++
+ return prev_p;
+ }
+
+Index: 10.3-2007-11-26/arch/i386/kernel/setup-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/setup-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/setup-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -33,7 +33,6 @@
+ #include <linux/initrd.h>
+ #include <linux/bootmem.h>
+ #include <linux/seq_file.h>
+-#include <linux/platform_device.h>
+ #include <linux/console.h>
+ #include <linux/mca.h>
+ #include <linux/root_dev.h>
+@@ -151,7 +150,7 @@ unsigned long saved_videomode;
+ #define RAMDISK_PROMPT_FLAG 0x8000
+ #define RAMDISK_LOAD_FLAG 0x4000
+
+-static char command_line[COMMAND_LINE_SIZE];
++static char __initdata command_line[COMMAND_LINE_SIZE];
+
+ unsigned char __initdata boot_params[PARAM_SIZE];
+
+@@ -671,8 +670,8 @@ void __init setup_arch(char **cmdline_p)
+
+ if ((i = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
+ i = COMMAND_LINE_SIZE;
+- memcpy(saved_command_line, xen_start_info->cmd_line, i);
+- saved_command_line[i - 1] = '\0';
++ memcpy(boot_command_line, xen_start_info->cmd_line, i);
++ boot_command_line[i - 1] = '\0';
+ parse_early_param();
+
+ if (user_defined_memmap) {
+@@ -680,11 +679,19 @@ void __init setup_arch(char **cmdline_p)
+ print_memory_map("user");
+ }
+
+- strlcpy(command_line, saved_command_line, COMMAND_LINE_SIZE);
++ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = command_line;
+
+ max_low_pfn = setup_memory();
+
++#ifdef CONFIG_VMI
++ /*
++ * Must be after max_low_pfn is determined, and before kernel
++ * pagetables are setup.
++ */
++ vmi_init();
++#endif
++
+ /*
+ * NOTE: before this point _nobody_ is allowed to allocate
+ * any memory using the bootmem allocator. Although the
+@@ -816,7 +823,6 @@ void __init setup_arch(char **cmdline_p)
+ conswitchp = &dummy_con;
+ #endif
+ }
+- tsc_init();
+
+ xencons_early_setup();
+ }
+@@ -828,31 +834,3 @@ xen_panic_event(struct notifier_block *t
+ /* we're never actually going to get here... */
+ return NOTIFY_DONE;
+ }
+-
+-static __init int add_pcspkr(void)
+-{
+- struct platform_device *pd;
+- int ret;
+-
+- if (!is_initial_xendomain())
+- return 0;
+-
+- pd = platform_device_alloc("pcspkr", -1);
+- if (!pd)
+- return -ENOMEM;
+-
+- ret = platform_device_add(pd);
+- if (ret)
+- platform_device_put(pd);
+-
+- return ret;
+-}
+-device_initcall(add_pcspkr);
+-
+-/*
+- * Local Variables:
+- * mode:c
+- * c-file-style:"k&r"
+- * c-basic-offset:8
+- * End:
+- */
+Index: 10.3-2007-11-26/arch/i386/kernel/smp-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/smp-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/smp-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -335,8 +335,7 @@ static void flush_tlb_others(cpumask_t c
+ /*
+ * i'm not happy about this global shared spinlock in the
+ * MM hot path, but we'll see how contended it is.
+- * Temporarily this turns IRQs off, so that lockups are
+- * detected by the NMI watchdog.
++ * AK: x86-64 has a faster method that could be ported.
+ */
+ spin_lock(&tlbstate_lock);
+
+@@ -361,7 +360,7 @@ static void flush_tlb_others(cpumask_t c
+
+ while (!cpus_empty(flush_cpumask))
+ /* nothing. lockup detection does not belong here */
+- mb();
++ cpu_relax();
+
+ flush_mm = NULL;
+ flush_va = 0;
+Index: 10.3-2007-11-26/arch/i386/kernel/swiotlb.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/swiotlb.c 2007-12-06 17:27:31.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/swiotlb.c 2007-10-22 13:58:46.000000000 +0200
+@@ -138,8 +138,8 @@ __setup("swiotlb=", setup_io_tlb_npages)
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the PCI DMA API.
+ */
+-void
+-swiotlb_init_with_default_size (size_t default_size)
++void __init
++swiotlb_init_with_default_size(size_t default_size)
+ {
+ unsigned long i, bytes;
+ int rc;
+@@ -227,7 +227,7 @@ swiotlb_init_with_default_size (size_t d
+ dma_bits);
+ }
+
+-void
++void __init
+ swiotlb_init(void)
+ {
+ long ram_end;
+@@ -463,7 +463,7 @@ swiotlb_full(struct device *dev, size_t
+ * When the mapping is small enough return a static buffer to limit
+ * the damage, or panic when the transfer is too big.
+ */
+- printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
++ printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %zu bytes at "
+ "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");
+
+ if (size > io_tlb_overflow && do_panic) {
+@@ -608,7 +608,7 @@ swiotlb_map_sg(struct device *hwdev, str
+ sg[0].dma_length = 0;
+ return 0;
+ }
+- sg->dma_address = (dma_addr_t)virt_to_bus(map);
++ sg->dma_address = virt_to_bus(map);
+ } else
+ sg->dma_address = dev_addr;
+ sg->dma_length = sg->length;
+@@ -630,8 +630,7 @@ swiotlb_unmap_sg(struct device *hwdev, s
+
+ for (i = 0; i < nelems; i++, sg++)
+ if (in_swiotlb_aperture(sg->dma_address))
+- unmap_single(hwdev,
+- (void *)bus_to_virt(sg->dma_address),
++ unmap_single(hwdev, bus_to_virt(sg->dma_address),
+ sg->dma_length, dir);
+ else
+ gnttab_dma_unmap_page(sg->dma_address);
+@@ -654,8 +653,7 @@ swiotlb_sync_sg_for_cpu(struct device *h
+
+ for (i = 0; i < nelems; i++, sg++)
+ if (in_swiotlb_aperture(sg->dma_address))
+- sync_single(hwdev,
+- (void *)bus_to_virt(sg->dma_address),
++ sync_single(hwdev, bus_to_virt(sg->dma_address),
+ sg->dma_length, dir);
+ }
+
+@@ -669,8 +667,7 @@ swiotlb_sync_sg_for_device(struct device
+
+ for (i = 0; i < nelems; i++, sg++)
+ if (in_swiotlb_aperture(sg->dma_address))
+- sync_single(hwdev,
+- (void *)bus_to_virt(sg->dma_address),
++ sync_single(hwdev, bus_to_virt(sg->dma_address),
+ sg->dma_length, dir);
+ }
+
+Index: 10.3-2007-11-26/arch/i386/kernel/time-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/time-xen.c 2007-12-06 17:32:10.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/time-xen.c 2007-12-06 17:32:21.000000000 +0100
+@@ -66,6 +66,7 @@
+ #include "mach_time.h"
+
+ #include <linux/timex.h>
++#include <linux/clocksource.h>
+
+ #include <asm/hpet.h>
+
+@@ -74,25 +75,17 @@
+ #include <xen/evtchn.h>
+ #include <xen/interface/vcpu.h>
+
+-#if defined (__i386__)
+-#include <asm/i8259.h>
++#ifdef CONFIG_X86_32
+ #include <asm/i8253.h>
+ DEFINE_SPINLOCK(i8253_lock);
+ EXPORT_SYMBOL(i8253_lock);
+-#endif
+-
+-#define XEN_SHIFT 22
+-
+ int pit_latch_buggy; /* extern */
+-
+-#if defined(__x86_64__)
+-unsigned long vxtime_hz = PIT_TICK_RATE;
+-struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */
++#else
+ volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
+-struct timespec __xtime __section_xtime;
+-struct timezone __sys_tz __section_sys_tz;
+ #endif
+
++#define XEN_SHIFT 22
++
+ unsigned int cpu_khz; /* Detected as we calibrate the TSC */
+ EXPORT_SYMBOL(cpu_khz);
+
+@@ -224,7 +217,7 @@ int read_current_timer(unsigned long *ti
+ }
+ #endif
+
+-void init_cpu_khz(void)
++static void init_cpu_khz(void)
+ {
+ u64 __cpu_khz = 1000000ULL << 32;
+ struct vcpu_time_info *info = &vcpu_info(0)->time;
+@@ -243,16 +236,6 @@ static u64 get_nsec_offset(struct shadow
+ return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
+ }
+
+-#ifdef CONFIG_X86_64
+-static unsigned long get_usec_offset(struct shadow_time_info *shadow)
+-{
+- u64 now, delta;
+- rdtscll(now);
+- delta = now - shadow->tsc_timestamp;
+- return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
+-}
+-#endif
+-
+ static void __update_wallclock(time_t sec, long nsec)
+ {
+ long wtm_nsec, xtime_nsec;
+@@ -360,130 +343,6 @@ void rtc_cmos_write(unsigned char val, u
+ }
+ EXPORT_SYMBOL(rtc_cmos_write);
+
+-#ifdef CONFIG_X86_64
+-
+-/*
+- * This version of gettimeofday has microsecond resolution
+- * and better than microsecond precision on fast x86 machines with TSC.
+- */
+-void do_gettimeofday(struct timeval *tv)
+-{
+- unsigned long seq;
+- unsigned long usec, sec;
+- unsigned long max_ntp_tick;
+- s64 nsec;
+- unsigned int cpu;
+- struct shadow_time_info *shadow;
+- u32 local_time_version;
+-
+- cpu = get_cpu();
+- shadow = &per_cpu(shadow_time, cpu);
+-
+- do {
+- local_time_version = shadow->version;
+- seq = read_seqbegin(&xtime_lock);
+-
+- usec = get_usec_offset(shadow);
+-
+- /*
+- * If time_adjust is negative then NTP is slowing the clock
+- * so make sure not to go into next possible interval.
+- * Better to lose some accuracy than have time go backwards..
+- */
+- if (unlikely(time_adjust < 0)) {
+- max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
+- usec = min(usec, max_ntp_tick);
+- }
+-
+- sec = xtime.tv_sec;
+- usec += (xtime.tv_nsec / NSEC_PER_USEC);
+-
+- nsec = shadow->system_timestamp - processed_system_time;
+- __normalize_time(&sec, &nsec);
+- usec += (long)nsec / NSEC_PER_USEC;
+-
+- if (unlikely(!time_values_up_to_date(cpu))) {
+- /*
+- * We may have blocked for a long time,
+- * rendering our calculations invalid
+- * (e.g. the time delta may have
+- * overflowed). Detect that and recalculate
+- * with fresh values.
+- */
+- get_time_values_from_xen(cpu);
+- continue;
+- }
+- } while (read_seqretry(&xtime_lock, seq) ||
+- (local_time_version != shadow->version));
+-
+- put_cpu();
+-
+- while (usec >= USEC_PER_SEC) {
+- usec -= USEC_PER_SEC;
+- sec++;
+- }
+-
+- tv->tv_sec = sec;
+- tv->tv_usec = usec;
+-}
+-
+-EXPORT_SYMBOL(do_gettimeofday);
+-
+-int do_settimeofday(struct timespec *tv)
+-{
+- time_t sec;
+- s64 nsec;
+- unsigned int cpu;
+- struct shadow_time_info *shadow;
+- struct xen_platform_op op;
+-
+- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+- return -EINVAL;
+-
+- cpu = get_cpu();
+- shadow = &per_cpu(shadow_time, cpu);
+-
+- write_seqlock_irq(&xtime_lock);
+-
+- /*
+- * Ensure we don't get blocked for a long time so that our time delta
+- * overflows. If that were to happen then our shadow time values would
+- * be stale, so we can retry with fresh ones.
+- */
+- for (;;) {
+- nsec = tv->tv_nsec - get_nsec_offset(shadow);
+- if (time_values_up_to_date(cpu))
+- break;
+- get_time_values_from_xen(cpu);
+- }
+- sec = tv->tv_sec;
+- __normalize_time(&sec, &nsec);
+-
+- if (is_initial_xendomain() && !independent_wallclock) {
+- op.cmd = XENPF_settime;
+- op.u.settime.secs = sec;
+- op.u.settime.nsecs = nsec;
+- op.u.settime.system_time = shadow->system_timestamp;
+- HYPERVISOR_platform_op(&op);
+- update_wallclock();
+- } else if (independent_wallclock) {
+- nsec -= shadow->system_timestamp;
+- __normalize_time(&sec, &nsec);
+- __update_wallclock(sec, nsec);
+- }
+-
+- write_sequnlock_irq(&xtime_lock);
+-
+- put_cpu();
+-
+- clock_was_set();
+- return 0;
+-}
+-
+-EXPORT_SYMBOL(do_settimeofday);
+-
+-#endif
+-
+ static void sync_xen_wallclock(unsigned long dummy);
+ static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
+ static void sync_xen_wallclock(unsigned long dummy)
+@@ -532,15 +391,7 @@ static int set_rtc_mmss(unsigned long no
+ return retval;
+ }
+
+-#ifdef CONFIG_X86_64
+-/* monotonic_clock(): returns # of nanoseconds passed since time_init()
+- * Note: This function is required to return accurate
+- * time even in the absence of multiple timer ticks.
+- */
+-unsigned long long monotonic_clock(void)
+-#else
+ unsigned long long sched_clock(void)
+-#endif
+ {
+ int cpu = get_cpu();
+ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
+@@ -560,14 +411,6 @@ unsigned long long sched_clock(void)
+
+ return time;
+ }
+-#ifdef CONFIG_X86_64
+-EXPORT_SYMBOL(monotonic_clock);
+-
+-unsigned long long sched_clock(void)
+-{
+- return monotonic_clock();
+-}
+-#endif
+
+ unsigned long profile_pc(struct pt_regs *regs)
+ {
+@@ -590,15 +433,13 @@ unsigned long profile_pc(struct pt_regs
+ }
+ #else
+ #ifdef CONFIG_SMP
+- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++ if (!v8086_mode(regs) && SEGMENT_IS_KERNEL_CODE(regs->xcs) &&
++ in_lock_functions(pc)) {
+ #ifdef CONFIG_FRAME_POINTER
+ return *(unsigned long *)(regs->ebp + 4);
+ #else
+- unsigned long *sp;
+- if ((regs->xcs & 2) == 0)
+- sp = (unsigned long *)&regs->esp;
+- else
+- sp = (unsigned long *)regs->esp;
++ unsigned long *sp = (unsigned long *)&regs->esp;
++
+ /* Return address is either directly at stack pointer
+ or above a saved eflags. Eflags has bits 22-31 zero,
+ kernel addresses don't. */
+@@ -752,19 +593,6 @@ irqreturn_t timer_interrupt(int irq, voi
+ return IRQ_HANDLED;
+ }
+
+-#ifndef CONFIG_X86_64
+-
+-void tsc_init(void)
+-{
+- init_cpu_khz();
+- printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
+- cpu_khz / 1000, cpu_khz % 1000);
+-
+- use_tsc_delay();
+-}
+-
+-#include <linux/clocksource.h>
+-
+ void mark_tsc_unstable(void)
+ {
+ #ifndef CONFIG_XEN /* XXX Should tell the hypervisor about this fact. */
+@@ -818,21 +646,9 @@ static struct clocksource clocksource_xe
+ .mask = CLOCKSOURCE_MASK(64),
+ .mult = 1 << XEN_SHIFT, /* time directly in nanoseconds */
+ .shift = XEN_SHIFT,
+- .is_continuous = 1,
++ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ };
+
+-static int __init init_xen_clocksource(void)
+-{
+- clocksource_xen.mult = clocksource_khz2mult(cpu_khz,
+- clocksource_xen.shift);
+-
+- return clocksource_register(&clocksource_xen);
+-}
+-
+-module_init(init_xen_clocksource);
+-
+-#endif
+-
+ static void init_missing_ticks_accounting(int cpu)
+ {
+ struct vcpu_register_runstate_memory_area area;
+@@ -851,7 +667,7 @@ static void init_missing_ticks_accountin
+ }
+
+ /* not static: needed by APM */
+-unsigned long get_cmos_time(void)
++unsigned long read_persistent_clock(void)
+ {
+ unsigned long retval;
+ unsigned long flags;
+@@ -864,11 +680,11 @@ unsigned long get_cmos_time(void)
+
+ return retval;
+ }
+-EXPORT_SYMBOL(get_cmos_time);
+
+ static void sync_cmos_clock(unsigned long dummy);
+
+ static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
++int no_sync_cmos_clock;
+
+ static void sync_cmos_clock(unsigned long dummy)
+ {
+@@ -912,7 +728,8 @@ static void sync_cmos_clock(unsigned lon
+
+ void notify_arch_cmos_timer(void)
+ {
+- mod_timer(&sync_cmos_timer, jiffies + 1);
++ if (!no_sync_cmos_clock)
++ mod_timer(&sync_cmos_timer, jiffies + 1);
+ mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
+ }
+
+@@ -924,7 +741,7 @@ static int timer_suspend(struct sys_devi
+ /*
+ * Estimate time zone so that set_time can update the clock
+ */
+- unsigned long ctime = get_cmos_time();
++ unsigned long ctime = read_persistent_clock();
+
+ clock_cmos_diff = -ctime;
+ clock_cmos_diff += get_seconds();
+@@ -936,7 +753,7 @@ static int timer_resume(struct sys_devic
+ {
+ unsigned long flags;
+ unsigned long sec;
+- unsigned long ctime = get_cmos_time();
++ unsigned long ctime = read_persistent_clock();
+ long sleep_length = (ctime - sleep_start) * HZ;
+ struct timespec ts;
+
+@@ -948,10 +765,6 @@ static int timer_resume(struct sys_devic
+ sleep_length = 0;
+ ctime = sleep_start;
+ }
+-#ifdef CONFIG_HPET_TIMER
+- if (is_hpet_enabled())
+- hpet_reenable();
+-#endif
+
+ sec = ctime + clock_cmos_diff;
+ ts.tv_sec = sec;
+@@ -987,29 +800,11 @@ static int time_init_device(void)
+
+ device_initcall(time_init_device);
+
+-#ifdef CONFIG_HPET_TIMER
+ extern void (*late_time_init)(void);
+-/* Duplicate of time_init() below, with hpet_enable part added */
+-static void __init hpet_time_init(void)
+-{
+- struct timespec ts;
+- ts.tv_sec = get_cmos_time();
+- ts.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+-
+- do_settimeofday(&ts);
+-
+- if ((hpet_enable() >= 0) && hpet_use_timer) {
+- printk("Using HPET for base-timer\n");
+- }
+-
+- do_time_init();
+-}
+-#endif
+
+ /* Dynamically-mapped IRQ. */
+ DEFINE_PER_CPU(int, timer_irq);
+
+-extern void (*late_time_init)(void);
+ static void setup_cpu0_timer_irq(void)
+ {
+ per_cpu(timer_irq, 0) =
+@@ -1029,16 +824,9 @@ static struct vcpu_set_periodic_timer xe
+
+ void __init time_init(void)
+ {
+-#ifdef CONFIG_HPET_TIMER
+- if (is_hpet_capable()) {
+- /*
+- * HPET initialization needs to do memory-mapped io. So, let
+- * us do a late initialization after mem_init().
+- */
+- late_time_init = hpet_time_init;
+- return;
+- }
+-#endif
++ init_cpu_khz();
++ printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
++ cpu_khz / 1000, cpu_khz % 1000);
+
+ HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, 0,
+ &xen_set_periodic_tick);
+@@ -1049,18 +837,12 @@ void __init time_init(void)
+ per_cpu(processed_system_time, 0) = processed_system_time;
+ init_missing_ticks_accounting(0);
+
+- update_wallclock();
++ clocksource_register(&clocksource_xen);
+
+-#ifdef CONFIG_X86_64
+- init_cpu_khz();
+- printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
+- cpu_khz / 1000, cpu_khz % 1000);
++ update_wallclock();
+
+- vxtime.mode = VXTIME_TSC;
+- vxtime.quot = (1000000L << 32) / vxtime_hz;
+- vxtime.tsc_quot = (1000L << 32) / cpu_khz;
+- sync_core();
+- rdtscll(vxtime.last_tsc);
++#ifndef CONFIG_X86_64
++ use_tsc_delay();
+ #endif
+
+ /* Cannot request_irq() until kmem is initialised. */
+@@ -1251,7 +1033,7 @@ static ctl_table xen_table[] = {
+ };
+ static int __init xen_sysctl_init(void)
+ {
+- (void)register_sysctl_table(xen_table, 0);
++ (void)register_sysctl_table(xen_table);
+ return 0;
+ }
+ __initcall(xen_sysctl_init);
+Index: 10.3-2007-11-26/arch/i386/kernel/traps-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/traps-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/traps-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -100,6 +100,7 @@ asmlinkage void fixup_4gb_segment(void);
+ asmlinkage void machine_check(void);
+
+ int kstack_depth_to_print = 24;
++static unsigned int code_bytes = 64;
+ ATOMIC_NOTIFIER_HEAD(i386die_chain);
+
+ int register_die_notifier(struct notifier_block *nb)
+@@ -297,10 +298,11 @@ void show_registers(struct pt_regs *regs
+ int i;
+ int in_kernel = 1;
+ unsigned long esp;
+- unsigned short ss;
++ unsigned short ss, gs;
+
+ esp = (unsigned long) (&regs->esp);
+ savesegment(ss, ss);
++ savesegment(gs, gs);
+ if (user_mode_vm(regs)) {
+ in_kernel = 0;
+ esp = regs->esp;
+@@ -319,8 +321,8 @@ void show_registers(struct pt_regs *regs
+ regs->eax, regs->ebx, regs->ecx, regs->edx);
+ printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
+ regs->esi, regs->edi, regs->ebp, esp);
+- printk(KERN_EMERG "ds: %04x es: %04x ss: %04x\n",
+- regs->xds & 0xffff, regs->xes & 0xffff, ss);
++ printk(KERN_EMERG "ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
++ regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss);
+ printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
+ TASK_COMM_LEN, current->comm, current->pid,
+ current_thread_info(), current, current->thread_info);
+@@ -330,7 +332,8 @@ void show_registers(struct pt_regs *regs
+ */
+ if (in_kernel) {
+ u8 *eip;
+- int code_bytes = 64;
++ unsigned int code_prologue = code_bytes * 43 / 64;
++ unsigned int code_len = code_bytes;
+ unsigned char c;
+
+ printk("\n" KERN_EMERG "Stack: ");
+@@ -338,14 +341,14 @@ void show_registers(struct pt_regs *regs
+
+ printk(KERN_EMERG "Code: ");
+
+- eip = (u8 *)regs->eip - 43;
++ eip = (u8 *)regs->eip - code_prologue;
+ if (eip < (u8 *)PAGE_OFFSET ||
+ probe_kernel_address(eip, c)) {
+ /* try starting at EIP */
+ eip = (u8 *)regs->eip;
+- code_bytes = 32;
++ code_len = code_len - code_prologue + 1;
+ }
+- for (i = 0; i < code_bytes; i++, eip++) {
++ for (i = 0; i < code_len; i++, eip++) {
+ if (eip < (u8 *)PAGE_OFFSET ||
+ probe_kernel_address(eip, c)) {
+ printk(" Bad EIP value.");
+@@ -1130,3 +1133,13 @@ static int __init kstack_setup(char *s)
+ return 1;
+ }
+ __setup("kstack=", kstack_setup);
++
++static int __init code_bytes_setup(char *s)
++{
++ code_bytes = simple_strtoul(s, NULL, 0);
++ if (code_bytes > 8192)
++ code_bytes = 8192;
++
++ return 1;
++}
++__setup("code_bytes=", code_bytes_setup);
+Index: 10.3-2007-11-26/arch/i386/mm/fault-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/fault-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/mm/fault-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -46,43 +46,17 @@ int unregister_page_fault_notifier(struc
+ }
+ EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
+
+-static inline int notify_page_fault(enum die_val val, const char *str,
+- struct pt_regs *regs, long err, int trap, int sig)
++static inline int notify_page_fault(struct pt_regs *regs, long err)
+ {
+ struct die_args args = {
+ .regs = regs,
+- .str = str,
++ .str = "page fault",
+ .err = err,
+- .trapnr = trap,
+- .signr = sig
++ .trapnr = 14,
++ .signr = SIGSEGV
+ };
+- return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+-}
+-
+-/*
+- * Unlock any spinlocks which will prevent us from getting the
+- * message out
+- */
+-void bust_spinlocks(int yes)
+-{
+- int loglevel_save = console_loglevel;
+-
+- if (yes) {
+- oops_in_progress = 1;
+- return;
+- }
+-#ifdef CONFIG_VT
+- unblank_screen();
+-#endif
+- oops_in_progress = 0;
+- /*
+- * OK, the message is on the console. Now we call printk()
+- * without oops_in_progress set so that printk will give klogd
+- * a poke. Hold onto your hats...
+- */
+- console_loglevel = 15; /* NMI oopser may have shut the console up */
+- printk(" ");
+- console_loglevel = loglevel_save;
++ return atomic_notifier_call_chain(&notify_page_fault_chain,
++ DIE_PAGE_FAULT, &args);
+ }
+
+ /*
+@@ -476,8 +450,7 @@ fastcall void __kprobes do_page_fault(st
+ /* Can take a spurious fault if mapping changes R/O -> R/W. */
+ if (spurious_fault(regs, address, error_code))
+ return;
+- if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+- SIGSEGV) == NOTIFY_STOP)
++ if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+ return;
+ /*
+ * Don't take the mm semaphore here. If we fixup a prefetch
+@@ -486,8 +459,7 @@ fastcall void __kprobes do_page_fault(st
+ goto bad_area_nosemaphore;
+ }
+
+- if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+- SIGSEGV) == NOTIFY_STOP)
++ if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+ return;
+
+ /* It's safe to allow irq's after cr2 has been saved and the vmalloc
+Index: 10.3-2007-11-26/arch/i386/mm/highmem-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/highmem-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/mm/highmem-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -33,14 +33,16 @@ static void *__kmap_atomic(struct page *
+
+ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+ pagefault_disable();
++
++ idx = type + KM_TYPE_NR*smp_processor_id();
++ BUG_ON(!pte_none(*(kmap_pte-idx)));
++
+ if (!PageHighMem(page))
+ return page_address(page);
+
+- idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+- if (!pte_none(*(kmap_pte-idx)))
+- BUG();
+ set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
++ arch_flush_lazy_mmu_mode();
+
+ return (void*) vaddr;
+ }
+@@ -94,6 +96,7 @@ void *kmap_atomic_pfn(unsigned long pfn,
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
++ arch_flush_lazy_mmu_mode();
+
+ return (void*) vaddr;
+ }
+Index: 10.3-2007-11-26/arch/i386/mm/init-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/init-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/mm/init-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -68,6 +68,7 @@ static pmd_t * __init one_md_table_init(
+
+ #ifdef CONFIG_X86_PAE
+ pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++ paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
+ make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
+ set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+ pud = pud_offset(pgd, 0);
+@@ -89,6 +90,7 @@ static pte_t * __init one_page_table_ini
+ {
+ if (pmd_none(*pmd)) {
+ pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++ paravirt_alloc_pt(__pa(page_table) >> PAGE_SHIFT);
+ make_lowmem_page_readonly(page_table,
+ XENFEAT_writable_page_tables);
+ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
+Index: 10.3-2007-11-26/arch/i386/mm/pgtable-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/pgtable-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/mm/pgtable-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -229,6 +229,8 @@ void __set_fixmap (enum fixed_addresses
+ void __init reserve_top_address(unsigned long reserve)
+ {
+ BUG_ON(fixmaps > 0);
++ printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
++ (int)-reserve);
+ __FIXADDR_TOP = -reserve - PAGE_SIZE;
+ __VMALLOC_RESERVE += reserve;
+ }
+@@ -332,6 +334,12 @@ void pgd_ctor(void *pgd, struct kmem_cac
+ swapper_pg_dir + USER_PTRS_PER_PGD,
+ KERNEL_PGD_PTRS);
+ memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
++
++ /* must happen under lock */
++ paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
++ __pa(swapper_pg_dir) >> PAGE_SHIFT,
++ USER_PTRS_PER_PGD, PTRS_PER_PGD - USER_PTRS_PER_PGD);
++
+ pgd_list_add(pgd);
+ spin_unlock_irqrestore(&pgd_lock, flags);
+ }
+@@ -342,6 +350,7 @@ void pgd_dtor(void *pgd, struct kmem_cac
+ {
+ unsigned long flags; /* can be called from interrupt context */
+
++ paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
+ spin_lock_irqsave(&pgd_lock, flags);
+ pgd_list_del(pgd);
+ spin_unlock_irqrestore(&pgd_lock, flags);
+@@ -366,6 +375,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+ pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
+ if (!pmd)
+ goto out_oom;
++ paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
+ set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
+ }
+ return pgd;
+@@ -388,6 +398,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+ pmd[i] = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
+ if (!pmd[i])
+ goto out_oom;
++ paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
+ }
+
+ spin_lock_irqsave(&pgd_lock, flags);
+@@ -428,12 +439,17 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+
+ out_oom:
+ if (HAVE_SHARED_KERNEL_PMD) {
+- for (i--; i >= 0; i--)
+- kmem_cache_free(pmd_cache,
+- (void *)__va(pgd_val(pgd[i])-1));
++ for (i--; i >= 0; i--) {
++ pgd_t pgdent = pgd[i];
++ void* pmd = (void *)__va(pgd_val(pgdent)-1);
++ paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
++ kmem_cache_free(pmd_cache, pmd);
++ }
+ } else {
+- for (i--; i >= 0; i--)
++ for (i--; i >= 0; i--) {
++ paravirt_release_pd(__pa(pmd[i]) >> PAGE_SHIFT);
+ kmem_cache_free(pmd_cache, pmd[i]);
++ }
+ kfree(pmd);
+ }
+ kmem_cache_free(pgd_cache, pgd);
+@@ -457,7 +473,9 @@ void pgd_free(pgd_t *pgd)
+ /* in the PAE case user pgd entries are overwritten before usage */
+ if (PTRS_PER_PMD > 1) {
+ for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
+- pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++ pgd_t pgdent = pgd[i];
++ void* pmd = (void *)__va(pgd_val(pgdent)-1);
++ paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
+ kmem_cache_free(pmd_cache, pmd);
+ }
+
+Index: 10.3-2007-11-26/arch/x86_64/ia32/ia32entry-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/ia32/ia32entry-xen.S 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/ia32/ia32entry-xen.S 2007-10-22 13:58:46.000000000 +0200
+@@ -542,7 +542,7 @@ ia32_sys_call_table:
+ .quad sys32_vm86_warning /* vm86old */
+ .quad compat_sys_wait4
+ .quad sys_swapoff /* 115 */
+- .quad sys32_sysinfo
++ .quad compat_sys_sysinfo
+ .quad sys32_ipc
+ .quad sys_fsync
+ .quad stub32_sigreturn
+@@ -587,7 +587,7 @@ ia32_sys_call_table:
+ .quad sys_sched_yield
+ .quad sys_sched_get_priority_max
+ .quad sys_sched_get_priority_min /* 160 */
+- .quad sys_sched_rr_get_interval
++ .quad sys32_sched_rr_get_interval
+ .quad compat_sys_nanosleep
+ .quad sys_mremap
+ .quad sys_setresuid16
+@@ -745,4 +745,5 @@ ia32_sys_call_table:
+ .quad compat_sys_vmsplice
+ .quad compat_sys_move_pages
+ .quad sys_getcpu
++ .quad sys_epoll_pwait
+ ia32_syscall_end:
+Index: 10.3-2007-11-26/arch/x86_64/ia32/syscall32-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/ia32/syscall32-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/ia32/syscall32-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -21,70 +21,36 @@ extern unsigned char syscall32_syscall[]
+ extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
+ extern int sysctl_vsyscall32;
+
+-char *syscall32_page;
++static struct page *syscall32_pages[1];
+ #ifndef USE_INT80
+ static int use_sysenter = -1;
+ #endif
+
+-static struct page *
+-syscall32_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
+-{
+- struct page *p = virt_to_page(adr - vma->vm_start + syscall32_page);
+- get_page(p);
+- return p;
+-}
+-
+-/* Prevent VMA merging */
+-static void syscall32_vma_close(struct vm_area_struct *vma)
+-{
+-}
+-
+-static struct vm_operations_struct syscall32_vm_ops = {
+- .close = syscall32_vma_close,
+- .nopage = syscall32_nopage,
+-};
+-
+ struct linux_binprm;
+
+ /* Setup a VMA at program startup for the vsyscall page */
+ int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
+ {
+- int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
+- struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ int ret;
+
+- vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+- if (!vma)
+- return -ENOMEM;
+-
+- memset(vma, 0, sizeof(struct vm_area_struct));
+- /* Could randomize here */
+- vma->vm_start = VSYSCALL32_BASE;
+- vma->vm_end = VSYSCALL32_END;
+- /* MAYWRITE to allow gdb to COW and set breakpoints */
+- vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
++ down_write(&mm->mmap_sem);
+ /*
++ * MAYWRITE to allow gdb to COW and set breakpoints
++ *
+ * Make sure the vDSO gets into every core dump.
+ * Dumping its contents makes post-mortem fully interpretable later
+ * without matching up the same kernel and hardware config to see
+ * what PC values meant.
+ */
+- vma->vm_flags |= VM_ALWAYSDUMP;
+- vma->vm_flags |= mm->def_flags;
+- vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+- vma->vm_ops = &syscall32_vm_ops;
+- vma->vm_mm = mm;
+-
+- down_write(&mm->mmap_sem);
+- if ((ret = insert_vm_struct(mm, vma))) {
+- up_write(&mm->mmap_sem);
+- kmem_cache_free(vm_area_cachep, vma);
+- return ret;
+- }
+- mm->total_vm += npages;
++ /* Could randomize here */
++ ret = install_special_mapping(mm, VSYSCALL32_BASE, PAGE_SIZE,
++ VM_READ|VM_EXEC|
++ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
++ VM_ALWAYSDUMP,
++ syscall32_pages);
+ up_write(&mm->mmap_sem);
+- return 0;
++ return ret;
+ }
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
+@@ -97,10 +63,11 @@ const char *arch_vma_name(struct vm_area
+
+ static int __init init_syscall32(void)
+ {
+- syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
++ char *syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!syscall32_page)
+ panic("Cannot allocate syscall32 page");
+
++ syscall32_pages[0] = virt_to_page(syscall32_page);
+ #ifdef USE_INT80
+ /*
+ * At this point we use int 0x80.
+Index: 10.3-2007-11-26/arch/x86_64/kernel/Makefile
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/Makefile 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/Makefile 2007-10-22 13:58:46.000000000 +0200
+@@ -68,7 +68,7 @@ pci-dma-y += ../../i386/kernel/pci-dma
+ microcode-$(subst m,y,$(CONFIG_MICROCODE)) := ../../i386/kernel/microcode-xen.o
+ quirks-y := ../../i386/kernel/quirks-xen.o
+
+-n-obj-xen := early-quirks.o i8259.o reboot.o i8237.o smpboot.o trampoline.o
++n-obj-xen := early-quirks.o i8259.o reboot.o i8237.o smpboot.o trampoline.o tsc.o tsc_sync.o
+
+ include $(srctree)/scripts/Makefile.xen
+
+Index: 10.3-2007-11-26/arch/x86_64/kernel/e820-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/e820-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/e820-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -88,6 +88,13 @@ static inline int bad_addr(unsigned long
+ return 1;
+ }
+
++#ifdef CONFIG_NUMA
++ /* NUMA memory to node map */
++ if (last >= nodemap_addr && addr < nodemap_addr + nodemap_size) {
++ *addrp = nodemap_addr + nodemap_size;
++ return 1;
++ }
++#endif
+ /* XXX ramdisk image here? */
+ #else
+ if (last < (table_end<<PAGE_SHIFT)) {
+@@ -206,6 +213,37 @@ unsigned long __init e820_end_of_ram(voi
+ }
+
+ /*
++ * Find the hole size in the range.
++ */
++unsigned long __init e820_hole_size(unsigned long start, unsigned long end)
++{
++ unsigned long ram = 0;
++ int i;
++
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++ unsigned long last, addr;
++
++ if (ei->type != E820_RAM ||
++ ei->addr+ei->size <= start ||
++ ei->addr >= end)
++ continue;
++
++ addr = round_up(ei->addr, PAGE_SIZE);
++ if (addr < start)
++ addr = start;
++
++ last = round_down(ei->addr + ei->size, PAGE_SIZE);
++ if (last >= end)
++ last = end;
++
++ if (last > addr)
++ ram += last - addr;
++ }
++ return ((end - start) - ram);
++}
++
++/*
+ * Mark e820 reserved areas as busy for the resource manager.
+ */
+ void __init e820_reserve_resources(struct e820entry *e820, int nr_map)
+@@ -716,7 +754,7 @@ static int __init parse_memmap_opt(char
+ }
+ early_param("memmap", parse_memmap_opt);
+
+-void finish_e820_parsing(void)
++void __init finish_e820_parsing(void)
+ {
+ if (userdef) {
+ printk(KERN_INFO "user-defined physical RAM map:\n");
+Index: 10.3-2007-11-26/arch/x86_64/kernel/entry-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/entry-xen.S 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/entry-xen.S 2007-10-22 13:58:46.000000000 +0200
+@@ -636,6 +636,9 @@ END(invalidate_interrupt\num)
+ ENTRY(call_function_interrupt)
+ apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
+ END(call_function_interrupt)
++ENTRY(irq_move_cleanup_interrupt)
++ apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
++END(irq_move_cleanup_interrupt)
+ #endif
+
+ ENTRY(apic_timer_interrupt)
+Index: 10.3-2007-11-26/arch/x86_64/kernel/genapic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/genapic-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/genapic-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -65,8 +65,8 @@ void __init clustered_apic_check(void)
+ * Some x86_64 machines use physical APIC mode regardless of how many
+ * procs/clusters are present (x86_64 ES7000 is an example).
+ */
+- if (acpi_fadt.revision > FADT2_REVISION_ID)
+- if (acpi_fadt.force_apic_physical_destination_mode) {
++ if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID)
++ if (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) {
+ genapic = &apic_cluster;
+ goto print;
+ }
+Index: 10.3-2007-11-26/arch/x86_64/kernel/head64-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/head64-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/head64-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -42,8 +42,6 @@ static void __init clear_bss(void)
+ #define OLD_CL_BASE_ADDR 0x90000
+ #define OLD_CL_OFFSET 0x90022
+
+-extern char saved_command_line[];
+-
+ static void __init copy_bootdata(char *real_mode_data)
+ {
+ #ifndef CONFIG_XEN
+@@ -59,14 +57,14 @@ static void __init copy_bootdata(char *r
+ new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
+ }
+ command_line = (char *) ((u64)(new_data));
+- memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
++ memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+ #else
+ int max_cmdline;
+
+ if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
+ max_cmdline = COMMAND_LINE_SIZE;
+- memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
+- saved_command_line[max_cmdline-1] = '\0';
++ memcpy(boot_command_line, xen_start_info->cmd_line, max_cmdline);
++ boot_command_line[max_cmdline-1] = '\0';
+ #endif
+ }
+
+Index: 10.3-2007-11-26/arch/x86_64/kernel/io_apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/io_apic-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/io_apic-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -36,6 +36,7 @@
+ #include <acpi/acpi_bus.h>
+ #endif
+
++#include <asm/idle.h>
+ #include <asm/io.h>
+ #include <asm/smp.h>
+ #include <asm/desc.h>
+@@ -47,7 +48,20 @@
+ #include <asm/msidef.h>
+ #include <asm/hypertransport.h>
+
+-static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result);
++struct irq_cfg {
++#ifndef CONFIG_XEN
++ cpumask_t domain;
++ cpumask_t old_domain;
++#endif
++ unsigned move_cleanup_count;
++ u8 vector;
++ u8 move_in_progress : 1;
++};
++
++/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
++struct irq_cfg irq_cfg[NR_IRQS] __read_mostly;
++
++static int assign_irq_vector(int irq, cpumask_t mask);
+
+ #define __apicdebuginit __init
+
+@@ -88,7 +102,7 @@ int nr_ioapic_registers[MAX_IO_APICS];
+ * Rough estimation of how many shared IRQs there are, can
+ * be changed anytime.
+ */
+-#define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
++#define MAX_PLUS_SHARED_IRQS NR_IRQS
+ #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
+
+ /*
+@@ -259,21 +273,19 @@ static void __target_IO_APIC_irq(unsigne
+
+ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+ {
++ struct irq_cfg *cfg = irq_cfg + irq;
+ unsigned long flags;
+ unsigned int dest;
+ cpumask_t tmp;
+- int vector;
+
+ cpus_and(tmp, mask, cpu_online_map);
+ if (cpus_empty(tmp))
+- tmp = TARGET_CPUS;
+-
+- cpus_and(mask, tmp, CPU_MASK_ALL);
++ return;
+
+- vector = assign_irq_vector(irq, mask, &tmp);
+- if (vector < 0)
++ if (assign_irq_vector(irq, mask))
+ return;
+
++ cpus_and(tmp, cfg->domain, mask);
+ dest = cpu_mask_to_apicid(tmp);
+
+ /*
+@@ -282,8 +294,8 @@ static void set_ioapic_affinity_irq(unsi
+ dest = SET_APIC_LOGICAL_ID(dest);
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+- __target_IO_APIC_irq(irq, dest, vector);
+- set_native_irq_info(irq, mask);
++ __target_IO_APIC_irq(irq, dest, cfg->vector);
++ irq_desc[irq].affinity = mask;
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+ #endif
+@@ -329,11 +341,11 @@ static void add_pin_to_irq(unsigned int
+ reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
+ reg ACTION; \
+ io_apic_modify(entry->apic, reg); \
++ FINAL; \
+ if (!entry->next) \
+ break; \
+ entry = irq_2_pin + entry->next; \
+ } \
+- FINAL; \
+ }
+
+ #define DO_ACTION(name,R,ACTION, FINAL) \
+@@ -666,74 +678,58 @@ static int pin_2_irq(int idx, int apic,
+ return irq;
+ }
+
+-static inline int IO_APIC_irq_trigger(int irq)
+-{
+- int apic, idx, pin;
+-
+- for (apic = 0; apic < nr_ioapics; apic++) {
+- for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+- idx = find_irq_entry(apic,pin,mp_INT);
+- if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
+- return irq_trigger(idx);
+- }
+- }
+- /*
+- * nonexistent IRQs are edge default
+- */
+- return 0;
+-}
+-
+-/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
+-static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
+-
+-static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
++static int __assign_irq_vector(int irq, cpumask_t mask)
+ {
+- int vector;
+ struct physdev_irq irq_op;
++ struct irq_cfg *cfg;
+
+- BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
++ BUG_ON((unsigned)irq >= NR_IRQS);
++ cfg = &irq_cfg[irq];
+
+- cpus_and(*result, mask, cpu_online_map);
++ if ((cfg->move_in_progress) || cfg->move_cleanup_count)
++ return -EBUSY;
+
+- if (irq_vector[irq] > 0)
+- return irq_vector[irq];
++ if (cfg->vector)
++ return 0;
+
+ irq_op.irq = irq;
+ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
+ return -ENOSPC;
+
+- vector = irq_op.vector;
+- irq_vector[irq] = vector;
++ cfg->vector = irq_op.vector;
+
+- return vector;
++ return 0;
+ }
+
+-static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
++static int assign_irq_vector(int irq, cpumask_t mask)
+ {
+- int vector;
++ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vector_lock, flags);
+- vector = __assign_irq_vector(irq, mask, result);
++ err = __assign_irq_vector(irq, mask);
+ spin_unlock_irqrestore(&vector_lock, flags);
+- return vector;
++ return err;
+ }
+
+ #ifndef CONFIG_XEN
+ static void __clear_irq_vector(int irq)
+ {
++ struct irq_cfg *cfg;
+ cpumask_t mask;
+ int cpu, vector;
+
+- BUG_ON(!irq_vector[irq]);
++ BUG_ON((unsigned)irq >= NR_IRQS);
++ cfg = &irq_cfg[irq];
++ BUG_ON(!cfg->vector);
+
+- vector = irq_vector[irq];
+- cpus_and(mask, irq_domain[irq], cpu_online_map);
++ vector = cfg->vector;
++ cpus_and(mask, cfg->domain, cpu_online_map);
+ for_each_cpu_mask(cpu, mask)
+ per_cpu(vector_irq, cpu)[vector] = -1;
+
+- irq_vector[irq] = 0;
+- irq_domain[irq] = CPU_MASK_NONE;
++ cfg->vector = 0;
++ cfg->domain = CPU_MASK_NONE;
+ }
+
+ void __setup_vector_irq(int cpu)
+@@ -743,10 +739,10 @@ void __setup_vector_irq(int cpu)
+ int irq, vector;
+
+ /* Mark the inuse vectors */
+- for (irq = 0; irq < NR_IRQ_VECTORS; ++irq) {
+- if (!cpu_isset(cpu, irq_domain[irq]))
++ for (irq = 0; irq < NR_IRQS; ++irq) {
++ if (!cpu_isset(cpu, irq_cfg[irq].domain))
+ continue;
+- vector = irq_vector[irq];
++ vector = irq_cfg[irq].vector;
+ per_cpu(vector_irq, cpu)[vector] = irq;
+ }
+ /* Mark the free vectors */
+@@ -754,41 +750,49 @@ void __setup_vector_irq(int cpu)
+ irq = per_cpu(vector_irq, cpu)[vector];
+ if (irq < 0)
+ continue;
+- if (!cpu_isset(cpu, irq_domain[irq]))
++ if (!cpu_isset(cpu, irq_cfg[irq].domain))
+ per_cpu(vector_irq, cpu)[vector] = -1;
+ }
+ }
+
+-extern void (*interrupt[NR_IRQS])(void);
+-
+ static struct irq_chip ioapic_chip;
+
+-#define IOAPIC_AUTO -1
+-#define IOAPIC_EDGE 0
+-#define IOAPIC_LEVEL 1
+-
+-static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
++static void ioapic_register_intr(int irq, unsigned long trigger)
+ {
+- if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
+- trigger == IOAPIC_LEVEL)
++ if (trigger)
+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
+ handle_fasteoi_irq, "fasteoi");
+- else {
+- irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
++ else
+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
+ handle_edge_irq, "edge");
+- }
+ }
+ #else
+-#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
++#define ioapic_register_intr(irq,trigger) ((void)0)
+ #endif /* !CONFIG_XEN */
+
+-static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq)
++static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
++ int trigger, int polarity)
+ {
++ struct irq_cfg *cfg = irq_cfg + irq;
+ struct IO_APIC_route_entry entry;
+- int vector;
+- unsigned long flags;
++ cpumask_t mask;
+
++ if (!IO_APIC_IRQ(irq))
++ return;
++
++ mask = TARGET_CPUS;
++ if (assign_irq_vector(irq, mask))
++ return;
++
++#ifndef CONFIG_XEN
++ cpus_and(mask, cfg->domain, mask);
++#endif
++
++ apic_printk(APIC_VERBOSE,KERN_DEBUG
++ "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
++ "IRQ %d Mode:%i Active:%i)\n",
++ apic, mp_ioapics[apic].mpc_apicid, pin, cfg->vector,
++ irq, trigger, polarity);
+
+ /*
+ * add it to the IO-APIC irq-routing table:
+@@ -797,41 +801,23 @@ static void __init setup_IO_APIC_irq(int
+
+ entry.delivery_mode = INT_DELIVERY_MODE;
+ entry.dest_mode = INT_DEST_MODE;
++ entry.dest = cpu_mask_to_apicid(mask);
+ entry.mask = 0; /* enable IRQ */
+- entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.trigger = trigger;
++ entry.polarity = polarity;
++ entry.vector = cfg->vector;
+
+- entry.trigger = irq_trigger(idx);
+- entry.polarity = irq_polarity(idx);
+-
+- if (irq_trigger(idx)) {
+- entry.trigger = 1;
++ /* Mask level triggered irqs.
++ * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
++ */
++ if (trigger)
+ entry.mask = 1;
+- entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
+- }
+
+- if (/* !apic && */ !IO_APIC_IRQ(irq))
+- return;
+-
+- if (IO_APIC_IRQ(irq)) {
+- cpumask_t mask;
+- vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
+- if (vector < 0)
+- return;
+-
+- entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
+- entry.vector = vector;
+-
+- ioapic_register_intr(irq, vector, IOAPIC_AUTO);
+- if (!apic && (irq < 16))
+- disable_8259A_irq(irq);
+- }
++ ioapic_register_intr(irq, trigger);
++ if (irq < 16)
++ disable_8259A_irq(irq);
+
+ ioapic_write_entry(apic, pin, entry);
+-
+- spin_lock_irqsave(&ioapic_lock, flags);
+- set_native_irq_info(irq, TARGET_CPUS);
+- spin_unlock_irqrestore(&ioapic_lock, flags);
+-
+ }
+
+ static void __init setup_IO_APIC_irqs(void)
+@@ -856,8 +842,8 @@ static void __init setup_IO_APIC_irqs(vo
+ irq = pin_2_irq(idx, apic, pin);
+ add_pin_to_irq(irq, apic, pin);
+
+- setup_IO_APIC_irq(apic, pin, idx, irq);
+-
++ setup_IO_APIC_irq(apic, pin, irq,
++ irq_trigger(idx), irq_polarity(idx));
+ }
+ }
+
+@@ -888,7 +874,7 @@ static void __init setup_ExtINT_IRQ0_pin
+ */
+ entry.dest_mode = INT_DEST_MODE;
+ entry.mask = 0; /* unmask IRQ now */
+- entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
+ entry.delivery_mode = INT_DELIVERY_MODE;
+ entry.polarity = 0;
+ entry.trigger = 0;
+@@ -988,18 +974,17 @@ void __apicdebuginit print_IO_APIC(void)
+
+ printk(KERN_DEBUG ".... IRQ redirection table:\n");
+
+- printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
+- " Stat Dest Deli Vect: \n");
++ printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
++ " Stat Dmod Deli Vect: \n");
+
+ for (i = 0; i <= reg_01.bits.entries; i++) {
+ struct IO_APIC_route_entry entry;
+
+ entry = ioapic_read_entry(apic, i);
+
+- printk(KERN_DEBUG " %02x %03X %02X ",
++ printk(KERN_DEBUG " %02x %03X ",
+ i,
+- entry.dest.logical.logical_dest,
+- entry.dest.physical.physical_dest
++ entry.dest
+ );
+
+ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
+@@ -1270,8 +1255,7 @@ void disable_IO_APIC(void)
+ entry.dest_mode = 0; /* Physical */
+ entry.delivery_mode = dest_ExtINT; /* ExtInt */
+ entry.vector = 0;
+- entry.dest.physical.physical_dest =
+- GET_APIC_ID(apic_read(APIC_ID));
++ entry.dest = GET_APIC_ID(apic_read(APIC_ID));
+
+ /*
+ * Add it to the IO-APIC irq-routing table:
+@@ -1356,16 +1340,15 @@ static unsigned int startup_ioapic_irq(u
+
+ static int ioapic_retrigger_irq(unsigned int irq)
+ {
++ struct irq_cfg *cfg = &irq_cfg[irq];
+ cpumask_t mask;
+- unsigned vector;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vector_lock, flags);
+- vector = irq_vector[irq];
+ cpus_clear(mask);
+- cpu_set(first_cpu(irq_domain[irq]), mask);
++ cpu_set(first_cpu(cfg->domain), mask);
+
+- send_IPI_mask(mask, vector);
++ send_IPI_mask(mask, cfg->vector);
+ spin_unlock_irqrestore(&vector_lock, flags);
+
+ return 1;
+@@ -1380,8 +1363,68 @@ static int ioapic_retrigger_irq(unsigned
+ * races.
+ */
+
++#ifdef CONFIG_SMP
++asmlinkage void smp_irq_move_cleanup_interrupt(void)
++{
++ unsigned vector, me;
++ ack_APIC_irq();
++ exit_idle();
++ irq_enter();
++
++ me = smp_processor_id();
++ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
++ unsigned int irq;
++ struct irq_desc *desc;
++ struct irq_cfg *cfg;
++ irq = __get_cpu_var(vector_irq)[vector];
++ if (irq >= NR_IRQS)
++ continue;
++
++ desc = irq_desc + irq;
++ cfg = irq_cfg + irq;
++ spin_lock(&desc->lock);
++ if (!cfg->move_cleanup_count)
++ goto unlock;
++
++ if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
++ goto unlock;
++
++ __get_cpu_var(vector_irq)[vector] = -1;
++ cfg->move_cleanup_count--;
++unlock:
++ spin_unlock(&desc->lock);
++ }
++
++ irq_exit();
++}
++
++static void irq_complete_move(unsigned int irq)
++{
++ struct irq_cfg *cfg = irq_cfg + irq;
++ unsigned vector, me;
++
++ if (likely(!cfg->move_in_progress))
++ return;
++
++ vector = ~get_irq_regs()->orig_rax;
++ me = smp_processor_id();
++ if ((vector == cfg->vector) &&
++ cpu_isset(smp_processor_id(), cfg->domain)) {
++ cpumask_t cleanup_mask;
++
++ cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
++ cfg->move_cleanup_count = cpus_weight(cleanup_mask);
++ send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
++ cfg->move_in_progress = 0;
++ }
++}
++#else
++static inline void irq_complete_move(unsigned int irq) {}
++#endif
++
+ static void ack_apic_edge(unsigned int irq)
+ {
++ irq_complete_move(irq);
+ move_native_irq(irq);
+ ack_APIC_irq();
+ }
+@@ -1390,6 +1433,7 @@ static void ack_apic_level(unsigned int
+ {
+ int do_unmask_irq = 0;
+
++ irq_complete_move(irq);
+ #if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
+ /* If we are moving the irq we need to mask it */
+ if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
+@@ -1441,7 +1485,7 @@ static inline void init_IO_APIC_traps(vo
+ */
+ for (irq = 0; irq < NR_IRQS ; irq++) {
+ int tmp = irq;
+- if (IO_APIC_IRQ(tmp) && !irq_vector[tmp]) {
++ if (IO_APIC_IRQ(tmp) && !irq_cfg[tmp].vector) {
+ /*
+ * Hmm.. We don't have an entry for this,
+ * so default to an old-fashioned 8259
+@@ -1539,7 +1583,7 @@ static inline void unlock_ExtINT_logic(v
+
+ entry1.dest_mode = 0; /* physical delivery */
+ entry1.mask = 0; /* unmask IRQ now */
+- entry1.dest.physical.physical_dest = hard_smp_processor_id();
++ entry1.dest = hard_smp_processor_id();
+ entry1.delivery_mode = dest_ExtINT;
+ entry1.polarity = entry0.polarity;
+ entry1.trigger = 0;
+@@ -1583,15 +1627,14 @@ static inline void unlock_ExtINT_logic(v
+ */
+ static inline void check_timer(void)
+ {
++ struct irq_cfg *cfg = irq_cfg + 0;
+ int apic1, pin1, apic2, pin2;
+- int vector;
+- cpumask_t mask;
+
+ /*
+ * get/set the timer IRQ vector:
+ */
+ disable_8259A_irq(0);
+- vector = assign_irq_vector(0, TARGET_CPUS, &mask);
++ assign_irq_vector(0, TARGET_CPUS);
+
+ /*
+ * Subtle, code in do_timer_interrupt() expects an AEOI
+@@ -1611,7 +1654,7 @@ static inline void check_timer(void)
+ apic2 = ioapic_i8259.apic;
+
+ apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
+- vector, apic1, pin1, apic2, pin2);
++ cfg->vector, apic1, pin1, apic2, pin2);
+
+ if (pin1 != -1) {
+ /*
+@@ -1642,7 +1685,7 @@ static inline void check_timer(void)
+ /*
+ * legacy devices should be connected to IO APIC #0
+ */
+- setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
++ setup_ExtINT_IRQ0_pin(apic2, pin2, cfg->vector);
+ if (timer_irq_works()) {
+ apic_printk(APIC_VERBOSE," works.\n");
+ nmi_watchdog_default();
+@@ -1667,14 +1710,14 @@ static inline void check_timer(void)
+
+ disable_8259A_irq(0);
+ irq_desc[0].chip = &lapic_irq_type;
+- apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
++ apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
+ enable_8259A_irq(0);
+
+ if (timer_irq_works()) {
+ apic_printk(APIC_VERBOSE," works.\n");
+ return;
+ }
+- apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
++ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
+ apic_printk(APIC_VERBOSE," failed.\n");
+
+ apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
+@@ -1828,19 +1871,16 @@ int create_irq(void)
+ /* Allocate an unused irq */
+ int irq;
+ int new;
+- int vector = 0;
+ unsigned long flags;
+- cpumask_t mask;
+
+ irq = -ENOSPC;
+ spin_lock_irqsave(&vector_lock, flags);
+ for (new = (NR_IRQS - 1); new >= 0; new--) {
+ if (platform_legacy_irq(new))
+ continue;
+- if (irq_vector[new] != 0)
++ if (irq_cfg[new].vector != 0)
+ continue;
+- vector = __assign_irq_vector(new, TARGET_CPUS, &mask);
+- if (likely(vector > 0))
++ if (__assign_irq_vector(new, TARGET_CPUS) == 0)
+ irq = new;
+ break;
+ }
+@@ -1870,12 +1910,15 @@ void destroy_irq(unsigned int irq)
+ #ifdef CONFIG_PCI_MSI
+ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
+ {
+- int vector;
++ struct irq_cfg *cfg = irq_cfg + irq;
++ int err;
+ unsigned dest;
+ cpumask_t tmp;
+
+- vector = assign_irq_vector(irq, TARGET_CPUS, &tmp);
+- if (vector >= 0) {
++ tmp = TARGET_CPUS;
++ err = assign_irq_vector(irq, tmp);
++ if (!err) {
++ cpus_and(tmp, cfg->domain, tmp);
+ dest = cpu_mask_to_apicid(tmp);
+
+ msg->address_hi = MSI_ADDR_BASE_HI;
+@@ -1895,40 +1938,38 @@ static int msi_compose_msg(struct pci_de
+ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
+ MSI_DATA_DELIVERY_FIXED:
+ MSI_DATA_DELIVERY_LOWPRI) |
+- MSI_DATA_VECTOR(vector);
++ MSI_DATA_VECTOR(cfg->vector);
+ }
+- return vector;
++ return err;
+ }
+
+ #ifdef CONFIG_SMP
+ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+ {
++ struct irq_cfg *cfg = irq_cfg + irq;
+ struct msi_msg msg;
+ unsigned int dest;
+ cpumask_t tmp;
+- int vector;
+
+ cpus_and(tmp, mask, cpu_online_map);
+ if (cpus_empty(tmp))
+- tmp = TARGET_CPUS;
+-
+- cpus_and(mask, tmp, CPU_MASK_ALL);
++ return;
+
+- vector = assign_irq_vector(irq, mask, &tmp);
+- if (vector < 0)
++ if (assign_irq_vector(irq, mask))
+ return;
+
++ cpus_and(tmp, cfg->domain, mask);
+ dest = cpu_mask_to_apicid(tmp);
+
+ read_msi_msg(irq, &msg);
+
+ msg.data &= ~MSI_DATA_VECTOR_MASK;
+- msg.data |= MSI_DATA_VECTOR(vector);
++ msg.data |= MSI_DATA_VECTOR(cfg->vector);
+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+ write_msi_msg(irq, &msg);
+- set_native_irq_info(irq, mask);
++ irq_desc[irq].affinity = mask;
+ }
+ #endif /* CONFIG_SMP */
+
+@@ -1947,24 +1988,31 @@ static struct irq_chip msi_chip = {
+ .retrigger = ioapic_retrigger_irq,
+ };
+
+-int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
++int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
+ {
+ struct msi_msg msg;
+- int ret;
++ int irq, ret;
++ irq = create_irq();
++ if (irq < 0)
++ return irq;
++
++ set_irq_msi(irq, desc);
+ ret = msi_compose_msg(dev, irq, &msg);
+- if (ret < 0)
++ if (ret < 0) {
++ destroy_irq(irq);
+ return ret;
++ }
+
+ write_msi_msg(irq, &msg);
+
+ set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
+
+- return 0;
++ return irq;
+ }
+
+ void arch_teardown_msi_irq(unsigned int irq)
+ {
+- return;
++ destroy_irq(irq);
+ }
+
+ #endif /* CONFIG_PCI_MSI */
+@@ -1992,24 +2040,22 @@ static void target_ht_irq(unsigned int i
+
+ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
+ {
++ struct irq_cfg *cfg = irq_cfg + irq;
+ unsigned int dest;
+ cpumask_t tmp;
+- int vector;
+
+ cpus_and(tmp, mask, cpu_online_map);
+ if (cpus_empty(tmp))
+- tmp = TARGET_CPUS;
+-
+- cpus_and(mask, tmp, CPU_MASK_ALL);
++ return;
+
+- vector = assign_irq_vector(irq, mask, &tmp);
+- if (vector < 0)
++ if (assign_irq_vector(irq, mask))
+ return;
+
++ cpus_and(tmp, cfg->domain, mask);
+ dest = cpu_mask_to_apicid(tmp);
+
+- target_ht_irq(irq, dest, vector);
+- set_native_irq_info(irq, mask);
++ target_ht_irq(irq, dest, cfg->vector);
++ irq_desc[irq].affinity = mask;
+ }
+ #endif
+
+@@ -2026,14 +2072,17 @@ static struct irq_chip ht_irq_chip = {
+
+ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
+ {
+- int vector;
++ struct irq_cfg *cfg = irq_cfg + irq;
++ int err;
+ cpumask_t tmp;
+
+- vector = assign_irq_vector(irq, TARGET_CPUS, &tmp);
+- if (vector >= 0) {
++ tmp = TARGET_CPUS;
++ err = assign_irq_vector(irq, tmp);
++ if (!err) {
+ struct ht_irq_msg msg;
+ unsigned dest;
+
++ cpus_and(tmp, cfg->domain, tmp);
+ dest = cpu_mask_to_apicid(tmp);
+
+ msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
+@@ -2041,7 +2090,7 @@ int arch_setup_ht_irq(unsigned int irq,
+ msg.address_lo =
+ HT_IRQ_LOW_BASE |
+ HT_IRQ_LOW_DEST_ID(dest) |
+- HT_IRQ_LOW_VECTOR(vector) |
++ HT_IRQ_LOW_VECTOR(cfg->vector) |
+ ((INT_DEST_MODE == 0) ?
+ HT_IRQ_LOW_DM_PHYSICAL :
+ HT_IRQ_LOW_DM_LOGICAL) |
+@@ -2056,7 +2105,7 @@ int arch_setup_ht_irq(unsigned int irq,
+ set_irq_chip_and_handler_name(irq, &ht_irq_chip,
+ handle_edge_irq, "edge");
+ }
+- return vector;
++ return err;
+ }
+ #endif /* CONFIG_HT_IRQ */
+
+@@ -2081,13 +2130,8 @@ int __init io_apic_get_redir_entries (in
+ }
+
+
+-int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
++int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
+ {
+- struct IO_APIC_route_entry entry;
+- unsigned long flags;
+- int vector;
+- cpumask_t mask;
+-
+ if (!IO_APIC_IRQ(irq)) {
+ apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
+ ioapic);
+@@ -2100,42 +2144,7 @@ int io_apic_set_pci_routing (int ioapic,
+ if (irq >= 16)
+ add_pin_to_irq(irq, ioapic, pin);
+
+-
+- vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
+- if (vector < 0)
+- return vector;
+-
+- /*
+- * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
+- * Note that we mask (disable) IRQs now -- these get enabled when the
+- * corresponding device driver registers for this IRQ.
+- */
+-
+- memset(&entry,0,sizeof(entry));
+-
+- entry.delivery_mode = INT_DELIVERY_MODE;
+- entry.dest_mode = INT_DEST_MODE;
+- entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
+- entry.trigger = edge_level;
+- entry.polarity = active_high_low;
+- entry.mask = 1; /* Disabled (masked) */
+- entry.vector = vector & 0xff;
+-
+- apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
+- "IRQ %d Mode:%i Active:%i)\n", ioapic,
+- mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
+- edge_level, active_high_low);
+-
+- ioapic_register_intr(irq, entry.vector, edge_level);
+-
+- if (!ioapic && (irq < 16))
+- disable_8259A_irq(irq);
+-
+- ioapic_write_entry(ioapic, pin, entry);
+-
+- spin_lock_irqsave(&ioapic_lock, flags);
+- set_native_irq_info(irq, TARGET_CPUS);
+- spin_unlock_irqrestore(&ioapic_lock, flags);
++ setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
+
+ return 0;
+ }
+@@ -2168,8 +2177,10 @@ void __init setup_ioapic_dest(void)
+ * when you have too many devices, because at that time only boot
+ * cpu is online.
+ */
+- if(!irq_vector[irq])
+- setup_IO_APIC_irq(ioapic, pin, irq_entry, irq);
++ if (!irq_cfg[irq].vector)
++ setup_IO_APIC_irq(ioapic, pin, irq,
++ irq_trigger(irq_entry),
++ irq_polarity(irq_entry));
+ else
+ set_ioapic_affinity_irq(irq, TARGET_CPUS);
+ }
+Index: 10.3-2007-11-26/arch/x86_64/kernel/irq-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/irq-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/irq-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -18,6 +18,7 @@
+ #include <asm/uaccess.h>
+ #include <asm/io_apic.h>
+ #include <asm/idle.h>
++#include <asm/smp.h>
+
+ atomic_t irq_err_count;
+
+@@ -120,9 +121,15 @@ asmlinkage unsigned int do_IRQ(struct pt
+
+ if (likely(irq < NR_IRQS))
+ generic_handle_irq(irq);
+- else if (printk_ratelimit())
+- printk(KERN_EMERG "%s: %d.%d No irq handler for irq\n",
+- __func__, smp_processor_id(), irq);
++ else {
++#ifndef CONFIG_XEN
++ if (!disable_apic)
++ ack_APIC_irq();
++#endif
++ if (printk_ratelimit())
++ printk(KERN_EMERG "%s: %d.%d No irq handler for irq\n",
++ __func__, smp_processor_id(), irq);
++ }
+
+ irq_exit();
+
+Index: 10.3-2007-11-26/arch/x86_64/kernel/mpparse-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/mpparse-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/mpparse-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -60,9 +60,9 @@ unsigned long mp_lapic_addr = 0;
+ /* Processor that is doing the boot up */
+ unsigned int boot_cpu_id = -1U;
+ /* Internal processor count */
+-unsigned int num_processors __initdata = 0;
++unsigned int num_processors __cpuinitdata = 0;
+
+-unsigned disabled_cpus __initdata;
++unsigned disabled_cpus __cpuinitdata;
+
+ /* Bitmask of physically existing CPUs */
+ physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
+@@ -808,7 +808,7 @@ int mp_register_gsi(u32 gsi, int trigger
+ return gsi;
+
+ /* Don't set up the ACPI SCI because it's already set up */
+- if (acpi_fadt.sci_int == gsi)
++ if (acpi_gbl_FADT.sci_interrupt == gsi)
+ return gsi;
+
+ ioapic = mp_find_ioapic(gsi);
+Index: 10.3-2007-11-26/arch/x86_64/kernel/pci-swiotlb-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/pci-swiotlb-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/pci-swiotlb-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -35,7 +35,7 @@ struct dma_mapping_ops swiotlb_dma_ops =
+ #endif
+ };
+
+-void pci_swiotlb_init(void)
++void __init pci_swiotlb_init(void)
+ {
+ #if 0
+ /* don't initialize swiotlb if iommu=off (no_iommu=1) */
+Index: 10.3-2007-11-26/arch/x86_64/kernel/process-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/process-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/process-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -337,14 +337,17 @@ void load_gs_index(unsigned gs)
+ void flush_thread(void)
+ {
+ struct task_struct *tsk = current;
+- struct thread_info *t = current_thread_info();
+
+- if (t->flags & _TIF_ABI_PENDING) {
+- t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
+- if (t->flags & _TIF_IA32)
++ if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
++ clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
++ if (test_tsk_thread_flag(tsk, TIF_IA32)) {
++ clear_tsk_thread_flag(tsk, TIF_IA32);
++ } else {
++ set_tsk_thread_flag(tsk, TIF_IA32);
+ current_thread_info()->status |= TS_COMPAT;
++ }
+ }
+- t->flags &= ~_TIF_DEBUG;
++ clear_tsk_thread_flag(tsk, TIF_DEBUG);
+
+ tsk->thread.debugreg0 = 0;
+ tsk->thread.debugreg1 = 0;
+Index: 10.3-2007-11-26/arch/x86_64/kernel/setup-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/setup-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/setup-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -144,7 +144,7 @@ EXPORT_SYMBOL_GPL(edid_info);
+
+ extern int root_mountflags;
+
+-char command_line[COMMAND_LINE_SIZE];
++char __initdata command_line[COMMAND_LINE_SIZE];
+
+ struct resource standard_io_resources[] = {
+ { .name = "dma1", .start = 0x00, .end = 0x1f,
+@@ -182,134 +182,6 @@ struct resource code_resource = {
+ .flags = IORESOURCE_RAM,
+ };
+
+-#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
+-
+-static struct resource system_rom_resource = {
+- .name = "System ROM",
+- .start = 0xf0000,
+- .end = 0xfffff,
+- .flags = IORESOURCE_ROM,
+-};
+-
+-static struct resource extension_rom_resource = {
+- .name = "Extension ROM",
+- .start = 0xe0000,
+- .end = 0xeffff,
+- .flags = IORESOURCE_ROM,
+-};
+-
+-static struct resource adapter_rom_resources[] = {
+- { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
+- .flags = IORESOURCE_ROM },
+- { .name = "Adapter ROM", .start = 0, .end = 0,
+- .flags = IORESOURCE_ROM },
+- { .name = "Adapter ROM", .start = 0, .end = 0,
+- .flags = IORESOURCE_ROM },
+- { .name = "Adapter ROM", .start = 0, .end = 0,
+- .flags = IORESOURCE_ROM },
+- { .name = "Adapter ROM", .start = 0, .end = 0,
+- .flags = IORESOURCE_ROM },
+- { .name = "Adapter ROM", .start = 0, .end = 0,
+- .flags = IORESOURCE_ROM }
+-};
+-
+-static struct resource video_rom_resource = {
+- .name = "Video ROM",
+- .start = 0xc0000,
+- .end = 0xc7fff,
+- .flags = IORESOURCE_ROM,
+-};
+-
+-static struct resource video_ram_resource = {
+- .name = "Video RAM area",
+- .start = 0xa0000,
+- .end = 0xbffff,
+- .flags = IORESOURCE_RAM,
+-};
+-
+-#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
+-
+-static int __init romchecksum(unsigned char *rom, unsigned long length)
+-{
+- unsigned char *p, sum = 0;
+-
+- for (p = rom; p < rom + length; p++)
+- sum += *p;
+- return sum == 0;
+-}
+-
+-static void __init probe_roms(void)
+-{
+- unsigned long start, length, upper;
+- unsigned char *rom;
+- int i;
+-
+-#ifdef CONFIG_XEN
+- /* Nothing to do if not running in dom0. */
+- if (!is_initial_xendomain())
+- return;
+-#endif
+-
+- /* video rom */
+- upper = adapter_rom_resources[0].start;
+- for (start = video_rom_resource.start; start < upper; start += 2048) {
+- rom = isa_bus_to_virt(start);
+- if (!romsignature(rom))
+- continue;
+-
+- video_rom_resource.start = start;
+-
+- /* 0 < length <= 0x7f * 512, historically */
+- length = rom[2] * 512;
+-
+- /* if checksum okay, trust length byte */
+- if (length && romchecksum(rom, length))
+- video_rom_resource.end = start + length - 1;
+-
+- request_resource(&iomem_resource, &video_rom_resource);
+- break;
+- }
+-
+- start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
+- if (start < upper)
+- start = upper;
+-
+- /* system rom */
+- request_resource(&iomem_resource, &system_rom_resource);
+- upper = system_rom_resource.start;
+-
+- /* check for extension rom (ignore length byte!) */
+- rom = isa_bus_to_virt(extension_rom_resource.start);
+- if (romsignature(rom)) {
+- length = extension_rom_resource.end - extension_rom_resource.start + 1;
+- if (romchecksum(rom, length)) {
+- request_resource(&iomem_resource, &extension_rom_resource);
+- upper = extension_rom_resource.start;
+- }
+- }
+-
+- /* check for adapter roms on 2k boundaries */
+- for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper;
+- start += 2048) {
+- rom = isa_bus_to_virt(start);
+- if (!romsignature(rom))
+- continue;
+-
+- /* 0 < length <= 0x7f * 512, historically */
+- length = rom[2] * 512;
+-
+- /* but accept any length that fits if checksum okay */
+- if (!length || start + length > upper || !romchecksum(rom, length))
+- continue;
+-
+- adapter_rom_resources[i].start = start;
+- adapter_rom_resources[i].end = start + length - 1;
+- request_resource(&iomem_resource, &adapter_rom_resources[i]);
+-
+- start = adapter_rom_resources[i++].end & ~2047UL;
+- }
+-}
+-
+ #ifdef CONFIG_PROC_VMCORE
+ /* elfcorehdr= specifies the location of elf core header
+ * stored by the crashed kernel. This option will be passed
+@@ -406,7 +278,7 @@ void __init setup_arch(char **cmdline_p)
+ #ifdef CONFIG_XEN
+ extern struct e820map machine_e820;
+
+- printk(KERN_INFO "Command line: %s\n", saved_command_line);
++ printk(KERN_INFO "Command line: %s\n", boot_command_line);
+
+ /* Register a call for panic conditions. */
+ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
+@@ -454,7 +326,7 @@ void __init setup_arch(char **cmdline_p)
+
+ ARCH_SETUP
+ #else
+- printk(KERN_INFO "Command line: %s\n", saved_command_line);
++ printk(KERN_INFO "Command line: %s\n", boot_command_line);
+
+ ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
+ screen_info = SCREEN_INFO;
+@@ -485,7 +357,7 @@ void __init setup_arch(char **cmdline_p)
+
+ early_identify_cpu(&boot_cpu_data);
+
+- strlcpy(command_line, saved_command_line, COMMAND_LINE_SIZE);
++ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = command_line;
+
+ parse_early_param();
+@@ -555,6 +427,11 @@ void __init setup_arch(char **cmdline_p)
+ /* reserve ebda region */
+ if (ebda_addr)
+ reserve_bootmem_generic(ebda_addr, ebda_size);
++#ifdef CONFIG_NUMA
++ /* reserve nodemap region */
++ if (nodemap_addr)
++ reserve_bootmem_generic(nodemap_addr, nodemap_size);
++#endif
+
+ #ifdef CONFIG_SMP
+ /*
+@@ -724,10 +601,8 @@ void __init setup_arch(char **cmdline_p)
+ #endif
+
+ /*
+- * Request address space for all standard RAM and ROM resources
+- * and also for regions reported as reserved by the e820.
+- */
+- probe_roms();
++ * We trust e820 completely. No explicit ROM probing in memory.
++ */
+ #ifdef CONFIG_XEN
+ if (is_initial_xendomain()) {
+ struct xen_memory_map memmap;
+@@ -746,8 +621,6 @@ void __init setup_arch(char **cmdline_p)
+ e820_mark_nosave_regions();
+ #endif
+
+- request_resource(&iomem_resource, &video_ram_resource);
+-
+ {
+ unsigned i;
+ /* request I/O space for devices used on all i[345]86 PCs */
+@@ -1325,7 +1198,8 @@ static int show_cpuinfo(struct seq_file
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
+- NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",
++ NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
++ "3dnowext", "3dnow",
+
+ /* Transmeta-defined */
+ "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
+@@ -1343,7 +1217,7 @@ static int show_cpuinfo(struct seq_file
+ /* Intel-defined (#2) */
+ "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
+ "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
+- NULL, NULL, "dca", NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* VIA/Cyrix/Centaur-defined */
+@@ -1353,8 +1227,10 @@ static int show_cpuinfo(struct seq_file
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* AMD-defined (#2) */
+- "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
+- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
++ "altmovcr8", "abm", "sse4a",
++ "misalignsse", "3dnowprefetch",
++ "osvw", "ibs", NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ };
+@@ -1365,6 +1241,9 @@ static int show_cpuinfo(struct seq_file
+ "ttp", /* thermal trip */
+ "tm",
+ "stc",
++ "100mhzsteps",
++ "hwpstate",
++ NULL, /* tsc invariant mapped to constant_tsc */
+ NULL,
+ /* nothing */ /* constant_tsc - moved to flags */
+ };
+@@ -1481,26 +1360,3 @@ struct seq_operations cpuinfo_op = {
+ .stop = c_stop,
+ .show = show_cpuinfo,
+ };
+-
+-#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
+-#include <linux/platform_device.h>
+-static __init int add_pcspkr(void)
+-{
+- struct platform_device *pd;
+- int ret;
+-
+- if (!is_initial_xendomain())
+- return 0;
+-
+- pd = platform_device_alloc("pcspkr", -1);
+- if (!pd)
+- return -ENOMEM;
+-
+- ret = platform_device_add(pd);
+- if (ret)
+- platform_device_put(pd);
+-
+- return ret;
+-}
+-device_initcall(add_pcspkr);
+-#endif
+Index: 10.3-2007-11-26/arch/x86_64/kernel/vsyscall-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/vsyscall-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/vsyscall-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -26,6 +26,7 @@
+ #include <linux/seqlock.h>
+ #include <linux/jiffies.h>
+ #include <linux/sysctl.h>
++#include <linux/clocksource.h>
+ #include <linux/getcpu.h>
+ #include <linux/cpu.h>
+ #include <linux/smp.h>
+@@ -34,6 +35,7 @@
+ #include <asm/vsyscall.h>
+ #include <asm/pgtable.h>
+ #include <asm/page.h>
++#include <asm/unistd.h>
+ #include <asm/fixmap.h>
+ #include <asm/errno.h>
+ #include <asm/io.h>
+@@ -44,56 +46,41 @@
+ #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
+ #define __syscall_clobber "r11","rcx","memory"
+
+-int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
+-seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
++struct vsyscall_gtod_data_t {
++ seqlock_t lock;
++ int sysctl_enabled;
++ struct timeval wall_time_tv;
++ struct timezone sys_tz;
++ cycle_t offset_base;
++ struct clocksource clock;
++};
+ int __vgetcpu_mode __section_vgetcpu_mode;
+
+-#include <asm/unistd.h>
+-
+-static __always_inline void timeval_normalize(struct timeval * tv)
++struct vsyscall_gtod_data_t __vsyscall_gtod_data __section_vsyscall_gtod_data =
+ {
+- time_t __sec;
+-
+- __sec = tv->tv_usec / 1000000;
+- if (__sec) {
+- tv->tv_usec %= 1000000;
+- tv->tv_sec += __sec;
+- }
+-}
++ .lock = SEQLOCK_UNLOCKED,
++ .sysctl_enabled = 1,
++};
+
+-static __always_inline void do_vgettimeofday(struct timeval * tv)
++void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
+ {
+- long sequence, t;
+- unsigned long sec, usec;
++ unsigned long flags;
+
+- do {
+- sequence = read_seqbegin(&__xtime_lock);
+-
+- sec = __xtime.tv_sec;
+- usec = __xtime.tv_nsec / 1000;
+-
+- if (__vxtime.mode != VXTIME_HPET) {
+- t = get_cycles_sync();
+- if (t < __vxtime.last_tsc)
+- t = __vxtime.last_tsc;
+- usec += ((t - __vxtime.last_tsc) *
+- __vxtime.tsc_quot) >> 32;
+- /* See comment in x86_64 do_gettimeofday. */
+- } else {
+- usec += ((readl((void __iomem *)
+- fix_to_virt(VSYSCALL_HPET) + 0xf0) -
+- __vxtime.last) * __vxtime.quot) >> 32;
+- }
+- } while (read_seqretry(&__xtime_lock, sequence));
+-
+- tv->tv_sec = sec + usec / 1000000;
+- tv->tv_usec = usec % 1000000;
++ write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
++ /* copy vsyscall data */
++ vsyscall_gtod_data.clock = *clock;
++ vsyscall_gtod_data.wall_time_tv.tv_sec = wall_time->tv_sec;
++ vsyscall_gtod_data.wall_time_tv.tv_usec = wall_time->tv_nsec/1000;
++ vsyscall_gtod_data.sys_tz = sys_tz;
++ write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+ }
+
+-/* RED-PEN may want to readd seq locking, but then the variable should be write-once. */
++/* RED-PEN may want to readd seq locking, but then the variable should be
++ * write-once.
++ */
+ static __always_inline void do_get_tz(struct timezone * tz)
+ {
+- *tz = __sys_tz;
++ *tz = __vsyscall_gtod_data.sys_tz;
+ }
+
+ static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
+@@ -101,7 +88,8 @@ static __always_inline int gettimeofday(
+ int ret;
+ asm volatile("vsysc2: syscall"
+ : "=a" (ret)
+- : "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
++ : "0" (__NR_gettimeofday),"D" (tv),"S" (tz)
++ : __syscall_clobber );
+ return ret;
+ }
+
+@@ -114,10 +102,44 @@ static __always_inline long time_syscall
+ return secs;
+ }
+
++static __always_inline void do_vgettimeofday(struct timeval * tv)
++{
++ cycle_t now, base, mask, cycle_delta;
++ unsigned long seq, mult, shift, nsec_delta;
++ cycle_t (*vread)(void);
++ do {
++ seq = read_seqbegin(&__vsyscall_gtod_data.lock);
++
++ vread = __vsyscall_gtod_data.clock.vread;
++ if (unlikely(!__vsyscall_gtod_data.sysctl_enabled || !vread)) {
++ gettimeofday(tv,NULL);
++ return;
++ }
++ now = vread();
++ base = __vsyscall_gtod_data.clock.cycle_last;
++ mask = __vsyscall_gtod_data.clock.mask;
++ mult = __vsyscall_gtod_data.clock.mult;
++ shift = __vsyscall_gtod_data.clock.shift;
++
++ *tv = __vsyscall_gtod_data.wall_time_tv;
++
++ } while (read_seqretry(&__vsyscall_gtod_data.lock, seq));
++
++ /* calculate interval: */
++ cycle_delta = (now - base) & mask;
++ /* convert to nsecs: */
++ nsec_delta = (cycle_delta * mult) >> shift;
++
++ /* convert to usecs and add to timespec: */
++ tv->tv_usec += nsec_delta / NSEC_PER_USEC;
++ while (tv->tv_usec > USEC_PER_SEC) {
++ tv->tv_sec += 1;
++ tv->tv_usec -= USEC_PER_SEC;
++ }
++}
++
+ int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
+ {
+- if (!__sysctl_vsyscall)
+- return gettimeofday(tv,tz);
+ if (tv)
+ do_vgettimeofday(tv);
+ if (tz)
+@@ -129,11 +151,11 @@ int __vsyscall(0) vgettimeofday(struct t
+ * unlikely */
+ time_t __vsyscall(1) vtime(time_t *t)
+ {
+- if (!__sysctl_vsyscall)
++ if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
+ return time_syscall(t);
+ else if (t)
+- *t = __xtime.tv_sec;
+- return __xtime.tv_sec;
++ *t = __vsyscall_gtod_data.wall_time_tv.tv_sec;
++ return __vsyscall_gtod_data.wall_time_tv.tv_sec;
+ }
+
+ /* Fast way to get current CPU and node.
+@@ -210,7 +232,7 @@ static int vsyscall_sysctl_change(ctl_ta
+ ret = -ENOMEM;
+ goto out;
+ }
+- if (!sysctl_vsyscall) {
++ if (!vsyscall_gtod_data.sysctl_enabled) {
+ writew(SYSCALL, map1);
+ writew(SYSCALL, map2);
+ } else {
+@@ -232,16 +254,17 @@ static int vsyscall_sysctl_nostrat(ctl_t
+
+ static ctl_table kernel_table2[] = {
+ { .ctl_name = 99, .procname = "vsyscall64",
+- .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
++ .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
++ .mode = 0644,
+ .strategy = vsyscall_sysctl_nostrat,
+ .proc_handler = vsyscall_sysctl_change },
+- { 0, }
++ {}
+ };
+
+ static ctl_table kernel_root_table2[] = {
+ { .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
+ .child = kernel_table2 },
+- { 0 },
++ {}
+ };
+
+ #endif
+@@ -313,14 +336,14 @@ static int __init vsyscall_init(void)
+ map_vsyscall();
+ #ifdef CONFIG_XEN
+ map_vsyscall_user();
+- sysctl_vsyscall = 0; /* disable vgettimeofay() */
++ vsyscall_gtod_data.sysctl_enabled = 0; /* disable vgettimeofay() */
+ if (boot_cpu_has(X86_FEATURE_RDTSCP))
+ vgetcpu_mode = VGETCPU_RDTSCP;
+ else
+ vgetcpu_mode = VGETCPU_LSL;
+ #endif
+ #ifdef CONFIG_SYSCTL
+- register_sysctl_table(kernel_root_table2, 0);
++ register_sysctl_table(kernel_root_table2);
+ #endif
+ on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
+ hotcpu_notifier(cpu_vsyscall_notifier, 0);
+Index: 10.3-2007-11-26/arch/x86_64/mm/fault-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/mm/fault-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/mm/fault-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -56,38 +56,17 @@ int unregister_page_fault_notifier(struc
+ }
+ EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
+
+-static inline int notify_page_fault(enum die_val val, const char *str,
+- struct pt_regs *regs, long err, int trap, int sig)
++static inline int notify_page_fault(struct pt_regs *regs, long err)
+ {
+ struct die_args args = {
+ .regs = regs,
+- .str = str,
++ .str = "page fault",
+ .err = err,
+- .trapnr = trap,
+- .signr = sig
++ .trapnr = 14,
++ .signr = SIGSEGV
+ };
+- return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+-}
+-
+-void bust_spinlocks(int yes)
+-{
+- int loglevel_save = console_loglevel;
+- if (yes) {
+- oops_in_progress = 1;
+- } else {
+-#ifdef CONFIG_VT
+- unblank_screen();
+-#endif
+- oops_in_progress = 0;
+- /*
+- * OK, the message is on the console. Now we call printk()
+- * without oops_in_progress set so that printk will give klogd
+- * a poke. Hold onto your hats...
+- */
+- console_loglevel = 15; /* NMI oopser may have shut the console up */
+- printk(" ");
+- console_loglevel = loglevel_save;
+- }
++ return atomic_notifier_call_chain(&notify_page_fault_chain,
++ DIE_PAGE_FAULT, &args);
+ }
+
+ /* Sometimes the CPU reports invalid exceptions on prefetch.
+@@ -437,8 +416,7 @@ asmlinkage void __kprobes do_page_fault(
+ /* Can take a spurious fault if mapping changes R/O -> R/W. */
+ if (spurious_fault(regs, address, error_code))
+ return;
+- if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+- SIGSEGV) == NOTIFY_STOP)
++ if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+ return;
+ /*
+ * Don't take the mm semaphore here. If we fixup a prefetch
+@@ -447,8 +425,7 @@ asmlinkage void __kprobes do_page_fault(
+ goto bad_area_nosemaphore;
+ }
+
+- if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+- SIGSEGV) == NOTIFY_STOP)
++ if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+ return;
+
+ if (likely(regs->eflags & X86_EFLAGS_IF))
+Index: 10.3-2007-11-26/arch/x86_64/mm/init-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/mm/init-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/mm/init-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -1136,20 +1136,30 @@ int kern_addr_valid(unsigned long addr)
+ extern int exception_trace, page_fault_trace;
+
+ static ctl_table debug_table2[] = {
+- { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
+- proc_dointvec },
+- { 0, }
++ {
++ .ctl_name = 99,
++ .procname = "exception-trace",
++ .data = &exception_trace,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec
++ },
++ {}
+ };
+
+ static ctl_table debug_root_table2[] = {
+- { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
+- .child = debug_table2 },
+- { 0 },
++ {
++ .ctl_name = CTL_DEBUG,
++ .procname = "debug",
++ .mode = 0555,
++ .child = debug_table2
++ },
++ {}
+ };
+
+ static __init int x8664_sysctl_init(void)
+ {
+- register_sysctl_table(debug_root_table2, 1);
++ register_sysctl_table(debug_root_table2);
+ return 0;
+ }
+ __initcall(x8664_sysctl_init);
+Index: 10.3-2007-11-26/arch/x86_64/mm/pageattr-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/mm/pageattr-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/mm/pageattr-xen.c 2007-10-22 13:58:46.000000000 +0200
+@@ -275,8 +275,8 @@ static void flush_kernel_map(void *arg)
+ void *adr = page_address(pg);
+ if (cpu_has_clflush)
+ cache_flush_page(adr);
+- __flush_tlb_one(adr);
+ }
++ __flush_tlb_all();
+ }
+
+ static inline void flush_map(struct list_head *l)
+@@ -301,6 +301,7 @@ static void revert_page(unsigned long ad
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t large_pte;
++ unsigned long pfn;
+
+ pgd = pgd_offset_k(address);
+ BUG_ON(pgd_none(*pgd));
+@@ -308,7 +309,8 @@ static void revert_page(unsigned long ad
+ BUG_ON(pud_none(*pud));
+ pmd = pmd_offset(pud, address);
+ BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
+- large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
++ pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
++ large_pte = pfn_pte(pfn, ref_prot);
+ large_pte = pte_mkhuge(large_pte);
+ set_pte((pte_t *)pmd, large_pte);
+ }
+Index: 10.3-2007-11-26/drivers/xen/balloon/sysfs.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/balloon/sysfs.c 2007-12-06 17:27:31.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/balloon/sysfs.c 2007-10-22 13:58:46.000000000 +0200
+@@ -33,6 +33,7 @@
+ #include <linux/stat.h>
+ #include <linux/string.h>
+ #include <linux/sysdev.h>
++#include <linux/module.h>
+ #include "common.h"
+
+ #ifdef HAVE_XEN_PLATFORM_COMPAT_H
+Index: 10.3-2007-11-26/drivers/xen/core/evtchn.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/core/evtchn.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/core/evtchn.c 2007-10-22 13:58:46.000000000 +0200
+@@ -130,7 +130,7 @@ static void bind_evtchn_to_cpu(unsigned
+ int irq = evtchn_to_irq[chn];
+
+ BUG_ON(irq == -1);
+- set_native_irq_info(irq, cpumask_of_cpu(cpu));
++ irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+
+ clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
+ set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
+@@ -143,7 +143,7 @@ static void init_evtchn_cpu_bindings(voi
+
+ /* By default all event channels notify CPU#0. */
+ for (i = 0; i < NR_IRQS; i++)
+- set_native_irq_info(i, cpumask_of_cpu(0));
++ irq_desc[i].affinity = cpumask_of_cpu(0);
+
+ memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
+ memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
+Index: 10.3-2007-11-26/drivers/xen/netfront/netfront.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/netfront/netfront.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/netfront/netfront.c 2007-10-22 13:58:46.000000000 +0200
+@@ -1833,20 +1833,19 @@ static struct ethtool_ops network_ethtoo
+ };
+
+ #ifdef CONFIG_SYSFS
+-static ssize_t show_rxbuf_min(struct class_device *cd, char *buf)
++static ssize_t show_rxbuf_min(struct device *dev,
++ struct device_attribute *attr, char *buf)
+ {
+- struct net_device *netdev = container_of(cd, struct net_device,
+- class_dev);
+- struct netfront_info *info = netdev_priv(netdev);
++ struct netfront_info *info = netdev_priv(to_net_dev(dev));
+
+ return sprintf(buf, "%u\n", info->rx_min_target);
+ }
+
+-static ssize_t store_rxbuf_min(struct class_device *cd,
++static ssize_t store_rxbuf_min(struct device *dev,
++ struct device_attribute *attr,
+ const char *buf, size_t len)
+ {
+- struct net_device *netdev = container_of(cd, struct net_device,
+- class_dev);
++ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *np = netdev_priv(netdev);
+ char *endp;
+ unsigned long target;
+@@ -1876,20 +1875,19 @@ static ssize_t store_rxbuf_min(struct cl
+ return len;
+ }
+
+-static ssize_t show_rxbuf_max(struct class_device *cd, char *buf)
++static ssize_t show_rxbuf_max(struct device *dev,
++ struct device_attribute *attr, char *buf)
+ {
+- struct net_device *netdev = container_of(cd, struct net_device,
+- class_dev);
+- struct netfront_info *info = netdev_priv(netdev);
++ struct netfront_info *info = netdev_priv(to_net_dev(dev));
+
+ return sprintf(buf, "%u\n", info->rx_max_target);
+ }
+
+-static ssize_t store_rxbuf_max(struct class_device *cd,
++static ssize_t store_rxbuf_max(struct device *dev,
++ struct device_attribute *attr,
+ const char *buf, size_t len)
+ {
+- struct net_device *netdev = container_of(cd, struct net_device,
+- class_dev);
++ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *np = netdev_priv(netdev);
+ char *endp;
+ unsigned long target;
+@@ -1919,16 +1917,15 @@ static ssize_t store_rxbuf_max(struct cl
+ return len;
+ }
+
+-static ssize_t show_rxbuf_cur(struct class_device *cd, char *buf)
++static ssize_t show_rxbuf_cur(struct device *dev,
++ struct device_attribute *attr, char *buf)
+ {
+- struct net_device *netdev = container_of(cd, struct net_device,
+- class_dev);
+- struct netfront_info *info = netdev_priv(netdev);
++ struct netfront_info *info = netdev_priv(to_net_dev(dev));
+
+ return sprintf(buf, "%u\n", info->rx_target);
+ }
+
+-static const struct class_device_attribute xennet_attrs[] = {
++static struct device_attribute xennet_attrs[] = {
+ __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
+ __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
+ __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
+@@ -1940,8 +1937,8 @@ static int xennet_sysfs_addif(struct net
+ int error = 0;
+
+ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
+- error = class_device_create_file(&netdev->class_dev,
+- &xennet_attrs[i]);
++ error = device_create_file(&netdev->dev,
++ &xennet_attrs[i]);
+ if (error)
+ goto fail;
+ }
+@@ -1949,8 +1946,7 @@ static int xennet_sysfs_addif(struct net
+
+ fail:
+ while (--i >= 0)
+- class_device_remove_file(&netdev->class_dev,
+- &xennet_attrs[i]);
++ device_remove_file(&netdev->dev, &xennet_attrs[i]);
+ return error;
+ }
+
+@@ -1958,10 +1954,8 @@ static void xennet_sysfs_delif(struct ne
+ {
+ int i;
+
+- for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
+- class_device_remove_file(&netdev->class_dev,
+- &xennet_attrs[i]);
+- }
++ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
++ device_remove_file(&netdev->dev, &xennet_attrs[i]);
+ }
+
+ #endif /* CONFIG_SYSFS */
+Index: 10.3-2007-11-26/include/asm-i386/i8253.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/i8253.h 2007-12-06 17:27:31.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/i8253.h 2007-10-22 13:58:46.000000000 +0200
+@@ -5,6 +5,8 @@
+
+ extern spinlock_t i8253_lock;
+
++#ifdef CONFIG_GENERIC_CLOCKEVENTS
++
+ extern struct clock_event_device *global_clock_event;
+
+ /**
+@@ -18,4 +20,6 @@ static inline void pit_interrupt_hook(vo
+ global_clock_event->event_handler(global_clock_event);
+ }
+
++#endif
++
+ #endif /* __ASM_I8253_H__ */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/desc.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/desc.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/desc.h 2007-10-22 13:58:46.000000000 +0200
+@@ -21,7 +21,7 @@ struct Xgt_desc_struct {
+
+ extern struct Xgt_desc_struct idt_descr;
+ DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
+-
++extern struct Xgt_desc_struct early_gdt_descr;
+
+ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+ {
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/io.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/io.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/io.h 2007-10-22 13:58:46.000000000 +0200
+@@ -233,12 +233,6 @@ static inline void memcpy_toio(volatile
+ #define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
+
+ /*
+- * Again, i386 does not require mem IO specific function.
+- */
+-
+-#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(b),(c),(d))
+-
+-/*
+ * Cache management
+ *
+ * This needed for two cases
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/mmu_context.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/mmu_context.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/mmu_context.h 2007-10-22 13:58:46.000000000 +0200
+@@ -27,13 +27,13 @@ static inline void enter_lazy_tlb(struct
+ static inline void __prepare_arch_switch(void)
+ {
+ /*
+- * Save away %fs. No need to save %gs, as it was saved on the
++ * Save away %gs. No need to save %fs, as it was saved on the
+ * stack on entry. No need to save %es and %ds, as those are
+ * always kernel segments while inside the kernel.
+ */
+- asm volatile ( "mov %%fs,%0"
+- : "=m" (current->thread.fs));
+- asm volatile ( "movl %0,%%fs"
++ asm volatile ( "mov %%gs,%0"
++ : "=m" (current->thread.gs));
++ asm volatile ( "movl %0,%%gs"
+ : : "r" (0) );
+ }
+
+@@ -95,7 +95,7 @@ static inline void switch_mm(struct mm_s
+ }
+
+ #define deactivate_mm(tsk, mm) \
+- asm("movl %0,%%fs": :"r" (0));
++ asm("movl %0,%%gs": :"r" (0));
+
+ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+ {
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgalloc.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/pgalloc.h 2007-12-06 17:27:31.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgalloc.h 2007-10-22 13:58:46.000000000 +0200
+@@ -6,12 +6,23 @@
+ #include <linux/mm.h> /* for struct page */
+ #include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
+
+-#define pmd_populate_kernel(mm, pmd, pte) \
+- set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
++#define paravirt_alloc_pt(pfn) do { } while (0)
++#define paravirt_alloc_pd(pfn) do { } while (0)
++#define paravirt_alloc_pd(pfn) do { } while (0)
++#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
++#define paravirt_release_pt(pfn) do { } while (0)
++#define paravirt_release_pd(pfn) do { } while (0)
++
++#define pmd_populate_kernel(mm, pmd, pte) \
++do { \
++ paravirt_alloc_pt(__pa(pte) >> PAGE_SHIFT); \
++ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \
++} while (0)
+
+ #define pmd_populate(mm, pmd, pte) \
+ do { \
+ unsigned long pfn = page_to_pfn(pte); \
++ paravirt_alloc_pt(pfn); \
+ if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) { \
+ if (!PageHighMem(pte)) \
+ BUG_ON(HYPERVISOR_update_va_mapping( \
+@@ -42,7 +53,11 @@ static inline void pte_free_kernel(pte_t
+
+ extern void pte_free(struct page *pte);
+
+-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
++#define __pte_free_tlb(tlb,pte) \
++do { \
++ paravirt_release_pt(page_to_pfn(pte)); \
++ tlb_remove_page((tlb),(pte)); \
++} while (0)
+
+ #ifdef CONFIG_X86_PAE
+ /*
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/pgtable.h 2007-10-22 14:08:14.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable.h 2007-10-22 14:08:56.000000000 +0200
+@@ -271,6 +271,7 @@ static inline pte_t pte_mkhuge(pte_t pte
+ */
+ #define pte_update(mm, addr, ptep) do { } while (0)
+ #define pte_update_defer(mm, addr, ptep) do { } while (0)
++#define paravirt_map_pt_hook(slot, va, pfn) do { } while (0)
+
+ /*
+ * We only update the dirty/accessed state if we set
+@@ -486,12 +487,24 @@ extern pte_t *lookup_address(unsigned lo
+ #endif
+
+ #if defined(CONFIG_HIGHPTE)
+-#define pte_offset_map(dir, address) \
+- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
+- pte_index(address))
+-#define pte_offset_map_nested(dir, address) \
+- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
+- pte_index(address))
++#define pte_offset_map(dir, address) \
++({ \
++ pte_t *__ptep; \
++ unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
++ __ptep = (pte_t *)kmap_atomic_pte(pfn_to_page(pfn),KM_PTE0); \
++ paravirt_map_pt_hook(KM_PTE0,__ptep, pfn); \
++ __ptep = __ptep + pte_index(address); \
++ __ptep; \
++})
++#define pte_offset_map_nested(dir, address) \
++({ \
++ pte_t *__ptep; \
++ unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
++ __ptep = (pte_t *)kmap_atomic_pte(pfn_to_page(pfn),KM_PTE1); \
++ paravirt_map_pt_hook(KM_PTE1,__ptep, pfn); \
++ __ptep = __ptep + pte_index(address); \
++ __ptep; \
++})
+ #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+ #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+ #else
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/processor.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/processor.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/processor.h 2007-10-22 13:58:46.000000000 +0200
+@@ -431,7 +431,7 @@ struct thread_struct {
+ .vm86_info = NULL, \
+ .sysenter_cs = __KERNEL_CS, \
+ .io_bitmap_ptr = NULL, \
+- .gs = __KERNEL_PDA, \
++ .fs = __KERNEL_PDA, \
+ }
+
+ /*
+@@ -449,8 +449,8 @@ struct thread_struct {
+ }
+
+ #define start_thread(regs, new_eip, new_esp) do { \
+- __asm__("movl %0,%%fs": :"r" (0)); \
+- regs->xgs = 0; \
++ __asm__("movl %0,%%gs": :"r" (0)); \
++ regs->xfs = 0; \
+ set_fs(USER_DS); \
+ regs->xds = __USER_DS; \
+ regs->xes = __USER_DS; \
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/ptrace.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/ptrace.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/ptrace.h 2007-10-22 13:58:46.000000000 +0200
+@@ -16,8 +16,8 @@ struct pt_regs {
+ long eax;
+ int xds;
+ int xes;
+- /* int xfs; */
+- int xgs;
++ int xfs;
++ /* int xgs; */
+ long orig_eax;
+ long eip;
+ int xcs;
+@@ -49,6 +49,10 @@ static inline int user_mode_vm(struct pt
+ {
+ return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
+ }
++static inline int v8086_mode(struct pt_regs *regs)
++{
++ return (regs->eflags & VM_MASK);
++}
+
+ #define instruction_pointer(regs) ((regs)->eip)
+ #define regs_return_value(regs) ((regs)->eax)
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/segment.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/segment.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/segment.h 2007-10-22 13:58:46.000000000 +0200
+@@ -83,14 +83,8 @@
+ * The GDT has 32 entries
+ */
+ #define GDT_ENTRIES 32
+-
+ #define GDT_SIZE (GDT_ENTRIES * 8)
+
+-/* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */
+-#define SEGMENT_IS_FLAT_CODE(x) (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8)
+-/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
+-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
+-
+ /* Simple and small GDT entries for booting only */
+
+ #define GDT_ENTRY_BOOT_CS 2
+@@ -132,4 +126,21 @@
+ #define SEGMENT_GDT 0x0
+
+ #define get_kernel_rpl() (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1)
++
++/*
++ * Matching rules for certain types of segments.
++ */
++
++/* Matches only __KERNEL_CS, ignoring PnP / USER / APM segments */
++#define SEGMENT_IS_KERNEL_CODE(x) (((x) & ~3) == GDT_ENTRY_KERNEL_CS * 8 \
++ || ((x) & ~3) == (FLAT_KERNEL_CS & ~3))
++
++/* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */
++#define SEGMENT_IS_FLAT_CODE(x) (((x) & ~0x13) == GDT_ENTRY_KERNEL_CS * 8 \
++ || ((x) & ~3) == (FLAT_KERNEL_CS & ~3) \
++ || ((x) & ~3) == (FLAT_USER_CS & ~3))
++
++/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
++#define SEGMENT_IS_PNP_CODE(x) (((x) & ~0x0b) == GDT_ENTRY_PNPBIOS_BASE * 8)
++
+ #endif
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/setup.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/setup.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/setup.h 2007-10-22 13:58:46.000000000 +0200
+@@ -6,7 +6,7 @@
+ #ifndef _i386_SETUP_H
+ #define _i386_SETUP_H
+
+-#define COMMAND_LINE_SIZE 256
++#define COMMAND_LINE_SIZE 2048
+
+ #ifdef __KERNEL__
+ #include <linux/pfn.h>
+@@ -77,6 +77,8 @@ int __init sanitize_e820_map(struct e820
+ void __init add_memory_region(unsigned long long start,
+ unsigned long long size, int type);
+
++extern unsigned long init_pg_tables_end;
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* __KERNEL__ */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/smp.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/smp.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/smp.h 2007-10-22 13:58:46.000000000 +0200
+@@ -52,6 +52,11 @@ extern void cpu_exit_clear(void);
+ extern void cpu_uninit(void);
+ #endif
+
++#ifndef CONFIG_PARAVIRT
++#define startup_ipi_hook(phys_apicid, start_eip, start_esp) \
++do { } while (0)
++#endif
++
+ /*
+ * This function is needed by all SMP systems. It must _always_ be valid
+ * from the initial startup. We map APIC_BASE very early in page_setup(),
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/bootsetup.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/bootsetup.h 2007-12-06 17:27:31.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/bootsetup.h 2007-10-22 13:58:46.000000000 +0200
+@@ -33,7 +33,7 @@ extern char x86_boot_params[BOOT_PARAM_S
+ #define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
+ #define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
+ #define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
+-#define COMMAND_LINE saved_command_line
++#define COMMAND_LINE boot_command_line
+
+ #define RAMDISK_IMAGE_START_MASK 0x07FF
+ #define RAMDISK_PROMPT_FLAG 0x8000
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/dma-mapping.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/dma-mapping.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/dma-mapping.h 2007-10-22 13:58:46.000000000 +0200
+@@ -9,7 +9,6 @@
+
+ #include <asm/scatterlist.h>
+ #include <asm/io.h>
+-#include <asm/swiotlb.h>
+
+ struct dma_mapping_ops {
+ int (*mapping_error)(dma_addr_t dma_addr);
+@@ -67,6 +66,9 @@ static inline int dma_mapping_error(dma_
+ #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+ #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
++#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
++#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
++
+ extern void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+ extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/e820.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/e820.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/e820.h 2007-10-22 13:58:46.000000000 +0200
+@@ -46,6 +46,7 @@ extern void e820_mark_nosave_regions(voi
+ extern void e820_print_map(char *who);
+ extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
+ extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
++extern unsigned long e820_hole_size(unsigned long start, unsigned long end);
+
+ extern void e820_setup_gap(struct e820entry *e820, int nr_map);
+ extern void e820_register_active_regions(int nid,
+@@ -56,6 +57,7 @@ extern void finish_e820_parsing(void);
+ extern struct e820map e820;
+
+ extern unsigned ebda_addr, ebda_size;
++extern unsigned long nodemap_addr, nodemap_size;
+ #endif/*!__ASSEMBLY__*/
+
+ #endif/*__E820_HEADER*/
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/hw_irq.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/hw_irq.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/hw_irq.h 2007-10-22 13:58:46.000000000 +0200
+@@ -31,10 +31,32 @@
+
+ #define IA32_SYSCALL_VECTOR 0x80
+
++#ifndef CONFIG_XEN
++
++/* Reserve the lowest usable priority level 0x20 - 0x2f for triggering
++ * cleanup after irq migration.
++ */
++#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
+
+ /*
+- * Vectors 0x20-0x2f are used for ISA interrupts.
++ * Vectors 0x30-0x3f are used for ISA interrupts.
+ */
++#define IRQ0_VECTOR FIRST_EXTERNAL_VECTOR + 0x10
++#define IRQ1_VECTOR IRQ0_VECTOR + 1
++#define IRQ2_VECTOR IRQ0_VECTOR + 2
++#define IRQ3_VECTOR IRQ0_VECTOR + 3
++#define IRQ4_VECTOR IRQ0_VECTOR + 4
++#define IRQ5_VECTOR IRQ0_VECTOR + 5
++#define IRQ6_VECTOR IRQ0_VECTOR + 6
++#define IRQ7_VECTOR IRQ0_VECTOR + 7
++#define IRQ8_VECTOR IRQ0_VECTOR + 8
++#define IRQ9_VECTOR IRQ0_VECTOR + 9
++#define IRQ10_VECTOR IRQ0_VECTOR + 10
++#define IRQ11_VECTOR IRQ0_VECTOR + 11
++#define IRQ12_VECTOR IRQ0_VECTOR + 12
++#define IRQ13_VECTOR IRQ0_VECTOR + 13
++#define IRQ14_VECTOR IRQ0_VECTOR + 14
++#define IRQ15_VECTOR IRQ0_VECTOR + 15
+
+ /*
+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
+@@ -43,7 +65,6 @@
+ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
+ * TLB, reschedule and local APIC vectors are performance-critical.
+ */
+-#ifndef CONFIG_XEN
+ #define SPURIOUS_APIC_VECTOR 0xff
+ #define ERROR_APIC_VECTOR 0xfe
+ #define RESCHEDULE_VECTOR 0xfd
+@@ -57,7 +78,6 @@
+ #define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
+
+ #define NUM_INVALIDATE_TLB_VECTORS 8
+-#endif
+
+ /*
+ * Local APIC timer IRQ vector is on a different priority level,
+@@ -68,12 +88,13 @@
+
+ /*
+ * First APIC vector available to drivers: (vectors 0x30-0xee)
+- * we start at 0x31 to spread out vectors evenly between priority
++ * we start at 0x41 to spread out vectors evenly between priority
+ * levels. (0x80 is the syscall vector)
+ */
+-#define FIRST_DEVICE_VECTOR 0x31
++#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
+ #define FIRST_SYSTEM_VECTOR 0xef /* duplicated in irq.h */
+
++#endif
+
+ #ifndef __ASSEMBLY__
+ typedef int vector_irq_t[NR_VECTORS];
+@@ -93,7 +114,7 @@ extern void enable_8259A_irq(unsigned in
+ extern int i8259A_irq_pending(unsigned int irq);
+ extern void make_8259A_irq(unsigned int irq);
+ extern void init_8259A(int aeoi);
+-extern void FASTCALL(send_IPI_self(int vector));
++extern void send_IPI_self(int vector);
+ extern void init_VISWS_APIC_irqs(void);
+ extern void setup_IO_APIC(void);
+ extern void disable_IO_APIC(void);
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/io.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/io.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/io.h 2007-10-22 13:58:46.000000000 +0200
+@@ -101,7 +101,7 @@ __OUTS(l)
+
+ #define IO_SPACE_LIMIT 0xffff
+
+-#if defined(__KERNEL__) && __x86_64__
++#if defined(__KERNEL__) && defined(__x86_64__)
+
+ #include <linux/vmalloc.h>
+
+@@ -267,12 +267,6 @@ void memset_io(volatile void __iomem *a,
+ */
+ #define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
+
+-/*
+- * Again, x86-64 does not require mem IO specific function.
+- */
+-
+-#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d))
+-
+ /* Nothing to do */
+
+ #define dma_cache_inv(_start,_size) do { } while (0)
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/msr.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/msr.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/msr.h 2007-10-22 13:58:46.000000000 +0200
+@@ -160,6 +160,19 @@ static inline unsigned int cpuid_edx(uns
+ #define MSR_IA32_UCODE_WRITE 0x79
+ #define MSR_IA32_UCODE_REV 0x8b
+
++#ifdef CONFIG_SMP
++void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
++void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
++#else /* CONFIG_SMP */
++static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
++{
++ rdmsr(msr_no, *l, *h);
++}
++static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
++{
++ wrmsr(msr_no, l, h);
++}
++#endif /* CONFIG_SMP */
+
+ #endif
+
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/pgtable.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/pgtable.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/pgtable.h 2007-10-22 13:58:46.000000000 +0200
+@@ -410,15 +410,6 @@ static inline int pmd_large(pmd_t pte) {
+ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+ #define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
+
+-/* physical address -> PTE */
+-static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
+-{
+- unsigned long pteval;
+- pteval = physpage | pgprot_val(pgprot);
+- pteval &= __supported_pte_mask;
+- return __pte(pteval);
+-}
+-
+ /* Change flags of a PTE */
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/smp.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/smp.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/smp.h 2007-10-22 13:58:46.000000000 +0200
+@@ -7,6 +7,7 @@
+ #include <linux/threads.h>
+ #include <linux/cpumask.h>
+ #include <linux/bitops.h>
++#include <linux/init.h>
+ extern int disable_apic;
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+@@ -73,7 +74,7 @@ extern int __cpu_disable(void);
+ extern void __cpu_die(unsigned int cpu);
+ extern void prefill_possible_map(void);
+ extern unsigned num_processors;
+-extern unsigned disabled_cpus;
++extern unsigned __cpuinitdata disabled_cpus;
+
+ #define NO_PROC_ID 0xFF /* No processor magic marker */
+
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/timer.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/timer.h 2007-09-03 09:52:56.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/timer.h 2007-10-22 13:58:46.000000000 +0200
+@@ -4,20 +4,4 @@
+
+ #define TICK_SIZE (tick_nsec / 1000)
+
+-extern void clock_fallback(void);
+-void setup_pit_timer(void);
+-
+-/* Modifiers for buggy PIT handling */
+-
+-extern int pit_latch_buggy;
+-
+-extern int timer_ack;
+-
+-/* list of externed timers */
+-extern unsigned long calibrate_tsc(void);
+-extern void init_cpu_khz(void);
+-#ifdef CONFIG_HPET_TIMER
+-extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
+-#endif
+-
+ #endif
diff --git a/trunk/2.6.22/20047_xen3-patch-2.6.22.patch1 b/trunk/2.6.22/20047_xen3-patch-2.6.22.patch1
new file mode 100644
index 0000000..df38df6
--- /dev/null
+++ b/trunk/2.6.22/20047_xen3-patch-2.6.22.patch1
@@ -0,0 +1,7866 @@
+From: www.kernel.org
+Subject: Update to 2.6.22
+Patch-mainline: 2.6.22
+
+Automatically created from "patches.kernel.org/patch-2.6.22" by xen-port-patches.py
+
+Acked-by: jbeulich@novell.com
+
+Index: 10.3-2007-11-26/arch/i386/Kconfig
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/Kconfig 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/Kconfig 2007-10-22 13:58:56.000000000 +0200
+@@ -922,7 +922,6 @@ config HOTPLUG_CPU
+
+ config COMPAT_VDSO
+ bool "Compat VDSO support"
+- depends on !X86_XEN
+ default y
+ help
+ Map the VDSO to the predictable old-style address too.
+@@ -1086,7 +1085,7 @@ config PCI
+ bool "PCI support" if !X86_VISWS
+ depends on !X86_VOYAGER
+ default y if X86_VISWS
+- select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
++ select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC && !X86_XEN)
+ help
+ Find out whether you have a PCI motherboard. PCI is the name of a
+ bus system, i.e. the way the CPU talks to the other stuff inside
+Index: 10.3-2007-11-26/arch/i386/Kconfig.cpu
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/Kconfig.cpu 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/Kconfig.cpu 2007-10-22 13:58:56.000000000 +0200
+@@ -299,7 +299,7 @@ config X86_POPAD_OK
+
+ config X86_CMPXCHG64
+ bool
+- depends on X86_PAE
++ depends on X86_PAE || X86_XEN
+ default y
+
+ config X86_ALIGNMENT_16
+Index: 10.3-2007-11-26/arch/i386/kernel/Makefile
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/Makefile 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/Makefile 2007-10-22 13:58:56.000000000 +0200
+@@ -103,5 +103,4 @@ n-obj-xen := i8253.o i8259.o reboot.o sm
+ obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
+ obj-y := $(call cherrypickxen, $(obj-y))
+ extra-y := $(call cherrypickxen, $(extra-y))
+-%/head-xen.o %/head-xen.s: EXTRA_AFLAGS :=
+ endif
+Index: 10.3-2007-11-26/arch/i386/kernel/acpi/boot-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/acpi/boot-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/acpi/boot-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -624,8 +624,6 @@ static int __init acpi_parse_sbf(struct
+ static int __init acpi_parse_hpet(struct acpi_table_header *table)
+ {
+ struct acpi_table_hpet *hpet_tbl;
+- struct resource *hpet_res;
+- resource_size_t res_start;
+
+ hpet_tbl = (struct acpi_table_hpet *)table;
+ if (!hpet_tbl) {
+@@ -639,29 +637,10 @@ static int __init acpi_parse_hpet(struct
+ return -1;
+ }
+
+-#define HPET_RESOURCE_NAME_SIZE 9
+- hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
+- if (hpet_res) {
+- memset(hpet_res, 0, sizeof(*hpet_res));
+- hpet_res->name = (void *)&hpet_res[1];
+- hpet_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+- snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE,
+- "HPET %u", hpet_tbl->sequence);
+- hpet_res->end = (1 * 1024) - 1;
+- }
+-
+ hpet_address = hpet_tbl->address.address;
+ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
+ hpet_tbl->id, hpet_address);
+
+- res_start = hpet_address;
+-
+- if (hpet_res) {
+- hpet_res->start = res_start;
+- hpet_res->end += res_start;
+- insert_resource(&iomem_resource, hpet_res);
+- }
+-
+ return 0;
+ }
+ #else
+@@ -877,7 +856,7 @@ static void __init acpi_process_madt(voi
+ acpi_ioapic = 1;
+
+ smp_found_config = 1;
+- clustered_apic_check();
++ setup_apic_routing();
+ }
+ }
+ if (error == -EINVAL) {
+Index: 10.3-2007-11-26/arch/i386/kernel/apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/apic-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/apic-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -19,7 +19,6 @@
+ #include <linux/mm.h>
+ #include <linux/delay.h>
+ #include <linux/bootmem.h>
+-#include <linux/smp_lock.h>
+ #include <linux/interrupt.h>
+ #include <linux/mc146818rtc.h>
+ #include <linux/kernel_stat.h>
+Index: 10.3-2007-11-26/arch/i386/kernel/cpu/common-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/cpu/common-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/cpu/common-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -22,16 +22,40 @@
+ #define phys_pkg_id(a,b) a
+ #endif
+ #endif
+-#include <asm/pda.h>
+ #include <asm/hypervisor.h>
+
+ #include "cpu.h"
+
+-DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
+-EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
++DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
++ [GDT_ENTRY_KERNEL_CS] = { 0x0000ffff, 0x00cf9a00 },
++ [GDT_ENTRY_KERNEL_DS] = { 0x0000ffff, 0x00cf9200 },
++ [GDT_ENTRY_DEFAULT_USER_CS] = { 0x0000ffff, 0x00cffa00 },
++ [GDT_ENTRY_DEFAULT_USER_DS] = { 0x0000ffff, 0x00cff200 },
++#ifndef CONFIG_XEN
++ /*
++ * Segments used for calling PnP BIOS have byte granularity.
++ * They code segments and data segments have fixed 64k limits,
++ * the transfer segment sizes are set at run time.
++ */
++ [GDT_ENTRY_PNPBIOS_CS32] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
++ [GDT_ENTRY_PNPBIOS_CS16] = { 0x0000ffff, 0x00009a00 },/* 16-bit code */
++ [GDT_ENTRY_PNPBIOS_DS] = { 0x0000ffff, 0x00009200 }, /* 16-bit data */
++ [GDT_ENTRY_PNPBIOS_TS1] = { 0x00000000, 0x00009200 },/* 16-bit data */
++ [GDT_ENTRY_PNPBIOS_TS2] = { 0x00000000, 0x00009200 },/* 16-bit data */
++ /*
++ * The APM segments have byte granularity and their bases
++ * are set at run time. All have 64k limits.
++ */
++ [GDT_ENTRY_APMBIOS_BASE] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
++ /* 16-bit code */
++ [GDT_ENTRY_APMBIOS_BASE+1] = { 0x0000ffff, 0x00009a00 },
++ [GDT_ENTRY_APMBIOS_BASE+2] = { 0x0000ffff, 0x00409200 }, /* data */
+
+-struct i386_pda *_cpu_pda[NR_CPUS] __read_mostly;
+-EXPORT_SYMBOL(_cpu_pda);
++ [GDT_ENTRY_ESPFIX_SS] = { 0x00000000, 0x00c09200 },
++#endif
++ [GDT_ENTRY_PERCPU] = { 0x00000000, 0x00000000 },
++} };
++EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+
+ static int cachesize_override __cpuinitdata = -1;
+ static int disable_x86_fxsr __cpuinitdata;
+@@ -373,7 +397,7 @@ __setup("serialnumber", x86_serial_nr_se
+ /*
+ * This does the hard work of actually picking apart the CPU stuff...
+ */
+-void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
++static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+ {
+ int i;
+
+@@ -484,15 +508,22 @@ void __cpuinit identify_cpu(struct cpuin
+
+ /* Init Machine Check Exception if available. */
+ mcheck_init(c);
++}
+
+- if (c == &boot_cpu_data)
+- sysenter_setup();
++void __init identify_boot_cpu(void)
++{
++ identify_cpu(&boot_cpu_data);
++ sysenter_setup();
+ enable_sep_cpu();
++ mtrr_bp_init();
++}
+
+- if (c == &boot_cpu_data)
+- mtrr_bp_init();
+- else
+- mtrr_ap_init();
++void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
++{
++ BUG_ON(c == &boot_cpu_data);
++ identify_cpu(c);
++ enable_sep_cpu();
++ mtrr_ap_init();
+ }
+
+ #ifdef CONFIG_X86_HT
+@@ -606,136 +637,47 @@ void __init early_cpu_init(void)
+ #endif
+ }
+
+-/* Make sure %gs is initialized properly in idle threads */
++/* Make sure %fs is initialized properly in idle threads */
+ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
+ {
+ memset(regs, 0, sizeof(struct pt_regs));
+- regs->xfs = __KERNEL_PDA;
++ regs->xfs = __KERNEL_PERCPU;
+ return regs;
+ }
+
+-static __cpuinit int alloc_gdt(int cpu)
++/* Current gdt points %fs at the "master" per-cpu area: after this,
++ * it's on the real one. */
++void switch_to_new_gdt(void)
+ {
+- struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+- struct desc_struct *gdt;
+- struct i386_pda *pda;
+-
+- gdt = (struct desc_struct *)cpu_gdt_descr->address;
+- pda = cpu_pda(cpu);
+-
+- /*
+- * This is a horrible hack to allocate the GDT. The problem
+- * is that cpu_init() is called really early for the boot CPU
+- * (and hence needs bootmem) but much later for the secondary
+- * CPUs, when bootmem will have gone away
+- */
+- if (NODE_DATA(0)->bdata->node_bootmem_map) {
+- BUG_ON(gdt != NULL || pda != NULL);
+-
+- gdt = alloc_bootmem_pages(PAGE_SIZE);
+- pda = alloc_bootmem(sizeof(*pda));
+- /* alloc_bootmem(_pages) panics on failure, so no check */
+-
+- memset(gdt, 0, PAGE_SIZE);
+- memset(pda, 0, sizeof(*pda));
+- } else {
+- /* GDT and PDA might already have been allocated if
+- this is a CPU hotplug re-insertion. */
+- if (gdt == NULL)
+- gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
+-
+- if (pda == NULL)
+- pda = kmalloc_node(sizeof(*pda), GFP_KERNEL, cpu_to_node(cpu));
+-
+- if (unlikely(!gdt || !pda)) {
+- free_pages((unsigned long)gdt, 0);
+- kfree(pda);
+- return 0;
+- }
+- }
+-
+- cpu_gdt_descr->address = (unsigned long)gdt;
+- cpu_pda(cpu) = pda;
+-
+- return 1;
+-}
+-
+-/* Initial PDA used by boot CPU */
+-struct i386_pda boot_pda = {
+- ._pda = &boot_pda,
+- .cpu_number = 0,
+- .pcurrent = &init_task,
+-};
+-
+-static inline void set_kernel_fs(void)
+-{
+- /* Set %fs for this CPU's PDA. Memory clobber is to create a
+- barrier with respect to any PDA operations, so the compiler
+- doesn't move any before here. */
+- asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_PDA) : "memory");
+-}
+-
+-/* Initialize the CPU's GDT and PDA. The boot CPU does this for
+- itself, but secondaries find this done for them. */
+-__cpuinit int init_gdt(int cpu, struct task_struct *idle)
+-{
+- struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+- struct desc_struct *gdt;
+- struct i386_pda *pda;
+-
+- /* For non-boot CPUs, the GDT and PDA should already have been
+- allocated. */
+- if (!alloc_gdt(cpu)) {
+- printk(KERN_CRIT "CPU%d failed to allocate GDT or PDA\n", cpu);
+- return 0;
+- }
+-
+- gdt = (struct desc_struct *)cpu_gdt_descr->address;
+- pda = cpu_pda(cpu);
+-
+- BUG_ON(gdt == NULL || pda == NULL);
+-
+- /*
+- * Initialize the per-CPU GDT with the boot GDT,
+- * and set up the GDT descriptor:
+- */
+- memcpy(gdt, cpu_gdt_table, GDT_SIZE);
+- cpu_gdt_descr->size = GDT_SIZE - 1;
+-
+- pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a,
+- (u32 *)&gdt[GDT_ENTRY_PDA].b,
+- (unsigned long)pda, sizeof(*pda) - 1,
+- 0x80 | DESCTYPE_S | 0x2, 0); /* present read-write data segment */
+-
+- memset(pda, 0, sizeof(*pda));
+- pda->_pda = pda;
+- pda->cpu_number = cpu;
+- pda->pcurrent = idle;
+-
+- return 1;
+-}
+-
+-void __cpuinit cpu_set_gdt(int cpu)
+-{
+- struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++ struct Xgt_desc_struct gdt_descr;
+ unsigned long va, frames[16];
+ int f;
+
+- for (va = cpu_gdt_descr->address, f = 0;
+- va < cpu_gdt_descr->address + cpu_gdt_descr->size;
++ gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
++ gdt_descr.size = GDT_SIZE - 1;
++
++ for (va = gdt_descr.address, f = 0;
++ va < gdt_descr.address + gdt_descr.size;
+ va += PAGE_SIZE, f++) {
+ frames[f] = virt_to_mfn(va);
+ make_lowmem_page_readonly(
+ (void *)va, XENFEAT_writable_descriptor_tables);
+ }
+- BUG_ON(HYPERVISOR_set_gdt(frames, cpu_gdt_descr->size / 8));
+-
+- set_kernel_fs();
++ if (HYPERVISOR_set_gdt(frames, gdt_descr.size / 8))
++ BUG();
++ asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
+ }
+
+-/* Common CPU init for both boot and secondary CPUs */
+-static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
++/*
++ * cpu_init() initializes state that is per-CPU. Some data is already
++ * initialized (naturally) in the bootstrap process, such as the GDT
++ * and IDT. We reload them nevertheless, this function acts as a
++ * 'CPU state barrier', nothing should get across.
++ */
++void __cpuinit cpu_init(void)
+ {
++ int cpu = smp_processor_id();
++ struct task_struct *curr = current;
+ #ifndef CONFIG_X86_NO_TSS
+ struct tss_struct * t = &per_cpu(init_tss, cpu);
+ #endif
+@@ -757,6 +699,8 @@ static void __cpuinit _cpu_init(int cpu,
+ set_in_cr4(X86_CR4_TSD);
+ }
+
++ switch_to_new_gdt();
++
+ /*
+ * Set up and load the per-CPU TSS and LDT
+ */
+@@ -794,38 +738,6 @@ static void __cpuinit _cpu_init(int cpu,
+ mxcsr_feature_mask_init();
+ }
+
+-/* Entrypoint to initialize secondary CPU */
+-void __cpuinit secondary_cpu_init(void)
+-{
+- int cpu = smp_processor_id();
+- struct task_struct *curr = current;
+-
+- _cpu_init(cpu, curr);
+-}
+-
+-/*
+- * cpu_init() initializes state that is per-CPU. Some data is already
+- * initialized (naturally) in the bootstrap process, such as the GDT
+- * and IDT. We reload them nevertheless, this function acts as a
+- * 'CPU state barrier', nothing should get across.
+- */
+-void __cpuinit cpu_init(void)
+-{
+- int cpu = smp_processor_id();
+- struct task_struct *curr = current;
+-
+- /* Set up the real GDT and PDA, so we can transition from the
+- boot versions. */
+- if (!init_gdt(cpu, curr)) {
+- /* failed to allocate something; not much we can do... */
+- for (;;)
+- local_irq_enable();
+- }
+-
+- cpu_set_gdt(cpu);
+- _cpu_init(cpu, curr);
+-}
+-
+ #ifdef CONFIG_HOTPLUG_CPU
+ void __cpuinit cpu_uninit(void)
+ {
+Index: 10.3-2007-11-26/arch/i386/kernel/cpu/mtrr/main-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/cpu/mtrr/main-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/cpu/mtrr/main-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -166,7 +166,7 @@ mtrr_del(int reg, unsigned long base, un
+ EXPORT_SYMBOL(mtrr_add);
+ EXPORT_SYMBOL(mtrr_del);
+
+-void __init mtrr_bp_init(void)
++__init void mtrr_bp_init(void)
+ {
+ }
+
+Index: 10.3-2007-11-26/arch/i386/kernel/e820-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/e820-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/e820-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -162,26 +162,27 @@ static struct resource standard_io_resou
+
+ static int __init romsignature(const unsigned char *rom)
+ {
++ const unsigned short * const ptr = (const unsigned short *)rom;
+ unsigned short sig;
+
+- return probe_kernel_address((const unsigned short *)rom, sig) == 0 &&
+- sig == ROMSIGNATURE;
++ return probe_kernel_address(ptr, sig) == 0 && sig == ROMSIGNATURE;
+ }
+
+-static int __init romchecksum(unsigned char *rom, unsigned long length)
++static int __init romchecksum(const unsigned char *rom, unsigned long length)
+ {
+- unsigned char sum;
++ unsigned char sum, c;
+
+- for (sum = 0; length; length--)
+- sum += *rom++;
+- return sum == 0;
++ for (sum = 0; length && probe_kernel_address(rom++, c) == 0; length--)
++ sum += c;
++ return !length && !sum;
+ }
+
+ static void __init probe_roms(void)
+ {
++ const unsigned char *rom;
+ unsigned long start, length, upper;
+- unsigned char *rom;
+- int i;
++ unsigned char c;
++ int i;
+
+ #ifdef CONFIG_XEN
+ /* Nothing to do if not running in dom0. */
+@@ -198,8 +199,11 @@ static void __init probe_roms(void)
+
+ video_rom_resource.start = start;
+
++ if (probe_kernel_address(rom + 2, c) != 0)
++ continue;
++
+ /* 0 < length <= 0x7f * 512, historically */
+- length = rom[2] * 512;
++ length = c * 512;
+
+ /* if checksum okay, trust length byte */
+ if (length && romchecksum(rom, length))
+@@ -233,8 +237,11 @@ static void __init probe_roms(void)
+ if (!romsignature(rom))
+ continue;
+
++ if (probe_kernel_address(rom + 2, c) != 0)
++ continue;
++
+ /* 0 < length <= 0x7f * 512, historically */
+- length = rom[2] * 512;
++ length = c * 512;
+
+ /* but accept any length that fits if checksum okay */
+ if (!length || start + length > upper || !romchecksum(rom, length))
+@@ -249,7 +256,7 @@ static void __init probe_roms(void)
+ }
+
+ #ifdef CONFIG_XEN
+-static struct e820map machine_e820 __initdata;
++static struct e820map machine_e820;
+ #define e820 machine_e820
+ #endif
+
+@@ -409,10 +416,8 @@ int __init sanitize_e820_map(struct e820
+ ____________________33__
+ ______________________4_
+ */
+- printk("sanitize start\n");
+ /* if there's only one memory region, don't bother */
+ if (*pnr_map < 2) {
+- printk("sanitize bail 0\n");
+ return -1;
+ }
+
+@@ -421,7 +426,6 @@ int __init sanitize_e820_map(struct e820
+ /* bail out if we find any unreasonable addresses in bios map */
+ for (i=0; i<old_nr; i++)
+ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr) {
+- printk("sanitize bail 1\n");
+ return -1;
+ }
+
+@@ -517,7 +521,6 @@ int __init sanitize_e820_map(struct e820
+ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
+ *pnr_map = new_nr;
+
+- printk("sanitize end\n");
+ return 0;
+ }
+
+@@ -552,7 +555,6 @@ int __init copy_e820_map(struct e820entr
+ unsigned long long size = biosmap->size;
+ unsigned long long end = start + size;
+ unsigned long type = biosmap->type;
+- printk("copy_e820_map() start: %016Lx size: %016Lx end: %016Lx type: %ld\n", start, size, end, type);
+
+ /* Overflow in 64 bits? Ignore the memory map. */
+ if (start > end)
+@@ -564,17 +566,11 @@ int __init copy_e820_map(struct e820entr
+ * Not right. Fix it up.
+ */
+ if (type == E820_RAM) {
+- printk("copy_e820_map() type is E820_RAM\n");
+ if (start < 0x100000ULL && end > 0xA0000ULL) {
+- printk("copy_e820_map() lies in range...\n");
+- if (start < 0xA0000ULL) {
+- printk("copy_e820_map() start < 0xA0000ULL\n");
++ if (start < 0xA0000ULL)
+ add_memory_region(start, 0xA0000ULL-start, type);
+- }
+- if (end <= 0x100000ULL) {
+- printk("copy_e820_map() end <= 0x100000ULL\n");
++ if (end <= 0x100000ULL)
+ continue;
+- }
+ start = 0x100000ULL;
+ size = end - start;
+ }
+@@ -887,6 +883,33 @@ void __init limit_regions(unsigned long
+ print_memory_map("limit_regions endfunc");
+ }
+
++/*
++ * This function checks if any part of the range <start,end> is mapped
++ * with type.
++ */
++int
++e820_any_mapped(u64 start, u64 end, unsigned type)
++{
++ int i;
++#ifndef CONFIG_XEN
++ for (i = 0; i < e820.nr_map; i++) {
++ const struct e820entry *ei = &e820.map[i];
++#else
++ if (!is_initial_xendomain())
++ return 0;
++ for (i = 0; i < machine_e820.nr_map; ++i) {
++ const struct e820entry *ei = &machine_e820.map[i];
++#endif
++ if (type && ei->type != type)
++ continue;
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(e820_any_mapped);
++
+ /*
+ * This function checks if the entire range <start,end> is mapped with type.
+ *
+Index: 10.3-2007-11-26/arch/i386/kernel/entry-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/entry-xen.S 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/entry-xen.S 2007-10-22 13:58:57.000000000 +0200
+@@ -15,7 +15,7 @@
+ * I changed all the .align's to 4 (16 byte alignment), as that's faster
+ * on a 486.
+ *
+- * Stack layout in 'ret_from_system_call':
++ * Stack layout in 'syscall_exit':
+ * ptrace needs to have all regs on the stack.
+ * if the order here is changed, it needs to be
+ * updated in fork.c:copy_process, signal.c:do_signal,
+@@ -135,7 +135,7 @@ NMI_MASK = 0x80000000
+ movl $(__USER_DS), %edx; \
+ movl %edx, %ds; \
+ movl %edx, %es; \
+- movl $(__KERNEL_PDA), %edx; \
++ movl $(__KERNEL_PERCPU), %edx; \
+ movl %edx, %fs
+
+ #define RESTORE_INT_REGS \
+@@ -308,16 +308,12 @@ sysenter_past_esp:
+ pushl $(__USER_CS)
+ CFI_ADJUST_CFA_OFFSET 4
+ /*CFI_REL_OFFSET cs, 0*/
+-#ifndef CONFIG_COMPAT_VDSO
+ /*
+ * Push current_thread_info()->sysenter_return to the stack.
+ * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+ * pushed above; +8 corresponds to copy_thread's esp0 setting.
+ */
+ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
+-#else
+- pushl $SYSENTER_RETURN
+-#endif
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET eip, 0
+
+@@ -345,7 +341,7 @@ sysenter_past_esp:
+ jae syscall_badsys
+ call *sys_call_table(,%eax,4)
+ movl %eax,PT_EAX(%esp)
+- DISABLE_INTERRUPTS(CLBR_ECX|CLBR_EDX)
++ DISABLE_INTERRUPTS(CLBR_ANY)
+ TRACE_IRQS_OFF
+ movl TI_flags(%ebp), %ecx
+ testw $_TIF_ALLWORK_MASK, %cx
+@@ -374,10 +370,6 @@ ENTRY(system_call)
+ CFI_ADJUST_CFA_OFFSET 4
+ SAVE_ALL
+ GET_THREAD_INFO(%ebp)
+- testl $TF_MASK,PT_EFLAGS(%esp)
+- jz no_singlestep
+- orl $_TIF_SINGLESTEP,TI_flags(%ebp)
+-no_singlestep:
+ # system call tracing in operation / emulation
+ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
+ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+@@ -392,6 +384,10 @@ syscall_exit:
+ # setting need_resched or sigpending
+ # between sampling and the iret
+ TRACE_IRQS_OFF
++ testl $TF_MASK,PT_EFLAGS(%esp) # If tracing set singlestep flag on exit
++ jz no_singlestep
++ orl $_TIF_SINGLESTEP,TI_flags(%ebp)
++no_singlestep:
+ movl TI_flags(%ebp), %ecx
+ testw $_TIF_ALLWORK_MASK, %cx # current->work
+ jne syscall_exit_work
+@@ -609,9 +605,7 @@ END(syscall_badsys)
+ #ifndef CONFIG_XEN
+ #define FIXUP_ESPFIX_STACK \
+ /* since we are on a wrong stack, we cant make it a C code :( */ \
+- movl %fs:PDA_cpu, %ebx; \
+- PER_CPU(cpu_gdt_descr, %ebx); \
+- movl GDS_address(%ebx), %ebx; \
++ PER_CPU(gdt_page, %ebx); \
+ GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
+ addl %esp, %eax; \
+ pushl $__KERNEL_DS; \
+@@ -684,7 +678,7 @@ ENTRY(name) \
+ SAVE_ALL; \
+ TRACE_IRQS_OFF \
+ movl %esp,%eax; \
+- call smp_/**/name; \
++ call smp_##name; \
+ jmp ret_from_intr; \
+ CFI_ENDPROC; \
+ ENDPROC(name)
+@@ -692,10 +686,6 @@ ENDPROC(name)
+ /* The include is where all of the SMP etc. interrupts come from */
+ #include "entry_arch.h"
+
+-/* This alternate entry is needed because we hijack the apic LVTT */
+-#if defined(CONFIG_VMI) && defined(CONFIG_X86_LOCAL_APIC)
+-BUILD_INTERRUPT(apic_vmi_timer_interrupt,LOCAL_TIMER_VECTOR)
+-#endif
+ #else
+ #define UNWIND_ESPFIX_STACK
+ #endif
+@@ -738,7 +728,7 @@ error_code:
+ pushl %fs
+ CFI_ADJUST_CFA_OFFSET 4
+ /*CFI_REL_OFFSET fs, 0*/
+- movl $(__KERNEL_PDA), %ecx
++ movl $(__KERNEL_PERCPU), %ecx
+ movl %ecx, %fs
+ UNWIND_ESPFIX_STACK
+ popl %ecx
+Index: 10.3-2007-11-26/arch/i386/kernel/head-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/head-xen.S 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/head-xen.S 2007-10-22 13:58:57.000000000 +0200
+@@ -37,7 +37,8 @@ ENTRY(startup_32)
+ /* Set up the stack pointer */
+ movl $(init_thread_union+THREAD_SIZE),%esp
+
+- call setup_pda
++ movl %ss,%eax
++ movl %eax,%fs # gets reset once there's real percpu
+
+ /* get vendor info */
+ xorl %eax,%eax # call CPUID with 0 -> return vendor ID
+@@ -64,55 +65,11 @@ ENTRY(startup_32)
+ xorl %eax,%eax # Clear GS
+ movl %eax,%gs
+
+- movl $(__KERNEL_PDA),%eax
+- mov %eax,%fs
+-
+ cld # gcc2 wants the direction flag cleared at all times
+
+ pushl $0 # fake return address for unwinder
+ jmp start_kernel
+
+-/*
+- * Point the GDT at this CPU's PDA. This will be
+- * cpu_gdt_table and boot_pda.
+- */
+-ENTRY(setup_pda)
+- /* get the PDA pointer */
+- movl $boot_pda, %eax
+-
+- /* slot the PDA address into the GDT */
+- mov $cpu_gdt_table, %ecx
+- mov %ax, (__KERNEL_PDA+0+2)(%ecx) /* base & 0x0000ffff */
+- shr $16, %eax
+- mov %al, (__KERNEL_PDA+4+0)(%ecx) /* base & 0x00ff0000 */
+- mov %ah, (__KERNEL_PDA+4+3)(%ecx) /* base & 0xff000000 */
+-
+- # %esi still points to start_info, and no registers
+- # need to be preserved.
+-
+- movl XEN_START_mfn_list(%esi), %ebx
+- movl $(cpu_gdt_table - __PAGE_OFFSET), %eax
+- shrl $PAGE_SHIFT, %eax
+- movl (%ebx,%eax,4), %ecx
+- pushl %ecx # frame number for set_gdt below
+-
+- xorl %esi, %esi
+- xorl %edx, %edx
+- shldl $PAGE_SHIFT, %ecx, %edx
+- shll $PAGE_SHIFT, %ecx
+- orl $0x61, %ecx
+- movl $cpu_gdt_table, %ebx
+- movl $__HYPERVISOR_update_va_mapping, %eax
+- int $0x82
+-
+- movl $(PAGE_SIZE_asm / 8), %ecx
+- movl %esp, %ebx
+- movl $__HYPERVISOR_set_gdt, %eax
+- int $0x82
+-
+- popl %ecx
+- ret
+-
+ #define HYPERCALL_PAGE_OFFSET 0x1000
+ .org HYPERCALL_PAGE_OFFSET
+ ENTRY(hypercall_page)
+@@ -138,60 +95,6 @@ ENTRY(empty_zero_page)
+ */
+ .data
+
+-/*
+- * The Global Descriptor Table contains 28 quadwords, per-CPU.
+- */
+- .section .data.page_aligned, "aw"
+- .align PAGE_SIZE_asm
+-ENTRY(cpu_gdt_table)
+- .quad 0x0000000000000000 /* NULL descriptor */
+- .quad 0x0000000000000000 /* 0x0b reserved */
+- .quad 0x0000000000000000 /* 0x13 reserved */
+- .quad 0x0000000000000000 /* 0x1b reserved */
+- .quad 0x0000000000000000 /* 0x20 unused */
+- .quad 0x0000000000000000 /* 0x28 unused */
+- .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
+- .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
+- .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
+- .quad 0x0000000000000000 /* 0x4b reserved */
+- .quad 0x0000000000000000 /* 0x53 reserved */
+- .quad 0x0000000000000000 /* 0x5b reserved */
+-
+- .quad 0x00cf9a000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
+- .quad 0x00cf92000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
+- .quad 0x00cffa000000ffff /* 0x73 user 4GB code at 0x00000000 */
+- .quad 0x00cff2000000ffff /* 0x7b user 4GB data at 0x00000000 */
+-
+- .quad 0x0000000000000000 /* 0x80 TSS descriptor */
+- .quad 0x0000000000000000 /* 0x88 LDT descriptor */
+-
+- /*
+- * Segments used for calling PnP BIOS have byte granularity.
+- * They code segments and data segments have fixed 64k limits,
+- * the transfer segment sizes are set at run time.
+- */
+- .quad 0x0000000000000000 /* 0x90 32-bit code */
+- .quad 0x0000000000000000 /* 0x98 16-bit code */
+- .quad 0x0000000000000000 /* 0xa0 16-bit data */
+- .quad 0x0000000000000000 /* 0xa8 16-bit data */
+- .quad 0x0000000000000000 /* 0xb0 16-bit data */
+-
+- /*
+- * The APM segments have byte granularity and their bases
+- * are set at run time. All have 64k limits.
+- */
+- .quad 0x0000000000000000 /* 0xb8 APM CS code */
+- .quad 0x0000000000000000 /* 0xc0 APM CS 16 code (16 bit) */
+- .quad 0x0000000000000000 /* 0xc8 APM DS data */
+-
+- .quad 0x0000000000000000 /* 0xd0 - ESPFIX SS */
+- .quad 0x00cf92000000ffff /* 0xd8 - PDA */
+- .quad 0x0000000000000000 /* 0xe0 - unused */
+- .quad 0x0000000000000000 /* 0xe8 - unused */
+- .quad 0x0000000000000000 /* 0xf0 - unused */
+- .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
+- .align PAGE_SIZE_asm
+-
+ #if CONFIG_XEN_COMPAT <= 0x030002
+ /*
+ * __xen_guest information
+Index: 10.3-2007-11-26/arch/i386/kernel/io_apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/io_apic-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/io_apic-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -25,7 +25,6 @@
+ #include <linux/init.h>
+ #include <linux/delay.h>
+ #include <linux/sched.h>
+-#include <linux/smp_lock.h>
+ #include <linux/mc146818rtc.h>
+ #include <linux/compiler.h>
+ #include <linux/acpi.h>
+@@ -35,6 +34,7 @@
+ #include <linux/msi.h>
+ #include <linux/htirq.h>
+ #include <linux/freezer.h>
++#include <linux/kthread.h>
+
+ #include <asm/io.h>
+ #include <asm/smp.h>
+@@ -705,8 +705,6 @@ static int balanced_irq(void *unused)
+ unsigned long prev_balance_time = jiffies;
+ long time_remaining = balanced_irq_interval;
+
+- daemonize("kirqd");
+-
+ /* push everything to CPU 0 to give us a starting point. */
+ for (i = 0 ; i < NR_IRQS ; i++) {
+ irq_desc[i].pending_mask = cpumask_of_cpu(0);
+@@ -766,10 +764,9 @@ static int __init balanced_irq_init(void
+ }
+
+ printk(KERN_INFO "Starting balanced_irq\n");
+- if (kernel_thread(balanced_irq, NULL, CLONE_KERNEL) >= 0)
++ if (!IS_ERR(kthread_run(balanced_irq, NULL, "kirqd")))
+ return 0;
+- else
+- printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
++ printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
+ failed:
+ for_each_possible_cpu(i) {
+ kfree(irq_cpu_data[i].irq_delta);
+@@ -1445,10 +1442,6 @@ static void __init setup_ExtINT_IRQ0_pin
+ enable_8259A_irq(0);
+ }
+
+-static inline void UNEXPECTED_IO_APIC(void)
+-{
+-}
+-
+ void __init print_IO_APIC(void)
+ {
+ int apic, i;
+@@ -1488,34 +1481,12 @@ void __init print_IO_APIC(void)
+ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
+ printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
+ printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
+- if (reg_00.bits.ID >= get_physical_broadcast())
+- UNEXPECTED_IO_APIC();
+- if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
+- UNEXPECTED_IO_APIC();
+
+ printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
+ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
+- if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
+- (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
+- (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
+- (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
+- (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
+- (reg_01.bits.entries != 0x2E) &&
+- (reg_01.bits.entries != 0x3F)
+- )
+- UNEXPECTED_IO_APIC();
+
+ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
+ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
+- if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
+- (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
+- (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
+- (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
+- (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
+- )
+- UNEXPECTED_IO_APIC();
+- if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
+- UNEXPECTED_IO_APIC();
+
+ /*
+ * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
+@@ -1525,8 +1496,6 @@ void __init print_IO_APIC(void)
+ if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
+ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
+ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
+- if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
+- UNEXPECTED_IO_APIC();
+ }
+
+ /*
+@@ -1538,8 +1507,6 @@ void __init print_IO_APIC(void)
+ reg_03.raw != reg_01.raw) {
+ printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
+ printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
+- if (reg_03.bits.__reserved_1)
+- UNEXPECTED_IO_APIC();
+ }
+
+ printk(KERN_DEBUG ".... IRQ redirection table:\n");
+@@ -2679,19 +2646,19 @@ int arch_setup_msi_irq(struct pci_dev *d
+ if (irq < 0)
+ return irq;
+
+- set_irq_msi(irq, desc);
+ ret = msi_compose_msg(dev, irq, &msg);
+ if (ret < 0) {
+ destroy_irq(irq);
+ return ret;
+ }
+
++ set_irq_msi(irq, desc);
+ write_msi_msg(irq, &msg);
+
+ set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
+ "edge");
+
+- return irq;
++ return 0;
+ }
+
+ void arch_teardown_msi_irq(unsigned int irq)
+Index: 10.3-2007-11-26/arch/i386/kernel/ioport-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/ioport-xen.c 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/ioport-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -12,10 +12,10 @@
+ #include <linux/types.h>
+ #include <linux/ioport.h>
+ #include <linux/smp.h>
+-#include <linux/smp_lock.h>
+ #include <linux/stddef.h>
+ #include <linux/slab.h>
+ #include <linux/thread_info.h>
++#include <linux/syscalls.h>
+ #include <xen/interface/physdev.h>
+
+ /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
+Index: 10.3-2007-11-26/arch/i386/kernel/irq-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/irq-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/irq-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -24,6 +24,9 @@
+ DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
+ EXPORT_PER_CPU_SYMBOL(irq_stat);
+
++DEFINE_PER_CPU(struct pt_regs *, irq_regs);
++EXPORT_PER_CPU_SYMBOL(irq_regs);
++
+ /*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves.
+Index: 10.3-2007-11-26/arch/i386/kernel/ldt-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/ldt-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/ldt-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -10,7 +10,6 @@
+ #include <linux/string.h>
+ #include <linux/mm.h>
+ #include <linux/smp.h>
+-#include <linux/smp_lock.h>
+ #include <linux/vmalloc.h>
+ #include <linux/slab.h>
+
+Index: 10.3-2007-11-26/arch/i386/kernel/microcode-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/microcode-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/microcode-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -135,7 +135,7 @@ static int __init microcode_dev_init (vo
+ return 0;
+ }
+
+-static void __exit microcode_dev_exit (void)
++static void microcode_dev_exit (void)
+ {
+ misc_deregister(&microcode_dev);
+ }
+Index: 10.3-2007-11-26/arch/i386/kernel/mpparse-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/mpparse-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/mpparse-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -18,7 +18,6 @@
+ #include <linux/acpi.h>
+ #include <linux/delay.h>
+ #include <linux/bootmem.h>
+-#include <linux/smp_lock.h>
+ #include <linux/kernel_stat.h>
+ #include <linux/mc146818rtc.h>
+ #include <linux/bitops.h>
+@@ -484,7 +483,7 @@ static int __init smp_read_mpc(struct mp
+ }
+ ++mpc_record;
+ }
+- clustered_apic_check();
++ setup_apic_routing();
+ if (!num_processors)
+ printk(KERN_ERR "SMP mptable: no processors registered!\n");
+ return num_processors;
+Index: 10.3-2007-11-26/arch/i386/kernel/pci-dma-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/pci-dma-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/pci-dma-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -13,6 +13,7 @@
+ #include <linux/pci.h>
+ #include <linux/module.h>
+ #include <linux/version.h>
++#include <linux/pci.h>
+ #include <asm/io.h>
+ #include <xen/balloon.h>
+ #include <xen/gnttab.h>
+@@ -251,7 +252,7 @@ int dma_declare_coherent_memory(struct d
+ {
+ void __iomem *mem_base = NULL;
+ int pages = size >> PAGE_SHIFT;
+- int bitmap_size = (pages + 31)/32;
++ int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+ if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+ goto out;
+@@ -324,6 +325,32 @@ void *dma_mark_declared_memory_occupied(
+ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+ #endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
+
++#if defined(CONFIG_PCI) && !defined(CONFIG_XEN)
++/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
++
++int forbid_dac;
++EXPORT_SYMBOL(forbid_dac);
++
++static __devinit void via_no_dac(struct pci_dev *dev)
++{
++ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
++ printk(KERN_INFO "PCI: VIA PCI bridge detected. Disabling DAC.\n");
++ forbid_dac = 1;
++ }
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
++
++static int check_iommu(char *s)
++{
++ if (!strcmp(s, "usedac")) {
++ forbid_dac = -1;
++ return 1;
++ }
++ return 0;
++}
++__setup("iommu=", check_iommu);
++#endif
++
+ dma_addr_t
+ dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction direction)
+Index: 10.3-2007-11-26/arch/i386/kernel/process-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/process-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/process-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -21,7 +21,6 @@
+ #include <linux/mm.h>
+ #include <linux/elfcore.h>
+ #include <linux/smp.h>
+-#include <linux/smp_lock.h>
+ #include <linux/stddef.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+@@ -39,6 +38,7 @@
+ #include <linux/random.h>
+ #include <linux/personality.h>
+ #include <linux/tick.h>
++#include <linux/percpu.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -61,7 +61,6 @@
+
+ #include <asm/tlbflush.h>
+ #include <asm/cpu.h>
+-#include <asm/pda.h>
+
+ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+
+@@ -70,6 +69,12 @@ static int hlt_counter;
+ unsigned long boot_option_idle_override = 0;
+ EXPORT_SYMBOL(boot_option_idle_override);
+
++DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
++EXPORT_PER_CPU_SYMBOL(current_task);
++
++DEFINE_PER_CPU(int, cpu_number);
++EXPORT_PER_CPU_SYMBOL(cpu_number);
++
+ /*
+ * Return saved PC of a blocked thread.
+ */
+@@ -168,6 +173,7 @@ void cpu_idle(void)
+ if (__get_cpu_var(cpu_idle_state))
+ __get_cpu_var(cpu_idle_state) = 0;
+
++ check_pgt_cache();
+ rmb();
+ idle = xen_idle; /* no alternatives */
+
+@@ -218,18 +224,19 @@ void __devinit select_idle_routine(const
+ {
+ }
+
+-static int __init idle_setup (char *str)
++static int __init idle_setup(char *str)
+ {
+- if (!strncmp(str, "poll", 4)) {
++ if (!strcmp(str, "poll")) {
+ printk("using polling idle threads.\n");
+ pm_idle = poll_idle;
+ }
++ else
++ return -1;
+
+ boot_option_idle_override = 1;
+- return 1;
++ return 0;
+ }
+-
+-__setup("idle=", idle_setup);
++early_param("idle", idle_setup);
+
+ void show_regs(struct pt_regs * regs)
+ {
+@@ -282,7 +289,7 @@ int kernel_thread(int (*fn)(void *), voi
+
+ regs.xds = __USER_DS;
+ regs.xes = __USER_DS;
+- regs.xfs = __KERNEL_PDA;
++ regs.xfs = __KERNEL_PERCPU;
+ regs.orig_eax = -1;
+ regs.eip = (unsigned long) kernel_thread_helper;
+ regs.xcs = __KERNEL_CS | get_kernel_rpl();
+@@ -555,7 +562,7 @@ struct task_struct fastcall * __switch_t
+ * multicall to indicate FPU task switch, rather than
+ * synchronously trapping to Xen.
+ */
+- if (prev_p->thread_info->status & TS_USEDFPU) {
++ if (task_thread_info(prev_p)->status & TS_USEDFPU) {
+ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
+ mcl->op = __HYPERVISOR_fpu_taskswitch;
+ mcl->args[0] = 1;
+@@ -645,7 +652,7 @@ struct task_struct fastcall * __switch_t
+ if (prev->gs | next->gs)
+ loadsegment(gs, next->gs);
+
+- write_pda(pcurrent, next_p);
++ x86_write_percpu(current_task, next_p);
+
+ return prev_p;
+ }
+Index: 10.3-2007-11-26/arch/i386/kernel/quirks-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/quirks-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/quirks-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -3,12 +3,10 @@
+ */
+ #include <linux/pci.h>
+ #include <linux/irq.h>
+-#include <asm/pci-direct.h>
+-#include <asm/genapic.h>
+-#include <asm/cpu.h>
+
+ #if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI)
+-static void __devinit verify_quirk_intel_irqbalance(struct pci_dev *dev)
++
++static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
+ {
+ u8 config, rev;
+ u32 word;
+@@ -16,7 +14,7 @@ static void __devinit verify_quirk_intel
+ /* BIOS may enable hardware IRQ balancing for
+ * E7520/E7320/E7525(revision ID 0x9 and below)
+ * based platforms.
+- * For those platforms, make sure that the genapic is set to 'flat'
++ * Disable SW irqbalance/affinity on those platforms.
+ */
+ pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
+ if (rev > 0x9)
+@@ -30,59 +28,19 @@ static void __devinit verify_quirk_intel
+ raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
+
+ if (!(word & (1 << 13))) {
+-#ifndef CONFIG_XEN
+-#ifdef CONFIG_X86_64
+- if (genapic != &apic_flat)
+- panic("APIC mode must be flat on this system\n");
+-#elif defined(CONFIG_X86_GENERICARCH)
+- if (genapic != &apic_default)
+- panic("APIC mode must be default(flat) on this system. Use apic=default\n");
+-#endif
+-#endif
+- }
+-
+- /* put back the original value for config space*/
+- if (!(config & 0x2))
+- pci_write_config_byte(dev, 0xf4, config);
+-}
+-
+-void __init quirk_intel_irqbalance(void)
+-{
+- u8 config, rev;
+- u32 word;
+-
+- /* BIOS may enable hardware IRQ balancing for
+- * E7520/E7320/E7525(revision ID 0x9 and below)
+- * based platforms.
+- * Disable SW irqbalance/affinity on those platforms.
+- */
+- rev = read_pci_config_byte(0, 0, 0, PCI_CLASS_REVISION);
+- if (rev > 0x9)
+- return;
+-
+- printk(KERN_INFO "Intel E7520/7320/7525 detected.");
+-
+- /* enable access to config space */
+- config = read_pci_config_byte(0, 0, 0, 0xf4);
+- write_pci_config_byte(0, 0, 0, 0xf4, config|0x2);
+-
+- /* read xTPR register */
+- word = read_pci_config_16(0, 0, 0x40, 0x4c);
+-
+- if (!(word & (1 << 13))) {
+ struct xen_platform_op op;
+- printk(KERN_INFO "Disabling irq balancing and affinity\n");
++ printk(KERN_INFO "Intel E7520/7320/7525 detected. "
++ "Disabling irq balancing and affinity\n");
+ op.cmd = XENPF_platform_quirk;
+ op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING;
+ (void)HYPERVISOR_platform_op(&op);
+ }
+
+- /* put back the original value for config space */
++ /* put back the original value for config space*/
+ if (!(config & 0x2))
+- write_pci_config_byte(0, 0, 0, 0xf4, config);
++ pci_write_config_byte(dev, 0xf4, config);
+ }
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, verify_quirk_intel_irqbalance);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, verify_quirk_intel_irqbalance);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, verify_quirk_intel_irqbalance);
+-
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance);
+ #endif
+Index: 10.3-2007-11-26/arch/i386/kernel/smp-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/smp-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/smp-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -13,7 +13,6 @@
+ #include <linux/mm.h>
+ #include <linux/delay.h>
+ #include <linux/spinlock.h>
+-#include <linux/smp_lock.h>
+ #include <linux/kernel_stat.h>
+ #include <linux/mc146818rtc.h>
+ #include <linux/cache.h>
+@@ -216,7 +215,6 @@ static cpumask_t flush_cpumask;
+ static struct mm_struct * flush_mm;
+ static unsigned long flush_va;
+ static DEFINE_SPINLOCK(tlbstate_lock);
+-#define FLUSH_ALL 0xffffffff
+
+ /*
+ * We cannot call mmdrop() because we are in interrupt context,
+@@ -298,7 +296,7 @@ irqreturn_t smp_invalidate_interrupt(int
+
+ if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
+- if (flush_va == FLUSH_ALL)
++ if (flush_va == TLB_FLUSH_ALL)
+ local_flush_tlb();
+ else
+ __flush_tlb_one(flush_va);
+@@ -314,9 +312,11 @@ out:
+ return IRQ_HANDLED;
+ }
+
+-static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+- unsigned long va)
++void xen_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
++ unsigned long va)
+ {
++ cpumask_t cpumask = *cpumaskp;
++
+ /*
+ * A couple of (to be removed) sanity checks:
+ *
+@@ -327,10 +327,12 @@ static void flush_tlb_others(cpumask_t c
+ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+ BUG_ON(!mm);
+
++#ifdef CONFIG_HOTPLUG_CPU
+ /* If a CPU which we ran on has gone down, OK. */
+ cpus_and(cpumask, cpumask, cpu_online_map);
+- if (cpus_empty(cpumask))
++ if (unlikely(cpus_empty(cpumask)))
+ return;
++#endif
+
+ /*
+ * i'm not happy about this global shared spinlock in the
+@@ -341,17 +343,7 @@ static void flush_tlb_others(cpumask_t c
+
+ flush_mm = mm;
+ flush_va = va;
+-#if NR_CPUS <= BITS_PER_LONG
+- atomic_set_mask(cpumask, &flush_cpumask);
+-#else
+- {
+- int k;
+- unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
+- unsigned long *cpu_mask = (unsigned long *)&cpumask;
+- for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
+- atomic_set_mask(cpu_mask[k], &flush_mask[k]);
+- }
+-#endif
++ cpus_or(flush_cpumask, cpumask, flush_cpumask);
+ /*
+ * We have to send the IPI only to
+ * CPUs affected.
+@@ -378,7 +370,7 @@ void flush_tlb_current_task(void)
+
+ local_flush_tlb();
+ if (!cpus_empty(cpu_mask))
+- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++ flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+ preempt_enable();
+ }
+
+@@ -397,7 +389,7 @@ void flush_tlb_mm (struct mm_struct * mm
+ leave_mm(smp_processor_id());
+ }
+ if (!cpus_empty(cpu_mask))
+- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++ flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+
+ preempt_enable();
+ }
+@@ -460,7 +452,7 @@ void flush_tlb_all(void)
+ * it goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+-void smp_send_reschedule(int cpu)
++void xen_smp_send_reschedule(int cpu)
+ {
+ WARN_ON(cpu_is_offline(cpu));
+ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
+@@ -492,36 +484,79 @@ void unlock_ipi_call_lock(void)
+
+ static struct call_data_struct *call_data;
+
++static void __smp_call_function(void (*func) (void *info), void *info,
++ int nonatomic, int wait)
++{
++ struct call_data_struct data;
++ int cpus = num_online_cpus() - 1;
++
++ if (!cpus)
++ return;
++
++ data.func = func;
++ data.info = info;
++ atomic_set(&data.started, 0);
++ data.wait = wait;
++ if (wait)
++ atomic_set(&data.finished, 0);
++
++ call_data = &data;
++ mb();
++
++ /* Send a message to all other CPUs and wait for them to respond */
++ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++
++ /* Wait for response */
++ while (atomic_read(&data.started) != cpus)
++ cpu_relax();
++
++ if (wait)
++ while (atomic_read(&data.finished) != cpus)
++ cpu_relax();
++}
++
++
+ /**
+- * smp_call_function(): Run a function on all other CPUs.
++ * smp_call_function_mask(): Run a function on a set of other CPUs.
++ * @mask: The set of cpus to run on. Must not include the current cpu.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+- * @nonatomic: currently unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+- * Returns 0 on success, else a negative status code. Does not return until
+- * remote CPUs are nearly ready to execute <<func>> or are or have executed.
++ * Returns 0 on success, else a negative status code.
++ *
++ * If @wait is true, then returns once @func has returned; otherwise
++ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
+- int wait)
++int
++xen_smp_call_function_mask(cpumask_t mask,
++ void (*func)(void *), void *info,
++ int wait)
+ {
+ struct call_data_struct data;
++ cpumask_t allbutself;
+ int cpus;
+
++ /* Can deadlock when called with interrupts disabled */
++ WARN_ON(irqs_disabled());
++
+ /* Holding any lock stops cpus from going down. */
+ spin_lock(&call_lock);
+- cpus = num_online_cpus() - 1;
++
++ allbutself = cpu_online_map;
++ cpu_clear(smp_processor_id(), allbutself);
++
++ cpus_and(mask, mask, allbutself);
++ cpus = cpus_weight(mask);
++
+ if (!cpus) {
+ spin_unlock(&call_lock);
+ return 0;
+ }
+
+- /* Can deadlock when called with interrupts disabled */
+- WARN_ON(irqs_disabled());
+-
+ data.func = func;
+ data.info = info;
+ atomic_set(&data.started, 0);
+@@ -531,9 +566,12 @@ int smp_call_function (void (*func) (voi
+
+ call_data = &data;
+ mb();
+-
+- /* Send a message to all other CPUs and wait for them to respond */
+- send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++
++ /* Send a message to other CPUs */
++ if (cpus_equal(mask, allbutself))
++ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++ else
++ send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+
+ /* Wait for response */
+ while (atomic_read(&data.started) != cpus)
+@@ -546,15 +584,14 @@ int smp_call_function (void (*func) (voi
+
+ return 0;
+ }
+-EXPORT_SYMBOL(smp_call_function);
+
+ static void stop_this_cpu (void * dummy)
+ {
++ local_irq_disable();
+ /*
+ * Remove this CPU:
+ */
+ cpu_clear(smp_processor_id(), cpu_online_map);
+- local_irq_disable();
+ #if 0
+ disable_local_APIC();
+ #endif
+@@ -567,15 +604,20 @@ static void stop_this_cpu (void * dummy)
+ * this function calls the 'stop' function on all other CPUs in the system.
+ */
+
+-void smp_send_stop(void)
++void xen_smp_send_stop(void)
+ {
+- smp_call_function(stop_this_cpu, NULL, 1, 0);
++ /* Don't deadlock on the call lock in panic */
++ int nolock = !spin_trylock(&call_lock);
++ unsigned long flags;
+
+- local_irq_disable();
++ local_irq_save(flags);
++ __smp_call_function(stop_this_cpu, NULL, 0, 0);
++ if (!nolock)
++ spin_unlock(&call_lock);
+ #if 0
+ disable_local_APIC();
+ #endif
+- local_irq_enable();
++ local_irq_restore(flags);
+ }
+
+ /*
+@@ -616,74 +658,3 @@ irqreturn_t smp_call_function_interrupt(
+
+ return IRQ_HANDLED;
+ }
+-
+-/*
+- * this function sends a 'generic call function' IPI to one other CPU
+- * in the system.
+- *
+- * cpu is a standard Linux logical CPU number.
+- */
+-static void
+-__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+- int nonatomic, int wait)
+-{
+- struct call_data_struct data;
+- int cpus = 1;
+-
+- data.func = func;
+- data.info = info;
+- atomic_set(&data.started, 0);
+- data.wait = wait;
+- if (wait)
+- atomic_set(&data.finished, 0);
+-
+- call_data = &data;
+- wmb();
+- /* Send a message to all other CPUs and wait for them to respond */
+- send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
+-
+- /* Wait for response */
+- while (atomic_read(&data.started) != cpus)
+- cpu_relax();
+-
+- if (!wait)
+- return;
+-
+- while (atomic_read(&data.finished) != cpus)
+- cpu_relax();
+-}
+-
+-/*
+- * smp_call_function_single - Run a function on another CPU
+- * @func: The function to run. This must be fast and non-blocking.
+- * @info: An arbitrary pointer to pass to the function.
+- * @nonatomic: Currently unused.
+- * @wait: If true, wait until function has completed on other CPUs.
+- *
+- * Retrurns 0 on success, else a negative status code.
+- *
+- * Does not return until the remote CPU is nearly ready to execute <func>
+- * or is or has executed.
+- */
+-
+-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+- int nonatomic, int wait)
+-{
+- /* prevent preemption and reschedule on another processor */
+- int me = get_cpu();
+- if (cpu == me) {
+- WARN_ON(1);
+- put_cpu();
+- return -EBUSY;
+- }
+-
+- /* Can deadlock when called with interrupts disabled */
+- WARN_ON(irqs_disabled());
+-
+- spin_lock_bh(&call_lock);
+- __smp_call_function_single(cpu, func, info, nonatomic, wait);
+- spin_unlock_bh(&call_lock);
+- put_cpu();
+- return 0;
+-}
+-EXPORT_SYMBOL(smp_call_function_single);
+Index: 10.3-2007-11-26/arch/i386/kernel/swiotlb.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/swiotlb.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/swiotlb.c 2007-10-22 13:58:57.000000000 +0200
+@@ -729,7 +729,6 @@ swiotlb_dma_supported (struct device *hw
+ return (mask >= ((1UL << dma_bits) - 1));
+ }
+
+-EXPORT_SYMBOL(swiotlb_init);
+ EXPORT_SYMBOL(swiotlb_map_single);
+ EXPORT_SYMBOL(swiotlb_unmap_single);
+ EXPORT_SYMBOL(swiotlb_map_sg);
+Index: 10.3-2007-11-26/arch/i386/kernel/time-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/time-xen.c 2007-12-06 17:32:21.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/time-xen.c 2007-12-06 17:32:30.000000000 +0100
+@@ -79,7 +79,6 @@
+ #include <asm/i8253.h>
+ DEFINE_SPINLOCK(i8253_lock);
+ EXPORT_SYMBOL(i8253_lock);
+-int pit_latch_buggy; /* extern */
+ #else
+ volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
+ #endif
+@@ -593,7 +592,7 @@ irqreturn_t timer_interrupt(int irq, voi
+ return IRQ_HANDLED;
+ }
+
+-void mark_tsc_unstable(void)
++void mark_tsc_unstable(char *reason)
+ {
+ #ifndef CONFIG_XEN /* XXX Should tell the hypervisor about this fact. */
+ tsc_unstable = 1;
+@@ -812,7 +811,7 @@ static void setup_cpu0_timer_irq(void)
+ VIRQ_TIMER,
+ 0,
+ timer_interrupt,
+- SA_INTERRUPT,
++ IRQF_DISABLED,
+ "timer0",
+ NULL);
+ BUG_ON(per_cpu(timer_irq, 0) < 0);
+@@ -922,21 +921,21 @@ static void start_hz_timer(void)
+ cpu_clear(smp_processor_id(), nohz_cpu_mask);
+ }
+
+-void raw_safe_halt(void)
++void xen_safe_halt(void)
+ {
+ stop_hz_timer();
+ /* Blocking includes an implicit local_irq_enable(). */
+ HYPERVISOR_block();
+ start_hz_timer();
+ }
+-EXPORT_SYMBOL(raw_safe_halt);
++EXPORT_SYMBOL(xen_safe_halt);
+
+-void halt(void)
++void xen_halt(void)
+ {
+ if (irqs_disabled())
+ HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
+ }
+-EXPORT_SYMBOL(halt);
++EXPORT_SYMBOL(xen_halt);
+
+ /* No locking required. Interrupts are disabled on all CPUs. */
+ void time_resume(void)
+@@ -983,7 +982,7 @@ int local_setup_timer(unsigned int cpu)
+ irq = bind_virq_to_irqhandler(VIRQ_TIMER,
+ cpu,
+ timer_interrupt,
+- SA_INTERRUPT,
++ IRQF_DISABLED,
+ timer_name[cpu],
+ NULL);
+ if (irq < 0)
+Index: 10.3-2007-11-26/arch/i386/kernel/traps-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/traps-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/kernel/traps-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -52,7 +52,7 @@
+ #include <asm/unwind.h>
+ #include <asm/smp.h>
+ #include <asm/arch_hooks.h>
+-#include <asm/kdebug.h>
++#include <linux/kdebug.h>
+ #include <asm/stacktrace.h>
+
+ #include <linux/module.h>
+@@ -101,20 +101,6 @@ asmlinkage void machine_check(void);
+
+ int kstack_depth_to_print = 24;
+ static unsigned int code_bytes = 64;
+-ATOMIC_NOTIFIER_HEAD(i386die_chain);
+-
+-int register_die_notifier(struct notifier_block *nb)
+-{
+- vmalloc_sync_all();
+- return atomic_notifier_chain_register(&i386die_chain, nb);
+-}
+-EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
+-
+-int unregister_die_notifier(struct notifier_block *nb)
+-{
+- return atomic_notifier_chain_unregister(&i386die_chain, nb);
+-}
+-EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
+
+ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
+ {
+@@ -325,7 +311,7 @@ void show_registers(struct pt_regs *regs
+ regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss);
+ printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
+ TASK_COMM_LEN, current->comm, current->pid,
+- current_thread_info(), current, current->thread_info);
++ current_thread_info(), current, task_thread_info(current));
+ /*
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+@@ -482,8 +468,6 @@ static void __kprobes do_trap(int trapnr
+ siginfo_t *info)
+ {
+ struct task_struct *tsk = current;
+- tsk->thread.error_code = error_code;
+- tsk->thread.trap_no = trapnr;
+
+ if (regs->eflags & VM_MASK) {
+ if (vm86)
+@@ -495,6 +479,18 @@ static void __kprobes do_trap(int trapnr
+ goto kernel_trap;
+
+ trap_signal: {
++ /*
++ * We want error_code and trap_no set for userspace faults and
++ * kernelspace faults which result in die(), but not
++ * kernelspace faults which are fixed up. die() gives the
++ * process no chance to handle the signal and notice the
++ * kernel fault information, so that won't result in polluting
++ * the information about previously queued, but not yet
++ * delivered, faults. See also do_general_protection below.
++ */
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = trapnr;
++
+ if (info)
+ force_sig_info(signr, info, tsk);
+ else
+@@ -503,8 +499,11 @@ static void __kprobes do_trap(int trapnr
+ }
+
+ kernel_trap: {
+- if (!fixup_exception(regs))
++ if (!fixup_exception(regs)) {
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = trapnr;
+ die(str, regs, error_code);
++ }
+ return;
+ }
+
+@@ -578,9 +577,6 @@ DO_ERROR_INFO(32, SIGSEGV, "iret excepti
+ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
+ long error_code)
+ {
+- current->thread.error_code = error_code;
+- current->thread.trap_no = 13;
+-
+ if (regs->eflags & VM_MASK)
+ goto gp_in_vm86;
+
+@@ -599,6 +595,8 @@ gp_in_vm86:
+
+ gp_in_kernel:
+ if (!fixup_exception(regs)) {
++ current->thread.error_code = error_code;
++ current->thread.trap_no = 13;
+ if (notify_die(DIE_GPF, "general protection fault", regs,
+ error_code, 13, SIGSEGV) == NOTIFY_STOP)
+ return;
+@@ -987,9 +985,7 @@ fastcall void do_spurious_interrupt_bug(
+ fastcall unsigned long patch_espfix_desc(unsigned long uesp,
+ unsigned long kesp)
+ {
+- int cpu = smp_processor_id();
+- struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+- struct desc_struct *gdt = (struct desc_struct *)cpu_gdt_descr->address;
++ struct desc_struct *gdt = __get_cpu_var(gdt_page).gdt;
+ unsigned long base = (kesp - uesp) & -THREAD_SIZE;
+ unsigned long new_kesp = kesp - base;
+ unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
+Index: 10.3-2007-11-26/arch/i386/mm/fault-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/fault-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/mm/fault-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -14,19 +14,20 @@
+ #include <linux/mman.h>
+ #include <linux/mm.h>
+ #include <linux/smp.h>
+-#include <linux/smp_lock.h>
+ #include <linux/interrupt.h>
+ #include <linux/init.h>
+ #include <linux/tty.h>
+ #include <linux/vt_kern.h> /* For unblank_screen() */
+ #include <linux/highmem.h>
++#include <linux/bootmem.h> /* for max_low_pfn */
++#include <linux/vmalloc.h>
+ #include <linux/module.h>
+ #include <linux/kprobes.h>
+ #include <linux/uaccess.h>
++#include <linux/kdebug.h>
+
+ #include <asm/system.h>
+ #include <asm/desc.h>
+-#include <asm/kdebug.h>
+ #include <asm/segment.h>
+
+ extern void die(const char *,struct pt_regs *,long);
+@@ -259,25 +260,20 @@ static void dump_fault_path(unsigned lon
+ unsigned long page;
+
+ page = read_cr3();
+- page = ((unsigned long *) __va(page))[address >> 22];
+- if (oops_may_print())
+- printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
+- machine_to_phys(page));
++ page = ((unsigned long *) __va(page))[address >> PGDIR_SHIFT];
++ printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
++ machine_to_phys(page));
+ /*
+ * We must not directly access the pte in the highpte
+ * case if the page table is located in highmem.
+ * And lets rather not kmap-atomic the pte, just in case
+ * it's allocated already.
+ */
+-#ifdef CONFIG_HIGHPTE
+- if ((page >> PAGE_SHIFT) >= highstart_pfn)
+- return;
+-#endif
+- if ((page & 1) && oops_may_print()) {
+- page &= PAGE_MASK;
+- address &= 0x003ff000;
+- page = machine_to_phys(page);
+- page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
++ if ((machine_to_phys(page) >> PAGE_SHIFT) < max_low_pfn
++ && (page & _PAGE_PRESENT)) {
++ page = machine_to_phys(page & PAGE_MASK);
++ page = ((unsigned long *) __va(page))[(address >> PAGE_SHIFT)
++ & (PTRS_PER_PTE - 1)];
+ printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
+ machine_to_phys(page));
+ }
+@@ -581,6 +577,11 @@ bad_area:
+ bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if (error_code & 4) {
++ /*
++ * It's possible to have interrupts off here.
++ */
++ local_irq_enable();
++
+ /*
+ * Valid to do another page fault here because this one came
+ * from user space.
+@@ -633,7 +634,7 @@ no_context:
+ bust_spinlocks(1);
+
+ if (oops_may_print()) {
+- #ifdef CONFIG_X86_PAE
++#ifdef CONFIG_X86_PAE
+ if (error_code & 16) {
+ pte_t *pte = lookup_address(address);
+
+@@ -642,7 +643,7 @@ no_context:
+ "NX-protected page - exploit attempt? "
+ "(uid: %d)\n", current->uid);
+ }
+- #endif
++#endif
+ if (address < PAGE_SIZE)
+ printk(KERN_ALERT "BUG: unable to handle kernel NULL "
+ "pointer dereference");
+@@ -652,8 +653,8 @@ no_context:
+ printk(" at virtual address %08lx\n",address);
+ printk(KERN_ALERT " printing eip:\n");
+ printk("%08lx\n", regs->eip);
++ dump_fault_path(address);
+ }
+- dump_fault_path(address);
+ tsk->thread.cr2 = address;
+ tsk->thread.trap_no = 14;
+ tsk->thread.error_code = error_code;
+@@ -694,7 +695,6 @@ do_sigbus:
+ force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
+ }
+
+-#if !HAVE_SHARED_KERNEL_PMD
+ void vmalloc_sync_all(void)
+ {
+ /*
+@@ -710,6 +710,9 @@ void vmalloc_sync_all(void)
+ static unsigned long start = TASK_SIZE;
+ unsigned long address;
+
++ if (SHARED_KERNEL_PMD)
++ return;
++
+ BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
+ for (address = start;
+ address >= TASK_SIZE && address < hypervisor_virt_start;
+@@ -742,4 +745,3 @@ void vmalloc_sync_all(void)
+ start = address + (1UL << PMD_SHIFT);
+ }
+ }
+-#endif
+Index: 10.3-2007-11-26/arch/i386/mm/highmem-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/highmem-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/mm/highmem-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -26,7 +26,7 @@ void kunmap(struct page *page)
+ * However when holding an atomic kmap is is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
+ */
+-static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
++void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+ {
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+@@ -49,15 +49,7 @@ static void *__kmap_atomic(struct page *
+
+ void *kmap_atomic(struct page *page, enum km_type type)
+ {
+- return __kmap_atomic(page, type, kmap_prot);
+-}
+-
+-/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
+-void *kmap_atomic_pte(struct page *page, enum km_type type)
+-{
+- return __kmap_atomic(page, type,
+- test_bit(PG_pinned, &page->flags)
+- ? PAGE_KERNEL_RO : kmap_prot);
++ return kmap_atomic_prot(page, type, kmap_prot);
+ }
+
+ void kunmap_atomic(void *kvaddr, enum km_type type)
+@@ -80,6 +72,7 @@ void kunmap_atomic(void *kvaddr, enum km
+ #endif
+ }
+
++ arch_flush_lazy_mmu_mode();
+ pagefault_enable();
+ }
+
+@@ -117,6 +110,5 @@ struct page *kmap_atomic_to_page(void *p
+ EXPORT_SYMBOL(kmap);
+ EXPORT_SYMBOL(kunmap);
+ EXPORT_SYMBOL(kmap_atomic);
+-EXPORT_SYMBOL(kmap_atomic_pte);
+ EXPORT_SYMBOL(kunmap_atomic);
+ EXPORT_SYMBOL(kmap_atomic_to_page);
+Index: 10.3-2007-11-26/arch/i386/mm/init-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/init-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/mm/init-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -22,6 +22,7 @@
+ #include <linux/init.h>
+ #include <linux/highmem.h>
+ #include <linux/pagemap.h>
++#include <linux/pfn.h>
+ #include <linux/poison.h>
+ #include <linux/bootmem.h>
+ #include <linux/slab.h>
+@@ -67,17 +68,19 @@ static pmd_t * __init one_md_table_init(
+ pmd_t *pmd_table;
+
+ #ifdef CONFIG_X86_PAE
+- pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+- paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
+- make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
+- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+- pud = pud_offset(pgd, 0);
+- if (pmd_table != pmd_offset(pud, 0))
+- BUG();
+-#else
++ if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
++ pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++
++ paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
++ make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
++ set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
++ pud = pud_offset(pgd, 0);
++ if (pmd_table != pmd_offset(pud, 0))
++ BUG();
++ }
++#endif
+ pud = pud_offset(pgd, 0);
+ pmd_table = pmd_offset(pud, 0);
+-#endif
+
+ return pmd_table;
+ }
+@@ -88,16 +91,18 @@ static pmd_t * __init one_md_table_init(
+ */
+ static pte_t * __init one_page_table_init(pmd_t *pmd)
+ {
++#if CONFIG_XEN_COMPAT <= 0x030002
+ if (pmd_none(*pmd)) {
++#else
++ if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
++#endif
+ pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++
+ paravirt_alloc_pt(__pa(page_table) >> PAGE_SHIFT);
+ make_lowmem_page_readonly(page_table,
+ XENFEAT_writable_page_tables);
+ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
+- if (page_table != pte_offset_kernel(pmd, 0))
+- BUG();
+-
+- return page_table;
++ BUG_ON(page_table != pte_offset_kernel(pmd, 0));
+ }
+
+ return pte_offset_kernel(pmd, 0);
+@@ -117,7 +122,6 @@ static pte_t * __init one_page_table_ini
+ static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
+ {
+ pgd_t *pgd;
+- pud_t *pud;
+ pmd_t *pmd;
+ int pgd_idx, pmd_idx;
+ unsigned long vaddr;
+@@ -128,12 +132,10 @@ static void __init page_table_range_init
+ pgd = pgd_base + pgd_idx;
+
+ for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
+- if (pgd_none(*pgd))
+- one_md_table_init(pgd);
+- pud = pud_offset(pgd, vaddr);
+- pmd = pmd_offset(pud, vaddr);
++ pmd = one_md_table_init(pgd);
++ pmd = pmd + pmd_index(vaddr);
+ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
+- if (vaddr < hypervisor_virt_start && pmd_none(*pmd))
++ if (vaddr < hypervisor_virt_start)
+ one_page_table_init(pmd);
+
+ vaddr += PMD_SIZE;
+@@ -196,24 +198,25 @@ static void __init kernel_physical_mappi
+ /* Map with big pages if possible, otherwise create normal page tables. */
+ if (cpu_has_pse) {
+ unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
+-
+ if (is_kernel_text(address) || is_kernel_text(address2))
+ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
+ else
+ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
++
+ pfn += PTRS_PER_PTE;
+ } else {
+ pte = one_page_table_init(pmd);
+
+- pte += pte_ofs;
+- for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
+- /* XEN: Only map initial RAM allocation. */
+- if ((pfn >= max_ram_pfn) || pte_present(*pte))
+- continue;
+- if (is_kernel_text(address))
+- set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+- else
+- set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
++ for (pte += pte_ofs;
++ pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
++ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
++ /* XEN: Only map initial RAM allocation. */
++ if ((pfn >= max_ram_pfn) || pte_present(*pte))
++ continue;
++ if (is_kernel_text(address))
++ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
++ else
++ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
+ }
+ pte_ofs = 0;
+ }
+@@ -383,18 +386,46 @@ extern void __init remap_numa_kva(void);
+
+ pgd_t *swapper_pg_dir;
+
++static void __init xen_pagetable_setup_start(pgd_t *base)
++{
++ swapper_pg_dir = base;
++ init_mm.pgd = base;
++}
++
++static void __init xen_pagetable_setup_done(pgd_t *base)
++{
++}
++
++/*
++ * Build a proper pagetable for the kernel mappings. Up until this
++ * point, we've been running on some set of pagetables constructed by
++ * the boot process.
++ *
++ * If we're booting on native hardware, this will be a pagetable
++ * constructed in arch/i386/kernel/head.S, and not running in PAE mode
++ * (even if we'll end up running in PAE). The root of the pagetable
++ * will be swapper_pg_dir.
++ *
++ * If we're booting paravirtualized under a hypervisor, then there are
++ * more options: we may already be running PAE, and the pagetable may
++ * or may not be based in swapper_pg_dir. In any case,
++ * paravirt_pagetable_setup_start() will set up swapper_pg_dir
++ * appropriately for the rest of the initialization to work.
++ *
++ * In general, pagetable_init() assumes that the pagetable may already
++ * be partially populated, and so it avoids stomping on any existing
++ * mappings.
++ */
+ static void __init pagetable_init (void)
+ {
+- unsigned long vaddr;
++ unsigned long vaddr, end;
+ pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;
+
+- swapper_pg_dir = pgd_base;
+- init_mm.pgd = pgd_base;
++ xen_pagetable_setup_start(pgd_base);
+
+ /* Enable PSE if available */
+- if (cpu_has_pse) {
++ if (cpu_has_pse)
+ set_in_cr4(X86_CR4_PSE);
+- }
+
+ /* Enable PGE if available */
+ if (cpu_has_pge) {
+@@ -411,9 +442,12 @@ static void __init pagetable_init (void)
+ * created - mappings will be set by set_fixmap():
+ */
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+- page_table_range_init(vaddr, hypervisor_virt_start, pgd_base);
++ end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
++ page_table_range_init(vaddr, end, pgd_base);
+
+ permanent_kmaps_init(pgd_base);
++
++ xen_pagetable_setup_done(pgd_base);
+ }
+
+ #if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
+@@ -764,34 +798,29 @@ int remove_memory(u64 start, u64 size)
+ EXPORT_SYMBOL_GPL(remove_memory);
+ #endif
+
+-struct kmem_cache *pgd_cache;
+ struct kmem_cache *pmd_cache;
+
+ void __init pgtable_cache_init(void)
+ {
++ size_t pgd_size = PTRS_PER_PGD*sizeof(pgd_t);
++
+ if (PTRS_PER_PMD > 1) {
+ pmd_cache = kmem_cache_create("pmd",
+ PTRS_PER_PMD*sizeof(pmd_t),
+ PTRS_PER_PMD*sizeof(pmd_t),
+- 0,
++ SLAB_PANIC,
+ pmd_ctor,
+ NULL);
+- if (!pmd_cache)
+- panic("pgtable_cache_init(): cannot create pmd cache");
++ if (!SHARED_KERNEL_PMD) {
++ /* If we're in PAE mode and have a non-shared
++ kernel pmd, then the pgd size must be a
++ page size. This is because the pgd_list
++ links through the page structure, so there
++ can only be one pgd per page for this to
++ work. */
++ pgd_size = PAGE_SIZE;
++ }
+ }
+- pgd_cache = kmem_cache_create("pgd",
+-#ifndef CONFIG_XEN
+- PTRS_PER_PGD*sizeof(pgd_t),
+- PTRS_PER_PGD*sizeof(pgd_t),
+-#else
+- PAGE_SIZE,
+- PAGE_SIZE,
+-#endif
+- 0,
+- pgd_ctor,
+- PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
+- if (!pgd_cache)
+- panic("pgtable_cache_init(): Cannot create pgd cache");
+ }
+
+ /*
+@@ -825,13 +854,26 @@ static int noinline do_test_wp_bit(void)
+
+ void mark_rodata_ro(void)
+ {
+- unsigned long addr = (unsigned long)__start_rodata;
+-
+- for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
+- change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
++ unsigned long start = PFN_ALIGN(_text);
++ unsigned long size = PFN_ALIGN(_etext) - start;
+
+- printk("Write protecting the kernel read-only data: %uk\n",
+- (__end_rodata - __start_rodata) >> 10);
++#ifndef CONFIG_KPROBES
++#ifdef CONFIG_HOTPLUG_CPU
++ /* It must still be possible to apply SMP alternatives. */
++ if (num_possible_cpus() <= 1)
++#endif
++ {
++ change_page_attr(virt_to_page(start),
++ size >> PAGE_SHIFT, PAGE_KERNEL_RX);
++ printk("Write protecting the kernel text: %luk\n", size >> 10);
++ }
++#endif
++ start += size;
++ size = (unsigned long)__end_rodata - start;
++ change_page_attr(virt_to_page(start),
++ size >> PAGE_SHIFT, PAGE_KERNEL_RO);
++ printk("Write protecting the kernel read-only data: %luk\n",
++ size >> 10);
+
+ /*
+ * change_page_attr() requires a global_flush_tlb() call after it.
+@@ -854,7 +896,7 @@ void free_init_pages(char *what, unsigne
+ free_page(addr);
+ totalram_pages++;
+ }
+- printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
++ printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
+ }
+
+ void free_initmem(void)
+Index: 10.3-2007-11-26/arch/i386/mm/ioremap-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/ioremap-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/mm/ioremap-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -13,6 +13,7 @@
+ #include <linux/slab.h>
+ #include <linux/module.h>
+ #include <linux/io.h>
++#include <linux/sched.h>
+ #include <asm/fixmap.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
+Index: 10.3-2007-11-26/arch/i386/mm/pgtable-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/mm/pgtable-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/i386/mm/pgtable-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -13,6 +13,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/spinlock.h>
+ #include <linux/module.h>
++#include <linux/quicklist.h>
+
+ #include <asm/system.h>
+ #include <asm/pgtable.h>
+@@ -292,8 +293,6 @@ void pmd_ctor(void *pmd, struct kmem_cac
+ * against pageattr.c; it is the unique case in which a valid change
+ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
+ * vmalloc faults work because attached pagetables are never freed.
+- * The locking scheme was chosen on the basis of manfred's
+- * recommendations and having no core impact whatsoever.
+ * -- wli
+ */
+ DEFINE_SPINLOCK(pgd_lock);
+@@ -319,37 +318,60 @@ static inline void pgd_list_del(pgd_t *p
+ set_page_private(next, (unsigned long)pprev);
+ }
+
+-void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
++
++
++#if (PTRS_PER_PMD == 1)
++/* Non-PAE pgd constructor */
++void pgd_ctor(void *pgd)
+ {
+ unsigned long flags;
+
+- if (PTRS_PER_PMD > 1) {
+- if (HAVE_SHARED_KERNEL_PMD)
+- clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
+- swapper_pg_dir + USER_PTRS_PER_PGD,
+- KERNEL_PGD_PTRS);
+- } else {
+- spin_lock_irqsave(&pgd_lock, flags);
++ memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
++
++ spin_lock_irqsave(&pgd_lock, flags);
++
++ /* must happen under lock */
++ clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
++ swapper_pg_dir + USER_PTRS_PER_PGD,
++ KERNEL_PGD_PTRS);
++
++ paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
++ __pa(swapper_pg_dir) >> PAGE_SHIFT,
++ USER_PTRS_PER_PGD,
++ KERNEL_PGD_PTRS);
++ pgd_list_add(pgd);
++ spin_unlock_irqrestore(&pgd_lock, flags);
++}
++#else /* PTRS_PER_PMD > 1 */
++/* PAE pgd constructor */
++void pgd_ctor(void *pgd)
++{
++ /* PAE, kernel PMD may be shared */
++
++ if (SHARED_KERNEL_PMD) {
+ clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
+ swapper_pg_dir + USER_PTRS_PER_PGD,
+ KERNEL_PGD_PTRS);
+- memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
+-
+- /* must happen under lock */
+- paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
+- __pa(swapper_pg_dir) >> PAGE_SHIFT,
+- USER_PTRS_PER_PGD, PTRS_PER_PGD - USER_PTRS_PER_PGD);
++#ifndef CONFIG_XEN
++ } else {
++ unsigned long flags;
+
++ memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
++ spin_lock_irqsave(&pgd_lock, flags);
+ pgd_list_add(pgd);
+ spin_unlock_irqrestore(&pgd_lock, flags);
++#endif
+ }
+ }
++#endif /* PTRS_PER_PMD */
+
+-/* never called when PTRS_PER_PMD > 1 */
+-void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
++void pgd_dtor(void *pgd)
+ {
+ unsigned long flags; /* can be called from interrupt context */
+
++ if (SHARED_KERNEL_PMD)
++ return;
++
+ paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
+ spin_lock_irqsave(&pgd_lock, flags);
+ pgd_list_del(pgd);
+@@ -358,11 +380,46 @@ void pgd_dtor(void *pgd, struct kmem_cac
+ pgd_test_and_unpin(pgd);
+ }
+
++#define UNSHARED_PTRS_PER_PGD \
++ (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
++
++/* If we allocate a pmd for part of the kernel address space, then
++ make sure its initialized with the appropriate kernel mappings.
++ Otherwise use a cached zeroed pmd. */
++static pmd_t *pmd_cache_alloc(int idx)
++{
++ pmd_t *pmd;
++
++ if (idx >= USER_PTRS_PER_PGD) {
++ pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
++
++#ifndef CONFIG_XEN
++ if (pmd)
++ memcpy(pmd,
++ (void *)pgd_page_vaddr(swapper_pg_dir[idx]),
++ sizeof(pmd_t) * PTRS_PER_PMD);
++#endif
++ } else
++ pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++
++ return pmd;
++}
++
++static void pmd_cache_free(pmd_t *pmd, int idx)
++{
++ if (idx >= USER_PTRS_PER_PGD) {
++ make_lowmem_page_writable(pmd, XENFEAT_writable_page_tables);
++ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++ free_page((unsigned long)pmd);
++ } else
++ kmem_cache_free(pmd_cache, pmd);
++}
++
+ pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+ int i;
+- pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
+- pmd_t **pmd;
++ pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
++ pmd_t **pmds = NULL;
+ unsigned long flags;
+
+ pgd_test_and_unpin(pgd);
+@@ -370,37 +427,40 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+ if (PTRS_PER_PMD == 1 || !pgd)
+ return pgd;
+
+- if (HAVE_SHARED_KERNEL_PMD) {
+- for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
+- pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
+- if (!pmd)
+- goto out_oom;
+- paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
+- set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
++#ifdef CONFIG_XEN
++ if (!SHARED_KERNEL_PMD) {
++ /*
++ * We can race save/restore (if we sleep during a GFP_KERNEL memory
++ * allocation). We therefore store virtual addresses of pmds as they
++ * do not change across save/restore, and poke the machine addresses
++ * into the pgdir under the pgd_lock.
++ */
++ pmds = kmalloc(PTRS_PER_PGD * sizeof(pmd_t *), GFP_KERNEL);
++ if (!pmds) {
++ quicklist_free(0, pgd_dtor, pgd);
++ return NULL;
+ }
+- return pgd;
+- }
+-
+- /*
+- * We can race save/restore (if we sleep during a GFP_KERNEL memory
+- * allocation). We therefore store virtual addresses of pmds as they
+- * do not change across save/restore, and poke the machine addresses
+- * into the pgdir under the pgd_lock.
+- */
+- pmd = kmalloc(PTRS_PER_PGD * sizeof(pmd_t *), GFP_KERNEL);
+- if (!pmd) {
+- kmem_cache_free(pgd_cache, pgd);
+- return NULL;
+ }
++#endif
+
+ /* Allocate pmds, remember virtual addresses. */
+- for (i = 0; i < PTRS_PER_PGD; ++i) {
+- pmd[i] = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
+- if (!pmd[i])
++ for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
++ pmd_t *pmd = pmd_cache_alloc(i);
++
++ if (!pmd)
+ goto out_oom;
++
+ paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
++ if (pmds)
++ pmds[i] = pmd;
++ else
++ set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
+ }
+
++#ifdef CONFIG_XEN
++ if (SHARED_KERNEL_PMD)
++ return pgd;
++
+ spin_lock_irqsave(&pgd_lock, flags);
+
+ /* Protect against save/restore: move below 4GB under pgd_lock. */
+@@ -419,40 +479,41 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+ pgd_t *kpgd = pgd_offset_k(v);
+ pud_t *kpud = pud_offset(kpgd, v);
+ pmd_t *kpmd = pmd_offset(kpud, v);
+- memcpy(pmd[i], kpmd, PAGE_SIZE);
++ memcpy(pmds[i], kpmd, PAGE_SIZE);
+ make_lowmem_page_readonly(
+- pmd[i], XENFEAT_writable_page_tables);
++ pmds[i], XENFEAT_writable_page_tables);
+ }
+
+ /* It is safe to poke machine addresses of pmds under the pmd_lock. */
+ for (i = 0; i < PTRS_PER_PGD; i++)
+- set_pgd(&pgd[i], __pgd(1 + __pa(pmd[i])));
++ set_pgd(&pgd[i], __pgd(1 + __pa(pmds[i])));
+
+ /* Ensure this pgd gets picked up and pinned on save/restore. */
+ pgd_list_add(pgd);
+
+ spin_unlock_irqrestore(&pgd_lock, flags);
+
+- kfree(pmd);
++ kfree(pmds);
++#endif
+
+ return pgd;
+
+ out_oom:
+- if (HAVE_SHARED_KERNEL_PMD) {
++ if (!pmds) {
+ for (i--; i >= 0; i--) {
+ pgd_t pgdent = pgd[i];
+ void* pmd = (void *)__va(pgd_val(pgdent)-1);
+ paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
+- kmem_cache_free(pmd_cache, pmd);
++ pmd_cache_free(pmd, i);
+ }
+ } else {
+ for (i--; i >= 0; i--) {
+- paravirt_release_pd(__pa(pmd[i]) >> PAGE_SHIFT);
+- kmem_cache_free(pmd_cache, pmd[i]);
++ paravirt_release_pd(__pa(pmds[i]) >> PAGE_SHIFT);
++ pmd_cache_free(pmds[i], i);
+ }
+- kfree(pmd);
++ kfree(pmds);
+ }
+- kmem_cache_free(pgd_cache, pgd);
++ quicklist_free(0, pgd_dtor, pgd);
+ return NULL;
+ }
+
+@@ -472,35 +533,31 @@ void pgd_free(pgd_t *pgd)
+
+ /* in the PAE case user pgd entries are overwritten before usage */
+ if (PTRS_PER_PMD > 1) {
+- for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
+- pgd_t pgdent = pgd[i];
+- void* pmd = (void *)__va(pgd_val(pgdent)-1);
+- paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
+- kmem_cache_free(pmd_cache, pmd);
+- }
+-
+- if (!HAVE_SHARED_KERNEL_PMD) {
++ if (!SHARED_KERNEL_PMD) {
+ unsigned long flags;
+ spin_lock_irqsave(&pgd_lock, flags);
+ pgd_list_del(pgd);
+ spin_unlock_irqrestore(&pgd_lock, flags);
++ }
+
+- for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
+- pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
+- make_lowmem_page_writable(
+- pmd, XENFEAT_writable_page_tables);
+- memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
+- kmem_cache_free(pmd_cache, pmd);
+- }
+-
+- if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
+- xen_destroy_contiguous_region(
+- (unsigned long)pgd, 0);
++ for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
++ pgd_t pgdent = pgd[i];
++ void* pmd = (void *)__va(pgd_val(pgdent)-1);
++ paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
++ pmd_cache_free(pmd, i);
+ }
++
++ if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
++ xen_destroy_contiguous_region((unsigned long)pgd, 0);
+ }
+
+ /* in the non-PAE case, free_pgtables() clears user pgd entries */
+- kmem_cache_free(pgd_cache, pgd);
++ quicklist_free(0, pgd_dtor, pgd);
++}
++
++void check_pgt_cache(void)
++{
++ quicklist_trim(0, pgd_dtor, 25, 16);
+ }
+
+ void make_lowmem_page_readonly(void *va, unsigned int feature)
+@@ -719,13 +776,13 @@ void mm_pin_all(void)
+ spin_unlock_irqrestore(&pgd_lock, flags);
+ }
+
+-void _arch_dup_mmap(struct mm_struct *mm)
++void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+ {
+ if (!test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags))
+ mm_pin(mm);
+ }
+
+-void _arch_exit_mmap(struct mm_struct *mm)
++void arch_exit_mmap(struct mm_struct *mm)
+ {
+ struct task_struct *tsk = current;
+
+Index: 10.3-2007-11-26/arch/x86_64/Kconfig
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/Kconfig 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/Kconfig 2007-10-22 13:58:57.000000000 +0200
+@@ -599,7 +599,7 @@ config CRASH_DUMP
+
+ config RELOCATABLE
+ bool "Build a relocatable kernel(EXPERIMENTAL)"
+- depends on EXPERIMENTAL
++ depends on EXPERIMENTAL && !X86_64_XEN
+ help
+ Builds a relocatable kernel. This enables loading and running
+ a kernel binary from a different physical address than it has
+@@ -736,7 +736,7 @@ menu "Bus options (PCI etc.)"
+
+ config PCI
+ bool "PCI support"
+- select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
++ select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC && !X86_64_XEN)
+
+ # x86-64 doesn't support PCI BIOS access from long mode so always go direct.
+ config PCI_DIRECT
+Index: 10.3-2007-11-26/arch/x86_64/ia32/ia32entry-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/ia32/ia32entry-xen.S 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/ia32/ia32entry-xen.S 2007-10-22 13:58:57.000000000 +0200
+@@ -508,11 +508,7 @@ ia32_sys_call_table:
+ .quad sys_symlink
+ .quad sys_lstat
+ .quad sys_readlink /* 85 */
+-#ifdef CONFIG_IA32_AOUT
+ .quad sys_uselib
+-#else
+- .quad quiet_ni_syscall
+-#endif
+ .quad sys_swapon
+ .quad sys_reboot
+ .quad compat_sys_old_readdir
+@@ -651,7 +647,7 @@ ia32_sys_call_table:
+ .quad quiet_ni_syscall /* tux */
+ .quad quiet_ni_syscall /* security */
+ .quad sys_gettid
+- .quad sys_readahead /* 225 */
++ .quad sys32_readahead /* 225 */
+ .quad sys_setxattr
+ .quad sys_lsetxattr
+ .quad sys_fsetxattr
+@@ -676,7 +672,7 @@ ia32_sys_call_table:
+ .quad compat_sys_io_getevents
+ .quad compat_sys_io_submit
+ .quad sys_io_cancel
+- .quad sys_fadvise64 /* 250 */
++ .quad sys32_fadvise64 /* 250 */
+ .quad quiet_ni_syscall /* free_huge_pages */
+ .quad sys_exit_group
+ .quad sys32_lookup_dcookie
+@@ -740,10 +736,14 @@ ia32_sys_call_table:
+ .quad compat_sys_set_robust_list
+ .quad compat_sys_get_robust_list
+ .quad sys_splice
+- .quad sys_sync_file_range
+- .quad sys_tee
++ .quad sys32_sync_file_range
++ .quad sys_tee /* 315 */
+ .quad compat_sys_vmsplice
+ .quad compat_sys_move_pages
+ .quad sys_getcpu
+ .quad sys_epoll_pwait
+-ia32_syscall_end:
++ .quad compat_sys_utimensat /* 320 */
++ .quad compat_sys_signalfd
++ .quad compat_sys_timerfd
++ .quad sys_eventfd
++ia32_syscall_end:
+Index: 10.3-2007-11-26/arch/x86_64/ia32/syscall32-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/ia32/syscall32-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/ia32/syscall32-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -13,6 +13,7 @@
+ #include <asm/proto.h>
+ #include <asm/tlbflush.h>
+ #include <asm/ia32_unistd.h>
++#include <asm/vsyscall32.h>
+
+ #ifdef USE_INT80
+ extern unsigned char syscall32_int80[], syscall32_int80_end[];
+Index: 10.3-2007-11-26/arch/x86_64/kernel/apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/apic-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/apic-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -19,7 +19,6 @@
+ #include <linux/mm.h>
+ #include <linux/delay.h>
+ #include <linux/bootmem.h>
+-#include <linux/smp_lock.h>
+ #include <linux/interrupt.h>
+ #include <linux/mc146818rtc.h>
+ #include <linux/kernel_stat.h>
+Index: 10.3-2007-11-26/arch/x86_64/kernel/e820-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/e820-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/e820-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -17,6 +17,8 @@
+ #include <linux/kexec.h>
+ #include <linux/module.h>
+ #include <linux/mm.h>
++#include <linux/suspend.h>
++#include <linux/pfn.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/page.h>
+@@ -28,7 +30,7 @@
+
+ struct e820map e820 __initdata;
+ #ifdef CONFIG_XEN
+-struct e820map machine_e820 __initdata;
++struct e820map machine_e820;
+ #endif
+
+ /*
+@@ -105,17 +107,23 @@ static inline int bad_addr(unsigned long
+ return 0;
+ }
+
+-#ifndef CONFIG_XEN
+ /*
+ * This function checks if any part of the range <start,end> is mapped
+ * with type.
+ */
+-int __meminit
++int
+ e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
+ {
+ int i;
++#ifndef CONFIG_XEN
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
++#else
++ if (!is_initial_xendomain())
++ return 0;
++ for (i = 0; i < machine_e820.nr_map; i++) {
++ const struct e820entry *ei = &machine_e820.map[i];
++#endif
+ if (type && ei->type != type)
+ continue;
+ if (ei->addr >= end || ei->addr + ei->size <= start)
+@@ -124,7 +132,7 @@ e820_any_mapped(unsigned long start, uns
+ }
+ return 0;
+ }
+-#endif
++EXPORT_SYMBOL_GPL(e820_any_mapped);
+
+ /*
+ * This function checks if the entire range <start,end> is mapped with type.
+@@ -284,22 +292,6 @@ void __init e820_reserve_resources(struc
+ }
+
+ #ifndef CONFIG_XEN
+-/* Mark pages corresponding to given address range as nosave */
+-static void __init
+-e820_mark_nosave_range(unsigned long start, unsigned long end)
+-{
+- unsigned long pfn, max_pfn;
+-
+- if (start >= end)
+- return;
+-
+- printk("Nosave address range: %016lx - %016lx\n", start, end);
+- max_pfn = end >> PAGE_SHIFT;
+- for (pfn = start >> PAGE_SHIFT; pfn < max_pfn; pfn++)
+- if (pfn_valid(pfn))
+- SetPageNosave(pfn_to_page(pfn));
+-}
+-
+ /*
+ * Find the ranges of physical addresses that do not correspond to
+ * e820 RAM areas and mark the corresponding pages as nosave for software
+@@ -318,13 +310,13 @@ void __init e820_mark_nosave_regions(voi
+ struct e820entry *ei = &e820.map[i];
+
+ if (paddr < ei->addr)
+- e820_mark_nosave_range(paddr,
+- round_up(ei->addr, PAGE_SIZE));
++ register_nosave_region(PFN_DOWN(paddr),
++ PFN_UP(ei->addr));
+
+ paddr = round_down(ei->addr + ei->size, PAGE_SIZE);
+ if (ei->type != E820_RAM)
+- e820_mark_nosave_range(round_up(ei->addr, PAGE_SIZE),
+- paddr);
++ register_nosave_region(PFN_UP(ei->addr),
++ PFN_DOWN(paddr));
+
+ if (paddr >= (end_pfn << PAGE_SHIFT))
+ break;
+Index: 10.3-2007-11-26/arch/x86_64/kernel/early_printk-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/early_printk-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/early_printk-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -11,11 +11,10 @@
+
+ #ifdef __i386__
+ #include <asm/setup.h>
+-#define VGABASE (__ISA_IO_base + 0xb8000)
+ #else
+ #include <asm/bootsetup.h>
+-#define VGABASE ((void __iomem *)0xffffffff800b8000UL)
+ #endif
++#define VGABASE (__ISA_IO_base + 0xb8000)
+
+ #ifndef CONFIG_XEN
+ static int max_ypos = 25, max_xpos = 80;
+@@ -93,9 +92,9 @@ static int early_serial_putc(unsigned ch
+ static void early_serial_write(struct console *con, const char *s, unsigned n)
+ {
+ while (*s && n-- > 0) {
+- early_serial_putc(*s);
+ if (*s == '\n')
+ early_serial_putc('\r');
++ early_serial_putc(*s);
+ s++;
+ }
+ }
+@@ -205,7 +204,7 @@ static noinline long simnow(long cmd, lo
+ return ret;
+ }
+
+-void __init simnow_init(char *str)
++static void __init simnow_init(char *str)
+ {
+ char *fn = "klog";
+ if (*str == '=')
+@@ -277,22 +276,12 @@ static int __init setup_early_printk(cha
+ early_console = &simnow_console;
+ keep_early = 1;
+ }
++
++ if (keep_early)
++ early_console->flags &= ~CON_BOOT;
++ else
++ early_console->flags |= CON_BOOT;
+ register_console(early_console);
+ return 0;
+ }
+-
+ early_param("earlyprintk", setup_early_printk);
+-
+-void __init disable_early_printk(void)
+-{
+- if (!early_console_initialized || !early_console)
+- return;
+- if (!keep_early) {
+- printk("disabling early console\n");
+- unregister_console(early_console);
+- early_console_initialized = 0;
+- } else {
+- printk("keeping early console\n");
+- }
+-}
+-
+Index: 10.3-2007-11-26/arch/x86_64/kernel/entry-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/entry-xen.S 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/entry-xen.S 2007-10-22 13:58:57.000000000 +0200
+@@ -1261,3 +1261,10 @@ ENTRY(call_softirq)
+ ret
+ CFI_ENDPROC
+ ENDPROC(call_softirq)
++
++KPROBE_ENTRY(ignore_sysret)
++ CFI_STARTPROC
++ mov $-ENOSYS,%eax
++ sysret
++ CFI_ENDPROC
++ENDPROC(ignore_sysret)
+Index: 10.3-2007-11-26/arch/x86_64/kernel/genapic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/genapic-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/genapic-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -11,123 +11,57 @@
+ #include <linux/threads.h>
+ #include <linux/cpumask.h>
+ #include <linux/string.h>
++#include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/ctype.h>
+ #include <linux/init.h>
+-#include <linux/module.h>
+
+ #include <asm/smp.h>
+ #include <asm/ipi.h>
++#include <asm/genapic.h>
+
+-#if defined(CONFIG_ACPI)
++#ifdef CONFIG_ACPI
+ #include <acpi/acpi_bus.h>
+ #endif
+
+ /* which logical CPU number maps to which CPU (physical APIC ID) */
+-u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
++u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly
++ = { [0 ... NR_CPUS-1] = BAD_APICID };
+ EXPORT_SYMBOL(x86_cpu_to_apicid);
+-u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+
+-extern struct genapic apic_cluster;
+-extern struct genapic apic_flat;
+-extern struct genapic apic_physflat;
++u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+
+ #ifndef CONFIG_XEN
+-struct genapic *genapic = &apic_flat;
+-struct genapic *genapic_force;
++struct genapic __read_mostly *genapic = &apic_flat;
+ #else
+ extern struct genapic apic_xen;
+-struct genapic *genapic = &apic_xen;
++struct genapic __read_mostly *genapic = &apic_xen;
+ #endif
+
+
+ /*
+ * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
+ */
+-void __init clustered_apic_check(void)
++void __init setup_apic_routing(void)
+ {
+ #ifndef CONFIG_XEN
+- long i;
+- u8 clusters, max_cluster;
+- u8 id;
+- u8 cluster_cnt[NUM_APIC_CLUSTERS];
+- int max_apic = 0;
+-
+- /* genapic selection can be forced because of certain quirks.
+- */
+- if (genapic_force) {
+- genapic = genapic_force;
+- goto print;
+- }
+-
+-#if defined(CONFIG_ACPI)
++#ifdef CONFIG_ACPI
+ /*
+- * Some x86_64 machines use physical APIC mode regardless of how many
+- * procs/clusters are present (x86_64 ES7000 is an example).
++ * Quirk: some x86_64 machines can only use physical APIC mode
++ * regardless of how many processors are present (x86_64 ES7000
++ * is an example).
+ */
+- if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID)
+- if (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) {
+- genapic = &apic_cluster;
+- goto print;
+- }
+-#endif
+-
+- memset(cluster_cnt, 0, sizeof(cluster_cnt));
+- for (i = 0; i < NR_CPUS; i++) {
+- id = bios_cpu_apicid[i];
+- if (id == BAD_APICID)
+- continue;
+- if (id > max_apic)
+- max_apic = id;
+- cluster_cnt[APIC_CLUSTERID(id)]++;
+- }
+-
+- /* Don't use clustered mode on AMD platforms. */
+- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
++ if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
++ (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL))
+ genapic = &apic_physflat;
+-#ifndef CONFIG_HOTPLUG_CPU
+- /* In the CPU hotplug case we cannot use broadcast mode
+- because that opens a race when a CPU is removed.
+- Stay at physflat mode in this case.
+- It is bad to do this unconditionally though. Once
+- we have ACPI platform support for CPU hotplug
+- we should detect hotplug capablity from ACPI tables and
+- only do this when really needed. -AK */
+- if (max_apic <= 8)
+- genapic = &apic_flat;
+-#endif
+- goto print;
+- }
+-
+- clusters = 0;
+- max_cluster = 0;
+-
+- for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
+- if (cluster_cnt[i] > 0) {
+- ++clusters;
+- if (cluster_cnt[i] > max_cluster)
+- max_cluster = cluster_cnt[i];
+- }
+- }
++ else
++#endif
+
+- /*
+- * If we have clusters <= 1 and CPUs <= 8 in cluster 0, then flat mode,
+- * else if max_cluster <= 4 and cluster_cnt[15] == 0, clustered logical
+- * else physical mode.
+- * (We don't use lowest priority delivery + HW APIC IRQ steering, so
+- * can ignore the clustered logical case and go straight to physical.)
+- */
+- if (clusters <= 1 && max_cluster <= 8 && cluster_cnt[0] == max_cluster) {
+-#ifdef CONFIG_HOTPLUG_CPU
+- /* Don't use APIC shortcuts in CPU hotplug to avoid races */
+- genapic = &apic_physflat;
+-#else
++ if (cpus_weight(cpu_possible_map) <= 8)
+ genapic = &apic_flat;
+-#endif
+- } else
+- genapic = &apic_cluster;
++ else
++ genapic = &apic_physflat;
+
+-print:
+ #else
+ /* hardcode to xen apic functions */
+ genapic = &apic_xen;
+@@ -135,7 +69,7 @@ print:
+ printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
+ }
+
+-/* Same for both flat and clustered. */
++/* Same for both flat and physical. */
+
+ #ifdef CONFIG_XEN
+ extern void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest);
+Index: 10.3-2007-11-26/arch/x86_64/kernel/genapic_xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/genapic_xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/genapic_xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -21,9 +21,8 @@
+ #include <asm/ipi.h>
+ #else
+ #include <asm/apic.h>
+-#include <asm/apicdef.h>
+-#include <asm/genapic.h>
+ #endif
++#include <asm/genapic.h>
+ #include <xen/evtchn.h>
+
+ DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
+Index: 10.3-2007-11-26/arch/x86_64/kernel/head-xen.S
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/head-xen.S 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/head-xen.S 2007-10-22 13:58:57.000000000 +0200
+@@ -5,6 +5,7 @@
+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
+ * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
++ * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Modified for Xen
+ */
+@@ -37,18 +38,14 @@ ENTRY(_start)
+ pushq $0 # fake return address
+ jmp x86_64_start_kernel
+
+-ENTRY(stext)
+-ENTRY(_stext)
++.balign PAGE_SIZE
+
+- $page = 0
+ #define NEXT_PAGE(name) \
+- $page = $page + 1; \
+- .org $page * 0x1000; \
+- phys_##name = $page * 0x1000 + __PHYSICAL_START; \
++ .balign PAGE_SIZE; \
++ phys_##name = . - .bootstrap.text; \
+ ENTRY(name)
+
+ NEXT_PAGE(init_level4_pgt)
+- /* This gets initialized in x86_64_start_kernel */
+ .fill 512,8,0
+
+ /*
+@@ -125,13 +122,13 @@ gdt:
+
+ ENTRY(cpu_gdt_table)
+ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
++ .quad 0x00af9b000000ffff /* __KERNEL_CS */
++ .quad 0x00cf93000000ffff /* __KERNEL_DS */
++ .quad 0x00cffb000000ffff /* __USER32_CS */
++ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
++ .quad 0x00affb000000ffff /* __USER_CS */
+ .quad 0x0 /* unused */
+- .quad 0x00af9a000000ffff /* __KERNEL_CS */
+- .quad 0x00cf92000000ffff /* __KERNEL_DS */
+- .quad 0x00cffa000000ffff /* __USER32_CS */
+- .quad 0x00cff2000000ffff /* __USER_DS, __USER32_DS */
+- .quad 0x00affa000000ffff /* __USER_CS */
+- .quad 0x00cf9a000000ffff /* __KERNEL32_CS */
+ .quad 0,0 /* TSS */
+ .quad 0,0 /* LDT */
+ .quad 0,0,0 /* three TLS descriptors */
+@@ -154,14 +151,11 @@ ENTRY(empty_zero_page)
+ * __xen_guest information
+ */
+ .macro utoh value
+- .if (\value) < 0 || (\value) >= 0x10
+- utoh (((\value)>>4)&0x0fffffffffffffff)
+- .endif
+- .if ((\value) & 0xf) < 10
+- .byte '0' + ((\value) & 0xf)
+- .else
+- .byte 'A' + ((\value) & 0xf) - 10
+- .endif
++ i = 64
++ .rept 16
++ i = i - 4
++ .byte '0' + ((((\value) >> i) & 0xf) > 9) * ('0' - 'A' + 10) + (((\value) >> i) & 0xf)
++ .endr
+ .endm
+
+ .section __xen_guest
+Index: 10.3-2007-11-26/arch/x86_64/kernel/head64-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/head64-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/head64-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -22,13 +22,21 @@
+ #include <asm/setup.h>
+ #include <asm/desc.h>
+ #include <asm/pgtable.h>
++#include <asm/tlbflush.h>
+ #include <asm/sections.h>
+
+ unsigned long start_pfn;
+
++#ifndef CONFIG_XEN
++static void __init zap_identity_mappings(void)
++{
++ pgd_t *pgd = pgd_offset_k(0UL);
++ pgd_clear(pgd);
++ __flush_tlb();
++}
++
+ /* Don't add a printk in there. printk relies on the PDA which is not initialized
+ yet. */
+-#if 0
+ static void __init clear_bss(void)
+ {
+ memset(__bss_start, 0,
+@@ -37,7 +45,7 @@ static void __init clear_bss(void)
+ #endif
+
+ #define NEW_CL_POINTER 0x228 /* Relative to real mode data */
+-#define OLD_CL_MAGIC_ADDR 0x90020
++#define OLD_CL_MAGIC_ADDR 0x20
+ #define OLD_CL_MAGIC 0xA33F
+ #define OLD_CL_BASE_ADDR 0x90000
+ #define OLD_CL_OFFSET 0x90022
+@@ -45,18 +53,18 @@ static void __init clear_bss(void)
+ static void __init copy_bootdata(char *real_mode_data)
+ {
+ #ifndef CONFIG_XEN
+- int new_data;
++ unsigned long new_data;
+ char * command_line;
+
+ memcpy(x86_boot_params, real_mode_data, BOOT_PARAM_SIZE);
+- new_data = *(int *) (x86_boot_params + NEW_CL_POINTER);
++ new_data = *(u32 *) (x86_boot_params + NEW_CL_POINTER);
+ if (!new_data) {
+- if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) {
++ if (OLD_CL_MAGIC != *(u16 *)(real_mode_data + OLD_CL_MAGIC_ADDR)) {
+ return;
+ }
+- new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
++ new_data = __pa(real_mode_data) + *(u16 *)(real_mode_data + OLD_CL_OFFSET);
+ }
+- command_line = (char *) ((u64)(new_data));
++ command_line = __va(new_data);
+ memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+ #else
+ int max_cmdline;
+@@ -98,10 +106,13 @@ void __init x86_64_start_kernel(char * r
+ while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
+ machine_to_phys_order++;
+
+-#if 0
++#ifndef CONFIG_XEN
+ /* clear bss before set_intr_gate with early_idt_handler */
+ clear_bss();
+
++ /* Make NULL pointers segfault */
++ zap_identity_mappings();
++
+ for (i = 0; i < IDT_ENTRIES; i++)
+ set_intr_gate(i, early_idt_handler);
+ asm volatile("lidt %0" :: "m" (idt_descr));
+@@ -113,7 +124,7 @@ void __init x86_64_start_kernel(char * r
+ cpu_pda(i) = &boot_cpu_pda[i];
+
+ pda_init(0);
+- copy_bootdata(real_mode_data);
++ copy_bootdata(__va(real_mode_data));
+ #ifdef CONFIG_SMP
+ cpu_set(0, cpu_online_map);
+ #endif
+Index: 10.3-2007-11-26/arch/x86_64/kernel/io_apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/io_apic-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/io_apic-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -25,7 +25,6 @@
+ #include <linux/init.h>
+ #include <linux/delay.h>
+ #include <linux/sched.h>
+-#include <linux/smp_lock.h>
+ #include <linux/pci.h>
+ #include <linux/mc146818rtc.h>
+ #include <linux/acpi.h>
+@@ -897,10 +896,6 @@ static void __init setup_ExtINT_IRQ0_pin
+ enable_8259A_irq(0);
+ }
+
+-void __init UNEXPECTED_IO_APIC(void)
+-{
+-}
+-
+ void __apicdebuginit print_IO_APIC(void)
+ {
+ int apic, i;
+@@ -936,40 +931,16 @@ void __apicdebuginit print_IO_APIC(void)
+ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
+ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
+ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
+- if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
+- UNEXPECTED_IO_APIC();
+
+ printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
+ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
+- if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
+- (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
+- (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
+- (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
+- (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
+- (reg_01.bits.entries != 0x2E) &&
+- (reg_01.bits.entries != 0x3F) &&
+- (reg_01.bits.entries != 0x03)
+- )
+- UNEXPECTED_IO_APIC();
+
+ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
+ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
+- if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
+- (reg_01.bits.version != 0x02) && /* 82801BA IO-APICs (ICH2) */
+- (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
+- (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
+- (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
+- (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
+- )
+- UNEXPECTED_IO_APIC();
+- if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
+- UNEXPECTED_IO_APIC();
+
+ if (reg_01.bits.version >= 0x10) {
+ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
+ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
+- if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
+- UNEXPECTED_IO_APIC();
+ }
+
+ printk(KERN_DEBUG ".... IRQ redirection table:\n");
+@@ -1408,8 +1379,7 @@ static void irq_complete_move(unsigned i
+
+ vector = ~get_irq_regs()->orig_rax;
+ me = smp_processor_id();
+- if ((vector == cfg->vector) &&
+- cpu_isset(smp_processor_id(), cfg->domain)) {
++ if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
+ cpumask_t cleanup_mask;
+
+ cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+@@ -1444,7 +1414,7 @@ static void ack_apic_level(unsigned int
+
+ /*
+ * We must acknowledge the irq before we move it or the acknowledge will
+- * not propogate properly.
++ * not propagate properly.
+ */
+ ack_APIC_irq();
+
+@@ -1527,6 +1497,7 @@ static void ack_lapic_irq (unsigned int
+ static void end_lapic_irq (unsigned int i) { /* nothing */ }
+
+ static struct hw_interrupt_type lapic_irq_type __read_mostly = {
++ .name = "local-APIC",
+ .typename = "local-APIC-edge",
+ .startup = NULL, /* startup_irq() not used for IRQ0 */
+ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
+@@ -1996,18 +1967,18 @@ int arch_setup_msi_irq(struct pci_dev *d
+ if (irq < 0)
+ return irq;
+
+- set_irq_msi(irq, desc);
+ ret = msi_compose_msg(dev, irq, &msg);
+ if (ret < 0) {
+ destroy_irq(irq);
+ return ret;
+ }
+
++ set_irq_msi(irq, desc);
+ write_msi_msg(irq, &msg);
+
+ set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
+
+- return irq;
++ return 0;
+ }
+
+ void arch_teardown_msi_irq(unsigned int irq)
+Index: 10.3-2007-11-26/arch/x86_64/kernel/ioport-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/ioport-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/ioport-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -13,10 +13,10 @@
+ #include <linux/ioport.h>
+ #include <linux/mm.h>
+ #include <linux/smp.h>
+-#include <linux/smp_lock.h>
+ #include <linux/stddef.h>
+ #include <linux/slab.h>
+ #include <linux/thread_info.h>
++#include <linux/syscalls.h>
+ #include <xen/interface/physdev.h>
+
+ /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
+Index: 10.3-2007-11-26/arch/x86_64/kernel/irq-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/irq-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/irq-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -32,7 +32,7 @@ atomic_t irq_err_count;
+ */
+ static inline void stack_overflow_check(struct pt_regs *regs)
+ {
+- u64 curbase = (u64) current->thread_info;
++ u64 curbase = (u64)task_stack_page(current);
+ static unsigned long warned = -60*HZ;
+
+ if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
+@@ -145,17 +145,43 @@ void fixup_irqs(cpumask_t map)
+
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ cpumask_t mask;
++ int break_affinity = 0;
++ int set_affinity = 1;
++
+ if (irq == 2)
+ continue;
+
++ /* interrupt's are disabled at this point */
++ spin_lock(&irq_desc[irq].lock);
++
++ if (!irq_has_action(irq) ||
++ cpus_equal(irq_desc[irq].affinity, map)) {
++ spin_unlock(&irq_desc[irq].lock);
++ continue;
++ }
++
+ cpus_and(mask, irq_desc[irq].affinity, map);
+- if (any_online_cpu(mask) == NR_CPUS) {
+- printk("Breaking affinity for irq %i\n", irq);
++ if (cpus_empty(mask)) {
++ break_affinity = 1;
+ mask = map;
+ }
++
++ if (irq_desc[irq].chip->mask)
++ irq_desc[irq].chip->mask(irq);
++
+ if (irq_desc[irq].chip->set_affinity)
+ irq_desc[irq].chip->set_affinity(irq, mask);
+- else if (irq_desc[irq].action && !(warned++))
++ else if (!(warned++))
++ set_affinity = 0;
++
++ if (irq_desc[irq].chip->unmask)
++ irq_desc[irq].chip->unmask(irq);
++
++ spin_unlock(&irq_desc[irq].lock);
++
++ if (break_affinity && set_affinity)
++ printk("Broke affinity for irq %i\n", irq);
++ else if (!set_affinity)
+ printk("Cannot set affinity for irq %i\n", irq);
+ }
+
+Index: 10.3-2007-11-26/arch/x86_64/kernel/ldt-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/ldt-xen.c 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/ldt-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -13,7 +13,6 @@
+ #include <linux/string.h>
+ #include <linux/mm.h>
+ #include <linux/smp.h>
+-#include <linux/smp_lock.h>
+ #include <linux/vmalloc.h>
+ #include <linux/slab.h>
+
+Index: 10.3-2007-11-26/arch/x86_64/kernel/mpparse-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/mpparse-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/mpparse-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -17,7 +17,6 @@
+ #include <linux/init.h>
+ #include <linux/delay.h>
+ #include <linux/bootmem.h>
+-#include <linux/smp_lock.h>
+ #include <linux/kernel_stat.h>
+ #include <linux/mc146818rtc.h>
+ #include <linux/acpi.h>
+@@ -307,7 +306,7 @@ static int __init smp_read_mpc(struct mp
+ }
+ }
+ }
+- clustered_apic_check();
++ setup_apic_routing();
+ if (!num_processors)
+ printk(KERN_ERR "MPTABLE: no processors registered!\n");
+ return num_processors;
+Index: 10.3-2007-11-26/arch/x86_64/kernel/pci-swiotlb-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/pci-swiotlb-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/pci-swiotlb-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -16,7 +16,7 @@ EXPORT_SYMBOL(swiotlb);
+
+ void swiotlb_init(void);
+
+-struct dma_mapping_ops swiotlb_dma_ops = {
++const struct dma_mapping_ops swiotlb_dma_ops = {
+ #if 0
+ .mapping_error = swiotlb_dma_mapping_error,
+ .alloc_coherent = swiotlb_alloc_coherent,
+Index: 10.3-2007-11-26/arch/x86_64/kernel/process-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/process-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/process-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -39,6 +39,7 @@
+ #include <linux/random.h>
+ #include <linux/notifier.h>
+ #include <linux/kprobes.h>
++#include <linux/kdebug.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -49,7 +50,6 @@
+ #include <asm/mmu_context.h>
+ #include <asm/pda.h>
+ #include <asm/prctl.h>
+-#include <asm/kdebug.h>
+ #include <xen/interface/platform.h>
+ #include <xen/interface/physdev.h>
+ #include <xen/interface/vcpu.h>
+@@ -232,16 +232,18 @@ void __cpuinit select_idle_routine(const
+
+ static int __init idle_setup (char *str)
+ {
+- if (!strncmp(str, "poll", 4)) {
++ if (!strcmp(str, "poll")) {
+ printk("using polling idle threads.\n");
+ pm_idle = poll_idle;
+- }
++ } else if (!strcmp(str, "mwait"))
++ force_mwait = 1;
++ else
++ return -1;
+
+ boot_option_idle_override = 1;
+- return 1;
++ return 0;
+ }
+-
+-__setup("idle=", idle_setup);
++early_param("idle", idle_setup);
+
+ /* Prints also some state that isn't saved in the pt_regs */
+ void __show_regs(struct pt_regs * regs)
+@@ -532,7 +534,7 @@ __switch_to(struct task_struct *prev_p,
+ * The AMD workaround requires it to be after DS reload, or
+ * after DS has been cleared, which we do in __prepare_arch_switch.
+ */
+- if (prev_p->thread_info->status & TS_USEDFPU) {
++ if (task_thread_info(prev_p)->status & TS_USEDFPU) {
+ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
+ mcl->op = __HYPERVISOR_fpu_taskswitch;
+ mcl->args[0] = 1;
+Index: 10.3-2007-11-26/arch/x86_64/kernel/setup-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/setup-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/setup-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -123,6 +123,8 @@ int bootloader_type;
+
+ unsigned long saved_video_mode;
+
++int force_mwait __cpuinitdata;
++
+ /*
+ * Early DMI memory
+ */
+@@ -256,10 +258,10 @@ static void discover_ebda(void)
+ * there is a real-mode segmented pointer pointing to the
+ * 4K EBDA area at 0x40E
+ */
+- ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
++ ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
+ ebda_addr <<= 4;
+
+- ebda_size = *(unsigned short *)(unsigned long)ebda_addr;
++ ebda_size = *(unsigned short *)__va(ebda_addr);
+
+ /* Round EBDA up to pages */
+ if (ebda_size == 0)
+@@ -434,15 +436,8 @@ void __init setup_arch(char **cmdline_p)
+ #endif
+
+ #ifdef CONFIG_SMP
+- /*
+- * But first pinch a few for the stack/trampoline stuff
+- * FIXME: Don't need the extra page at 4K, but need to fix
+- * trampoline before removing it. (see the GDT stuff)
+- */
+- reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
+-
+ /* Reserve SMP trampoline */
+- reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
++ reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
+ #endif
+ #endif
+
+@@ -563,8 +558,6 @@ void __init setup_arch(char **cmdline_p)
+ early_quirks();
+ #endif
+
+- zap_low_mappings(0);
+-
+ /*
+ * set this early, so we dont allocate cpu0
+ * if MADT list doesnt list BSP first
+@@ -868,6 +861,10 @@ static void __cpuinit init_amd(struct cp
+
+ /* RDTSC can be speculated around */
+ clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
++
++ /* Family 10 doesn't support C states in MWAIT so don't use it */
++ if (c->x86 == 0x10 && !force_mwait)
++ clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);
+ }
+
+ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+@@ -1150,9 +1147,7 @@ void __cpuinit identify_cpu(struct cpuin
+ #ifdef CONFIG_X86_MCE
+ mcheck_init(c);
+ #endif
+- if (c == &boot_cpu_data)
+- mtrr_bp_init();
+- else
++ if (c != &boot_cpu_data)
+ mtrr_ap_init();
+ #ifdef CONFIG_NUMA
+ numa_add_cpu(smp_processor_id());
+@@ -1243,9 +1238,8 @@ static int show_cpuinfo(struct seq_file
+ "stc",
+ "100mhzsteps",
+ "hwpstate",
+- NULL, /* tsc invariant mapped to constant_tsc */
+- NULL,
+- /* nothing */ /* constant_tsc - moved to flags */
++ "", /* tsc invariant mapped to constant_tsc */
++ /* nothing */
+ };
+
+
+Index: 10.3-2007-11-26/arch/x86_64/kernel/setup64-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/setup64-xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/setup64-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -113,9 +113,9 @@ void __init setup_per_cpu_areas(void)
+ if (!NODE_DATA(cpu_to_node(i))) {
+ printk("cpu with no node %d, num_online_nodes %d\n",
+ i, num_online_nodes());
+- ptr = alloc_bootmem(size);
++ ptr = alloc_bootmem_pages(size);
+ } else {
+- ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
++ ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size);
+ }
+ if (!ptr)
+ panic("Cannot allocate cpu data for CPU %d\n", i);
+@@ -206,6 +206,8 @@ char boot_exception_stacks[(N_EXCEPTION_
+ __attribute__((section(".bss.page_aligned")));
+ #endif
+
++extern asmlinkage void ignore_sysret(void);
++
+ /* May not be marked __init: used by software suspend */
+ void syscall_init(void)
+ {
+@@ -217,6 +219,7 @@ void syscall_init(void)
+ */
+ wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
+ wrmsrl(MSR_LSTAR, system_call);
++ wrmsrl(MSR_CSTAR, ignore_sysret);
+
+ /* Flags to clear on syscall */
+ wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
+@@ -260,7 +263,6 @@ void __cpuinit cpu_init (void)
+ /* CPU 0 is initialised in head64.c */
+ if (cpu != 0) {
+ pda_init(cpu);
+- zap_low_mappings(cpu);
+ }
+ #ifndef CONFIG_X86_NO_TSS
+ else
+Index: 10.3-2007-11-26/arch/x86_64/kernel/smp-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/smp-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/smp-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -14,7 +14,6 @@
+ #include <linux/mm.h>
+ #include <linux/delay.h>
+ #include <linux/spinlock.h>
+-#include <linux/smp_lock.h>
+ #include <linux/smp.h>
+ #include <linux/kernel_stat.h>
+ #include <linux/mc146818rtc.h>
+@@ -478,48 +477,40 @@ int smp_call_function (void (*func) (voi
+ }
+ EXPORT_SYMBOL(smp_call_function);
+
+-void smp_stop_cpu(void)
++static void stop_this_cpu(void *dummy)
+ {
+- unsigned long flags;
++ local_irq_disable();
+ /*
+ * Remove this CPU:
+ */
+ cpu_clear(smp_processor_id(), cpu_online_map);
+- local_irq_save(flags);
+ #ifndef CONFIG_XEN
+ disable_local_APIC();
+ #endif
+- local_irq_restore(flags);
+-}
+-
+-static void smp_really_stop_cpu(void *dummy)
+-{
+- smp_stop_cpu();
+ for (;;)
+ halt();
+ }
+
+ void smp_send_stop(void)
+ {
+- int nolock = 0;
++ int nolock;
++ unsigned long flags;
++
+ #ifndef CONFIG_XEN
+ if (reboot_force)
+ return;
+ #endif
++
+ /* Don't deadlock on the call lock in panic */
+- if (!spin_trylock(&call_lock)) {
+- /* ignore locking because we have panicked anyways */
+- nolock = 1;
+- }
+- __smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
++ nolock = !spin_trylock(&call_lock);
++ local_irq_save(flags);
++ __smp_call_function(stop_this_cpu, NULL, 0, 0);
+ if (!nolock)
+ spin_unlock(&call_lock);
+-
+- local_irq_disable();
+ #ifndef CONFIG_XEN
+ disable_local_APIC();
+ #endif
+- local_irq_enable();
++ local_irq_restore(flags);
+ }
+
+ /*
+Index: 10.3-2007-11-26/arch/x86_64/kernel/traps-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/traps-xen.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/traps-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -32,6 +32,7 @@
+ #include <linux/unwind.h>
+ #include <linux/uaccess.h>
+ #include <linux/bug.h>
++#include <linux/kdebug.h>
+
+ #include <asm/system.h>
+ #include <asm/io.h>
+@@ -39,7 +40,6 @@
+ #include <asm/debugreg.h>
+ #include <asm/desc.h>
+ #include <asm/i387.h>
+-#include <asm/kdebug.h>
+ #include <asm/processor.h>
+ #include <asm/unwind.h>
+ #include <asm/smp.h>
+@@ -71,22 +71,6 @@ asmlinkage void alignment_check(void);
+ asmlinkage void machine_check(void);
+ asmlinkage void spurious_interrupt_bug(void);
+
+-ATOMIC_NOTIFIER_HEAD(die_chain);
+-EXPORT_SYMBOL(die_chain);
+-
+-int register_die_notifier(struct notifier_block *nb)
+-{
+- vmalloc_sync_all();
+- return atomic_notifier_chain_register(&die_chain, nb);
+-}
+-EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
+-
+-int unregister_die_notifier(struct notifier_block *nb)
+-{
+- return atomic_notifier_chain_unregister(&die_chain, nb);
+-}
+-EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
+-
+ static inline void conditional_sti(struct pt_regs *regs)
+ {
+ if (regs->eflags & X86_EFLAGS_IF)
+@@ -428,8 +412,7 @@ void show_registers(struct pt_regs *regs
+ const int cpu = smp_processor_id();
+ struct task_struct *cur = cpu_pda(cpu)->pcurrent;
+
+- rsp = regs->rsp;
+-
++ rsp = regs->rsp;
+ printk("CPU %d ", cpu);
+ __show_regs(regs);
+ printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
+@@ -440,7 +423,6 @@ void show_registers(struct pt_regs *regs
+ * time of the fault..
+ */
+ if (in_kernel) {
+-
+ printk("Stack: ");
+ _show_stack(NULL, regs, (unsigned long*)rsp);
+
+@@ -485,13 +467,14 @@ static unsigned int die_nest_count;
+
+ unsigned __kprobes long oops_begin(void)
+ {
+- int cpu = smp_processor_id();
++ int cpu;
+ unsigned long flags;
+
+ oops_enter();
+
+ /* racy, but better than risking deadlock. */
+ local_irq_save(flags);
++ cpu = smp_processor_id();
+ if (!spin_trylock(&die_lock)) {
+ if (cpu == die_owner)
+ /* nested oops. should stop eventually */;
+@@ -585,10 +568,20 @@ static void __kprobes do_trap(int trapnr
+ {
+ struct task_struct *tsk = current;
+
+- tsk->thread.error_code = error_code;
+- tsk->thread.trap_no = trapnr;
+-
+ if (user_mode(regs)) {
++ /*
++ * We want error_code and trap_no set for userspace
++ * faults and kernelspace faults which result in
++ * die(), but not kernelspace faults which are fixed
++ * up. die() gives the process no chance to handle
++ * the signal and notice the kernel fault information,
++ * so that won't result in polluting the information
++ * about previously queued, but not yet delivered,
++ * faults. See also do_general_protection below.
++ */
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = trapnr;
++
+ if (exception_trace && unhandled_signal(tsk, signr))
+ printk(KERN_INFO
+ "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
+@@ -609,8 +602,11 @@ static void __kprobes do_trap(int trapnr
+ fixup = search_exception_tables(regs->rip);
+ if (fixup)
+ regs->rip = fixup->fixup;
+- else
++ else {
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = trapnr;
+ die(str, regs, error_code);
++ }
+ return;
+ }
+ }
+@@ -686,10 +682,10 @@ asmlinkage void __kprobes do_general_pro
+
+ conditional_sti(regs);
+
+- tsk->thread.error_code = error_code;
+- tsk->thread.trap_no = 13;
+-
+ if (user_mode(regs)) {
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 13;
++
+ if (exception_trace && unhandled_signal(tsk, SIGSEGV))
+ printk(KERN_INFO
+ "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
+@@ -708,6 +704,9 @@ asmlinkage void __kprobes do_general_pro
+ regs->rip = fixup->fixup;
+ return;
+ }
++
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 13;
+ if (notify_die(DIE_GPF, "general protection fault", regs,
+ error_code, 13, SIGSEGV) == NOTIFY_STOP)
+ return;
+Index: 10.3-2007-11-26/arch/x86_64/kernel/vsyscall-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/vsyscall-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/kernel/vsyscall-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -45,14 +45,34 @@
+
+ #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
+ #define __syscall_clobber "r11","rcx","memory"
++#define __pa_vsymbol(x) \
++ ({unsigned long v; \
++ extern char __vsyscall_0; \
++ asm("" : "=r" (v) : "0" (x)); \
++ ((v - VSYSCALL_FIRST_PAGE) + __pa_symbol(&__vsyscall_0)); })
+
++/*
++ * vsyscall_gtod_data contains data that is:
++ * - readonly from vsyscalls
++ * - written by timer interrupt or sysctl (/proc/sys/kernel/vsyscall64)
++ * Try to keep this structure as small as possible to avoid cache line ping-pongs
++ */
+ struct vsyscall_gtod_data_t {
+- seqlock_t lock;
+- int sysctl_enabled;
+- struct timeval wall_time_tv;
++ seqlock_t lock;
++
++ /* open coded 'struct timespec' */
++ time_t wall_time_sec;
++ u32 wall_time_nsec;
++
++ int sysctl_enabled;
+ struct timezone sys_tz;
+- cycle_t offset_base;
+- struct clocksource clock;
++ struct { /* extract of a clocksource struct */
++ cycle_t (*vread)(void);
++ cycle_t cycle_last;
++ cycle_t mask;
++ u32 mult;
++ u32 shift;
++ } clock;
+ };
+ int __vgetcpu_mode __section_vgetcpu_mode;
+
+@@ -68,9 +88,13 @@ void update_vsyscall(struct timespec *wa
+
+ write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+ /* copy vsyscall data */
+- vsyscall_gtod_data.clock = *clock;
+- vsyscall_gtod_data.wall_time_tv.tv_sec = wall_time->tv_sec;
+- vsyscall_gtod_data.wall_time_tv.tv_usec = wall_time->tv_nsec/1000;
++ vsyscall_gtod_data.clock.vread = clock->vread;
++ vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
++ vsyscall_gtod_data.clock.mask = clock->mask;
++ vsyscall_gtod_data.clock.mult = clock->mult;
++ vsyscall_gtod_data.clock.shift = clock->shift;
++ vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
++ vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
+ vsyscall_gtod_data.sys_tz = sys_tz;
+ write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+ }
+@@ -105,7 +129,8 @@ static __always_inline long time_syscall
+ static __always_inline void do_vgettimeofday(struct timeval * tv)
+ {
+ cycle_t now, base, mask, cycle_delta;
+- unsigned long seq, mult, shift, nsec_delta;
++ unsigned seq;
++ unsigned long mult, shift, nsec;
+ cycle_t (*vread)(void);
+ do {
+ seq = read_seqbegin(&__vsyscall_gtod_data.lock);
+@@ -121,21 +146,20 @@ static __always_inline void do_vgettimeo
+ mult = __vsyscall_gtod_data.clock.mult;
+ shift = __vsyscall_gtod_data.clock.shift;
+
+- *tv = __vsyscall_gtod_data.wall_time_tv;
+-
++ tv->tv_sec = __vsyscall_gtod_data.wall_time_sec;
++ nsec = __vsyscall_gtod_data.wall_time_nsec;
+ } while (read_seqretry(&__vsyscall_gtod_data.lock, seq));
+
+ /* calculate interval: */
+ cycle_delta = (now - base) & mask;
+ /* convert to nsecs: */
+- nsec_delta = (cycle_delta * mult) >> shift;
++ nsec += (cycle_delta * mult) >> shift;
+
+- /* convert to usecs and add to timespec: */
+- tv->tv_usec += nsec_delta / NSEC_PER_USEC;
+- while (tv->tv_usec > USEC_PER_SEC) {
++ while (nsec >= NSEC_PER_SEC) {
+ tv->tv_sec += 1;
+- tv->tv_usec -= USEC_PER_SEC;
++ nsec -= NSEC_PER_SEC;
+ }
++ tv->tv_usec = nsec / NSEC_PER_USEC;
+ }
+
+ int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
+@@ -151,11 +175,16 @@ int __vsyscall(0) vgettimeofday(struct t
+ * unlikely */
+ time_t __vsyscall(1) vtime(time_t *t)
+ {
++ struct timeval tv;
++ time_t result;
+ if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
+ return time_syscall(t);
+- else if (t)
+- *t = __vsyscall_gtod_data.wall_time_tv.tv_sec;
+- return __vsyscall_gtod_data.wall_time_tv.tv_sec;
++
++ vgettimeofday(&tv, 0);
++ result = tv.tv_sec;
++ if (t)
++ *t = result;
++ return result;
+ }
+
+ /* Fast way to get current CPU and node.
+@@ -224,10 +253,10 @@ static int vsyscall_sysctl_change(ctl_ta
+ return ret;
+ /* gcc has some trouble with __va(__pa()), so just do it this
+ way. */
+- map1 = ioremap(__pa_symbol(&vsysc1), 2);
++ map1 = ioremap(__pa_vsymbol(&vsysc1), 2);
+ if (!map1)
+ return -ENOMEM;
+- map2 = ioremap(__pa_symbol(&vsysc2), 2);
++ map2 = ioremap(__pa_vsymbol(&vsysc2), 2);
+ if (!map2) {
+ ret = -ENOMEM;
+ goto out;
+@@ -301,7 +330,7 @@ static int __cpuinit
+ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
+ {
+ long cpu = (long)arg;
+- if (action == CPU_ONLINE)
++ if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
+ smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
+ return NOTIFY_DONE;
+ }
+Index: 10.3-2007-11-26/arch/x86_64/mm/fault-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/mm/fault-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/mm/fault-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -15,22 +15,22 @@
+ #include <linux/mman.h>
+ #include <linux/mm.h>
+ #include <linux/smp.h>
+-#include <linux/smp_lock.h>
+ #include <linux/interrupt.h>
+ #include <linux/init.h>
+ #include <linux/tty.h>
+ #include <linux/vt_kern.h> /* For unblank_screen() */
+ #include <linux/compiler.h>
++#include <linux/vmalloc.h>
+ #include <linux/module.h>
+ #include <linux/kprobes.h>
+ #include <linux/uaccess.h>
++#include <linux/kdebug.h>
+
+ #include <asm/system.h>
+ #include <asm/pgalloc.h>
+ #include <asm/smp.h>
+ #include <asm/tlbflush.h>
+ #include <asm/proto.h>
+-#include <asm/kdebug.h>
+ #include <asm-generic/sections.h>
+
+ /* Page fault error code bits */
+@@ -537,6 +537,12 @@ bad_area:
+ bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if (error_code & PF_USER) {
++
++ /*
++ * It's possible to have interrupts off here.
++ */
++ local_irq_enable();
++
+ if (is_prefetch(regs, address, error_code))
+ return;
+
+@@ -646,7 +652,7 @@ do_sigbus:
+ }
+
+ DEFINE_SPINLOCK(pgd_lock);
+-struct page *pgd_list;
++LIST_HEAD(pgd_list);
+
+ void vmalloc_sync_all(void)
+ {
+@@ -666,8 +672,7 @@ void vmalloc_sync_all(void)
+ if (pgd_none(*pgd_ref))
+ continue;
+ spin_lock(&pgd_lock);
+- for (page = pgd_list; page;
+- page = (struct page *)page->index) {
++ list_for_each_entry(page, &pgd_list, lru) {
+ pgd_t *pgd;
+ pgd = (pgd_t *)page_address(page) + pgd_index(address);
+ if (pgd_none(*pgd))
+Index: 10.3-2007-11-26/arch/x86_64/mm/init-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/mm/init-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/mm/init-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -25,10 +25,12 @@
+ #include <linux/bootmem.h>
+ #include <linux/proc_fs.h>
+ #include <linux/pci.h>
++#include <linux/pfn.h>
+ #include <linux/poison.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/module.h>
+ #include <linux/memory_hotplug.h>
++#include <linux/nmi.h>
+
+ #include <asm/processor.h>
+ #include <asm/system.h>
+@@ -51,7 +53,7 @@
+ #define Dprintk(x...)
+ #endif
+
+-struct dma_mapping_ops* dma_ops;
++const struct dma_mapping_ops* dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+ #if CONFIG_XEN_COMPAT <= 0x030002
+@@ -189,6 +191,13 @@ void show_mem(void)
+
+ for_each_online_pgdat(pgdat) {
+ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
++ /* this loop can take a while with 256 GB and 4k pages
++ so update the NMI watchdog */
++ if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) {
++ touch_nmi_watchdog();
++ }
++ if (!pfn_valid(pgdat->node_start_pfn + i))
++ continue;
+ page = pfn_to_page(pgdat->node_start_pfn + i);
+ total++;
+ if (PageReserved(page))
+@@ -374,7 +383,7 @@ __set_fixmap_user (enum fixed_addresses
+ set_pte_phys(address, phys, prot, SET_FIXMAP_USER);
+ }
+
+-unsigned long __initdata table_start, table_end;
++unsigned long __meminitdata table_start, table_end;
+
+ static __meminit void *alloc_static_page(unsigned long *phys)
+ {
+@@ -391,7 +400,7 @@ static __meminit void *alloc_static_page
+ start_pfn++;
+ memset((void *)va, 0, PAGE_SIZE);
+ return (void *)va;
+-}
++}
+
+ #define PTE_SIZE PAGE_SIZE
+
+@@ -432,28 +441,46 @@ static inline int make_readonly(unsigned
+
+ #ifndef CONFIG_XEN
+ /* Must run before zap_low_mappings */
+-__init void *early_ioremap(unsigned long addr, unsigned long size)
++__meminit void *early_ioremap(unsigned long addr, unsigned long size)
+ {
+- unsigned long map = round_down(addr, LARGE_PAGE_SIZE);
+-
+- /* actually usually some more */
+- if (size >= LARGE_PAGE_SIZE) {
+- return NULL;
++ unsigned long vaddr;
++ pmd_t *pmd, *last_pmd;
++ int i, pmds;
++
++ pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
++ vaddr = __START_KERNEL_map;
++ pmd = level2_kernel_pgt;
++ last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
++ for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
++ for (i = 0; i < pmds; i++) {
++ if (pmd_present(pmd[i]))
++ goto next;
++ }
++ vaddr += addr & ~PMD_MASK;
++ addr &= PMD_MASK;
++ for (i = 0; i < pmds; i++, addr += PMD_SIZE)
++ set_pmd(pmd + i,__pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
++ __flush_tlb();
++ return (void *)vaddr;
++ next:
++ ;
+ }
+- set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
+- map += LARGE_PAGE_SIZE;
+- set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
+- __flush_tlb();
+- return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
++ printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
++ return NULL;
+ }
+
+ /* To avoid virtual aliases later */
+-__init void early_iounmap(void *addr, unsigned long size)
++__meminit void early_iounmap(void *addr, unsigned long size)
+ {
+- if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
+- printk("early_iounmap: bad address %p\n", addr);
+- set_pmd(temp_mappings[0].pmd, __pmd(0));
+- set_pmd(temp_mappings[1].pmd, __pmd(0));
++ unsigned long vaddr;
++ pmd_t *pmd;
++ int i, pmds;
++
++ vaddr = (unsigned long)addr;
++ pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
++ pmd = level2_kernel_pgt + pmd_index(vaddr);
++ for (i = 0; i < pmds; i++)
++ pmd_clear(pmd + i);
+ __flush_tlb();
+ }
+ #endif
+@@ -787,14 +814,6 @@ void __meminit init_memory_mapping(unsig
+ __flush_tlb_all();
+ }
+
+-void __cpuinit zap_low_mappings(int cpu)
+-{
+- /* this is not required for Xen */
+-#if 0
+- swap_low_mappings();
+-#endif
+-}
+-
+ #ifndef CONFIG_NUMA
+ void __init paging_init(void)
+ {
+@@ -986,17 +1005,6 @@ void __init mem_init(void)
+ reservedpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10);
+-
+-#ifndef CONFIG_XEN
+-#ifdef CONFIG_SMP
+- /*
+- * Sync boot_level4_pgt mappings with the init_level4_pgt
+- * except for the low identity mappings which are already zapped
+- * in init_level4_pgt. This sync-up is essential for AP's bringup
+- */
+- memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
+-#endif
+-#endif
+ }
+
+ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+@@ -1006,7 +1014,7 @@ void free_init_pages(char *what, unsigne
+ if (begin >= end)
+ return;
+
+- printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
++ printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ init_page_count(virt_to_page(addr));
+@@ -1015,24 +1023,17 @@ void free_init_pages(char *what, unsigne
+ if (addr >= __START_KERNEL_map) {
+ /* make_readonly() reports all kernel addresses. */
+ __make_page_writable(__va(__pa(addr)));
+- if (HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
+- pgd_t *pgd = pgd_offset_k(addr);
+- pud_t *pud = pud_offset(pgd, addr);
+- pmd_t *pmd = pmd_offset(pud, addr);
+- pte_t *pte = pte_offset_kernel(pmd, addr);
+-
+- xen_l1_entry_update(pte, __pte(0)); /* fallback */
+- }
++ change_page_attr_addr(addr, 1, __pgprot(0));
+ }
+ free_page(addr);
+ totalram_pages++;
+ }
++ if (addr > __START_KERNEL_map)
++ global_flush_tlb();
+ }
+
+ void free_initmem(void)
+ {
+- memset(__initdata_begin, POISON_FREE_INITDATA,
+- __initdata_end - __initdata_begin);
+ free_init_pages("unused kernel memory",
+ (unsigned long)(&__init_begin),
+ (unsigned long)(&__init_end));
+@@ -1042,13 +1043,28 @@ void free_initmem(void)
+
+ void mark_rodata_ro(void)
+ {
+- unsigned long addr = (unsigned long)__start_rodata;
++ unsigned long start = (unsigned long)_stext, end;
++
++#ifdef CONFIG_HOTPLUG_CPU
++ /* It must still be possible to apply SMP alternatives. */
++ if (num_possible_cpus() > 1)
++ start = (unsigned long)_etext;
++#endif
++
++#ifdef CONFIG_KPROBES
++ start = (unsigned long)__start_rodata;
++#endif
++
++ end = (unsigned long)__end_rodata;
++ start = (start + PAGE_SIZE - 1) & PAGE_MASK;
++ end &= PAGE_MASK;
++ if (end <= start)
++ return;
+
+- for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
+- change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
++ change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO);
+
+- printk ("Write protecting the kernel read-only data: %luk\n",
+- (__end_rodata - __start_rodata) >> 10);
++ printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
++ (end - start) >> 10);
+
+ /*
+ * change_page_attr_addr() requires a global_flush_tlb() call after it.
+@@ -1201,3 +1217,11 @@ int in_gate_area_no_task(unsigned long a
+ {
+ return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
+ }
++
++#ifndef CONFIG_XEN
++void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
++{
++ return __alloc_bootmem_core(pgdat->bdata, size,
++ SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0);
++}
++#endif
+Index: 10.3-2007-11-26/arch/x86_64/mm/pageattr-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/mm/pageattr-xen.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/arch/x86_64/mm/pageattr-xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -146,13 +146,13 @@ void mm_pin_all(void)
+ preempt_enable();
+ }
+
+-void _arch_dup_mmap(struct mm_struct *mm)
++void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+ {
+ if (!mm->context.pinned)
+ mm_pin(mm);
+ }
+
+-void _arch_exit_mmap(struct mm_struct *mm)
++void arch_exit_mmap(struct mm_struct *mm)
+ {
+ struct task_struct *tsk = current;
+
+@@ -268,10 +268,11 @@ static void flush_kernel_map(void *arg)
+ struct page *pg;
+
+ /* When clflush is available always use it because it is
+- much cheaper than WBINVD */
+- if (!cpu_has_clflush)
++ much cheaper than WBINVD. Disable clflush for now because
++ the high level code is not ready yet */
++ if (1 || !cpu_has_clflush)
+ asm volatile("wbinvd" ::: "memory");
+- list_for_each_entry(pg, l, lru) {
++ else list_for_each_entry(pg, l, lru) {
+ void *adr = page_address(pg);
+ if (cpu_has_clflush)
+ cache_flush_page(adr);
+@@ -385,16 +386,24 @@ __change_page_attr(unsigned long address
+ */
+ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
+ {
+- int err = 0;
++ int err = 0, kernel_map = 0;
+ int i;
+
++ if (address >= __START_KERNEL_map
++ && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
++ address = (unsigned long)__va(__pa(address));
++ kernel_map = 1;
++ }
++
+ down_write(&init_mm.mmap_sem);
+ for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
+ unsigned long pfn = __pa(address) >> PAGE_SHIFT;
+
+- err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
+- if (err)
+- break;
++ if (!kernel_map || pte_present(pfn_pte(0, prot))) {
++ err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
++ if (err)
++ break;
++ }
+ /* Handle kernel mapping too which aliases part of the
+ * lowmem */
+ if (__pa(address) < KERNEL_TEXT_SIZE) {
+Index: 10.3-2007-11-26/drivers/char/tpm/tpm_xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/char/tpm/tpm_xen.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/char/tpm/tpm_xen.c 2007-10-22 13:58:57.000000000 +0200
+@@ -462,7 +462,7 @@ static int tpmif_connect(struct xenbus_d
+ tp->backend_id = domid;
+
+ err = bind_listening_port_to_irqhandler(
+- domid, tpmif_int, SA_SAMPLE_RANDOM, "tpmif", tp);
++ domid, tpmif_int, IRQF_SAMPLE_RANDOM, "tpmif", tp);
+ if (err <= 0) {
+ WPRINTK("bind_listening_port_to_irqhandler failed "
+ "(err=%d)\n", err);
+Index: 10.3-2007-11-26/drivers/xen/blkfront/blkfront.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/blkfront/blkfront.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/blkfront/blkfront.c 2007-10-22 13:58:57.000000000 +0200
+@@ -236,7 +236,7 @@ static int setup_blkring(struct xenbus_d
+ info->ring_ref = err;
+
+ err = bind_listening_port_to_irqhandler(
+- dev->otherend_id, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
++ dev->otherend_id, blkif_int, IRQF_SAMPLE_RANDOM, "blkif", info);
+ if (err <= 0) {
+ xenbus_dev_fatal(dev, err,
+ "bind_listening_port_to_irqhandler");
+Index: 10.3-2007-11-26/drivers/xen/char/mem.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/char/mem.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/char/mem.c 2007-10-22 13:58:57.000000000 +0200
+@@ -18,7 +18,6 @@
+ #include <linux/raw.h>
+ #include <linux/tty.h>
+ #include <linux/capability.h>
+-#include <linux/smp_lock.h>
+ #include <linux/ptrace.h>
+ #include <linux/device.h>
+ #include <asm/pgalloc.h>
+Index: 10.3-2007-11-26/drivers/xen/core/hypervisor_sysfs.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/core/hypervisor_sysfs.c 2007-09-03 09:52:56.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/core/hypervisor_sysfs.c 2007-10-22 13:58:57.000000000 +0200
+@@ -49,7 +49,7 @@ static int __init hypervisor_subsys_init
+ if (!is_running_on_xen())
+ return -ENODEV;
+
+- hypervisor_subsys.kset.kobj.ktype = &hyp_sysfs_kobj_type;
++ hypervisor_subsys.kobj.ktype = &hyp_sysfs_kobj_type;
+ return 0;
+ }
+
+Index: 10.3-2007-11-26/drivers/xen/core/smpboot.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/core/smpboot.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/core/smpboot.c 2007-10-22 13:58:57.000000000 +0200
+@@ -127,7 +127,7 @@ static int xen_smp_intr_init(unsigned in
+ rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
+ cpu,
+ smp_reschedule_interrupt,
+- SA_INTERRUPT,
++ IRQF_DISABLED,
+ resched_name[cpu],
+ NULL);
+ if (rc < 0)
+@@ -138,7 +138,7 @@ static int xen_smp_intr_init(unsigned in
+ rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
+ cpu,
+ smp_call_function_interrupt,
+- SA_INTERRUPT,
++ IRQF_DISABLED,
+ callfunc_name[cpu],
+ NULL);
+ if (rc < 0)
+@@ -171,12 +171,7 @@ static void xen_smp_intr_exit(unsigned i
+
+ void cpu_bringup(void)
+ {
+-#ifdef __i386__
+- cpu_set_gdt(current_thread_info()->cpu);
+- secondary_cpu_init();
+-#else
+ cpu_init();
+-#endif
+ touch_softlockup_watchdog();
+ preempt_disable();
+ local_irq_enable();
+@@ -194,8 +189,6 @@ static void cpu_initialize_context(unsig
+ struct task_struct *idle = idle_task(cpu);
+ #ifdef __x86_64__
+ struct desc_ptr *gdt_descr = &cpu_gdt_descr[cpu];
+-#else
+- struct Xgt_desc_struct *gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+ #endif
+
+ if (cpu_test_and_set(cpu, cpu_initialized_map))
+@@ -218,16 +211,18 @@ static void cpu_initialize_context(unsig
+
+ ctxt.ldt_ents = 0;
+
+- ctxt.gdt_frames[0] = virt_to_mfn(gdt_descr->address);
+- ctxt.gdt_ents = gdt_descr->size / 8;
+-
+ #ifdef __i386__
++ ctxt.gdt_frames[0] = virt_to_mfn(get_cpu_gdt_table(cpu));
++ ctxt.gdt_ents = GDT_SIZE / 8;
++
+ ctxt.user_regs.cs = __KERNEL_CS;
+ ctxt.user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
+
+ ctxt.kernel_ss = __KERNEL_DS;
+ ctxt.kernel_sp = idle->thread.esp0;
+
++ ctxt.user_regs.fs = __KERNEL_PERCPU;
++
+ ctxt.event_callback_cs = __KERNEL_CS;
+ ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
+ ctxt.failsafe_callback_cs = __KERNEL_CS;
+@@ -235,6 +230,9 @@ static void cpu_initialize_context(unsig
+
+ ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
+ #else /* __x86_64__ */
++ ctxt.gdt_frames[0] = virt_to_mfn(gdt_descr->address);
++ ctxt.gdt_ents = gdt_descr->size / 8;
++
+ ctxt.user_regs.cs = __KERNEL_CS;
+ ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
+
+@@ -259,9 +257,8 @@ void __init smp_prepare_cpus(unsigned in
+ struct task_struct *idle;
+ #ifdef __x86_64__
+ struct desc_ptr *gdt_descr;
+-#else
+- struct Xgt_desc_struct *gdt_descr;
+ #endif
++ void *gdt_addr;
+
+ boot_cpu_data.apicid = 0;
+ cpu_data[0] = boot_cpu_data;
+@@ -308,14 +305,13 @@ void __init smp_prepare_cpus(unsigned in
+ }
+ gdt_descr->size = GDT_SIZE;
+ memcpy((void *)gdt_descr->address, cpu_gdt_table, GDT_SIZE);
++ gdt_addr = (void *)gdt_descr->address;
+ #else
+- if (unlikely(!init_gdt(cpu, idle)))
+- continue;
+- gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++ init_gdt(cpu);
++ gdt_addr = get_cpu_gdt_table(cpu);
+ #endif
+- make_page_readonly(
+- (void *)gdt_descr->address,
+- XENFEAT_writable_descriptor_tables);
++ make_page_readonly(gdt_addr,
++ XENFEAT_writable_descriptor_tables);
+
+ cpu_data[cpu] = boot_cpu_data;
+ cpu_data[cpu].apicid = cpu;
+@@ -326,7 +322,9 @@ void __init smp_prepare_cpus(unsigned in
+ #ifdef __x86_64__
+ cpu_pda(cpu)->pcurrent = idle;
+ cpu_pda(cpu)->cpunumber = cpu;
+- clear_ti_thread_flag(idle->thread_info, TIF_FORK);
++ clear_ti_thread_flag(task_thread_info(idle), TIF_FORK);
++#else
++ per_cpu(current_task, cpu) = idle;
+ #endif
+
+ irq_ctx_init(cpu);
+@@ -351,8 +349,12 @@ void __init smp_prepare_cpus(unsigned in
+ #endif
+ }
+
+-void __devinit smp_prepare_boot_cpu(void)
++void __init smp_prepare_boot_cpu(void)
+ {
++#ifdef __i386__
++ init_gdt(smp_processor_id());
++ switch_to_new_gdt();
++#endif
+ prefill_possible_map();
+ }
+
+Index: 10.3-2007-11-26/drivers/xen/core/xen_sysfs.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/core/xen_sysfs.c 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/drivers/xen/core/xen_sysfs.c 2007-10-22 13:58:57.000000000 +0200
+@@ -28,12 +28,12 @@ HYPERVISOR_ATTR_RO(type);
+
+ static int __init xen_sysfs_type_init(void)
+ {
+- return sysfs_create_file(&hypervisor_subsys.kset.kobj, &type_attr.attr);
++ return sysfs_create_file(&hypervisor_subsys.kobj, &type_attr.attr);
+ }
+
+ static void xen_sysfs_type_destroy(void)
+ {
+- sysfs_remove_file(&hypervisor_subsys.kset.kobj, &type_attr.attr);
++ sysfs_remove_file(&hypervisor_subsys.kobj, &type_attr.attr);
+ }
+
+ /* xen version attributes */
+@@ -89,13 +89,13 @@ static struct attribute_group version_gr
+
+ static int __init xen_sysfs_version_init(void)
+ {
+- return sysfs_create_group(&hypervisor_subsys.kset.kobj,
++ return sysfs_create_group(&hypervisor_subsys.kobj,
+ &version_group);
+ }
+
+ static void xen_sysfs_version_destroy(void)
+ {
+- sysfs_remove_group(&hypervisor_subsys.kset.kobj, &version_group);
++ sysfs_remove_group(&hypervisor_subsys.kobj, &version_group);
+ }
+
+ /* UUID */
+@@ -121,12 +121,12 @@ HYPERVISOR_ATTR_RO(uuid);
+
+ static int __init xen_sysfs_uuid_init(void)
+ {
+- return sysfs_create_file(&hypervisor_subsys.kset.kobj, &uuid_attr.attr);
++ return sysfs_create_file(&hypervisor_subsys.kobj, &uuid_attr.attr);
+ }
+
+ static void xen_sysfs_uuid_destroy(void)
+ {
+- sysfs_remove_file(&hypervisor_subsys.kset.kobj, &uuid_attr.attr);
++ sysfs_remove_file(&hypervisor_subsys.kobj, &uuid_attr.attr);
+ }
+
+ /* xen compilation attributes */
+@@ -199,13 +199,13 @@ static struct attribute_group xen_compil
+
+ int __init static xen_compilation_init(void)
+ {
+- return sysfs_create_group(&hypervisor_subsys.kset.kobj,
++ return sysfs_create_group(&hypervisor_subsys.kobj,
+ &xen_compilation_group);
+ }
+
+ static void xen_compilation_destroy(void)
+ {
+- sysfs_remove_group(&hypervisor_subsys.kset.kobj,
++ sysfs_remove_group(&hypervisor_subsys.kobj,
+ &xen_compilation_group);
+ }
+
+@@ -320,13 +320,13 @@ static struct attribute_group xen_proper
+
+ static int __init xen_properties_init(void)
+ {
+- return sysfs_create_group(&hypervisor_subsys.kset.kobj,
++ return sysfs_create_group(&hypervisor_subsys.kobj,
+ &xen_properties_group);
+ }
+
+ static void xen_properties_destroy(void)
+ {
+- sysfs_remove_group(&hypervisor_subsys.kset.kobj,
++ sysfs_remove_group(&hypervisor_subsys.kobj,
+ &xen_properties_group);
+ }
+
+Index: 10.3-2007-11-26/drivers/xen/netback/netback.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/netback/netback.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/netback/netback.c 2007-10-22 13:58:57.000000000 +0200
+@@ -156,7 +156,7 @@ static struct sk_buff *netbk_copy_skb(st
+ goto err;
+
+ skb_reserve(nskb, 16 + NET_IP_ALIGN);
+- headlen = nskb->end - nskb->data;
++ headlen = skb_end_pointer(nskb) - nskb->data;
+ if (headlen > skb_headlen(skb))
+ headlen = skb_headlen(skb);
+ ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
+@@ -202,11 +202,15 @@ static struct sk_buff *netbk_copy_skb(st
+ len -= copy;
+ }
+
++#ifdef NET_SKBUFF_DATA_USES_OFFSET
++ offset = 0;
++#else
+ offset = nskb->data - skb->data;
++#endif
+
+- nskb->h.raw = skb->h.raw + offset;
+- nskb->nh.raw = skb->nh.raw + offset;
+- nskb->mac.raw = skb->mac.raw + offset;
++ nskb->transport_header = skb->transport_header + offset;
++ nskb->network_header = skb->network_header + offset;
++ nskb->mac_header = skb->mac_header + offset;
+
+ return nskb;
+
+@@ -1483,7 +1487,7 @@ static int __init netback_init(void)
+ (void)bind_virq_to_irqhandler(VIRQ_DEBUG,
+ 0,
+ netif_be_dbg,
+- SA_SHIRQ,
++ IRQF_SHARED,
+ "net-be-dbg",
+ &netif_be_dbg);
+ #endif
+Index: 10.3-2007-11-26/drivers/xen/netfront/netfront.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/netfront/netfront.c 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/netfront/netfront.c 2007-10-22 13:58:57.000000000 +0200
+@@ -533,7 +533,7 @@ static int setup_device(struct xenbus_de
+ memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
+
+ err = bind_listening_port_to_irqhandler(
+- dev->otherend_id, netif_int, SA_SAMPLE_RANDOM, netdev->name,
++ dev->otherend_id, netif_int, IRQF_SAMPLE_RANDOM, netdev->name,
+ netdev);
+ if (err < 0)
+ goto fail;
+Index: 10.3-2007-11-26/drivers/xen/pciback/xenbus.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/pciback/xenbus.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/pciback/xenbus.c 2007-10-22 13:58:57.000000000 +0200
+@@ -86,7 +86,7 @@ static int pciback_do_attach(struct pcib
+
+ err = bind_interdomain_evtchn_to_irqhandler(
+ pdev->xdev->otherend_id, remote_evtchn, pciback_handle_event,
+- SA_SAMPLE_RANDOM, "pciback", pdev);
++ IRQF_SAMPLE_RANDOM, "pciback", pdev);
+ if (err < 0) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error binding event channel to IRQ");
+Index: 10.3-2007-11-26/drivers/xen/xenoprof/xenoprofile.c
+===================================================================
+--- 10.3-2007-11-26.orig/drivers/xen/xenoprof/xenoprofile.c 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/drivers/xen/xenoprof/xenoprofile.c 2007-10-22 13:58:57.000000000 +0200
+@@ -219,7 +219,7 @@ static int bind_virq(void)
+ result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
+ i,
+ xenoprof_ovf_interrupt,
+- SA_INTERRUPT,
++ IRQF_DISABLED,
+ "xenoprof",
+ NULL);
+
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/agp.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/agp.h 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/agp.h 2007-10-22 13:58:57.000000000 +0200
+@@ -13,8 +13,15 @@
+ * data corruption on some CPUs.
+ */
+
+-int map_page_into_agp(struct page *page);
+-int unmap_page_from_agp(struct page *page);
++/* Caller's responsibility to call global_flush_tlb() for
++ * performance reasons */
++#define map_page_into_agp(page) ( \
++ xen_create_contiguous_region((unsigned long)page_address(page), 0, 32) \
++ ?: change_page_attr(page, 1, PAGE_KERNEL_NOCACHE))
++#define unmap_page_from_agp(page) ( \
++ xen_destroy_contiguous_region((unsigned long)page_address(page), 0), \
++ /* only a fallback: xen_destroy_contiguous_region uses PAGE_KERNEL */ \
++ change_page_attr(page, 1, PAGE_KERNEL))
+ #define flush_agp_mappings() global_flush_tlb()
+
+ /* Could use CLFLUSH here if the cpu supports it. But then it would
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/desc.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/desc.h 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/desc.h 2007-10-22 13:58:57.000000000 +0200
+@@ -11,23 +11,24 @@
+
+ #include <asm/mmu.h>
+
+-extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
+-
+ struct Xgt_desc_struct {
+ unsigned short size;
+ unsigned long address __attribute__((packed));
+ unsigned short pad;
+ } __attribute__ ((packed));
+
+-extern struct Xgt_desc_struct idt_descr;
+-DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
+-extern struct Xgt_desc_struct early_gdt_descr;
++struct gdt_page
++{
++ struct desc_struct gdt[GDT_ENTRIES];
++} __attribute__((aligned(PAGE_SIZE)));
++DECLARE_PER_CPU(struct gdt_page, gdt_page);
+
+ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+ {
+- return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
++ return per_cpu(gdt_page, cpu).gdt;
+ }
+
++extern struct Xgt_desc_struct idt_descr;
+ extern struct desc_struct idt_table[];
+ extern void set_intr_gate(unsigned int irq, void * addr);
+
+@@ -55,51 +56,32 @@ static inline void pack_gate(__u32 *a, _
+ #define DESCTYPE_S 0x10 /* !system */
+
+ #ifndef CONFIG_XEN
+-#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
+-
+-#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
+-#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
++#define load_TR_desc() native_load_tr_desc()
++#define load_gdt(dtr) native_load_gdt(dtr)
++#define load_idt(dtr) native_load_idt(dtr)
+ #define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
+ #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
+
+-#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
+-#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
+-#define store_tr(tr) __asm__ ("str %0":"=m" (tr))
++#define store_gdt(dtr) native_store_gdt(dtr)
++#define store_idt(dtr) native_store_idt(dtr)
++#define store_tr(tr) (tr = native_store_tr())
+ #define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
+-#endif
+
+-#if TLS_SIZE != 24
+-# error update this code.
+-#endif
+-
+-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
+-{
+-#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), *(u64 *)&t->tls_array[i])
+- C(0); C(1); C(2);
+-#undef C
+-}
++#define load_TLS(t, cpu) native_load_tls(t, cpu)
++#define set_ldt native_set_ldt
+
+-#ifndef CONFIG_XEN
+ #define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+ #define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+ #define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+
+-static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
++static inline void write_dt_entry(struct desc_struct *dt,
++ int entry, u32 entry_low, u32 entry_high)
+ {
+- __u32 *lp = (__u32 *)((char *)dt + entry*8);
+- *lp = entry_a;
+- *(lp+1) = entry_b;
++ dt[entry].a = entry_low;
++ dt[entry].b = entry_high;
+ }
+-#define set_ldt native_set_ldt
+-#else
+-extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
+-extern int write_gdt_entry(void *gdt, int entry, __u32 entry_a, __u32 entry_b);
+-#define set_ldt(addr, entries) xen_set_ldt((unsigned long)(addr), entries)
+-#endif
+
+-#ifndef CONFIG_XEN
+-static inline fastcall void native_set_ldt(const void *addr,
+- unsigned int entries)
++static inline void native_set_ldt(const void *addr, unsigned int entries)
+ {
+ if (likely(entries == 0))
+ __asm__ __volatile__("lldt %w0"::"q" (0));
+@@ -114,6 +96,64 @@ static inline fastcall void native_set_l
+ __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
+ }
+ }
++
++
++static inline void native_load_tr_desc(void)
++{
++ asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
++}
++
++static inline void native_load_gdt(const struct Xgt_desc_struct *dtr)
++{
++ asm volatile("lgdt %0"::"m" (*dtr));
++}
++
++static inline void native_load_idt(const struct Xgt_desc_struct *dtr)
++{
++ asm volatile("lidt %0"::"m" (*dtr));
++}
++
++static inline void native_store_gdt(struct Xgt_desc_struct *dtr)
++{
++ asm ("sgdt %0":"=m" (*dtr));
++}
++
++static inline void native_store_idt(struct Xgt_desc_struct *dtr)
++{
++ asm ("sidt %0":"=m" (*dtr));
++}
++
++static inline unsigned long native_store_tr(void)
++{
++ unsigned long tr;
++ asm ("str %0":"=r" (tr));
++ return tr;
++}
++
++static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
++{
++ unsigned int i;
++ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
++
++ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
++ gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
++}
++#else
++#define load_TLS(t, cpu) xen_load_tls(t, cpu)
++#define set_ldt(addr, entries) xen_set_ldt((unsigned long)(addr), entries)
++
++extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
++extern int write_gdt_entry(void *gdt, int entry, __u32 entry_a, __u32 entry_b);
++
++static inline void xen_load_tls(struct thread_struct *t, unsigned int cpu)
++{
++ unsigned int i;
++ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
++
++ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
++ HYPERVISOR_update_descriptor(virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN + i]),
++ *(u64 *)&t->tls_array[i]);
++}
+ #endif
+
+ #ifndef CONFIG_X86_NO_IDT
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/fixmap.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/fixmap.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/fixmap.h 2007-10-22 13:58:57.000000000 +0200
+@@ -19,10 +19,8 @@
+ * the start of the fixmap.
+ */
+ extern unsigned long __FIXADDR_TOP;
+-#ifdef CONFIG_COMPAT_VDSO
+-#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
+-#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
+-#endif
++#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
++#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
+
+ #ifndef __ASSEMBLY__
+ #include <linux/kernel.h>
+@@ -85,6 +83,9 @@ enum fixed_addresses {
+ #ifdef CONFIG_PCI_MMCONFIG
+ FIX_PCIE_MCFG,
+ #endif
++#ifdef CONFIG_PARAVIRT
++ FIX_PARAVIRT_BOOTMAP,
++#endif
+ FIX_SHARED_INFO,
+ #define NR_FIX_ISAMAPS 256
+ FIX_ISAMAP_END,
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/highmem.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/highmem.h 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/highmem.h 2007-10-22 13:58:57.000000000 +0200
+@@ -67,12 +67,18 @@ extern void FASTCALL(kunmap_high(struct
+
+ void *kmap(struct page *page);
+ void kunmap(struct page *page);
++void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
+ void *kmap_atomic(struct page *page, enum km_type type);
+ void *kmap_atomic_pte(struct page *page, enum km_type type);
+ void kunmap_atomic(void *kvaddr, enum km_type type);
+ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+ struct page *kmap_atomic_to_page(void *ptr);
+
++#define kmap_atomic_pte(page, type) \
++ kmap_atomic_prot(page, type, \
++ test_bit(PG_pinned, &(page)->flags) \
++ ? PAGE_KERNEL_RO : kmap_prot)
++
+ #define flush_cache_kmaps() do { } while (0)
+
+ #endif /* __KERNEL__ */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/io.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/io.h 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/io.h 2007-10-22 13:58:57.000000000 +0200
+@@ -264,15 +264,18 @@ static inline void flush_write_buffers(v
+
+ #endif /* __KERNEL__ */
+
+-#define __SLOW_DOWN_IO "outb %%al,$0x80;"
++static inline void xen_io_delay(void)
++{
++ asm volatile("outb %%al,$0x80" : : : "memory");
++}
+
+ static inline void slow_down_io(void) {
+- __asm__ __volatile__(
+- __SLOW_DOWN_IO
++ xen_io_delay();
+ #ifdef REALLY_SLOW_IO
+- __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
++ xen_io_delay();
++ xen_io_delay();
++ xen_io_delay();
+ #endif
+- : : );
+ }
+
+ #ifdef CONFIG_X86_NUMAQ
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/irqflags.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/irqflags.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/irqflags.h 2007-10-22 13:58:57.000000000 +0200
+@@ -11,6 +11,43 @@
+ #define _ASM_IRQFLAGS_H
+
+ #ifndef __ASSEMBLY__
++#define xen_save_fl(void) (current_vcpu_info()->evtchn_upcall_mask)
++
++#define xen_restore_fl(f) \
++do { \
++ vcpu_info_t *_vcpu; \
++ barrier(); \
++ _vcpu = current_vcpu_info(); \
++ if ((_vcpu->evtchn_upcall_mask = (f)) == 0) { \
++ barrier(); /* unmask then check (avoid races) */\
++ if (unlikely(_vcpu->evtchn_upcall_pending)) \
++ force_evtchn_callback(); \
++ } \
++} while (0)
++
++#define xen_irq_disable() \
++do { \
++ current_vcpu_info()->evtchn_upcall_mask = 1; \
++ barrier(); \
++} while (0)
++
++#define xen_irq_enable() \
++do { \
++ vcpu_info_t *_vcpu; \
++ barrier(); \
++ _vcpu = current_vcpu_info(); \
++ _vcpu->evtchn_upcall_mask = 0; \
++ barrier(); /* unmask then check (avoid races) */ \
++ if (unlikely(_vcpu->evtchn_upcall_pending)) \
++ force_evtchn_callback(); \
++} while (0)
++
++void xen_safe_halt(void);
++
++void xen_halt(void);
++#endif /* __ASSEMBLY__ */
++
++#ifndef __ASSEMBLY__
+
+ /*
+ * The use of 'barrier' in the following reflects their use as local-lock
+@@ -20,48 +57,31 @@
+ * includes these barriers, for example.
+ */
+
+-#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
++#define __raw_local_save_flags(void) xen_save_fl()
+
+-#define raw_local_irq_restore(x) \
+-do { \
+- vcpu_info_t *_vcpu; \
+- barrier(); \
+- _vcpu = current_vcpu_info(); \
+- if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
+- barrier(); /* unmask then check (avoid races) */ \
+- if (unlikely(_vcpu->evtchn_upcall_pending)) \
+- force_evtchn_callback(); \
+- } \
+-} while (0)
++#define raw_local_irq_restore(flags) xen_restore_fl(flags)
+
+-#define raw_local_irq_disable() \
+-do { \
+- current_vcpu_info()->evtchn_upcall_mask = 1; \
+- barrier(); \
+-} while (0)
++#define raw_local_irq_disable() xen_irq_disable()
+
+-#define raw_local_irq_enable() \
+-do { \
+- vcpu_info_t *_vcpu; \
+- barrier(); \
+- _vcpu = current_vcpu_info(); \
+- _vcpu->evtchn_upcall_mask = 0; \
+- barrier(); /* unmask then check (avoid races) */ \
+- if (unlikely(_vcpu->evtchn_upcall_pending)) \
+- force_evtchn_callback(); \
+-} while (0)
++#define raw_local_irq_enable() xen_irq_enable()
+
+ /*
+ * Used in the idle loop; sti takes one instruction cycle
+ * to complete:
+ */
+-void raw_safe_halt(void);
++static inline void raw_safe_halt(void)
++{
++ xen_safe_halt();
++}
+
+ /*
+ * Used when interrupts are already enabled or to
+ * shutdown the processor:
+ */
+-void halt(void);
++static inline void halt(void)
++{
++ xen_halt();
++}
+
+ /*
+ * For spinlocks, etc:
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/mmu.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/mmu.h 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/mmu.h 2007-10-22 13:58:57.000000000 +0200
+@@ -18,12 +18,4 @@ typedef struct {
+ #endif
+ } mm_context_t;
+
+-/* mm/memory.c:exit_mmap hook */
+-extern void _arch_exit_mmap(struct mm_struct *mm);
+-#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
+-
+-/* kernel/fork.c:dup_mmap hook */
+-extern void _arch_dup_mmap(struct mm_struct *mm);
+-#define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm))
+-
+ #endif
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/mmu_context.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/mmu_context.h 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/mmu_context.h 2007-10-22 13:58:57.000000000 +0200
+@@ -6,6 +6,20 @@
+ #include <asm/pgalloc.h>
+ #include <asm/tlbflush.h>
+
++void arch_exit_mmap(struct mm_struct *mm);
++void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
++
++void mm_pin(struct mm_struct *mm);
++void mm_unpin(struct mm_struct *mm);
++void mm_pin_all(void);
++
++static inline void xen_activate_mm(struct mm_struct *prev,
++ struct mm_struct *next)
++{
++ if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
++ mm_pin(next);
++}
++
+ /*
+ * Used for LDT copy/destruction.
+ */
+@@ -37,10 +51,6 @@ static inline void __prepare_arch_switch
+ : : "r" (0) );
+ }
+
+-extern void mm_pin(struct mm_struct *mm);
+-extern void mm_unpin(struct mm_struct *mm);
+-void mm_pin_all(void);
+-
+ static inline void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+@@ -97,11 +107,10 @@ static inline void switch_mm(struct mm_s
+ #define deactivate_mm(tsk, mm) \
+ asm("movl %0,%%gs": :"r" (0));
+
+-static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+-{
+- if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
+- mm_pin(next);
+- switch_mm(prev, next, NULL);
+-}
++#define activate_mm(prev, next) \
++ do { \
++ xen_activate_mm(prev, next); \
++ switch_mm((prev),(next),NULL); \
++ } while(0)
+
+ #endif
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/page.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/page.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/page.h 2007-10-22 13:58:57.000000000 +0200
+@@ -66,6 +66,7 @@
+ * These are used to make use of C type-checking..
+ */
+ extern int nx_enabled;
++
+ #ifdef CONFIG_X86_PAE
+ extern unsigned long long __supported_pte_mask;
+ typedef struct { unsigned long pte_low, pte_high; } pte_t;
+@@ -74,69 +75,117 @@ typedef struct { unsigned long long pgd;
+ typedef struct { unsigned long long pgprot; } pgprot_t;
+ #define pgprot_val(x) ((x).pgprot)
+ #include <asm/maddr.h>
+-#define __pte(x) ({ unsigned long long _x = (x); \
+- if (_x & _PAGE_PRESENT) _x = pte_phys_to_machine(_x); \
+- ((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); })
+-#define __pgd(x) ({ unsigned long long _x = (x); \
+- (pgd_t) {((_x) & _PAGE_PRESENT) ? pte_phys_to_machine(_x) : (_x)}; })
+-#define __pmd(x) ({ unsigned long long _x = (x); \
+- (pmd_t) {((_x) & _PAGE_PRESENT) ? pte_phys_to_machine(_x) : (_x)}; })
+-static inline unsigned long long pte_val_ma(pte_t x)
+-{
+- return ((unsigned long long)x.pte_high << 32) | x.pte_low;
+-}
+-static inline unsigned long long pte_val(pte_t x)
++
++static inline unsigned long long xen_pgd_val(pgd_t pgd)
+ {
+- unsigned long long ret = pte_val_ma(x);
+- if (x.pte_low & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++ unsigned long long ret = pgd.pgd;
++ if (ret & _PAGE_PRESENT)
++ ret = pte_machine_to_phys(ret);
+ return ret;
+ }
+-static inline unsigned long long pmd_val(pmd_t x)
++
++static inline unsigned long long xen_pmd_val(pmd_t pmd)
+ {
+- unsigned long long ret = x.pmd;
++ unsigned long long ret = pmd.pmd;
+ #if CONFIG_XEN_COMPAT <= 0x030002
+- if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
++ if (ret)
++ ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
+ #else
+- if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++ if (ret & _PAGE_PRESENT)
++ ret = pte_machine_to_phys(ret);
+ #endif
+ return ret;
+ }
+-static inline unsigned long long pgd_val(pgd_t x)
++
++static inline unsigned long long pte_val_ma(pte_t pte)
+ {
+- unsigned long long ret = x.pgd;
+- if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++ return ((unsigned long long)pte.pte_high << 32) | pte.pte_low;
++}
++static inline unsigned long long xen_pte_val(pte_t pte)
++{
++ unsigned long long ret = pte_val_ma(pte);
++ if (pte.pte_low & _PAGE_PRESENT)
++ ret = pte_machine_to_phys(ret);
+ return ret;
+ }
++
++static inline pgd_t xen_make_pgd(unsigned long long val)
++{
++ if (val & _PAGE_PRESENT)
++ val = pte_phys_to_machine(val);
++ return (pgd_t) { val };
++}
++
++static inline pmd_t xen_make_pmd(unsigned long long val)
++{
++ if (val & _PAGE_PRESENT)
++ val = pte_phys_to_machine(val);
++ return (pmd_t) { val };
++}
++
++static inline pte_t xen_make_pte(unsigned long long val)
++{
++ if (val & _PAGE_PRESENT)
++ val = pte_phys_to_machine(val);
++ return (pte_t) { .pte_low = val, .pte_high = (val >> 32) } ;
++}
++
++#define pmd_val(x) xen_pmd_val(x)
++#define __pmd(x) xen_make_pmd(x)
++
+ #define HPAGE_SHIFT 21
+ #include <asm-generic/pgtable-nopud.h>
+-#else
++#else /* !CONFIG_X86_PAE */
+ typedef struct { unsigned long pte_low; } pte_t;
+ typedef struct { unsigned long pgd; } pgd_t;
+ typedef struct { unsigned long pgprot; } pgprot_t;
+ #define pgprot_val(x) ((x).pgprot)
+-#include <asm/maddr.h>
+ #define boot_pte_t pte_t /* or would you rather have a typedef */
+-#define pte_val(x) (((x).pte_low & _PAGE_PRESENT) ? \
+- machine_to_phys((x).pte_low) : \
+- (x).pte_low)
+-#define pte_val_ma(x) ((x).pte_low)
+-#define __pte(x) ({ unsigned long _x = (x); \
+- (pte_t) {((_x) & _PAGE_PRESENT) ? phys_to_machine(_x) : (_x)}; })
+-#define __pgd(x) ({ unsigned long _x = (x); \
+- (pgd_t) {((_x) & _PAGE_PRESENT) ? phys_to_machine(_x) : (_x)}; })
+-static inline unsigned long pgd_val(pgd_t x)
++#include <asm/maddr.h>
++
++static inline unsigned long xen_pgd_val(pgd_t pgd)
+ {
+- unsigned long ret = x.pgd;
++ unsigned long ret = pgd.pgd;
+ #if CONFIG_XEN_COMPAT <= 0x030002
+- if (ret) ret = machine_to_phys(ret) | _PAGE_PRESENT;
++ if (ret)
++ ret = machine_to_phys(ret) | _PAGE_PRESENT;
+ #else
+- if (ret & _PAGE_PRESENT) ret = machine_to_phys(ret);
++ if (ret & _PAGE_PRESENT)
++ ret = machine_to_phys(ret);
+ #endif
+ return ret;
+ }
++
++static inline unsigned long pte_val_ma(pte_t pte)
++{
++ return pte.pte_low;
++}
++static inline unsigned long xen_pte_val(pte_t pte)
++{
++ unsigned long ret = pte_val_ma(pte);
++ if (ret & _PAGE_PRESENT)
++ ret = machine_to_phys(ret);
++ return ret;
++}
++
++static inline pgd_t xen_make_pgd(unsigned long val)
++{
++ if (val & _PAGE_PRESENT)
++ val = phys_to_machine(val);
++ return (pgd_t) { val };
++}
++
++static inline pte_t xen_make_pte(unsigned long val)
++{
++ if (val & _PAGE_PRESENT)
++ val = phys_to_machine(val);
++ return (pte_t) { .pte_low = val };
++}
++
+ #define HPAGE_SHIFT 22
+ #include <asm-generic/pgtable-nopmd.h>
+-#endif
++#endif /* CONFIG_X86_PAE */
++
+ #define PTE_MASK PHYSICAL_PAGE_MASK
+
+ #ifdef CONFIG_HUGETLB_PAGE
+@@ -148,6 +197,11 @@ static inline unsigned long pgd_val(pgd_
+
+ #define __pgprot(x) ((pgprot_t) { (x) } )
+
++#define pgd_val(x) xen_pgd_val(x)
++#define __pgd(x) xen_make_pgd(x)
++#define pte_val(x) xen_pte_val(x)
++#define __pte(x) xen_make_pte(x)
++
+ #endif /* !__ASSEMBLY__ */
+
+ /* to align the pointer to the (next) page boundary */
+@@ -188,6 +242,7 @@ extern int page_is_ram(unsigned long pag
+ #define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET)
+ #endif
+
++
+ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+ #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
+ #define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
+@@ -212,9 +267,7 @@ extern int page_is_ram(unsigned long pag
+ #include <asm-generic/memory_model.h>
+ #include <asm-generic/page.h>
+
+-#ifndef CONFIG_COMPAT_VDSO
+ #define __HAVE_ARCH_GATE_AREA 1
+-#endif
+ #endif /* __KERNEL__ */
+
+ #endif /* _I386_PAGE_H */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgalloc.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/pgalloc.h 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgalloc.h 2007-10-22 13:58:57.000000000 +0200
+@@ -1,7 +1,6 @@
+ #ifndef _I386_PGALLOC_H
+ #define _I386_PGALLOC_H
+
+-#include <asm/fixmap.h>
+ #include <linux/threads.h>
+ #include <linux/mm.h> /* for struct page */
+ #include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
+@@ -69,6 +68,4 @@ do { \
+ #define pud_populate(mm, pmd, pte) BUG()
+ #endif
+
+-#define check_pgt_cache() do { } while (0)
+-
+ #endif /* _I386_PGALLOC_H */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h 2007-12-06 17:27:30.000000000 +0100
++++ /dev/null 1970-01-01 00:00:00.000000000 +0000
+@@ -1,20 +0,0 @@
+-#ifndef _I386_PGTABLE_2LEVEL_DEFS_H
+-#define _I386_PGTABLE_2LEVEL_DEFS_H
+-
+-#define HAVE_SHARED_KERNEL_PMD 0
+-
+-/*
+- * traditional i386 two-level paging structure:
+- */
+-
+-#define PGDIR_SHIFT 22
+-#define PTRS_PER_PGD 1024
+-
+-/*
+- * the i386 is two-level, so we don't really have any
+- * PMD directory physically.
+- */
+-
+-#define PTRS_PER_PTE 1024
+-
+-#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable-2level.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/pgtable-2level.h 2007-10-22 13:54:57.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable-2level.h 2007-10-22 13:58:57.000000000 +0200
+@@ -11,22 +11,43 @@
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+-#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+-
+-#define set_pte_at(_mm,addr,ptep,pteval) do { \
+- if (((_mm) != current->mm && (_mm) != &init_mm) || \
+- HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \
+- set_pte((ptep), (pteval)); \
+-} while (0)
+-
+-#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
++static inline void xen_set_pte(pte_t *ptep , pte_t pte)
++{
++ *ptep = pte;
++}
++static inline void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
++ pte_t *ptep , pte_t pte)
++{
++ if ((mm != current->mm && mm != &init_mm) ||
++ HYPERVISOR_update_va_mapping(addr, pte, 0))
++ xen_set_pte(ptep, pte);
++}
++static inline void xen_set_pmd(pmd_t *pmdp, pmd_t pmd)
++{
++ xen_l2_entry_update(pmdp, pmd);
++}
++#define set_pte(pteptr, pteval) xen_set_pte(pteptr, pteval)
++#define set_pte_at(mm,addr,ptep,pteval) xen_set_pte_at(mm, addr, ptep, pteval)
++#define set_pmd(pmdptr, pmdval) xen_set_pmd(pmdptr, pmdval)
+
+ #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
+
+ #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+ #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+
+-#define raw_ptep_get_and_clear(xp, pte) __pte_ma(xchg(&(xp)->pte_low, 0))
++static inline void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
++{
++ xen_set_pte_at(mm, addr, xp, __pte(0));
++}
++
++#ifdef CONFIG_SMP
++static inline pte_t xen_ptep_get_and_clear(pte_t *xp, pte_t res)
++{
++ return __pte_ma(xchg(&xp->pte_low, 0));
++}
++#else
++#define xen_ptep_get_and_clear(xp, res) xen_local_ptep_get_and_clear(xp, res)
++#endif
+
+ #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
+ #define ptep_clear_flush(vma, addr, ptep) \
+@@ -91,6 +112,4 @@ static inline int pte_exec_kernel(pte_t
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-void vmalloc_sync_all(void);
+-
+ #endif /* _I386_PGTABLE_2LEVEL_H */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h 2007-10-22 13:58:57.000000000 +0200
+@@ -1,7 +1,7 @@
+ #ifndef _I386_PGTABLE_3LEVEL_DEFS_H
+ #define _I386_PGTABLE_3LEVEL_DEFS_H
+
+-#define HAVE_SHARED_KERNEL_PMD 0
++#define SHARED_KERNEL_PMD 0
+
+ /*
+ * PGDIR_SHIFT determines what a top-level page table entry can map
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable-3level.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/pgtable-3level.h 2007-10-22 13:58:00.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable-3level.h 2007-10-22 13:58:57.000000000 +0200
+@@ -49,32 +49,40 @@ static inline int pte_exec_kernel(pte_t
+ * value and then use set_pte to update it. -ben
+ */
+
+-static inline void set_pte(pte_t *ptep, pte_t pte)
++static inline void xen_set_pte(pte_t *ptep, pte_t pte)
+ {
+ ptep->pte_high = pte.pte_high;
+ smp_wmb();
+ ptep->pte_low = pte.pte_low;
+ }
+-#define set_pte_atomic(pteptr,pteval) \
+- set_64bit((unsigned long long *)(pteptr),pte_val_ma(pteval))
+
+-#define set_pte_at(_mm,addr,ptep,pteval) do { \
+- if (((_mm) != current->mm && (_mm) != &init_mm) || \
+- HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \
+- set_pte((ptep), (pteval)); \
+-} while (0)
+-
+-#define set_pmd(pmdptr,pmdval) \
+- xen_l2_entry_update((pmdptr), (pmdval))
+-#define set_pud(pudptr,pudval) \
+- xen_l3_entry_update((pudptr), (pudval))
++static inline void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
++ pte_t *ptep , pte_t pte)
++{
++ if ((mm != current->mm && mm != &init_mm) ||
++ HYPERVISOR_update_va_mapping(addr, pte, 0))
++ xen_set_pte(ptep, pte);
++}
++
++static inline void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
++{
++ set_64bit((unsigned long long *)(ptep),pte_val_ma(pte));
++}
++static inline void xen_set_pmd(pmd_t *pmdp, pmd_t pmd)
++{
++ xen_l2_entry_update(pmdp, pmd);
++}
++static inline void xen_set_pud(pud_t *pudp, pud_t pud)
++{
++ xen_l3_entry_update(pudp, pud);
++}
+
+ /*
+ * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
+ * entry, so clear the bottom half first and enforce ordering with a compiler
+ * barrier.
+ */
+-static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++static inline void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+ if ((mm != current->mm && mm != &init_mm)
+ || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
+@@ -84,7 +92,18 @@ static inline void pte_clear(struct mm_s
+ }
+ }
+
+-#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
++static inline void xen_pmd_clear(pmd_t *pmd)
++{
++ xen_l2_entry_update(pmd, __pmd(0));
++}
++
++#define set_pte(ptep, pte) xen_set_pte(ptep, pte)
++#define set_pte_at(mm, addr, ptep, pte) xen_set_pte_at(mm, addr, ptep, pte)
++#define set_pte_atomic(ptep, pte) xen_set_pte_atomic(ptep, pte)
++#define set_pmd(pmdp, pmd) xen_set_pmd(pmdp, pmd)
++#define set_pud(pudp, pud) xen_set_pud(pudp, pud)
++#define pte_clear(mm, addr, ptep) xen_pte_clear(mm, addr, ptep)
++#define pmd_clear(pmd) xen_pmd_clear(pmd)
+
+ /*
+ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
+@@ -105,7 +124,8 @@ static inline void pud_clear (pud_t * pu
+ #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
+ pmd_index(address))
+
+-static inline pte_t raw_ptep_get_and_clear(pte_t *ptep, pte_t res)
++#ifdef CONFIG_SMP
++static inline pte_t xen_ptep_get_and_clear(pte_t *ptep, pte_t res)
+ {
+ uint64_t val = pte_val_ma(res);
+ if (__cmpxchg64(ptep, val, 0) != val) {
+@@ -116,6 +136,9 @@ static inline pte_t raw_ptep_get_and_cle
+ }
+ return res;
+ }
++#else
++#define xen_ptep_get_and_clear(xp, pte) xen_local_ptep_get_and_clear(xp, pte)
++#endif
+
+ #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
+ #define ptep_clear_flush(vma, addr, ptep) \
+@@ -160,13 +183,13 @@ extern unsigned long long __supported_pt
+ static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
+ {
+ return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
+- pgprot_val(pgprot)) & __supported_pte_mask);
++ pgprot_val(pgprot)) & __supported_pte_mask);
+ }
+
+ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+ {
+ return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
+- pgprot_val(pgprot)) & __supported_pte_mask);
++ pgprot_val(pgprot)) & __supported_pte_mask);
+ }
+
+ /*
+@@ -186,6 +209,4 @@ static inline pmd_t pfn_pmd(unsigned lon
+
+ #define __pmd_free_tlb(tlb, x) do { } while (0)
+
+-void vmalloc_sync_all(void);
+-
+ #endif /* _I386_PGTABLE_3LEVEL_H */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/pgtable.h 2007-10-22 14:08:56.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/pgtable.h 2007-10-22 14:09:14.000000000 +0200
+@@ -24,11 +24,11 @@
+ #include <linux/slab.h>
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
++#include <linux/sched.h>
+
+ /* Is this pagetable pinned? */
+ #define PG_pinned PG_arch_1
+
+-struct mm_struct;
+ struct vm_area_struct;
+
+ /*
+@@ -38,17 +38,16 @@ struct vm_area_struct;
+ #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+ extern unsigned long empty_zero_page[1024];
+ extern pgd_t *swapper_pg_dir;
+-extern struct kmem_cache *pgd_cache;
+ extern struct kmem_cache *pmd_cache;
+ extern spinlock_t pgd_lock;
+ extern struct page *pgd_list;
++void check_pgt_cache(void);
+
+ void pmd_ctor(void *, struct kmem_cache *, unsigned long);
+-void pgd_ctor(void *, struct kmem_cache *, unsigned long);
+-void pgd_dtor(void *, struct kmem_cache *, unsigned long);
+ void pgtable_cache_init(void);
+ void paging_init(void);
+
++
+ /*
+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
+ * implements both the traditional 2-level x86 page tables and the
+@@ -162,6 +161,7 @@ void paging_init(void);
+
+ extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
+ #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
++#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
+ #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
+ #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
+ #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+@@ -169,6 +169,7 @@ extern unsigned long long __PAGE_KERNEL,
+ #define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
+ #define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
+ #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
++#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
+ #define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
+ #define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
+ #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
+@@ -271,7 +272,13 @@ static inline pte_t pte_mkhuge(pte_t pte
+ */
+ #define pte_update(mm, addr, ptep) do { } while (0)
+ #define pte_update_defer(mm, addr, ptep) do { } while (0)
+-#define paravirt_map_pt_hook(slot, va, pfn) do { } while (0)
++
++/* local pte updates need not use xchg for locking */
++static inline pte_t xen_local_ptep_get_and_clear(pte_t *ptep, pte_t res)
++{
++ xen_set_pte(ptep, __pte(0));
++ return res;
++}
+
+ /*
+ * We only update the dirty/accessed state if we set
+@@ -282,17 +289,34 @@ static inline pte_t pte_mkhuge(pte_t pte
+ */
+ #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+ #define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
+-do { \
+- if (dirty) \
++({ \
++ int __changed = !pte_same(*(ptep), entry); \
++ if (__changed && (dirty)) \
+ ptep_establish(vma, address, ptep, entry); \
+-} while (0)
++ __changed; \
++})
+
+-/*
+- * We don't actually have these, but we want to advertise them so that
+- * we can encompass the flush here.
+- */
+ #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
++#define ptep_test_and_clear_dirty(vma, addr, ptep) ({ \
++ int __ret = 0; \
++ if (pte_dirty(*(ptep))) \
++ __ret = test_and_clear_bit(_PAGE_BIT_DIRTY, \
++ &(ptep)->pte_low); \
++ if (__ret) \
++ pte_update((vma)->vm_mm, addr, ptep); \
++ __ret; \
++})
++
+ #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
++#define ptep_test_and_clear_young(vma, addr, ptep) ({ \
++ int __ret = 0; \
++ if (pte_young(*(ptep))) \
++ __ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, \
++ &(ptep)->pte_low); \
++ if (__ret) \
++ pte_update((vma)->vm_mm, addr, ptep); \
++ __ret; \
++})
+
+ /*
+ * Rules for using ptep_establish: the pte MUST be a user pte, and
+@@ -319,7 +343,7 @@ do { \
+ int __dirty = pte_dirty(__pte); \
+ __pte = pte_mkclean(__pte); \
+ if (test_bit(PG_pinned, &virt_to_page((vma)->vm_mm->pgd)->flags)) \
+- ptep_set_access_flags(vma, address, ptep, __pte, __dirty); \
++ (void)ptep_set_access_flags(vma, address, ptep, __pte, __dirty); \
+ else if (__dirty) \
+ (ptep)->pte_low = __pte.pte_low; \
+ __dirty; \
+@@ -332,7 +356,7 @@ do { \
+ int __young = pte_young(__pte); \
+ __pte = pte_mkold(__pte); \
+ if (test_bit(PG_pinned, &virt_to_page((vma)->vm_mm->pgd)->flags)) \
+- ptep_set_access_flags(vma, address, ptep, __pte, __young); \
++ (void)ptep_set_access_flags(vma, address, ptep, __pte, __young); \
+ else if (__young) \
+ (ptep)->pte_low = __pte.pte_low; \
+ __young; \
+@@ -345,7 +369,7 @@ static inline pte_t ptep_get_and_clear(s
+ if (!pte_none(pte)
+ && (mm != &init_mm
+ || HYPERVISOR_update_va_mapping(addr, __pte(0), 0))) {
+- pte = raw_ptep_get_and_clear(ptep, pte);
++ pte = xen_ptep_get_and_clear(ptep, pte);
+ pte_update(mm, addr, ptep);
+ }
+ return pte;
+@@ -487,24 +511,10 @@ extern pte_t *lookup_address(unsigned lo
+ #endif
+
+ #if defined(CONFIG_HIGHPTE)
+-#define pte_offset_map(dir, address) \
+-({ \
+- pte_t *__ptep; \
+- unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
+- __ptep = (pte_t *)kmap_atomic_pte(pfn_to_page(pfn),KM_PTE0); \
+- paravirt_map_pt_hook(KM_PTE0,__ptep, pfn); \
+- __ptep = __ptep + pte_index(address); \
+- __ptep; \
+-})
+-#define pte_offset_map_nested(dir, address) \
+-({ \
+- pte_t *__ptep; \
+- unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
+- __ptep = (pte_t *)kmap_atomic_pte(pfn_to_page(pfn),KM_PTE1); \
+- paravirt_map_pt_hook(KM_PTE1,__ptep, pfn); \
+- __ptep = __ptep + pte_index(address); \
+- __ptep; \
+-})
++#define pte_offset_map(dir, address) \
++ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
++#define pte_offset_map_nested(dir, address) \
++ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
+ #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+ #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+ #else
+@@ -574,10 +584,6 @@ int touch_pte_range(struct mm_struct *mm
+ #define io_remap_pfn_range(vma,from,pfn,size,prot) \
+ direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO)
+
+-#define MK_IOSPACE_PFN(space, pfn) (pfn)
+-#define GET_IOSPACE(pfn) 0
+-#define GET_PFN(pfn) (pfn)
+-
+ #include <asm-generic/pgtable.h>
+
+ #endif /* _I386_PGTABLE_H */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/processor.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/processor.h 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/processor.h 2007-10-22 13:58:57.000000000 +0200
+@@ -21,6 +21,7 @@
+ #include <asm/percpu.h>
+ #include <linux/cpumask.h>
+ #include <linux/init.h>
++#include <asm/processor-flags.h>
+ #include <xen/interface/physdev.h>
+
+ /* flag for disabling the tsc */
+@@ -118,7 +119,8 @@ extern char ignore_fpu_irq;
+
+ void __init cpu_detect(struct cpuinfo_x86 *c);
+
+-extern void identify_cpu(struct cpuinfo_x86 *);
++extern void identify_boot_cpu(void);
++extern void identify_secondary_cpu(struct cpuinfo_x86 *);
+ extern void print_cpu_info(struct cpuinfo_x86 *);
+ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+ extern unsigned short num_cache_leaves;
+@@ -129,29 +131,8 @@ extern void detect_ht(struct cpuinfo_x86
+ static inline void detect_ht(struct cpuinfo_x86 *c) {}
+ #endif
+
+-/*
+- * EFLAGS bits
+- */
+-#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
+-#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
+-#define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */
+-#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
+-#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
+-#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
+-#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
+-#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
+-#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
+-#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
+-#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
+-#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
+-#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
+-#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
+-#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
+-#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+-#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+-
+-static inline fastcall void xen_cpuid(unsigned int *eax, unsigned int *ebx,
+- unsigned int *ecx, unsigned int *edx)
++static inline void xen_cpuid(unsigned int *eax, unsigned int *ebx,
++ unsigned int *ecx, unsigned int *edx)
+ {
+ /* ecx is often an input as well as an output. */
+ __asm__(XEN_CPUID
+@@ -165,21 +146,6 @@ static inline fastcall void xen_cpuid(un
+ #define load_cr3(pgdir) write_cr3(__pa(pgdir))
+
+ /*
+- * Intel CPU features in CR4
+- */
+-#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
+-#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
+-#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
+-#define X86_CR4_DE 0x0008 /* enable debugging extensions */
+-#define X86_CR4_PSE 0x0010 /* enable page size extensions */
+-#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
+-#define X86_CR4_MCE 0x0040 /* Machine check enable */
+-#define X86_CR4_PGE 0x0080 /* enable global pages */
+-#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
+-#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
+-#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
+-
+-/*
+ * Save the cr4 feature set we're using (ie
+ * Pentium 4MB enable and PPro Global page
+ * enable), so that any CPU's that boot up
+@@ -206,26 +172,6 @@ static inline void clear_in_cr4 (unsigne
+ }
+
+ /*
+- * NSC/Cyrix CPU configuration register indexes
+- */
+-
+-#define CX86_PCR0 0x20
+-#define CX86_GCR 0xb8
+-#define CX86_CCR0 0xc0
+-#define CX86_CCR1 0xc1
+-#define CX86_CCR2 0xc2
+-#define CX86_CCR3 0xc3
+-#define CX86_CCR4 0xe8
+-#define CX86_CCR5 0xe9
+-#define CX86_CCR6 0xea
+-#define CX86_CCR7 0xeb
+-#define CX86_PCR1 0xf0
+-#define CX86_DIR0 0xfe
+-#define CX86_DIR1 0xff
+-#define CX86_ARR_BASE 0xc4
+-#define CX86_RCR_BASE 0xdc
+-
+-/*
+ * NSC/Cyrix CPU indexed register access macros
+ */
+
+@@ -351,7 +297,8 @@ typedef struct {
+ struct thread_struct;
+
+ #ifndef CONFIG_X86_NO_TSS
+-struct tss_struct {
++/* This is the TSS defined by the hardware. */
++struct i386_hw_tss {
+ unsigned short back_link,__blh;
+ unsigned long esp0;
+ unsigned short ss0,__ss0h;
+@@ -375,6 +322,11 @@ struct tss_struct {
+ unsigned short gs, __gsh;
+ unsigned short ldt, __ldth;
+ unsigned short trace, io_bitmap_base;
++} __attribute__((packed));
++
++struct tss_struct {
++ struct i386_hw_tss x86_tss;
++
+ /*
+ * The extra 1 is there because the CPU will access an
+ * additional byte beyond the end of the IO permission
+@@ -428,10 +380,11 @@ struct thread_struct {
+ };
+
+ #define INIT_THREAD { \
++ .esp0 = sizeof(init_stack) + (long)&init_stack, \
+ .vm86_info = NULL, \
+ .sysenter_cs = __KERNEL_CS, \
+ .io_bitmap_ptr = NULL, \
+- .fs = __KERNEL_PDA, \
++ .fs = __KERNEL_PERCPU, \
+ }
+
+ /*
+@@ -441,10 +394,12 @@ struct thread_struct {
+ * be within the limit.
+ */
+ #define INIT_TSS { \
+- .esp0 = sizeof(init_stack) + (long)&init_stack, \
+- .ss0 = __KERNEL_DS, \
+- .ss1 = __KERNEL_CS, \
+- .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
++ .x86_tss = { \
++ .esp0 = sizeof(init_stack) + (long)&init_stack, \
++ .ss0 = __KERNEL_DS, \
++ .ss1 = __KERNEL_CS, \
++ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
++ }, \
+ .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
+ }
+
+@@ -551,36 +506,31 @@ static inline void rep_nop(void)
+
+ #define cpu_relax() rep_nop()
+
+-#define paravirt_enabled() 0
+-#define __cpuid xen_cpuid
+-
+ #ifndef CONFIG_X86_NO_TSS
+-static inline void __load_esp0(struct tss_struct *tss, struct thread_struct *thread)
++static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+ {
+- tss->esp0 = thread->esp0;
++ tss->x86_tss.esp0 = thread->esp0;
+ /* This can only happen when SEP is enabled, no need to test "SEP"arately */
+- if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+- tss->ss1 = thread->sysenter_cs;
++ if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
++ tss->x86_tss.ss1 = thread->sysenter_cs;
+ wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+ }
+ }
+-#define load_esp0(tss, thread) \
+- __load_esp0(tss, thread)
+ #else
+-#define load_esp0(tss, thread) \
++#define xen_load_esp0(tss, thread) \
+ HYPERVISOR_stack_switch(__KERNEL_DS, (thread)->esp0)
+ #endif
+
+
+-/*
+- * These special macros can be used to get or set a debugging register
+- */
+-#define get_debugreg(var, register) \
+- (var) = HYPERVISOR_get_debugreg((register))
+-#define set_debugreg(value, register) \
+- HYPERVISOR_set_debugreg((register), (value))
++static inline unsigned long xen_get_debugreg(int regno)
++{
++ return HYPERVISOR_get_debugreg(regno);
++}
+
+-#define set_iopl_mask xen_set_iopl_mask
++static inline void xen_set_debugreg(int regno, unsigned long value)
++{
++ HYPERVISOR_set_debugreg(regno, value);
++}
+
+ /*
+ * Set IOPL bits in EFLAGS from given mask
+@@ -595,6 +545,21 @@ static inline void xen_set_iopl_mask(uns
+ }
+
+
++#define paravirt_enabled() 0
++#define __cpuid xen_cpuid
++
++#define load_esp0 xen_load_esp0
++
++/*
++ * These special macros can be used to get or set a debugging register
++ */
++#define get_debugreg(var, register) \
++ (var) = xen_get_debugreg(register)
++#define set_debugreg(value, register) \
++ xen_set_debugreg(register, value)
++
++#define set_iopl_mask xen_set_iopl_mask
++
+ /*
+ * Generic CPUID function
+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
+@@ -747,8 +712,14 @@ extern unsigned long boot_option_idle_ov
+ extern void enable_sep_cpu(void);
+ extern int sysenter_setup(void);
+
+-extern int init_gdt(int cpu, struct task_struct *idle);
++/* Defined in head.S */
++extern struct Xgt_desc_struct early_gdt_descr;
++
+ extern void cpu_set_gdt(int);
+-extern void secondary_cpu_init(void);
++extern void switch_to_new_gdt(void);
++extern void cpu_init(void);
++extern void init_gdt(int cpu);
++
++extern int force_mwait;
+
+ #endif /* __ASM_I386_PROCESSOR_H */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/scatterlist.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/scatterlist.h 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/scatterlist.h 2007-10-22 13:58:57.000000000 +0200
+@@ -1,6 +1,8 @@
+ #ifndef _I386_SCATTERLIST_H
+ #define _I386_SCATTERLIST_H
+
++#include <asm/types.h>
++
+ struct scatterlist {
+ struct page *page;
+ unsigned int offset;
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/segment.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/segment.h 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/segment.h 2007-10-22 13:58:57.000000000 +0200
+@@ -39,7 +39,7 @@
+ * 25 - APM BIOS support
+ *
+ * 26 - ESPFIX small SS
+- * 27 - PDA [ per-cpu private data area ]
++ * 27 - per-cpu [ offset to per-cpu data area ]
+ * 28 - unused
+ * 29 - unused
+ * 30 - unused
+@@ -74,8 +74,12 @@
+ #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
+ #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
+
+-#define GDT_ENTRY_PDA (GDT_ENTRY_KERNEL_BASE + 15)
+-#define __KERNEL_PDA (GDT_ENTRY_PDA * 8)
++#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
++#ifdef CONFIG_SMP
++#define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
++#else
++#define __KERNEL_PERCPU 0
++#endif
+
+ #define GDT_ENTRY_DOUBLEFAULT_TSS 31
+
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/smp.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/smp.h 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/smp.h 2007-10-22 13:58:57.000000000 +0200
+@@ -8,19 +8,15 @@
+ #include <linux/kernel.h>
+ #include <linux/threads.h>
+ #include <linux/cpumask.h>
+-#include <asm/pda.h>
+ #endif
+
+-#ifdef CONFIG_X86_LOCAL_APIC
+-#ifndef __ASSEMBLY__
+-#include <asm/fixmap.h>
++#if defined(CONFIG_X86_LOCAL_APIC) && !defined(__ASSEMBLY__)
+ #include <asm/bitops.h>
+ #include <asm/mpspec.h>
++#include <asm/apic.h>
+ #ifdef CONFIG_X86_IO_APIC
+ #include <asm/io_apic.h>
+ #endif
+-#include <asm/apic.h>
+-#endif
+ #endif
+
+ #define BAD_APICID 0xFFu
+@@ -52,9 +48,76 @@ extern void cpu_exit_clear(void);
+ extern void cpu_uninit(void);
+ #endif
+
+-#ifndef CONFIG_PARAVIRT
++#ifndef CONFIG_XEN
++struct smp_ops
++{
++ void (*smp_prepare_boot_cpu)(void);
++ void (*smp_prepare_cpus)(unsigned max_cpus);
++ int (*cpu_up)(unsigned cpu);
++ void (*smp_cpus_done)(unsigned max_cpus);
++
++ void (*smp_send_stop)(void);
++ void (*smp_send_reschedule)(int cpu);
++ int (*smp_call_function_mask)(cpumask_t mask,
++ void (*func)(void *info), void *info,
++ int wait);
++};
++
++extern struct smp_ops smp_ops;
++
++static inline void smp_prepare_boot_cpu(void)
++{
++ smp_ops.smp_prepare_boot_cpu();
++}
++static inline void smp_prepare_cpus(unsigned int max_cpus)
++{
++ smp_ops.smp_prepare_cpus(max_cpus);
++}
++static inline int __cpu_up(unsigned int cpu)
++{
++ return smp_ops.cpu_up(cpu);
++}
++static inline void smp_cpus_done(unsigned int max_cpus)
++{
++ smp_ops.smp_cpus_done(max_cpus);
++}
++
++static inline void smp_send_stop(void)
++{
++ smp_ops.smp_send_stop();
++}
++static inline void smp_send_reschedule(int cpu)
++{
++ smp_ops.smp_send_reschedule(cpu);
++}
++static inline int smp_call_function_mask(cpumask_t mask,
++ void (*func) (void *info), void *info,
++ int wait)
++{
++ return smp_ops.smp_call_function_mask(mask, func, info, wait);
++}
++
++void native_smp_prepare_boot_cpu(void);
++void native_smp_prepare_cpus(unsigned int max_cpus);
++int native_cpu_up(unsigned int cpunum);
++void native_smp_cpus_done(unsigned int max_cpus);
++
+ #define startup_ipi_hook(phys_apicid, start_eip, start_esp) \
+ do { } while (0)
++
++#else
++
++
++void xen_smp_send_stop(void);
++void xen_smp_send_reschedule(int cpu);
++int xen_smp_call_function_mask(cpumask_t mask,
++ void (*func) (void *info), void *info,
++ int wait);
++
++#define smp_send_stop xen_smp_send_stop
++#define smp_send_reschedule xen_smp_send_reschedule
++#define smp_call_function_mask xen_smp_call_function_mask
++
+ #endif
+
+ /*
+@@ -62,7 +125,8 @@ do { } while (0)
+ * from the initial startup. We map APIC_BASE very early in page_setup(),
+ * so this is correct in the x86 case.
+ */
+-#define raw_smp_processor_id() (read_pda(cpu_number))
++DECLARE_PER_CPU(int, cpu_number);
++#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
+
+ extern cpumask_t cpu_possible_map;
+ #define cpu_callin_map cpu_possible_map
+@@ -73,20 +137,6 @@ static inline int num_booting_cpus(void)
+ return cpus_weight(cpu_possible_map);
+ }
+
+-#ifdef CONFIG_X86_LOCAL_APIC
+-
+-#ifdef APIC_DEFINITION
+-extern int hard_smp_processor_id(void);
+-#else
+-#include <mach_apicdef.h>
+-static inline int hard_smp_processor_id(void)
+-{
+- /* we don't want to mark this access volatile - bad code generation */
+- return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
+-}
+-#endif
+-#endif
+-
+ extern int safe_smp_processor_id(void);
+ extern int __cpu_disable(void);
+ extern void __cpu_die(unsigned int cpu);
+@@ -102,10 +152,31 @@ extern unsigned int num_processors;
+
+ #define NO_PROC_ID 0xFF /* No processor magic marker */
+
+-#endif
++#endif /* CONFIG_SMP */
+
+ #ifndef __ASSEMBLY__
+
++#ifdef CONFIG_X86_LOCAL_APIC
++
++#ifdef APIC_DEFINITION
++extern int hard_smp_processor_id(void);
++#else
++#include <mach_apicdef.h>
++static inline int hard_smp_processor_id(void)
++{
++ /* we don't want to mark this access volatile - bad code generation */
++ return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
++}
++#endif /* APIC_DEFINITION */
++
++#else /* CONFIG_X86_LOCAL_APIC */
++
++#ifndef CONFIG_SMP
++#define hard_smp_processor_id() 0
++#endif
++
++#endif /* CONFIG_X86_LOCAL_APIC */
++
+ extern u8 apicid_2_node[];
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/system.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/system.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/system.h 2007-10-22 13:58:57.000000000 +0200
+@@ -4,7 +4,7 @@
+ #include <linux/kernel.h>
+ #include <asm/segment.h>
+ #include <asm/cpufeature.h>
+-#include <linux/bitops.h> /* for LOCK_PREFIX */
++#include <asm/cmpxchg.h>
+ #include <asm/synch_bitops.h>
+ #include <asm/hypervisor.h>
+
+@@ -90,308 +90,102 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t"
+ #define savesegment(seg, value) \
+ asm volatile("mov %%" #seg ",%0":"=rm" (value))
+
+-#define read_cr0() ({ \
+- unsigned int __dummy; \
+- __asm__ __volatile__( \
+- "movl %%cr0,%0\n\t" \
+- :"=r" (__dummy)); \
+- __dummy; \
+-})
+-#define write_cr0(x) \
+- __asm__ __volatile__("movl %0,%%cr0": :"r" (x))
+-
+-#define read_cr2() (current_vcpu_info()->arch.cr2)
+-#define write_cr2(x) \
+- __asm__ __volatile__("movl %0,%%cr2": :"r" (x))
+-
+-#define read_cr3() ({ \
+- unsigned int __dummy; \
+- __asm__ ( \
+- "movl %%cr3,%0\n\t" \
+- :"=r" (__dummy)); \
+- __dummy = xen_cr3_to_pfn(__dummy); \
+- mfn_to_pfn(__dummy) << PAGE_SHIFT; \
+-})
+-#define write_cr3(x) ({ \
+- unsigned int __dummy = pfn_to_mfn((x) >> PAGE_SHIFT); \
+- __dummy = xen_pfn_to_cr3(__dummy); \
+- __asm__ __volatile__("movl %0,%%cr3": :"r" (__dummy)); \
+-})
+-#define read_cr4() ({ \
+- unsigned int __dummy; \
+- __asm__( \
+- "movl %%cr4,%0\n\t" \
+- :"=r" (__dummy)); \
+- __dummy; \
+-})
+-#define read_cr4_safe() ({ \
+- unsigned int __dummy; \
+- /* This could fault if %cr4 does not exist */ \
+- __asm__("1: movl %%cr4, %0 \n" \
+- "2: \n" \
+- ".section __ex_table,\"a\" \n" \
+- ".long 1b,2b \n" \
+- ".previous \n" \
+- : "=r" (__dummy): "0" (0)); \
+- __dummy; \
+-})
+-
+-#define write_cr4(x) \
+- __asm__ __volatile__("movl %0,%%cr4": :"r" (x))
+-
+-#define wbinvd() \
+- __asm__ __volatile__ ("wbinvd": : :"memory")
+-
+-/* Clear the 'TS' bit */
+-#define clts() (HYPERVISOR_fpu_taskswitch(0))
+-
+-/* Set the 'TS' bit */
+-#define stts() (HYPERVISOR_fpu_taskswitch(1))
+-
+-#endif /* __KERNEL__ */
+-
+-static inline unsigned long get_limit(unsigned long segment)
++static inline void xen_clts(void)
+ {
+- unsigned long __limit;
+- __asm__("lsll %1,%0"
+- :"=r" (__limit):"r" (segment));
+- return __limit+1;
++ HYPERVISOR_fpu_taskswitch(0);
+ }
+
+-#define nop() __asm__ __volatile__ ("nop")
+-
+-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+-
+-#define tas(ptr) (xchg((ptr),1))
+-
+-struct __xchg_dummy { unsigned long a[100]; };
+-#define __xg(x) ((struct __xchg_dummy *)(x))
++static inline unsigned long xen_read_cr0(void)
++{
++ unsigned long val;
++ asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
++ return val;
++}
+
++static inline void xen_write_cr0(unsigned long val)
++{
++ asm volatile("movl %0,%%cr0": :"r" (val));
++}
+
+-#ifdef CONFIG_X86_CMPXCHG64
++#define xen_read_cr2() (current_vcpu_info()->arch.cr2)
+
+-/*
+- * The semantics of XCHGCMP8B are a bit strange, this is why
+- * there is a loop and the loading of %%eax and %%edx has to
+- * be inside. This inlines well in most cases, the cached
+- * cost is around ~38 cycles. (in the future we might want
+- * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
+- * might have an implicit FPU-save as a cost, so it's not
+- * clear which path to go.)
+- *
+- * cmpxchg8b must be used with the lock prefix here to allow
+- * the instruction to be executed atomically, see page 3-102
+- * of the instruction set reference 24319102.pdf. We need
+- * the reader side to see the coherent 64bit value.
+- */
+-static inline void __set_64bit (unsigned long long * ptr,
+- unsigned int low, unsigned int high)
++static inline void xen_write_cr2(unsigned long val)
+ {
+- __asm__ __volatile__ (
+- "\n1:\t"
+- "movl (%0), %%eax\n\t"
+- "movl 4(%0), %%edx\n\t"
+- "lock cmpxchg8b (%0)\n\t"
+- "jnz 1b"
+- : /* no outputs */
+- : "D"(ptr),
+- "b"(low),
+- "c"(high)
+- : "ax","dx","memory");
++ asm volatile("movl %0,%%cr2": :"r" (val));
+ }
+
+-static inline void __set_64bit_constant (unsigned long long *ptr,
+- unsigned long long value)
++static inline unsigned long xen_read_cr3(void)
+ {
+- __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
++ unsigned long val;
++ asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
++ return mfn_to_pfn(xen_cr3_to_pfn(val)) << PAGE_SHIFT;
+ }
+-#define ll_low(x) *(((unsigned int*)&(x))+0)
+-#define ll_high(x) *(((unsigned int*)&(x))+1)
+
+-static inline void __set_64bit_var (unsigned long long *ptr,
+- unsigned long long value)
++static inline void xen_write_cr3(unsigned long val)
+ {
+- __set_64bit(ptr,ll_low(value), ll_high(value));
++ val = xen_pfn_to_cr3(pfn_to_mfn(val >> PAGE_SHIFT));
++ asm volatile("movl %0,%%cr3": :"r" (val));
+ }
+
+-#define set_64bit(ptr,value) \
+-(__builtin_constant_p(value) ? \
+- __set_64bit_constant(ptr, value) : \
+- __set_64bit_var(ptr, value) )
+-
+-#define _set_64bit(ptr,value) \
+-(__builtin_constant_p(value) ? \
+- __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
+- __set_64bit(ptr, ll_low(value), ll_high(value)) )
+-
+-#endif
+-
+-/*
+- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+- * Note 2: xchg has side effect, so that attribute volatile is necessary,
+- * but generally the primitive is invalid, *ptr is output argument. --ANK
+- */
+-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
++static inline unsigned long xen_read_cr4(void)
+ {
+- switch (size) {
+- case 1:
+- __asm__ __volatile__("xchgb %b0,%1"
+- :"=q" (x)
+- :"m" (*__xg(ptr)), "0" (x)
+- :"memory");
+- break;
+- case 2:
+- __asm__ __volatile__("xchgw %w0,%1"
+- :"=r" (x)
+- :"m" (*__xg(ptr)), "0" (x)
+- :"memory");
+- break;
+- case 4:
+- __asm__ __volatile__("xchgl %0,%1"
+- :"=r" (x)
+- :"m" (*__xg(ptr)), "0" (x)
+- :"memory");
+- break;
+- }
+- return x;
++ unsigned long val;
++ asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
++ return val;
+ }
+
+-/*
+- * Atomic compare and exchange. Compare OLD with MEM, if identical,
+- * store NEW in MEM. Return the initial value in MEM. Success is
+- * indicated by comparing RETURN with OLD.
+- */
+-
+-#ifdef CONFIG_X86_CMPXCHG
+-#define __HAVE_ARCH_CMPXCHG 1
+-#define cmpxchg(ptr,o,n)\
+- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+- (unsigned long)(n),sizeof(*(ptr))))
+-#define sync_cmpxchg(ptr,o,n)\
+- ((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
+- (unsigned long)(n),sizeof(*(ptr))))
+-#endif
++static inline unsigned long xen_read_cr4_safe(void)
++{
++ unsigned long val;
++ /* This could fault if %cr4 does not exist */
++ asm("1: movl %%cr4, %0 \n"
++ "2: \n"
++ ".section __ex_table,\"a\" \n"
++ ".long 1b,2b \n"
++ ".previous \n"
++ : "=r" (val): "0" (0));
++ return val;
++}
+
+-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+- unsigned long new, int size)
++static inline void xen_write_cr4(unsigned long val)
+ {
+- unsigned long prev;
+- switch (size) {
+- case 1:
+- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+- : "=a"(prev)
+- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+- : "memory");
+- return prev;
+- case 2:
+- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+- : "=a"(prev)
+- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+- : "memory");
+- return prev;
+- case 4:
+- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+- : "=a"(prev)
+- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+- : "memory");
+- return prev;
+- }
+- return old;
++ asm volatile("movl %0,%%cr4": :"r" (val));
+ }
+
+-/*
+- * Always use locked operations when touching memory shared with a
+- * hypervisor, since the system may be SMP even if the guest kernel
+- * isn't.
+- */
+-static inline unsigned long __sync_cmpxchg(volatile void *ptr,
+- unsigned long old,
+- unsigned long new, int size)
+-{
+- unsigned long prev;
+- switch (size) {
+- case 1:
+- __asm__ __volatile__("lock; cmpxchgb %b1,%2"
+- : "=a"(prev)
+- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+- : "memory");
+- return prev;
+- case 2:
+- __asm__ __volatile__("lock; cmpxchgw %w1,%2"
+- : "=a"(prev)
+- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+- : "memory");
+- return prev;
+- case 4:
+- __asm__ __volatile__("lock; cmpxchgl %1,%2"
+- : "=a"(prev)
+- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+- : "memory");
+- return prev;
+- }
+- return old;
++static inline void xen_wbinvd(void)
++{
++ asm volatile("wbinvd": : :"memory");
+ }
+
+-#ifndef CONFIG_X86_CMPXCHG
+-/*
+- * Building a kernel capable running on 80386. It may be necessary to
+- * simulate the cmpxchg on the 80386 CPU. For that purpose we define
+- * a function for each of the sizes we support.
+- */
++#define read_cr0() (xen_read_cr0())
++#define write_cr0(x) (xen_write_cr0(x))
++#define read_cr2() (xen_read_cr2())
++#define write_cr2(x) (xen_write_cr2(x))
++#define read_cr3() (xen_read_cr3())
++#define write_cr3(x) (xen_write_cr3(x))
++#define read_cr4() (xen_read_cr4())
++#define read_cr4_safe() (xen_read_cr4_safe())
++#define write_cr4(x) (xen_write_cr4(x))
++#define wbinvd() (xen_wbinvd())
+
+-extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
+-extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
+-extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
+-
+-static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
+- unsigned long new, int size)
+-{
+- switch (size) {
+- case 1:
+- return cmpxchg_386_u8(ptr, old, new);
+- case 2:
+- return cmpxchg_386_u16(ptr, old, new);
+- case 4:
+- return cmpxchg_386_u32(ptr, old, new);
+- }
+- return old;
+-}
+-
+-#define cmpxchg(ptr,o,n) \
+-({ \
+- __typeof__(*(ptr)) __ret; \
+- if (likely(boot_cpu_data.x86 > 3)) \
+- __ret = __cmpxchg((ptr), (unsigned long)(o), \
+- (unsigned long)(n), sizeof(*(ptr))); \
+- else \
+- __ret = cmpxchg_386((ptr), (unsigned long)(o), \
+- (unsigned long)(n), sizeof(*(ptr))); \
+- __ret; \
+-})
+-#endif
++/* Clear the 'TS' bit */
++#define clts() (xen_clts())
+
+-#ifdef CONFIG_X86_CMPXCHG64
++/* Set the 'TS' bit */
++#define stts() (HYPERVISOR_fpu_taskswitch(1))
+
+-static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
+- unsigned long long new)
++#endif /* __KERNEL__ */
++
++static inline unsigned long get_limit(unsigned long segment)
+ {
+- unsigned long long prev;
+- __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
+- : "=A"(prev)
+- : "b"((unsigned long)new),
+- "c"((unsigned long)(new >> 32)),
+- "m"(*__xg(ptr)),
+- "0"(old)
+- : "memory");
+- return prev;
+-}
+-
+-#define cmpxchg64(ptr,o,n)\
+- ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
+- (unsigned long long)(n)))
++ unsigned long __limit;
++ __asm__("lsll %1,%0"
++ :"=r" (__limit):"r" (segment));
++ return __limit+1;
++}
++
++#define nop() __asm__ __volatile__ ("nop")
+
+-#endif
+-
+ /*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/tlbflush.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/tlbflush.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/tlbflush.h 2007-10-22 13:58:57.000000000 +0200
+@@ -24,13 +24,19 @@
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
++ * - flush_tlb_others(cpumask, mm, va) flushes a TLBs on other cpus
+ *
+ * ..but the i386 has somewhat limited tlb flushing capabilities,
+ * and page-granular flushes are available only on i486 and up.
+ */
+
++#define TLB_FLUSH_ALL 0xffffffff
++
++
+ #ifndef CONFIG_SMP
+
++#include <linux/sched.h>
++
+ #define flush_tlb() __flush_tlb()
+ #define flush_tlb_all() __flush_tlb_all()
+ #define local_flush_tlb() __flush_tlb()
+@@ -55,7 +61,12 @@ static inline void flush_tlb_range(struc
+ __flush_tlb();
+ }
+
+-#else
++static inline void xen_flush_tlb_others(const cpumask_t *cpumask,
++ struct mm_struct *mm, unsigned long va)
++{
++}
++
++#else /* SMP */
+
+ #include <asm/smp.h>
+
+@@ -74,6 +85,9 @@ static inline void flush_tlb_range(struc
+ flush_tlb_mm(vma->vm_mm);
+ }
+
++void xen_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
++ unsigned long va);
++
+ #define TLBSTATE_OK 1
+ #define TLBSTATE_LAZY 2
+
+@@ -84,9 +98,10 @@ struct tlb_state
+ char __cacheline_padding[L1_CACHE_BYTES-8];
+ };
+ DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
++#endif /* SMP */
+
+-
+-#endif
++#define flush_tlb_others(mask, mm, va) \
++ xen_flush_tlb_others(&mask, mm, va)
+
+ #define flush_tlb_kernel_range(start, end) flush_tlb_all()
+
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/agp.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/agp.h 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/agp.h 2007-10-22 13:58:57.000000000 +0200
+@@ -11,8 +11,15 @@
+ * with different cachability attributes for the same page.
+ */
+
+-int map_page_into_agp(struct page *page);
+-int unmap_page_from_agp(struct page *page);
++/* Caller's responsibility to call global_flush_tlb() for
++ * performance reasons */
++#define map_page_into_agp(page) ( \
++ xen_create_contiguous_region((unsigned long)page_address(page), 0, 32) \
++ ?: change_page_attr(page, 1, PAGE_KERNEL_NOCACHE))
++#define unmap_page_from_agp(page) ( \
++ xen_destroy_contiguous_region((unsigned long)page_address(page), 0), \
++ /* only a fallback: xen_destroy_contiguous_region uses PAGE_KERNEL */ \
++ change_page_attr(page, 1, PAGE_KERNEL))
+ #define flush_agp_mappings() global_flush_tlb()
+
+ /* Could use CLFLUSH here if the cpu supports it. But then it would
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/desc.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/desc.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/desc.h 2007-10-22 13:58:57.000000000 +0200
+@@ -127,16 +127,6 @@ static inline void set_ldt_desc(unsigned
+ DESC_LDT, size * 8 - 1);
+ }
+
+-static inline void set_seg_base(unsigned cpu, int entry, void *base)
+-{
+- struct desc_struct *d = &cpu_gdt(cpu)[entry];
+- u32 addr = (u32)(u64)base;
+- BUG_ON((u64)base >> 32);
+- d->base0 = addr & 0xffff;
+- d->base1 = (addr >> 16) & 0xff;
+- d->base2 = (addr >> 24) & 0xff;
+-}
+-
+ #define LDT_entry_a(info) \
+ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+ /* Don't allow setting of the lm bit. It is useless anyways because
+@@ -165,23 +155,13 @@ static inline void set_seg_base(unsigned
+ (info)->useable == 0 && \
+ (info)->lm == 0)
+
+-#if TLS_SIZE != 24
+-# error update this code.
+-#endif
+-
+ static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
+ {
+-#if 0
++ unsigned int i;
+ u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
+- gdt[0] = t->tls_array[0];
+- gdt[1] = t->tls_array[1];
+- gdt[2] = t->tls_array[2];
+-#endif
+-#define C(i) \
+- HYPERVISOR_update_descriptor(virt_to_machine(&cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]), t->tls_array[i])
+
+- C(0); C(1); C(2);
+-#undef C
++ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
++ HYPERVISOR_update_descriptor(virt_to_machine(&gdt[i]), t->tls_array[i]);
+ }
+
+ /*
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/dma-mapping.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/dma-mapping.h 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/dma-mapping.h 2007-10-22 13:58:57.000000000 +0200
+@@ -51,7 +51,7 @@ struct dma_mapping_ops {
+ };
+
+ extern dma_addr_t bad_dma_address;
+-extern struct dma_mapping_ops* dma_ops;
++extern const struct dma_mapping_ops* dma_ops;
+ extern int iommu_merge;
+
+ #if 0
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/fixmap.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/fixmap.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/fixmap.h 2007-10-22 13:58:57.000000000 +0200
+@@ -15,7 +15,6 @@
+ #include <asm/apicdef.h>
+ #include <asm/page.h>
+ #include <asm/vsyscall.h>
+-#include <asm/vsyscall32.h>
+ #include <asm/acpi.h>
+
+ /*
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/irqflags.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/irqflags.h 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/irqflags.h 2007-10-22 13:58:57.000000000 +0200
+@@ -9,6 +9,7 @@
+ */
+ #ifndef _ASM_IRQFLAGS_H
+ #define _ASM_IRQFLAGS_H
++#include <asm/processor-flags.h>
+
+ #ifndef __ASSEMBLY__
+ /*
+@@ -50,19 +51,19 @@ static inline void raw_local_irq_disable
+ {
+ unsigned long flags = __raw_local_save_flags();
+
+- raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18));
++ raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
+ }
+
+ static inline void raw_local_irq_enable(void)
+ {
+ unsigned long flags = __raw_local_save_flags();
+
+- raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
++ raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
+ }
+
+ static inline int raw_irqs_disabled_flags(unsigned long flags)
+ {
+- return !(flags & (1<<9)) || (flags & (1 << 18));
++ return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
+ }
+
+ #else /* CONFIG_X86_VSMP */
+@@ -118,13 +119,21 @@ static inline int raw_irqs_disabled_flag
+ * Used in the idle loop; sti takes one instruction cycle
+ * to complete:
+ */
+-void raw_safe_halt(void);
++void xen_safe_halt(void);
++static inline void raw_safe_halt(void)
++{
++ xen_safe_halt();
++}
+
+ /*
+ * Used when interrupts are already enabled or to
+ * shutdown the processor:
+ */
+-void halt(void);
++void xen_halt(void);
++static inline void halt(void)
++{
++ xen_halt();
++}
+
+ #else /* __ASSEMBLY__: */
+ # ifdef CONFIG_TRACE_IRQFLAGS
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/mmu.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/mmu.h 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/mmu.h 2007-10-22 13:58:57.000000000 +0200
+@@ -25,14 +25,6 @@ typedef struct {
+ #ifdef CONFIG_XEN
+ extern struct list_head mm_unpinned;
+ extern spinlock_t mm_unpinned_lock;
+-
+-/* mm/memory.c:exit_mmap hook */
+-extern void _arch_exit_mmap(struct mm_struct *mm);
+-#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
+-
+-/* kernel/fork.c:dup_mmap hook */
+-extern void _arch_dup_mmap(struct mm_struct *mm);
+-#define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm))
+ #endif
+
+ #endif
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/mmu_context.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/mmu_context.h 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/mmu_context.h 2007-10-22 13:58:57.000000000 +0200
+@@ -9,6 +9,9 @@
+ #include <asm/pgtable.h>
+ #include <asm/tlbflush.h>
+
++void arch_exit_mmap(struct mm_struct *mm);
++void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
++
+ /*
+ * possibly do the LDT unload here?
+ */
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/msr.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/msr.h 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/msr.h 2007-10-22 13:58:57.000000000 +0200
+@@ -1,7 +1,10 @@
+ #ifndef X86_64_MSR_H
+ #define X86_64_MSR_H 1
+
++#include <asm/msr-index.h>
++
+ #ifndef __ASSEMBLY__
++#include <linux/errno.h>
+ /*
+ * Access to machine-specific registers (available on 586 and better only)
+ * Note: the rd* operations modify the parameters directly (without using
+@@ -157,12 +160,11 @@ static inline unsigned int cpuid_edx(uns
+ return edx;
+ }
+
+-#define MSR_IA32_UCODE_WRITE 0x79
+-#define MSR_IA32_UCODE_REV 0x8b
+-
+ #ifdef CONFIG_SMP
+ void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+ void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
++int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
++int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+ #else /* CONFIG_SMP */
+ static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+ {
+@@ -172,269 +174,14 @@ static inline void wrmsr_on_cpu(unsigned
+ {
+ wrmsr(msr_no, l, h);
+ }
+-#endif /* CONFIG_SMP */
+-
+-#endif
+-
+-/* AMD/K8 specific MSRs */
+-#define MSR_EFER 0xc0000080 /* extended feature register */
+-#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
+-#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
+-#define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */
+-#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
+-#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
+-#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
+-#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */
+-/* EFER bits: */
+-#define _EFER_SCE 0 /* SYSCALL/SYSRET */
+-#define _EFER_LME 8 /* Long mode enable */
+-#define _EFER_LMA 10 /* Long mode active (read-only) */
+-#define _EFER_NX 11 /* No execute enable */
+-
+-#define EFER_SCE (1<<_EFER_SCE)
+-#define EFER_LME (1<<_EFER_LME)
+-#define EFER_LMA (1<<_EFER_LMA)
+-#define EFER_NX (1<<_EFER_NX)
+-
+-/* Intel MSRs. Some also available on other CPUs */
+-#define MSR_IA32_TSC 0x10
+-#define MSR_IA32_PLATFORM_ID 0x17
+-
+-#define MSR_IA32_PERFCTR0 0xc1
+-#define MSR_IA32_PERFCTR1 0xc2
+-#define MSR_FSB_FREQ 0xcd
+-
+-#define MSR_MTRRcap 0x0fe
+-#define MSR_IA32_BBL_CR_CTL 0x119
+-
+-#define MSR_IA32_SYSENTER_CS 0x174
+-#define MSR_IA32_SYSENTER_ESP 0x175
+-#define MSR_IA32_SYSENTER_EIP 0x176
+-
+-#define MSR_IA32_MCG_CAP 0x179
+-#define MSR_IA32_MCG_STATUS 0x17a
+-#define MSR_IA32_MCG_CTL 0x17b
+-
+-#define MSR_IA32_EVNTSEL0 0x186
+-#define MSR_IA32_EVNTSEL1 0x187
+-
+-#define MSR_IA32_DEBUGCTLMSR 0x1d9
+-#define MSR_IA32_LASTBRANCHFROMIP 0x1db
+-#define MSR_IA32_LASTBRANCHTOIP 0x1dc
+-#define MSR_IA32_LASTINTFROMIP 0x1dd
+-#define MSR_IA32_LASTINTTOIP 0x1de
+-
+-#define MSR_IA32_PEBS_ENABLE 0x3f1
+-#define MSR_IA32_DS_AREA 0x600
+-#define MSR_IA32_PERF_CAPABILITIES 0x345
+-
+-#define MSR_MTRRfix64K_00000 0x250
+-#define MSR_MTRRfix16K_80000 0x258
+-#define MSR_MTRRfix16K_A0000 0x259
+-#define MSR_MTRRfix4K_C0000 0x268
+-#define MSR_MTRRfix4K_C8000 0x269
+-#define MSR_MTRRfix4K_D0000 0x26a
+-#define MSR_MTRRfix4K_D8000 0x26b
+-#define MSR_MTRRfix4K_E0000 0x26c
+-#define MSR_MTRRfix4K_E8000 0x26d
+-#define MSR_MTRRfix4K_F0000 0x26e
+-#define MSR_MTRRfix4K_F8000 0x26f
+-#define MSR_MTRRdefType 0x2ff
+-
+-#define MSR_IA32_MC0_CTL 0x400
+-#define MSR_IA32_MC0_STATUS 0x401
+-#define MSR_IA32_MC0_ADDR 0x402
+-#define MSR_IA32_MC0_MISC 0x403
+-
+-#define MSR_P6_PERFCTR0 0xc1
+-#define MSR_P6_PERFCTR1 0xc2
+-#define MSR_P6_EVNTSEL0 0x186
+-#define MSR_P6_EVNTSEL1 0x187
+-
+-/* K7/K8 MSRs. Not complete. See the architecture manual for a more complete list. */
+-#define MSR_K7_EVNTSEL0 0xC0010000
+-#define MSR_K7_PERFCTR0 0xC0010004
+-#define MSR_K7_EVNTSEL1 0xC0010001
+-#define MSR_K7_PERFCTR1 0xC0010005
+-#define MSR_K7_EVNTSEL2 0xC0010002
+-#define MSR_K7_PERFCTR2 0xC0010006
+-#define MSR_K7_EVNTSEL3 0xC0010003
+-#define MSR_K7_PERFCTR3 0xC0010007
+-#define MSR_K8_TOP_MEM1 0xC001001A
+-#define MSR_K8_TOP_MEM2 0xC001001D
+-#define MSR_K8_SYSCFG 0xC0010010
+-#define MSR_K8_HWCR 0xC0010015
+-
+-/* K6 MSRs */
+-#define MSR_K6_EFER 0xC0000080
+-#define MSR_K6_STAR 0xC0000081
+-#define MSR_K6_WHCR 0xC0000082
+-#define MSR_K6_UWCCR 0xC0000085
+-#define MSR_K6_PSOR 0xC0000087
+-#define MSR_K6_PFIR 0xC0000088
+-
+-/* Centaur-Hauls/IDT defined MSRs. */
+-#define MSR_IDT_FCR1 0x107
+-#define MSR_IDT_FCR2 0x108
+-#define MSR_IDT_FCR3 0x109
+-#define MSR_IDT_FCR4 0x10a
+-
+-#define MSR_IDT_MCR0 0x110
+-#define MSR_IDT_MCR1 0x111
+-#define MSR_IDT_MCR2 0x112
+-#define MSR_IDT_MCR3 0x113
+-#define MSR_IDT_MCR4 0x114
+-#define MSR_IDT_MCR5 0x115
+-#define MSR_IDT_MCR6 0x116
+-#define MSR_IDT_MCR7 0x117
+-#define MSR_IDT_MCR_CTRL 0x120
+-
+-/* VIA Cyrix defined MSRs*/
+-#define MSR_VIA_FCR 0x1107
+-#define MSR_VIA_LONGHAUL 0x110a
+-#define MSR_VIA_RNG 0x110b
+-#define MSR_VIA_BCR2 0x1147
+-
+-/* Intel defined MSRs. */
+-#define MSR_IA32_P5_MC_ADDR 0
+-#define MSR_IA32_P5_MC_TYPE 1
+-#define MSR_IA32_PLATFORM_ID 0x17
+-#define MSR_IA32_EBL_CR_POWERON 0x2a
+-
+-#define MSR_IA32_APICBASE 0x1b
+-#define MSR_IA32_APICBASE_BSP (1<<8)
+-#define MSR_IA32_APICBASE_ENABLE (1<<11)
+-#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+-
+-/* P4/Xeon+ specific */
+-#define MSR_IA32_MCG_EAX 0x180
+-#define MSR_IA32_MCG_EBX 0x181
+-#define MSR_IA32_MCG_ECX 0x182
+-#define MSR_IA32_MCG_EDX 0x183
+-#define MSR_IA32_MCG_ESI 0x184
+-#define MSR_IA32_MCG_EDI 0x185
+-#define MSR_IA32_MCG_EBP 0x186
+-#define MSR_IA32_MCG_ESP 0x187
+-#define MSR_IA32_MCG_EFLAGS 0x188
+-#define MSR_IA32_MCG_EIP 0x189
+-#define MSR_IA32_MCG_RESERVED 0x18A
+-
+-#define MSR_P6_EVNTSEL0 0x186
+-#define MSR_P6_EVNTSEL1 0x187
+-
+-#define MSR_IA32_PERF_STATUS 0x198
+-#define MSR_IA32_PERF_CTL 0x199
+-
+-#define MSR_IA32_MPERF 0xE7
+-#define MSR_IA32_APERF 0xE8
+-
+-#define MSR_IA32_THERM_CONTROL 0x19a
+-#define MSR_IA32_THERM_INTERRUPT 0x19b
+-#define MSR_IA32_THERM_STATUS 0x19c
+-#define MSR_IA32_MISC_ENABLE 0x1a0
+-
+-#define MSR_IA32_DEBUGCTLMSR 0x1d9
+-#define MSR_IA32_LASTBRANCHFROMIP 0x1db
+-#define MSR_IA32_LASTBRANCHTOIP 0x1dc
+-#define MSR_IA32_LASTINTFROMIP 0x1dd
+-#define MSR_IA32_LASTINTTOIP 0x1de
+-
+-#define MSR_IA32_MC0_CTL 0x400
+-#define MSR_IA32_MC0_STATUS 0x401
+-#define MSR_IA32_MC0_ADDR 0x402
+-#define MSR_IA32_MC0_MISC 0x403
+-
+-/* Pentium IV performance counter MSRs */
+-#define MSR_P4_BPU_PERFCTR0 0x300
+-#define MSR_P4_BPU_PERFCTR1 0x301
+-#define MSR_P4_BPU_PERFCTR2 0x302
+-#define MSR_P4_BPU_PERFCTR3 0x303
+-#define MSR_P4_MS_PERFCTR0 0x304
+-#define MSR_P4_MS_PERFCTR1 0x305
+-#define MSR_P4_MS_PERFCTR2 0x306
+-#define MSR_P4_MS_PERFCTR3 0x307
+-#define MSR_P4_FLAME_PERFCTR0 0x308
+-#define MSR_P4_FLAME_PERFCTR1 0x309
+-#define MSR_P4_FLAME_PERFCTR2 0x30a
+-#define MSR_P4_FLAME_PERFCTR3 0x30b
+-#define MSR_P4_IQ_PERFCTR0 0x30c
+-#define MSR_P4_IQ_PERFCTR1 0x30d
+-#define MSR_P4_IQ_PERFCTR2 0x30e
+-#define MSR_P4_IQ_PERFCTR3 0x30f
+-#define MSR_P4_IQ_PERFCTR4 0x310
+-#define MSR_P4_IQ_PERFCTR5 0x311
+-#define MSR_P4_BPU_CCCR0 0x360
+-#define MSR_P4_BPU_CCCR1 0x361
+-#define MSR_P4_BPU_CCCR2 0x362
+-#define MSR_P4_BPU_CCCR3 0x363
+-#define MSR_P4_MS_CCCR0 0x364
+-#define MSR_P4_MS_CCCR1 0x365
+-#define MSR_P4_MS_CCCR2 0x366
+-#define MSR_P4_MS_CCCR3 0x367
+-#define MSR_P4_FLAME_CCCR0 0x368
+-#define MSR_P4_FLAME_CCCR1 0x369
+-#define MSR_P4_FLAME_CCCR2 0x36a
+-#define MSR_P4_FLAME_CCCR3 0x36b
+-#define MSR_P4_IQ_CCCR0 0x36c
+-#define MSR_P4_IQ_CCCR1 0x36d
+-#define MSR_P4_IQ_CCCR2 0x36e
+-#define MSR_P4_IQ_CCCR3 0x36f
+-#define MSR_P4_IQ_CCCR4 0x370
+-#define MSR_P4_IQ_CCCR5 0x371
+-#define MSR_P4_ALF_ESCR0 0x3ca
+-#define MSR_P4_ALF_ESCR1 0x3cb
+-#define MSR_P4_BPU_ESCR0 0x3b2
+-#define MSR_P4_BPU_ESCR1 0x3b3
+-#define MSR_P4_BSU_ESCR0 0x3a0
+-#define MSR_P4_BSU_ESCR1 0x3a1
+-#define MSR_P4_CRU_ESCR0 0x3b8
+-#define MSR_P4_CRU_ESCR1 0x3b9
+-#define MSR_P4_CRU_ESCR2 0x3cc
+-#define MSR_P4_CRU_ESCR3 0x3cd
+-#define MSR_P4_CRU_ESCR4 0x3e0
+-#define MSR_P4_CRU_ESCR5 0x3e1
+-#define MSR_P4_DAC_ESCR0 0x3a8
+-#define MSR_P4_DAC_ESCR1 0x3a9
+-#define MSR_P4_FIRM_ESCR0 0x3a4
+-#define MSR_P4_FIRM_ESCR1 0x3a5
+-#define MSR_P4_FLAME_ESCR0 0x3a6
+-#define MSR_P4_FLAME_ESCR1 0x3a7
+-#define MSR_P4_FSB_ESCR0 0x3a2
+-#define MSR_P4_FSB_ESCR1 0x3a3
+-#define MSR_P4_IQ_ESCR0 0x3ba
+-#define MSR_P4_IQ_ESCR1 0x3bb
+-#define MSR_P4_IS_ESCR0 0x3b4
+-#define MSR_P4_IS_ESCR1 0x3b5
+-#define MSR_P4_ITLB_ESCR0 0x3b6
+-#define MSR_P4_ITLB_ESCR1 0x3b7
+-#define MSR_P4_IX_ESCR0 0x3c8
+-#define MSR_P4_IX_ESCR1 0x3c9
+-#define MSR_P4_MOB_ESCR0 0x3aa
+-#define MSR_P4_MOB_ESCR1 0x3ab
+-#define MSR_P4_MS_ESCR0 0x3c0
+-#define MSR_P4_MS_ESCR1 0x3c1
+-#define MSR_P4_PMH_ESCR0 0x3ac
+-#define MSR_P4_PMH_ESCR1 0x3ad
+-#define MSR_P4_RAT_ESCR0 0x3bc
+-#define MSR_P4_RAT_ESCR1 0x3bd
+-#define MSR_P4_SAAT_ESCR0 0x3ae
+-#define MSR_P4_SAAT_ESCR1 0x3af
+-#define MSR_P4_SSU_ESCR0 0x3be
+-#define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */
+-#define MSR_P4_TBPU_ESCR0 0x3c2
+-#define MSR_P4_TBPU_ESCR1 0x3c3
+-#define MSR_P4_TC_ESCR0 0x3c4
+-#define MSR_P4_TC_ESCR1 0x3c5
+-#define MSR_P4_U2L_ESCR0 0x3b0
+-#define MSR_P4_U2L_ESCR1 0x3b1
+-
+-/* Intel Core-based CPU performance counters */
+-#define MSR_CORE_PERF_FIXED_CTR0 0x309
+-#define MSR_CORE_PERF_FIXED_CTR1 0x30a
+-#define MSR_CORE_PERF_FIXED_CTR2 0x30b
+-#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
+-#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
+-#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
+-#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
+-
+-#endif
++static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
++{
++ return rdmsr_safe(msr_no, l, h);
++}
++static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
++{
++ return wrmsr_safe(msr_no, l, h);
++}
++#endif /* CONFIG_SMP */
++#endif /* __ASSEMBLY__ */
++#endif /* X86_64_MSR_H */
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/nmi.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/nmi.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/nmi.h 2007-10-22 13:58:57.000000000 +0200
+@@ -96,4 +96,13 @@ extern int unknown_nmi_panic;
+ void __trigger_all_cpu_backtrace(void);
+ #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+
++
++void lapic_watchdog_stop(void);
++int lapic_watchdog_init(unsigned nmi_hz);
++int lapic_wd_event(unsigned nmi_hz);
++unsigned lapic_adjust_nmi_hz(unsigned hz);
++int lapic_watchdog_ok(void);
++void disable_lapic_nmi_watchdog(void);
++void enable_lapic_nmi_watchdog(void);
++
+ #endif /* ASM_NMI_H */
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/page.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/page.h 2007-05-31 14:39:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/page.h 2007-10-22 13:58:57.000000000 +0200
+@@ -7,6 +7,7 @@
+ #include <linux/types.h>
+ #include <asm/bug.h>
+ #endif
++#include <linux/const.h>
+ #include <xen/interface/xen.h>
+
+ /*
+@@ -18,18 +19,14 @@
+
+ /* PAGE_SHIFT determines the page size */
+ #define PAGE_SHIFT 12
+-#ifdef __ASSEMBLY__
+-#define PAGE_SIZE (0x1 << PAGE_SHIFT)
+-#else
+-#define PAGE_SIZE (1UL << PAGE_SHIFT)
+-#endif
++#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+
+ /* See Documentation/x86_64/mm.txt for a description of the memory map. */
+ #define __PHYSICAL_MASK_SHIFT 46
+-#define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1)
++#define __PHYSICAL_MASK ((_AC(1,UL) << __PHYSICAL_MASK_SHIFT) - 1)
+ #define __VIRTUAL_MASK_SHIFT 48
+-#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
++#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)
+
+ #define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
+
+@@ -54,10 +51,10 @@
+ #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
+
+ #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
+-#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
++#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
+
+ #define HPAGE_SHIFT PMD_SHIFT
+-#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
++#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
+ #define HPAGE_MASK (~(HPAGE_SIZE - 1))
+ #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+@@ -146,17 +143,23 @@ static inline pgd_t __pgd(unsigned long
+
+ #define __pgprot(x) ((pgprot_t) { (x) } )
+
+-#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
+-#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
+-#define __START_KERNEL_map 0xffffffff80000000UL
+-#define __PAGE_OFFSET 0xffff880000000000UL
++#endif /* !__ASSEMBLY__ */
+
+-#else
+ #define __PHYSICAL_START CONFIG_PHYSICAL_START
++#define __KERNEL_ALIGN 0x200000
++
++/*
++ * Make sure kernel is aligned to 2MB address. Catching it at compile
++ * time is better. Change your config file and compile the kernel
++ * for a 2MB aligned address (CONFIG_PHYSICAL_START)
++ */
++#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
++#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
++#endif
++
+ #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
+-#define __START_KERNEL_map 0xffffffff80000000
+-#define __PAGE_OFFSET 0xffff880000000000
+-#endif /* !__ASSEMBLY__ */
++#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
++#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
+
+ #if CONFIG_XEN_COMPAT <= 0x030002
+ #undef LOAD_OFFSET
+@@ -166,20 +169,20 @@ static inline pgd_t __pgd(unsigned long
+ /* to align the pointer to the (next) page boundary */
+ #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+-#define KERNEL_TEXT_SIZE (40UL*1024*1024)
+-#define KERNEL_TEXT_START 0xffffffff80000000UL
++#define KERNEL_TEXT_SIZE (40*1024*1024)
++#define KERNEL_TEXT_START _AC(0xffffffff80000000, UL)
++
++#define PAGE_OFFSET __PAGE_OFFSET
+
+-#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
++#ifndef __ASSEMBLY__
++static inline unsigned long __phys_addr(unsigned long x)
++{
++ return x - (x >= __START_KERNEL_map ? __START_KERNEL_map : PAGE_OFFSET);
++}
++#endif
+
+-/* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol.
+- Otherwise you risk miscompilation. */
+-#define __pa(x) (((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
+-/* __pa_symbol should be used for C visible symbols.
+- This seems to be the official gcc blessed way to do such arithmetic. */
+-#define __pa_symbol(x) \
+- ({unsigned long v; \
+- asm("" : "=r" (v) : "0" (x)); \
+- __pa(v); })
++#define __pa(x) __phys_addr((unsigned long)(x))
++#define __pa_symbol(x) __phys_addr((unsigned long)(x))
+
+ #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+ #define __boot_va(x) __va(x)
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/pgalloc.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/pgalloc.h 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/pgalloc.h 2007-10-22 13:58:57.000000000 +0200
+@@ -1,7 +1,6 @@
+ #ifndef _X86_64_PGALLOC_H
+ #define _X86_64_PGALLOC_H
+
+-#include <asm/fixmap.h>
+ #include <asm/pda.h>
+ #include <linux/threads.h>
+ #include <linux/mm.h>
+@@ -100,24 +99,16 @@ static inline void pgd_list_add(pgd_t *p
+ struct page *page = virt_to_page(pgd);
+
+ spin_lock(&pgd_lock);
+- page->index = (pgoff_t)pgd_list;
+- if (pgd_list)
+- pgd_list->private = (unsigned long)&page->index;
+- pgd_list = page;
+- page->private = (unsigned long)&pgd_list;
++ list_add(&page->lru, &pgd_list);
+ spin_unlock(&pgd_lock);
+ }
+
+ static inline void pgd_list_del(pgd_t *pgd)
+ {
+- struct page *next, **pprev, *page = virt_to_page(pgd);
++ struct page *page = virt_to_page(pgd);
+
+ spin_lock(&pgd_lock);
+- next = (struct page *)page->index;
+- pprev = (struct page **)page->private;
+- *pprev = next;
+- if (next)
+- next->private = (unsigned long)pprev;
++ list_del(&page->lru);
+ spin_unlock(&pgd_lock);
+ }
+
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/pgtable.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/pgtable.h 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/pgtable.h 2007-10-22 13:58:57.000000000 +0200
+@@ -1,12 +1,14 @@
+ #ifndef _X86_64_PGTABLE_H
+ #define _X86_64_PGTABLE_H
+
++#include <linux/const.h>
++#ifndef __ASSEMBLY__
++
+ /*
+ * This file contains the functions and defines necessary to modify and use
+ * the x86-64 page table tree.
+ */
+ #include <asm/processor.h>
+-#include <asm/fixmap.h>
+ #include <asm/bitops.h>
+ #include <linux/threads.h>
+ #include <linux/sched.h>
+@@ -35,11 +37,9 @@ extern void xen_init_pt(void);
+ #endif
+
+ extern pud_t level3_kernel_pgt[512];
+-extern pud_t level3_physmem_pgt[512];
+ extern pud_t level3_ident_pgt[512];
+ extern pmd_t level2_kernel_pgt[512];
+ extern pgd_t init_level4_pgt[];
+-extern pgd_t boot_level4_pgt[];
+ extern unsigned long __supported_pte_mask;
+
+ #define swapper_pg_dir init_level4_pgt
+@@ -54,6 +54,8 @@ extern void clear_kernel_mapping(unsigne
+ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+ #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
++#endif /* !__ASSEMBLY__ */
++
+ /*
+ * PGDIR_SHIFT determines what a top-level page table entry can map
+ */
+@@ -78,6 +80,8 @@ extern unsigned long empty_zero_page[PAG
+ */
+ #define PTRS_PER_PTE 512
+
++#ifndef __ASSEMBLY__
++
+ #define pte_ERROR(e) \
+ printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
+ #define pmd_ERROR(e) \
+@@ -116,22 +120,23 @@ static inline void pgd_clear (pgd_t * pg
+
+ #define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
+
+-#define PMD_SIZE (1UL << PMD_SHIFT)
++#endif /* !__ASSEMBLY__ */
++
++#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
+ #define PMD_MASK (~(PMD_SIZE-1))
+-#define PUD_SIZE (1UL << PUD_SHIFT)
++#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
+ #define PUD_MASK (~(PUD_SIZE-1))
+-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
++#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
+ #define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+ #define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
+ #define FIRST_USER_ADDRESS 0
+
+-#ifndef __ASSEMBLY__
+-#define MAXMEM 0x3fffffffffffUL
+-#define VMALLOC_START 0xffffc20000000000UL
+-#define VMALLOC_END 0xffffe1ffffffffffUL
+-#define MODULES_VADDR 0xffffffff88000000UL
+-#define MODULES_END 0xfffffffffff00000UL
++#define MAXMEM _AC(0x3fffffffffff, UL)
++#define VMALLOC_START _AC(0xffffc20000000000, UL)
++#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
++#define MODULES_VADDR _AC(0xffffffff88000000, UL)
++#define MODULES_END _AC(0xfffffffffff00000, UL)
+ #define MODULES_LEN (MODULES_END - MODULES_VADDR)
+
+ #define _PAGE_BIT_PRESENT 0
+@@ -157,7 +162,7 @@ static inline void pgd_clear (pgd_t * pg
+ #define _PAGE_GLOBAL 0x100 /* Global TLB entry */
+
+ #define _PAGE_PROTNONE 0x080 /* If not present */
+-#define _PAGE_NX (1UL<<_PAGE_BIT_NX)
++#define _PAGE_NX (_AC(1,UL)<<_PAGE_BIT_NX)
+
+ #if CONFIG_XEN_COMPAT <= 0x030002
+ extern unsigned int __kernel_page_user;
+@@ -228,6 +233,8 @@ extern unsigned int __kernel_page_user;
+ #define __S110 PAGE_SHARED_EXEC
+ #define __S111 PAGE_SHARED_EXEC
+
++#ifndef __ASSEMBLY__
++
+ static inline unsigned long pgd_bad(pgd_t pgd)
+ {
+ return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+@@ -339,6 +346,20 @@ static inline pte_t pte_mkwrite(pte_t pt
+ static inline pte_t pte_mkhuge(pte_t pte) { __pte_val(pte) |= _PAGE_PSE; return pte; }
+ static inline pte_t pte_clrhuge(pte_t pte) { __pte_val(pte) &= ~_PAGE_PSE; return pte; }
+
++static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
++{
++ if (!pte_dirty(*ptep))
++ return 0;
++ return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
++}
++
++static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
++{
++ if (!pte_young(*ptep))
++ return 0;
++ return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
++}
++
+ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+ pte_t pte = *ptep;
+@@ -464,18 +485,12 @@ static inline pte_t pte_modify(pte_t pte
+ * bit at the same time. */
+ #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+ #define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
+- do { \
+- if (dirty) \
+- ptep_establish(vma, address, ptep, entry); \
+- } while (0)
+-
+-
+-/*
+- * i386 says: We don't actually have these, but we want to advertise
+- * them so that we can encompass the flush here.
+- */
+-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
++({ \
++ int __changed = !pte_same(*(ptep), entry); \
++ if (__changed && (dirty)) \
++ ptep_establish(vma, address, ptep, entry); \
++ __changed; \
++})
+
+ #define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
+ #define ptep_clear_flush_dirty(vma, address, ptep) \
+@@ -484,7 +499,7 @@ static inline pte_t pte_modify(pte_t pte
+ int __dirty = pte_dirty(__pte); \
+ __pte = pte_mkclean(__pte); \
+ if ((vma)->vm_mm->context.pinned) \
+- ptep_set_access_flags(vma, address, ptep, __pte, __dirty); \
++ (void)ptep_set_access_flags(vma, address, ptep, __pte, __dirty); \
+ else if (__dirty) \
+ set_pte(ptep, __pte); \
+ __dirty; \
+@@ -497,7 +512,7 @@ static inline pte_t pte_modify(pte_t pte
+ int __young = pte_young(__pte); \
+ __pte = pte_mkold(__pte); \
+ if ((vma)->vm_mm->context.pinned) \
+- ptep_set_access_flags(vma, address, ptep, __pte, __young); \
++ (void)ptep_set_access_flags(vma, address, ptep, __pte, __young); \
+ else if (__young) \
+ set_pte(ptep, __pte); \
+ __young; \
+@@ -511,10 +526,7 @@ static inline pte_t pte_modify(pte_t pte
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+ extern spinlock_t pgd_lock;
+-extern struct page *pgd_list;
+-void vmalloc_sync_all(void);
+-
+-#endif /* !__ASSEMBLY__ */
++extern struct list_head pgd_list;
+
+ extern int kern_addr_valid(unsigned long addr);
+
+@@ -546,10 +558,6 @@ int touch_pte_range(struct mm_struct *mm
+ #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)
+
+-#define MK_IOSPACE_PFN(space, pfn) (pfn)
+-#define GET_IOSPACE(pfn) 0
+-#define GET_PFN(pfn) (pfn)
+-
+ #define HAVE_ARCH_UNMAPPED_AREA
+
+ #define pgtable_cache_init() do { } while (0)
+@@ -563,11 +571,14 @@ int touch_pte_range(struct mm_struct *mm
+ #define kc_offset_to_vaddr(o) \
+ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
+
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+ #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+ #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+ #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
+ #define __HAVE_ARCH_PTEP_SET_WRPROTECT
+ #define __HAVE_ARCH_PTE_SAME
+ #include <asm-generic/pgtable.h>
++#endif /* !__ASSEMBLY__ */
+
+ #endif /* _X86_64_PGTABLE_H */
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/processor.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/processor.h 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/processor.h 2007-10-22 13:58:57.000000000 +0200
+@@ -20,6 +20,7 @@
+ #include <asm/percpu.h>
+ #include <linux/personality.h>
+ #include <linux/cpumask.h>
++#include <asm/processor-flags.h>
+
+ #define TF_MASK 0x00000100
+ #define IF_MASK 0x00000200
+@@ -103,42 +104,6 @@ extern unsigned int init_intel_cacheinfo
+ extern unsigned short num_cache_leaves;
+
+ /*
+- * EFLAGS bits
+- */
+-#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
+-#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
+-#define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */
+-#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
+-#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
+-#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
+-#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
+-#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
+-#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
+-#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
+-#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
+-#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
+-#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
+-#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
+-#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
+-#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+-#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+-
+-/*
+- * Intel CPU features in CR4
+- */
+-#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
+-#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
+-#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
+-#define X86_CR4_DE 0x0008 /* enable debugging extensions */
+-#define X86_CR4_PSE 0x0010 /* enable page size extensions */
+-#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
+-#define X86_CR4_MCE 0x0040 /* Machine check enable */
+-#define X86_CR4_PGE 0x0080 /* enable global pages */
+-#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
+-#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
+-#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
+-
+-/*
+ * Save the cr4 feature set we're using (ie
+ * Pentium 4MB enable and PPro Global page
+ * enable), so that any CPU's that boot up
+@@ -209,7 +174,7 @@ struct i387_fxsave_struct {
+ u32 mxcsr;
+ u32 mxcsr_mask;
+ u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
+- u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 128 bytes */
++ u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
+ u32 padding[24];
+ } __attribute__ ((aligned (16)));
+
+@@ -440,22 +405,6 @@ static inline void prefetchw(void *x)
+ #define cpu_relax() rep_nop()
+
+ /*
+- * NSC/Cyrix CPU configuration register indexes
+- */
+-#define CX86_CCR0 0xc0
+-#define CX86_CCR1 0xc1
+-#define CX86_CCR2 0xc2
+-#define CX86_CCR3 0xc3
+-#define CX86_CCR4 0xe8
+-#define CX86_CCR5 0xe9
+-#define CX86_CCR6 0xea
+-#define CX86_CCR7 0xeb
+-#define CX86_DIR0 0xfe
+-#define CX86_DIR1 0xff
+-#define CX86_ARR_BASE 0xc4
+-#define CX86_RCR_BASE 0xdc
+-
+-/*
+ * NSC/Cyrix CPU indexed register access macros
+ */
+
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/smp.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/smp.h 2007-10-22 13:58:46.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/smp.h 2007-10-22 13:58:57.000000000 +0200
+@@ -11,12 +11,11 @@
+ extern int disable_apic;
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+-#include <asm/fixmap.h>
+ #include <asm/mpspec.h>
++#include <asm/apic.h>
+ #ifdef CONFIG_X86_IO_APIC
+ #include <asm/io_apic.h>
+ #endif
+-#include <asm/apic.h>
+ #include <asm/thread_info.h>
+ #endif
+
+@@ -41,7 +40,6 @@ extern void lock_ipi_call_lock(void);
+ extern void unlock_ipi_call_lock(void);
+ extern int smp_num_siblings;
+ extern void smp_send_reschedule(int cpu);
+-void smp_stop_cpu(void);
+
+ extern cpumask_t cpu_sibling_map[NR_CPUS];
+ extern cpumask_t cpu_core_map[NR_CPUS];
+@@ -62,14 +60,6 @@ static inline int num_booting_cpus(void)
+
+ #define raw_smp_processor_id() read_pda(cpunumber)
+
+-#ifdef CONFIG_X86_LOCAL_APIC
+-static inline int hard_smp_processor_id(void)
+-{
+- /* we don't want to mark this access volatile - bad code generation */
+- return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
+-}
+-#endif
+-
+ extern int __cpu_disable(void);
+ extern void __cpu_die(unsigned int cpu);
+ extern void prefill_possible_map(void);
+@@ -78,6 +68,14 @@ extern unsigned __cpuinitdata disabled_c
+
+ #define NO_PROC_ID 0xFF /* No processor magic marker */
+
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_X86_LOCAL_APIC
++static inline int hard_smp_processor_id(void)
++{
++ /* we don't want to mark this access volatile - bad code generation */
++ return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
++}
+ #endif
+
+ /*
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/system.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/system.h 2007-10-22 13:53:08.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/system.h 2007-10-22 13:58:57.000000000 +0200
+@@ -3,7 +3,7 @@
+
+ #include <linux/kernel.h>
+ #include <asm/segment.h>
+-#include <asm/alternative.h>
++#include <asm/cmpxchg.h>
+
+ #include <asm/synch_bitops.h>
+ #include <asm/hypervisor.h>
+@@ -49,7 +49,7 @@
+ [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
+ [ti_flags] "i" (offsetof(struct thread_info, flags)),\
+ [tif_fork] "i" (TIF_FORK), \
+- [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
++ [thread_info] "i" (offsetof(struct task_struct, stack)), \
+ [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
+ : "memory", "cc" __EXTRA_CLOBBER)
+
+@@ -98,6 +98,12 @@ static inline void write_cr0(unsigned lo
+ machine_to_phys(__dummy); \
+ })
+
++static inline void write_cr3(unsigned long val)
++{
++ val = phys_to_machine(val);
++ asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
++}
++
+ static inline unsigned long read_cr4(void)
+ {
+ unsigned long cr4;
+@@ -107,7 +113,7 @@ static inline unsigned long read_cr4(voi
+
+ static inline void write_cr4(unsigned long val)
+ {
+- asm volatile("movq %0,%%cr4" :: "r" (val));
++ asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
+ }
+
+ #define stts() (HYPERVISOR_fpu_taskswitch(1))
+@@ -128,100 +134,6 @@ static inline void sched_cacheflush(void
+
+ #define nop() __asm__ __volatile__ ("nop")
+
+-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+-
+-#define tas(ptr) (xchg((ptr),1))
+-
+-#define __xg(x) ((volatile long *)(x))
+-
+-static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
+-{
+- *ptr = val;
+-}
+-
+-#define _set_64bit set_64bit
+-
+-/*
+- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+- * Note 2: xchg has side effect, so that attribute volatile is necessary,
+- * but generally the primitive is invalid, *ptr is output argument. --ANK
+- */
+-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+-{
+- switch (size) {
+- case 1:
+- __asm__ __volatile__("xchgb %b0,%1"
+- :"=q" (x)
+- :"m" (*__xg(ptr)), "0" (x)
+- :"memory");
+- break;
+- case 2:
+- __asm__ __volatile__("xchgw %w0,%1"
+- :"=r" (x)
+- :"m" (*__xg(ptr)), "0" (x)
+- :"memory");
+- break;
+- case 4:
+- __asm__ __volatile__("xchgl %k0,%1"
+- :"=r" (x)
+- :"m" (*__xg(ptr)), "0" (x)
+- :"memory");
+- break;
+- case 8:
+- __asm__ __volatile__("xchgq %0,%1"
+- :"=r" (x)
+- :"m" (*__xg(ptr)), "0" (x)
+- :"memory");
+- break;
+- }
+- return x;
+-}
+-
+-/*
+- * Atomic compare and exchange. Compare OLD with MEM, if identical,
+- * store NEW in MEM. Return the initial value in MEM. Success is
+- * indicated by comparing RETURN with OLD.
+- */
+-
+-#define __HAVE_ARCH_CMPXCHG 1
+-
+-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+- unsigned long new, int size)
+-{
+- unsigned long prev;
+- switch (size) {
+- case 1:
+- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+- : "=a"(prev)
+- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+- : "memory");
+- return prev;
+- case 2:
+- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+- : "=a"(prev)
+- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+- : "memory");
+- return prev;
+- case 4:
+- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
+- : "=a"(prev)
+- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+- : "memory");
+- return prev;
+- case 8:
+- __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
+- : "=a"(prev)
+- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+- : "memory");
+- return prev;
+- }
+- return old;
+-}
+-
+-#define cmpxchg(ptr,o,n)\
+- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+- (unsigned long)(n),sizeof(*(ptr))))
+-
+ #ifdef CONFIG_SMP
+ #define smp_mb() mb()
+ #define smp_rmb() rmb()
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/tlbflush.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/tlbflush.h 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/tlbflush.h 2007-10-22 13:58:57.000000000 +0200
+@@ -2,7 +2,9 @@
+ #define _X8664_TLBFLUSH_H
+
+ #include <linux/mm.h>
++#include <linux/sched.h>
+ #include <asm/processor.h>
++#include <asm/system.h>
+
+ #define __flush_tlb() xen_tlb_flush()
+
+Index: 10.3-2007-11-26/mm/highmem.c
+===================================================================
+--- 10.3-2007-11-26.orig/mm/highmem.c 2007-12-06 17:27:30.000000000 +0100
++++ 10.3-2007-11-26/mm/highmem.c 2007-10-22 13:58:57.000000000 +0200
+@@ -158,17 +158,6 @@ start:
+ return vaddr;
+ }
+
+-#ifdef CONFIG_XEN
+-void kmap_flush_unused(void)
+-{
+- spin_lock(&kmap_lock);
+- flush_all_zero_pkmaps();
+- spin_unlock(&kmap_lock);
+-}
+-
+-EXPORT_SYMBOL(kmap_flush_unused);
+-#endif
+-
+ void fastcall *kmap_high(struct page *page)
+ {
+ unsigned long vaddr;
+Index: 10.3-2007-11-26/net/core/dev.c
+===================================================================
+--- 10.3-2007-11-26.orig/net/core/dev.c 2007-10-22 13:53:25.000000000 +0200
++++ 10.3-2007-11-26/net/core/dev.c 2007-10-22 13:58:57.000000000 +0200
+@@ -1466,12 +1466,16 @@ out_kfree_skb:
+ inline int skb_checksum_setup(struct sk_buff *skb)
+ {
+ if (skb->proto_csum_blank) {
++ struct iphdr *iph;
++
+ if (skb->protocol != htons(ETH_P_IP))
+ goto out;
+- skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
+- if (skb->h.raw >= skb->tail)
++ iph = ip_hdr(skb);
++ skb->transport_header = skb->network_header + 4 * iph->ihl;
++ if (skb->transport_header >= skb->tail)
+ goto out;
+- switch (skb->nh.iph->protocol) {
++ skb->csum_start = skb_transport_header(skb) - skb->head;
++ switch (iph->protocol) {
+ case IPPROTO_TCP:
+ skb->csum_offset = offsetof(struct tcphdr, check);
+ break;
+@@ -1482,10 +1486,10 @@ inline int skb_checksum_setup(struct sk_
+ if (net_ratelimit())
+ printk(KERN_ERR "Attempting to checksum a non-"
+ "TCP/UDP packet, dropping a protocol"
+- " %d packet", skb->nh.iph->protocol);
++ " %d packet", iph->protocol);
+ goto out;
+ }
+- if ((skb->h.raw + skb->csum_offset + 2) > skb->tail)
++ if ((skb->transport_header + skb->csum_offset + 2) > skb->tail)
+ goto out;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->proto_csum_blank = 0;
diff --git a/trunk/2.6.22/20048_xen3-patch-2.6.22.5-6.patch1 b/trunk/2.6.22/20048_xen3-patch-2.6.22.5-6.patch1
new file mode 100644
index 0000000..f9a09e9
--- /dev/null
+++ b/trunk/2.6.22/20048_xen3-patch-2.6.22.5-6.patch1
@@ -0,0 +1,30 @@
+Subject: Linux 2.6.22.6
+From: Greg Kroah-Hartman <gregkh@suse.de>
+
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+Automatically created from "patches.kernel.org/patch-2.6.22.5-6" by xen-port-patches.py
+
+Index: head-2007-09-25/arch/i386/mm/fault-xen.c
+===================================================================
+--- head-2007-09-25.orig/arch/i386/mm/fault-xen.c 2007-09-25 14:36:50.000000000 +0200
++++ head-2007-09-25/arch/i386/mm/fault-xen.c 2007-09-25 14:38:06.000000000 +0200
+@@ -346,7 +346,7 @@ static inline pmd_t *vmalloc_sync_one(pg
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd_k))
+ return NULL;
+- if (!pmd_present(*pmd))
++ if (!pmd_present(*pmd)) {
+ #ifndef CONFIG_XEN
+ set_pmd(pmd, *pmd_k);
+ #else
+@@ -356,7 +356,8 @@ static inline pmd_t *vmalloc_sync_one(pg
+ */
+ set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
+ #endif
+- else
++ arch_flush_lazy_mmu_mode();
++ } else
+ BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+ return pmd_k;
+ }
diff --git a/trunk/2.6.22/20003_fix-ia32entry-xen.patch b/trunk/2.6.22/20049_xen3-patch-2.6.22.6-7.patch1
index 0e3cf88..011351d 100644
--- a/trunk/2.6.22/20003_fix-ia32entry-xen.patch
+++ b/trunk/2.6.22/20049_xen3-patch-2.6.22.6-7.patch1
@@ -1,9 +1,17 @@
-diff -Nur custom-source-xen.orig/arch/x86_64/ia32/ia32entry-xen.S custom-source-xen/arch/x86_64/ia32/ia32entry-xen.S
---- custom-source-xen.orig/arch/x86_64/ia32/ia32entry-xen.S 2007-10-03 00:32:28.000000000 -0400
-+++ custom-source-xen/arch/x86_64/ia32/ia32entry-xen.S 2007-10-03 00:38:23.000000000 -0400
-@@ -40,6 +40,18 @@
- movq %rax,R8(%rsp)
- .endm
+Subject: Linux 2.6.22.7
+From: Greg Kroah-Hartman <gregkh@suse.de>
+
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+Automatically created from "patches.kernel.org/patch-2.6.22.6-7" by xen-port-patches.py
+
+Index: head-2007-10-08/arch/x86_64/ia32/ia32entry-xen.S
+===================================================================
+--- head-2007-10-08.orig/arch/x86_64/ia32/ia32entry-xen.S 2007-10-09 10:20:22.000000000 +0200
++++ head-2007-10-08/arch/x86_64/ia32/ia32entry-xen.S 2007-10-09 10:20:59.000000000 +0200
+@@ -55,6 +55,18 @@
+ #define __sti sti
+ #endif
+ .macro LOAD_ARGS32 offset
+ movl \offset(%rsp),%r11d
@@ -17,10 +25,10 @@ diff -Nur custom-source-xen.orig/arch/x86_64/ia32/ia32entry-xen.S custom-source-
+ movl \offset+72(%rsp),%eax
+ .endm
+
- #if defined (__XEN_X86_64)
- #include "../kernel/xen_entry.S"
-
-@@ -172,7 +184,7 @@
+ .macro CFI_STARTPROC32 simple
+ CFI_STARTPROC \simple
+ CFI_UNDEFINED r8
+@@ -172,7 +184,7 @@ sysenter_tracesys:
movq $-ENOSYS,RAX(%rsp) /* really needed? */
movq %rsp,%rdi /* &pt_regs -> arg1 */
call syscall_trace_enter
@@ -29,7 +37,7 @@ diff -Nur custom-source-xen.orig/arch/x86_64/ia32/ia32entry-xen.S custom-source-
RESTORE_REST
movl %ebp, %ebp
/* no need to do an access_ok check here because rbp has been
-@@ -277,7 +289,7 @@
+@@ -277,7 +289,7 @@ cstar_tracesys:
movq $-ENOSYS,RAX(%rsp) /* really needed? */
movq %rsp,%rdi /* &pt_regs -> arg1 */
call syscall_trace_enter
@@ -38,7 +46,7 @@ diff -Nur custom-source-xen.orig/arch/x86_64/ia32/ia32entry-xen.S custom-source-
RESTORE_REST
movl RSP-ARGOFFSET(%rsp), %r8d
/* no need to do an access_ok check here because r8 has been
-@@ -360,7 +372,7 @@
+@@ -360,7 +372,7 @@ ia32_tracesys:
movq $-ENOSYS,RAX(%rsp) /* really needed? */
movq %rsp,%rdi /* &pt_regs -> arg1 */
call syscall_trace_enter
diff --git a/trunk/2.6.22/20050_xen3-patch-2.6.22.11-12.patch1 b/trunk/2.6.22/20050_xen3-patch-2.6.22.11-12.patch1
new file mode 100644
index 0000000..a82be3e
--- /dev/null
+++ b/trunk/2.6.22/20050_xen3-patch-2.6.22.11-12.patch1
@@ -0,0 +1,88 @@
+Subject: Linux 2.6.22.12
+From: Greg Kroah-Hartman <gregkh@suse.de>
+
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+Automatically created from "patches.kernel.org/patch-2.6.22.11-12" by xen-port-patches.py
+
+Index: 10.3-2007-11-26/arch/i386/kernel/io_apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/io_apic-xen.c 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/io_apic-xen.c 2007-11-26 14:07:57.000000000 +0100
+@@ -1313,12 +1313,15 @@ static struct irq_chip ioapic_chip;
+ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
+ {
+ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
+- trigger == IOAPIC_LEVEL)
++ trigger == IOAPIC_LEVEL) {
++ irq_desc[irq].status |= IRQ_LEVEL;
+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
+ handle_fasteoi_irq, "fasteoi");
+- else
++ } else {
++ irq_desc[irq].status &= ~IRQ_LEVEL;
+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
+ handle_edge_irq, "edge");
++ }
+ set_intr_gate(vector, interrupt[irq]);
+ }
+ #else
+Index: 10.3-2007-11-26/arch/x86_64/kernel/io_apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/io_apic-xen.c 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/io_apic-xen.c 2007-11-26 14:07:57.000000000 +0100
+@@ -758,12 +758,15 @@ static struct irq_chip ioapic_chip;
+
+ static void ioapic_register_intr(int irq, unsigned long trigger)
+ {
+- if (trigger)
++ if (trigger) {
++ irq_desc[irq].status |= IRQ_LEVEL;
+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
+ handle_fasteoi_irq, "fasteoi");
+- else
++ } else {
++ irq_desc[irq].status &= ~IRQ_LEVEL;
+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
+ handle_edge_irq, "edge");
++ }
+ }
+ #else
+ #define ioapic_register_intr(irq,trigger) ((void)0)
+Index: 10.3-2007-11-26/arch/x86_64/mm/init-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/mm/init-xen.c 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/mm/init-xen.c 2007-11-26 14:08:42.000000000 +0100
+@@ -1217,11 +1217,3 @@ int in_gate_area_no_task(unsigned long a
+ {
+ return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
+ }
+-
+-#ifndef CONFIG_XEN
+-void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
+-{
+- return __alloc_bootmem_core(pgdat->bdata, size,
+- SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0);
+-}
+-#endif
+Index: 10.3-2007-11-26/arch/x86_64/mm/pageattr-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/mm/pageattr-xen.c 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/mm/pageattr-xen.c 2007-11-26 14:07:57.000000000 +0100
+@@ -432,9 +432,14 @@ void global_flush_tlb(void)
+ struct page *pg, *next;
+ struct list_head l;
+
+- down_read(&init_mm.mmap_sem);
++ /*
++ * Write-protect the semaphore, to exclude two contexts
++ * doing a list_replace_init() call in parallel and to
++ * exclude new additions to the deferred_pages list:
++ */
++ down_write(&init_mm.mmap_sem);
+ list_replace_init(&deferred_pages, &l);
+- up_read(&init_mm.mmap_sem);
++ up_write(&init_mm.mmap_sem);
+
+ flush_map(&l);
+
diff --git a/trunk/2.6.22/20051_xen3-x86-early-quirks-unificiation.patch1 b/trunk/2.6.22/20051_xen3-x86-early-quirks-unificiation.patch1
new file mode 100644
index 0000000..db56bd3
--- /dev/null
+++ b/trunk/2.6.22/20051_xen3-x86-early-quirks-unificiation.patch1
@@ -0,0 +1,25 @@
+Subject: x86: Unify i386 and x86-64 early quirks
+
+They were already very similar; just use the same file now.
+
+Cc: lenb@kernel.org
+
+Signed-off-by: Andi Kleen <ak@suse.de>
+
+Automatically created from "patches.arch/x86-early-quirks-unificiation" by xen-port-patches.py
+
+Index: head-2007-08-22/arch/i386/kernel/setup-xen.c
+===================================================================
+--- head-2007-08-22.orig/arch/i386/kernel/setup-xen.c 2007-08-22 09:54:05.000000000 +0200
++++ head-2007-08-22/arch/i386/kernel/setup-xen.c 2007-08-22 09:54:29.000000000 +0200
+@@ -786,9 +786,7 @@ void __init setup_arch(char **cmdline_p)
+ #endif
+
+ #ifdef CONFIG_PCI
+-#ifdef CONFIG_X86_IO_APIC
+- check_acpi_pci(); /* Checks more than just ACPI actually */
+-#endif
++ early_quirks();
+ #endif
+
+ #ifdef CONFIG_ACPI
diff --git a/trunk/2.6.22/20052_xen3-x86-fam10-l3cache.patch1 b/trunk/2.6.22/20052_xen3-x86-fam10-l3cache.patch1
new file mode 100644
index 0000000..b54659c
--- /dev/null
+++ b/trunk/2.6.22/20052_xen3-x86-fam10-l3cache.patch1
@@ -0,0 +1,28 @@
+Subject: i386: Add L3 cache support to AMD CPUID4 emulation
+
+With that an L3 cache is correctly reported in the cache information in /sys
+
+With fixes from Andreas Herrmann and Dean Gaudet and Joachim Deguara
+
+Signed-off-by: Andi Kleen <ak@suse.de>
+
+Automatically created from "patches.arch/x86-fam10-l3cache" by xen-port-patches.py
+
+Index: head-2007-08-22/arch/x86_64/kernel/setup-xen.c
+===================================================================
+--- head-2007-08-22.orig/arch/x86_64/kernel/setup-xen.c 2007-08-22 09:54:14.000000000 +0200
++++ head-2007-08-22/arch/x86_64/kernel/setup-xen.c 2007-08-22 09:56:51.000000000 +0200
+@@ -856,8 +856,11 @@ static void __cpuinit init_amd(struct cp
+ if (c->extended_cpuid_level >= 0x80000008)
+ amd_detect_cmp(c);
+
+- /* Fix cpuid4 emulation for more */
+- num_cache_leaves = 3;
++ if (c->extended_cpuid_level >= 0x80000006 &&
++ (cpuid_edx(0x80000006) & 0xf000))
++ num_cache_leaves = 4;
++ else
++ num_cache_leaves = 3;
+
+ /* RDTSC can be speculated around */
+ clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
diff --git a/trunk/2.6.22/20053_xen3-aux-at_vector_size.patch1 b/trunk/2.6.22/20053_xen3-aux-at_vector_size.patch1
new file mode 100644
index 0000000..ae19711
--- /dev/null
+++ b/trunk/2.6.22/20053_xen3-aux-at_vector_size.patch1
@@ -0,0 +1,47 @@
+From: Olaf Hering <olh@suse.de>
+Subject: Make the size of mm_struct->saved_auxv arch dependent
+References: 310037
+
+include/asm-powerpc/elf.h has 6 entries in ARCH_DLINFO.
+fs/binfmt_elf.c has 14 unconditional NEW_AUX_ENT entries and 2
+conditional NEW_AUX_ENT entries.
+So in the worst case, saved_auxv does not get an AT_NULL entry at the
+end.
+
+The saved_auxv array must be terminated with an AT_NULL entry.
+Make the size of mm_struct->saved_auxv arch dependent,
+based on the number of ARCH_DLINFO entries.
+
+Signed-off-by: Olaf Hering <olh@suse.de>
+
+Automatically created from "patches.fixes/aux-at_vector_size.patch" by xen-port-patches.py
+
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/system.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/system.h 2007-10-22 13:58:57.000000000 +0200
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/system.h 2007-09-25 14:43:19.000000000 +0200
+@@ -9,6 +9,7 @@
+ #include <asm/hypervisor.h>
+
+ #ifdef __KERNEL__
++#define AT_VECTOR_SIZE_ARCH 2
+
+ struct task_struct; /* one of the stranger aspects of C forward declarations.. */
+ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/system.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/system.h 2007-10-22 13:58:57.000000000 +0200
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/system.h 2007-12-04 14:00:57.000000000 +0100
+@@ -11,6 +11,12 @@
+
+ #ifdef __KERNEL__
+
++#ifdef CONFIG_IA32_EMULATION
++#define AT_VECTOR_SIZE_ARCH 2
++#else
++#define AT_VECTOR_SIZE_ARCH 1
++#endif
++
+ #ifdef CONFIG_SMP
+ #define __vcpu_id smp_processor_id()
+ #else
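As a rough illustration of the counting argument in the patch description above (a hedged user-space sketch, not part of the patch; the entry counts are taken from the description, and the only added assumption is that each aux entry is stored as an (id, value) pair of unsigned longs):

/* Worst-case auxv sizing per the description above: ARCH_DLINFO (6 on powerpc)
 * + 14 unconditional + 2 conditional NEW_AUX_ENT entries + 1 AT_NULL terminator. */
#include <stdio.h>

int main(void)
{
	unsigned arch_dlinfo = 6, unconditional = 14, conditional = 2, at_null = 1;
	unsigned entries = arch_dlinfo + unconditional + conditional + at_null;

	printf("worst case: %u aux entries = %u unsigned longs\n",
	       entries, 2 * entries);	/* 23 entries, 46 longs */
	return 0;
}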
diff --git a/trunk/2.6.22/20054_xen-balloon-min.patch1 b/trunk/2.6.22/20054_xen-balloon-min.patch1
new file mode 100644
index 0000000..ddd3cdf
--- /dev/null
+++ b/trunk/2.6.22/20054_xen-balloon-min.patch1
@@ -0,0 +1,77 @@
+From: ksrinivasan@novell.com
+Subject: Don't allow ballooning down a domain below a reasonable limit.
+References: 172482
+
+Reasonable is hard to judge; we don't want to disallow small domains.
+But the system needs a reasonable amount of memory to perform its
+duties, set up tables, etc. If on the other hand, the admin is able
+to set up and boot up correctly a very small domain, there's no point
+in forcing it to be larger.
+We end up with some kind of logarithmic function, approximated.
+
+Memory changes are logged, so making domains too small should at least
+result in a trace.
+
+Signed-off-by: Kurt Garloff <garloff@suse.de>
+
+Index: head-2007-10-08/drivers/xen/balloon/balloon.c
+===================================================================
+--- head-2007-10-08.orig/drivers/xen/balloon/balloon.c 2007-10-09 09:31:50.000000000 +0200
++++ head-2007-10-08/drivers/xen/balloon/balloon.c 2007-10-09 09:32:02.000000000 +0200
+@@ -181,6 +181,38 @@ static unsigned long current_target(void
+ return target;
+ }
+
++static unsigned long minimum_target(void)
++{
++ unsigned long min_pages;
++ unsigned long curr_pages = current_target();
++
++#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
++	/* Simple continuous piecewise linear function:
++ * max MiB -> min MiB gradient
++ * 0 0
++ * 16 16
++ * 32 24
++ * 128 72 (1/2)
++ * 512 168 (1/4)
++ * 2048 360 (1/8)
++ * 8192 552 (1/32)
++ * 32768 1320
++ * 131072 4392
++ */
++ if (max_pfn < MB2PAGES(128))
++ min_pages = MB2PAGES(8) + (max_pfn >> 1);
++ else if (max_pfn < MB2PAGES(512))
++ min_pages = MB2PAGES(40) + (max_pfn >> 2);
++ else if (max_pfn < MB2PAGES(2048))
++ min_pages = MB2PAGES(104) + (max_pfn >> 3);
++ else
++ min_pages = MB2PAGES(296) + (max_pfn >> 5);
++#undef MB2PAGES
++
++ /* Don't enforce growth */
++ return min_pages < curr_pages ? min_pages : curr_pages;
++}
++
+ static int increase_reservation(unsigned long nr_pages)
+ {
+ unsigned long pfn, i, flags;
+@@ -369,6 +401,17 @@ static void balloon_process(struct work_
+ /* Resets the Xen limit, sets new target, and kicks off processing. */
+ void balloon_set_new_target(unsigned long target)
+ {
++ /* First make sure that we are not lowering the value below the
++ * "minimum".
++ */
++ unsigned long min_pages = minimum_target();
++
++ if (target < min_pages)
++ target = min_pages;
++
++ printk(KERN_INFO "Setting mem allocation to %lu kiB\n",
++ PAGES2KB(target));
++
+ /* No need for lock. Not read-modify-write updates. */
+ bs.hard_limit = ~0UL;
+ bs.target_pages = target;
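The piecewise-linear minimum introduced above can be sanity-checked against the table in its comment with a small user-space sketch (hedged: it assumes 4 KiB pages, takes max_pfn as a parameter, and omits the current_target() clamp applied by the real minimum_target()):

/* User-space sketch of the minimum_target() table above; illustration only. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define MB2PAGES(mb) ((unsigned long)(mb) << (20 - PAGE_SHIFT))
#define PAGES2MB(p)  ((p) >> (20 - PAGE_SHIFT))

static unsigned long min_pages(unsigned long max_pfn)
{
	if (max_pfn < MB2PAGES(128))
		return MB2PAGES(8) + (max_pfn >> 1);
	else if (max_pfn < MB2PAGES(512))
		return MB2PAGES(40) + (max_pfn >> 2);
	else if (max_pfn < MB2PAGES(2048))
		return MB2PAGES(104) + (max_pfn >> 3);
	else
		return MB2PAGES(296) + (max_pfn >> 5);
}

int main(void)
{
	/* Matches the comment's table: 512 MiB -> 168 MiB, 2048 MiB -> 360 MiB. */
	printf("512 MiB  -> %lu MiB\n", PAGES2MB(min_pages(MB2PAGES(512))));
	printf("2048 MiB -> %lu MiB\n", PAGES2MB(min_pages(MB2PAGES(2048))));
	return 0;
}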
diff --git a/trunk/2.6.22/20055_xen-modular-blktap.patch1 b/trunk/2.6.22/20055_xen-modular-blktap.patch1
new file mode 100644
index 0000000..4c02f9b
--- /dev/null
+++ b/trunk/2.6.22/20055_xen-modular-blktap.patch1
@@ -0,0 +1,41 @@
+From: ccoffing@novell.com
+Subject: Allow actually using CONFIG_XEN_BLKDEV_TAP=m
+Patch-mainline: obsolete
+
+---
+ drivers/xen/blktap/Makefile | 4 ++--
+ drivers/xen/blktap/blktap.c | 8 +-------
+ drivers/xen/blktap/blocktap.c | 1 +
+ 3 files changed, 4 insertions(+), 9 deletions(-)
+
+--- a/drivers/xen/blktap/Makefile 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/blktap/Makefile 2007-08-27 14:01:27.000000000 -0400
+@@ -1,5 +1,5 @@
+ LINUXINCLUDE += -I../xen/include/public/io
+
+-obj-$(CONFIG_XEN_BLKDEV_TAP) := xenblktap.o
++obj-$(CONFIG_XEN_BLKDEV_TAP) := blktap.o
+
+-xenblktap-y := xenbus.o interface.o blktap.o
++blktap-y := xenbus.o interface.o blocktap.o
+--- a/drivers/xen/blktap/blktap.c 2007-08-27 14:01:26.000000000 -0400
++++ b/drivers/xen/blktap/blktap.c 2007-08-27 14:01:27.000000000 -0400
+@@ -116,13 +116,7 @@ typedef struct tap_blkif {
+ static struct tap_blkif *tapfds[MAX_TAP_DEV];
+ static int blktap_next_minor;
+
+-static int __init set_blkif_reqs(char *str)
+-{
+- get_option(&str, &blkif_reqs);
+- return 1;
+-}
+-__setup("blkif_reqs=", set_blkif_reqs);
+-
++module_param(blkif_reqs, int, 0);
+ /* Run-time switchable: /sys/module/blktap/parameters/ */
+ static unsigned int log_stats = 0;
+ static unsigned int debug_lvl = 0;
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/blktap/blocktap.c 2007-08-27 14:01:27.000000000 -0400
+@@ -0,0 +1 @@
++#include "blktap.c"
diff --git a/trunk/2.6.22/20056_xen-x86-panic-no-reboot.patch1 b/trunk/2.6.22/20056_xen-x86-panic-no-reboot.patch1
new file mode 100644
index 0000000..ccc42e1
--- /dev/null
+++ b/trunk/2.6.22/20056_xen-x86-panic-no-reboot.patch1
@@ -0,0 +1,58 @@
+From: jbeulich@novell.com
+Subject: Don't automatically reboot Dom0 on panic (match native)
+Patch-mainline: obsolete
+
+$subject says it all.
+
+---
+ arch/i386/kernel/setup-xen.c | 10 ++++++----
+ arch/x86_64/kernel/setup-xen.c | 9 +++++----
+ 2 files changed, 11 insertions(+), 8 deletions(-)
+
+--- a/arch/i386/kernel/setup-xen.c 2007-08-27 14:01:27.000000000 -0400
++++ b/arch/i386/kernel/setup-xen.c 2007-08-27 14:01:46.000000000 -0400
+@@ -568,11 +568,13 @@ void __init setup_arch(char **cmdline_p)
+
+ /* Force a quick death if the kernel panics (not domain 0). */
+ extern int panic_timeout;
+- if (!panic_timeout && !is_initial_xendomain())
+- panic_timeout = 1;
++ if (!is_initial_xendomain()) {
++ if (!panic_timeout)
++ panic_timeout = 1;
+
+- /* Register a call for panic conditions. */
+- atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
++ /* Register a call for panic conditions. */
++ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
++ }
+
+ HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
+ HYPERVISOR_vm_assist(VMASST_CMD_enable,
+--- a/arch/x86_64/kernel/setup-xen.c 2007-08-27 14:01:27.000000000 -0400
++++ b/arch/x86_64/kernel/setup-xen.c 2007-08-27 14:01:52.000000000 -0400
+@@ -282,9 +282,6 @@ void __init setup_arch(char **cmdline_p)
+
+ printk(KERN_INFO "Command line: %s\n", boot_command_line);
+
+- /* Register a call for panic conditions. */
+- atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
+-
+ ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
+ screen_info = SCREEN_INFO;
+
+@@ -308,9 +305,13 @@ void __init setup_arch(char **cmdline_p)
+ }
+ xen_start_info->console.domU.mfn = 0;
+ xen_start_info->console.domU.evtchn = 0;
+- } else
++ } else {
+ screen_info.orig_video_isVGA = 0;
+
++ /* Register a call for panic conditions. */
++ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
++ }
++
+ copy_edid();
+ saved_video_mode = SAVED_VIDEO_MODE;
+ bootloader_type = LOADER_TYPE;
diff --git a/trunk/2.6.22/20057_xen-i386-panic-on-oops.patch1 b/trunk/2.6.22/20057_xen-i386-panic-on-oops.patch1
new file mode 100644
index 0000000..ba43a84
--- /dev/null
+++ b/trunk/2.6.22/20057_xen-i386-panic-on-oops.patch1
@@ -0,0 +1,27 @@
+From: jbeulich@novell.com
+Subject: Parse oops=panic (match x86-64)
+Patch-mainline: obsolete
+
+---
+ arch/i386/kernel/traps-xen.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/i386/kernel/traps-xen.c 2007-08-27 14:01:27.000000000 -0400
++++ b/arch/i386/kernel/traps-xen.c 2007-08-27 14:01:51.000000000 -0400
+@@ -1124,6 +1124,16 @@ void smp_trap_init(trap_info_t *trap_ctx
+ }
+ }
+
++static int __init oops_setup(char *s)
++{
++ if (!s)
++ return -EINVAL;
++ if (!strcmp(s, "panic"))
++ panic_on_oops = 1;
++ return 0;
++}
++early_param("oops", oops_setup);
++
+ static int __init kstack_setup(char *s)
+ {
+ kstack_depth_to_print = simple_strtoul(s, NULL, 0);
diff --git a/trunk/2.6.22/20058_xen-x86-kconfig-no-cpu_freq.patch1 b/trunk/2.6.22/20058_xen-x86-kconfig-no-cpu_freq.patch1
new file mode 100644
index 0000000..1a7f2e1
--- /dev/null
+++ b/trunk/2.6.22/20058_xen-x86-kconfig-no-cpu_freq.patch1
@@ -0,0 +1,35 @@
+From: jbeulich@novell.com
+Subject: disallow CPUFREQ config options
+Patch-mainline: obsolete
+
+$subject says it all.
+
+---
+ arch/i386/Kconfig | 2 ++
+ arch/x86_64/Kconfig | 2 ++
+ 2 files changed, 4 insertions(+)
+
+--- a/arch/i386/Kconfig 2007-08-27 14:01:27.000000000 -0400
++++ b/arch/i386/Kconfig 2007-08-27 14:01:48.000000000 -0400
+@@ -1075,7 +1075,9 @@ config APM_REAL_MODE_POWER_OFF
+
+ endif # APM
+
++if !X86_XEN
+ source "arch/i386/kernel/cpu/cpufreq/Kconfig"
++endif
+
+ endmenu
+
+--- a/arch/x86_64/Kconfig 2007-08-27 14:01:27.000000000 -0400
++++ b/arch/x86_64/Kconfig 2007-08-27 14:01:27.000000000 -0400
+@@ -728,7 +728,9 @@ endif
+
+ source "drivers/acpi/Kconfig"
+
++if !X86_64_XEN
+ source "arch/x86_64/kernel/cpufreq/Kconfig"
++endif
+
+ endmenu
+
diff --git a/trunk/2.6.22/20059_xen-configurable-console.patch1 b/trunk/2.6.22/20059_xen-configurable-console.patch1
new file mode 100644
index 0000000..2315cd5
--- /dev/null
+++ b/trunk/2.6.22/20059_xen-configurable-console.patch1
@@ -0,0 +1,181 @@
+From: jbeulich@novell.com
+Subject: Allow Xen console to be independently configured
+Patch-mainline: obsolete
+
+---
+ drivers/xen/Kconfig | 6 +++
+ drivers/xen/console/Makefile | 3 +
+ drivers/xen/console/console.c | 35 ---------------------
+ drivers/xen/console/dom0.c | 70 ++++++++++++++++++++++++++++++++++++++++++
+ include/xen/xencons.h | 14 ++++++++
+ 5 files changed, 92 insertions(+), 36 deletions(-)
+
+--- a/drivers/xen/Kconfig 2007-08-27 14:01:26.000000000 -0400
++++ b/drivers/xen/Kconfig 2007-08-27 14:01:27.000000000 -0400
+@@ -195,6 +195,12 @@ config XEN_KEYBOARD
+ domain. If you've said Y to CONFIG_XEN_FRAMEBUFFER, you probably
+ want to say Y here.
+
++config XEN_CONSOLE
++ bool "Xen virtual console"
++ default y
++ help
++ The Xen virtual console is ...
++
+ config XEN_SCRUB_PAGES
+ bool "Scrub memory before freeing it to Xen"
+ default y
+--- a/drivers/xen/console/Makefile 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/console/Makefile 2007-08-27 14:01:27.000000000 -0400
+@@ -1,2 +1,3 @@
+
+-obj-y := console.o xencons_ring.o
++obj-$(CONFIG_XEN_CONSOLE) := console.o xencons_ring.o
++obj-$(CONFIG_XEN_PRIVILEGED_GUEST) += dom0.o
+--- a/drivers/xen/console/console.c 2007-08-27 14:01:26.000000000 -0400
++++ b/drivers/xen/console/console.c 2007-08-27 14:01:50.000000000 -0400
+@@ -283,41 +283,6 @@ void xencons_force_flush(void)
+ }
+
+
+-void dom0_init_screen_info(const struct dom0_vga_console_info *info)
+-{
+- switch (info->video_type) {
+- case XEN_VGATYPE_TEXT_MODE_3:
+- screen_info.orig_video_mode = 3;
+- screen_info.orig_video_ega_bx = 3;
+- screen_info.orig_video_isVGA = 1;
+- screen_info.orig_video_lines = info->u.text_mode_3.rows;
+- screen_info.orig_video_cols = info->u.text_mode_3.columns;
+- screen_info.orig_x = info->u.text_mode_3.cursor_x;
+- screen_info.orig_y = info->u.text_mode_3.cursor_y;
+- screen_info.orig_video_points =
+- info->u.text_mode_3.font_height;
+- break;
+- case XEN_VGATYPE_VESA_LFB:
+- screen_info.orig_video_isVGA = VIDEO_TYPE_VLFB;
+- screen_info.lfb_width = info->u.vesa_lfb.width;
+- screen_info.lfb_height = info->u.vesa_lfb.height;
+- screen_info.lfb_depth = info->u.vesa_lfb.bits_per_pixel;
+- screen_info.lfb_base = info->u.vesa_lfb.lfb_base;
+- screen_info.lfb_size = info->u.vesa_lfb.lfb_size;
+- screen_info.lfb_linelength = info->u.vesa_lfb.bytes_per_line;
+- screen_info.red_size = info->u.vesa_lfb.red_size;
+- screen_info.red_pos = info->u.vesa_lfb.red_pos;
+- screen_info.green_size = info->u.vesa_lfb.green_size;
+- screen_info.green_pos = info->u.vesa_lfb.green_pos;
+- screen_info.blue_size = info->u.vesa_lfb.blue_size;
+- screen_info.blue_pos = info->u.vesa_lfb.blue_pos;
+- screen_info.rsvd_size = info->u.vesa_lfb.rsvd_size;
+- screen_info.rsvd_pos = info->u.vesa_lfb.rsvd_pos;
+- break;
+- }
+-}
+-
+-
+ /******************** User-space console driver (/dev/console) ************/
+
+ #define DRV(_d) (_d)
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/console/dom0.c 2007-08-27 14:01:27.000000000 -0400
+@@ -0,0 +1,70 @@
++/******************************************************************************
++ * dom0.c
++ *
++ * Dom0 console parameter initialization.
++ *
++ * Copyright (c) 2002-2004, K A Fraser.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/types.h>
++#include <linux/screen_info.h>
++#include <xen/interface/xen.h>
++#include <xen/xencons.h>
++
++void dom0_init_screen_info(const struct dom0_vga_console_info *info)
++{
++ switch (info->video_type) {
++ case XEN_VGATYPE_TEXT_MODE_3:
++ screen_info.orig_video_mode = 3;
++ screen_info.orig_video_ega_bx = 3;
++ screen_info.orig_video_isVGA = 1;
++ screen_info.orig_video_lines = info->u.text_mode_3.rows;
++ screen_info.orig_video_cols = info->u.text_mode_3.columns;
++ screen_info.orig_x = info->u.text_mode_3.cursor_x;
++ screen_info.orig_y = info->u.text_mode_3.cursor_y;
++ screen_info.orig_video_points =
++ info->u.text_mode_3.font_height;
++ break;
++ case XEN_VGATYPE_VESA_LFB:
++ screen_info.orig_video_isVGA = VIDEO_TYPE_VLFB;
++ screen_info.lfb_width = info->u.vesa_lfb.width;
++ screen_info.lfb_height = info->u.vesa_lfb.height;
++ screen_info.lfb_depth = info->u.vesa_lfb.bits_per_pixel;
++ screen_info.lfb_base = info->u.vesa_lfb.lfb_base;
++ screen_info.lfb_size = info->u.vesa_lfb.lfb_size;
++ screen_info.lfb_linelength = info->u.vesa_lfb.bytes_per_line;
++ screen_info.red_size = info->u.vesa_lfb.red_size;
++ screen_info.red_pos = info->u.vesa_lfb.red_pos;
++ screen_info.green_size = info->u.vesa_lfb.green_size;
++ screen_info.green_pos = info->u.vesa_lfb.green_pos;
++ screen_info.blue_size = info->u.vesa_lfb.blue_size;
++ screen_info.blue_pos = info->u.vesa_lfb.blue_pos;
++ screen_info.rsvd_size = info->u.vesa_lfb.rsvd_size;
++ screen_info.rsvd_pos = info->u.vesa_lfb.rsvd_pos;
++ break;
++ }
++}
+--- a/include/xen/xencons.h 2007-08-27 14:01:25.000000000 -0400
++++ b/include/xen/xencons.h 2007-08-27 14:01:27.000000000 -0400
+@@ -1,8 +1,14 @@
+ #ifndef __ASM_XENCONS_H__
+ #define __ASM_XENCONS_H__
+
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ struct dom0_vga_console_info;
+ void dom0_init_screen_info(const struct dom0_vga_console_info *info);
++#else
++#define dom0_init_screen_info(info) ((void)(info))
++#endif
++
++#ifdef CONFIG_XEN_CONSOLE
+
+ void xencons_force_flush(void);
+ void xencons_resume(void);
+@@ -16,4 +22,12 @@ int xencons_ring_send(const char *data,
+
+ void xencons_early_setup(void);
+
++#else
++
++static inline void xencons_force_flush(void) {}
++static inline void xencons_resume(void) {}
++static inline void xencons_early_setup(void) {}
++
++#endif
++
+ #endif /* __ASM_XENCONS_H__ */
diff --git a/trunk/2.6.22/20060_xen-x86_64-init-cleanup.patch1 b/trunk/2.6.22/20060_xen-x86_64-init-cleanup.patch1
new file mode 100644
index 0000000..072c512
--- /dev/null
+++ b/trunk/2.6.22/20060_xen-x86_64-init-cleanup.patch1
@@ -0,0 +1,294 @@
+From: jbeulich@novell.com
+Subject: miscellaneous initialization code cleanup
+Patch-mainline: obsolete
+
+Index: 10.3-2007-10-22/arch/x86_64/kernel/head-xen.S
+===================================================================
+--- 10.3-2007-10-22.orig/arch/x86_64/kernel/head-xen.S 2007-10-22 13:58:57.000000000 +0200
++++ 10.3-2007-10-22/arch/x86_64/kernel/head-xen.S 2007-10-22 14:00:10.000000000 +0200
+@@ -47,15 +47,13 @@ ENTRY(name)
+
+ NEXT_PAGE(init_level4_pgt)
+ .fill 512,8,0
+-
+ /*
+ * We update two pgd entries to make kernel and user pgd consistent
+ * at pgd_populate(). It can be used for kernel modules. So we place
+ * this page here for those cases to avoid memory corruption.
+- * We also use this page to establish the initiali mapping for
++ * We also use this page to establish the initial mapping for the
+ * vsyscall area.
+ */
+-NEXT_PAGE(init_level4_user_pgt)
+ .fill 512,8,0
+
+ NEXT_PAGE(level3_kernel_pgt)
+Index: 10.3-2007-10-22/arch/x86_64/kernel/setup-xen.c
+===================================================================
+--- 10.3-2007-10-22.orig/arch/x86_64/kernel/setup-xen.c 2007-10-22 14:00:08.000000000 +0200
++++ 10.3-2007-10-22/arch/x86_64/kernel/setup-xen.c 2007-10-22 14:00:10.000000000 +0200
+@@ -448,28 +448,18 @@ void __init setup_arch(char **cmdline_p)
+ */
+ acpi_reserve_bootmem();
+ #endif
+-#ifdef CONFIG_XEN
+ #ifdef CONFIG_BLK_DEV_INITRD
++#ifndef CONFIG_XEN
++ if (LOADER_TYPE && INITRD_START) {
++#else
+ if (xen_start_info->mod_start) {
+- if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
+- /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
+- initrd_start = INITRD_START + PAGE_OFFSET;
+- initrd_end = initrd_start+INITRD_SIZE;
+- initrd_below_start_ok = 1;
+- } else {
+- printk(KERN_ERR "initrd extends beyond end of memory "
+- "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+- (unsigned long)(INITRD_START + INITRD_SIZE),
+- (unsigned long)(end_pfn << PAGE_SHIFT));
+- initrd_start = 0;
+- }
+- }
+ #endif
+-#else /* CONFIG_XEN */
+-#ifdef CONFIG_BLK_DEV_INITRD
+- if (LOADER_TYPE && INITRD_START) {
+ if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
++#ifndef CONFIG_XEN
+ reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
++#else
++ initrd_below_start_ok = 1;
++#endif
+ initrd_start = INITRD_START + PAGE_OFFSET;
+ initrd_end = initrd_start+INITRD_SIZE;
+ }
+@@ -482,7 +472,6 @@ void __init setup_arch(char **cmdline_p)
+ }
+ }
+ #endif
+-#endif /* !CONFIG_XEN */
+ #ifdef CONFIG_KEXEC
+ #ifdef CONFIG_XEN
+ xen_machine_kexec_setup_resources();
+Index: 10.3-2007-10-22/arch/x86_64/kernel/setup64-xen.c
+===================================================================
+--- 10.3-2007-10-22.orig/arch/x86_64/kernel/setup64-xen.c 2007-10-22 13:58:57.000000000 +0200
++++ 10.3-2007-10-22/arch/x86_64/kernel/setup64-xen.c 2007-10-22 14:00:10.000000000 +0200
+@@ -127,8 +127,8 @@ void __init setup_per_cpu_areas(void)
+ #ifdef CONFIG_XEN
+ static void switch_pt(void)
+ {
+- xen_pt_switch(__pa(init_level4_pgt));
+- xen_new_user_pt(__pa(init_level4_user_pgt));
++ xen_pt_switch(__pa_symbol(init_level4_pgt));
++ xen_new_user_pt(__pa_symbol(__user_pgd(init_level4_pgt)));
+ }
+
+ void __cpuinit cpu_gdt_init(struct desc_ptr *gdt_descr)
+Index: 10.3-2007-10-22/arch/x86_64/kernel/vsyscall-xen.c
+===================================================================
+--- 10.3-2007-10-22.orig/arch/x86_64/kernel/vsyscall-xen.c 2007-10-22 13:58:57.000000000 +0200
++++ 10.3-2007-10-22/arch/x86_64/kernel/vsyscall-xen.c 2007-10-22 14:00:10.000000000 +0200
+@@ -344,17 +344,6 @@ static void __init map_vsyscall(void)
+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
+ }
+
+-#ifdef CONFIG_XEN
+-static void __init map_vsyscall_user(void)
+-{
+- extern void __set_fixmap_user(enum fixed_addresses, unsigned long, pgprot_t);
+- extern char __vsyscall_0;
+- unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
+-
+- __set_fixmap_user(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
+-}
+-#endif
+-
+ static int __init vsyscall_init(void)
+ {
+ BUG_ON(((unsigned long) &vgettimeofday !=
+@@ -364,7 +353,6 @@ static int __init vsyscall_init(void)
+ BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
+ map_vsyscall();
+ #ifdef CONFIG_XEN
+- map_vsyscall_user();
+ vsyscall_gtod_data.sysctl_enabled = 0; /* disable vgettimeofay() */
+ if (boot_cpu_has(X86_FEATURE_RDTSCP))
+ vgetcpu_mode = VGETCPU_RDTSCP;
+Index: 10.3-2007-10-22/arch/x86_64/mm/init-xen.c
+===================================================================
+--- 10.3-2007-10-22.orig/arch/x86_64/mm/init-xen.c 2007-10-22 13:58:57.000000000 +0200
++++ 10.3-2007-10-22/arch/x86_64/mm/init-xen.c 2007-10-22 14:00:10.000000000 +0200
+@@ -61,6 +61,8 @@ unsigned int __kernel_page_user;
+ EXPORT_SYMBOL(__kernel_page_user);
+ #endif
+
++int after_bootmem;
++
+ extern unsigned long *contiguous_bitmap;
+
+ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+@@ -86,6 +88,11 @@ static void __meminit early_make_page_re
+ if (xen_feature(feature))
+ return;
+
++ if (after_bootmem) {
++ make_page_readonly(va, feature);
++ return;
++ }
++
+ addr = (unsigned long) page[pgd_index(_va)];
+ addr_to_page(addr, page);
+
+@@ -214,8 +221,6 @@ void show_mem(void)
+ printk(KERN_INFO "%lu pages swap cached\n",cached);
+ }
+
+-int after_bootmem;
+-
+ static __init void *spp_getpage(void)
+ {
+ void *ptr;
+@@ -234,14 +239,8 @@ static __init void *spp_getpage(void)
+ return ptr;
+ }
+
+-#define pgd_offset_u(address) (pgd_t *)(init_level4_user_pgt + pgd_index(address))
+-
+-static inline pud_t *pud_offset_u(unsigned long address)
+-{
+- pud_t *pud = level3_user_pgt;
+-
+- return pud + pud_index(address);
+-}
++#define pgd_offset_u(address) (__user_pgd(init_level4_pgt) + pgd_index(address))
++#define pud_offset_u(address) (level3_user_pgt + pud_index(address))
+
+ static __init void set_pte_phys(unsigned long vaddr,
+ unsigned long phys, pgprot_t prot, int user_mode)
+@@ -344,9 +343,6 @@ static __init void set_pte_phys_ma(unsig
+ __flush_tlb_one(vaddr);
+ }
+
+-#define SET_FIXMAP_KERNEL 0
+-#define SET_FIXMAP_USER 1
+-
+ /* NOTE: this is meant to be run only at boot */
+ void __init
+ __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+@@ -359,7 +355,8 @@ __set_fixmap (enum fixed_addresses idx,
+ }
+ switch (idx) {
+ case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
+- set_pte_phys(address, phys, prot, SET_FIXMAP_KERNEL);
++ set_pte_phys(address, phys, prot, 0);
++ set_pte_phys(address, phys, prot, 1);
+ break;
+ default:
+ set_pte_phys_ma(address, phys, prot);
+@@ -367,22 +364,6 @@ __set_fixmap (enum fixed_addresses idx,
+ }
+ }
+
+-/*
+- * This only supports vsyscall area.
+- */
+-void __init
+-__set_fixmap_user (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+-{
+- unsigned long address = __fix_to_virt(idx);
+-
+- if (idx >= __end_of_fixed_addresses) {
+- printk("Invalid __set_fixmap\n");
+- return;
+- }
+-
+- set_pte_phys(address, phys, prot, SET_FIXMAP_USER);
+-}
+-
+ unsigned long __meminitdata table_start, table_end;
+
+ static __meminit void *alloc_static_page(unsigned long *phys)
+@@ -509,9 +490,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned
+ pte = alloc_static_page(&pte_phys);
+ pte_save = pte;
+ for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
+- if ((address >= end) ||
+- ((address >> PAGE_SHIFT) >=
+- xen_start_info->nr_pages)) {
++ if (address >= (after_bootmem
++ ? end
++ : xen_start_info->nr_pages << PAGE_SHIFT)) {
+ __set_pte(pte, __pte(0));
+ continue;
+ }
+@@ -607,13 +588,15 @@ void __init xen_init_pt(void)
+ init_level4_pgt[pgd_index(__START_KERNEL_map)] =
+ __pgd(__pa_symbol(level3_kernel_pgt) | _PAGE_TABLE);
+ level3_kernel_pgt[pud_index(__START_KERNEL_map)] =
+- __pud(__pa_symbol(level2_kernel_pgt) |
+- _KERNPG_TABLE);
+- memcpy((void *)level2_kernel_pgt, page, PAGE_SIZE);
++ __pud(__pa_symbol(level2_kernel_pgt) | _PAGE_TABLE);
++ memcpy(level2_kernel_pgt, page, PAGE_SIZE);
++
++ __user_pgd(init_level4_pgt)[pgd_index(VSYSCALL_START)] =
++ __pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE);
+
+ early_make_page_readonly(init_level4_pgt,
+ XENFEAT_writable_page_tables);
+- early_make_page_readonly(init_level4_user_pgt,
++ early_make_page_readonly(__user_pgd(init_level4_pgt),
+ XENFEAT_writable_page_tables);
+ early_make_page_readonly(level3_kernel_pgt,
+ XENFEAT_writable_page_tables);
+@@ -624,11 +607,8 @@ void __init xen_init_pt(void)
+
+ if (!xen_feature(XENFEAT_writable_page_tables)) {
+ xen_pgd_pin(__pa_symbol(init_level4_pgt));
+- xen_pgd_pin(__pa_symbol(init_level4_user_pgt));
++ xen_pgd_pin(__pa_symbol(__user_pgd(init_level4_pgt)));
+ }
+-
+- set_pgd((pgd_t *)(init_level4_user_pgt + 511),
+- mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
+ }
+
+ static void __init extend_init_mapping(unsigned long tables_space)
+Index: 10.3-2007-10-22/include/asm-x86_64/mach-xen/asm/pgalloc.h
+===================================================================
+--- 10.3-2007-10-22.orig/include/asm-x86_64/mach-xen/asm/pgalloc.h 2007-10-22 13:58:57.000000000 +0200
++++ 10.3-2007-10-22/include/asm-x86_64/mach-xen/asm/pgalloc.h 2007-10-22 14:00:10.000000000 +0200
+@@ -137,8 +137,8 @@ static inline pgd_t *pgd_alloc(struct mm
+ /*
+ * Set level3_user_pgt for vsyscall area
+ */
+- set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START),
+- __pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE));
++ __user_pgd(pgd)[pgd_index(VSYSCALL_START)] =
++ __pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE);
+ return pgd;
+ }
+
+Index: 10.3-2007-10-22/include/asm-x86_64/mach-xen/asm/pgtable.h
+===================================================================
+--- 10.3-2007-10-22.orig/include/asm-x86_64/mach-xen/asm/pgtable.h 2007-10-22 13:58:57.000000000 +0200
++++ 10.3-2007-10-22/include/asm-x86_64/mach-xen/asm/pgtable.h 2007-10-22 14:00:10.000000000 +0200
+@@ -17,7 +17,6 @@
+ #include <asm/hypervisor.h>
+
+ extern pud_t level3_user_pgt[512];
+-extern pud_t init_level4_user_pgt[];
+
+ extern void xen_init_pt(void);
+
+@@ -390,7 +389,7 @@ static inline int pmd_large(pmd_t pte) {
+ #define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT))
+ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+ #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
+-#define pgd_offset_k(address) (pgd_t *)(init_level4_pgt + pgd_index(address))
++#define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
+ #define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
+ #define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
+
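The hunk above drops the explicit `init_level4_user_pgt + 511` slot write in favour of `__user_pgd(init_level4_pgt)[pgd_index(VSYSCALL_START)]`, using the pgd_index() macro shown in the pgtable.h hunk. With typical x86-64 values (these constants are assumed here, not taken from the patched headers: PGDIR_SHIFT 39, PTRS_PER_PGD 512, VSYSCALL_START 0xffffffffff600000), pgd_index(VSYSCALL_START) works out to 511, so the two forms address the same pgd slot. A minimal user-space sketch of that arithmetic:

#include <assert.h>
#include <stdio.h>

/* Assumed x86-64 constants; pgd_index() mirrors the macro in the hunk above. */
#define PGDIR_SHIFT      39
#define PTRS_PER_PGD     512
#define VSYSCALL_START   0xffffffffff600000UL

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

int main(void)
{
    unsigned long idx = pgd_index(VSYSCALL_START);

    /* The removed code wrote to init_level4_user_pgt + 511 directly. */
    assert(idx == 511);
    printf("pgd_index(VSYSCALL_START) = %lu\n", idx);
    return 0;
}
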
diff --git a/trunk/2.6.22/20061_xen-balloon-max-target.patch1 b/trunk/2.6.22/20061_xen-balloon-max-target.patch1
new file mode 100644
index 0000000..c3aaebe
--- /dev/null
+++ b/trunk/2.6.22/20061_xen-balloon-max-target.patch1
@@ -0,0 +1,32 @@
+From: ccoffing@novell.com
+Subject: Expose min/max limits of domain ballooning
+Patch-mainline: obsolete
+References: 152667, 184727
+
+Index: head-2007-10-08/drivers/xen/balloon/balloon.c
+===================================================================
+--- head-2007-10-08.orig/drivers/xen/balloon/balloon.c 2007-10-09 09:32:02.000000000 +0200
++++ head-2007-10-08/drivers/xen/balloon/balloon.c 2007-10-09 09:32:20.000000000 +0200
+@@ -90,6 +90,7 @@ extern unsigned long totalhigh_pages;
+ #undef totalhigh_pages
+ #define totalhigh_pages(op)
+ #endif
++extern unsigned long num_physpages;
+
+ /* List of ballooned pages, threaded through the mem_map array. */
+ static LIST_HEAD(ballooned_pages);
+@@ -489,11 +490,14 @@ static int balloon_read(char *page, char
+ page,
+ "Current allocation: %8lu kB\n"
+ "Requested target: %8lu kB\n"
++ "Minimum target: %8lu kB\n"
++ "Maximum target: %8lu kB\n"
+ "Low-mem balloon: %8lu kB\n"
+ "High-mem balloon: %8lu kB\n"
+ "Driver pages: %8lu kB\n"
+ "Xen hard limit: ",
+ PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages),
++ PAGES2KB(minimum_target()), PAGES2KB(num_physpages),
+ PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high),
+ PAGES2KB(bs.driver_pages));
+
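The hunk above extends the /proc/xen/balloon read-out with "Minimum target" and "Maximum target" lines, reported in kB like the existing fields. A small user-space sketch of the resulting formatting; the values, PAGE_SHIFT and the stand-ins for minimum_target()/num_physpages are made up for illustration, and PAGES2KB is defined locally to mirror a pages-to-kB conversion:

#include <stdio.h>

/* Illustrative only: PAGES2KB models a pages-to-kB conversion, and the
 * values below stand in for bs.*, minimum_target() and num_physpages. */
#define PAGE_SHIFT 12
#define PAGES2KB(_p) ((_p) << (PAGE_SHIFT - 10))

int main(void)
{
    unsigned long current_pages = 65536;   /* 256 MB */
    unsigned long target_pages  = 131072;  /* 512 MB */
    unsigned long min_pages     = 16384;   /*  64 MB, minimum_target() stand-in */
    unsigned long max_pages     = 262144;  /*   1 GB, num_physpages stand-in    */

    printf("Current allocation: %8lu kB\n"
           "Requested target:   %8lu kB\n"
           "Minimum target:     %8lu kB\n"
           "Maximum target:     %8lu kB\n",
           PAGES2KB(current_pages), PAGES2KB(target_pages),
           PAGES2KB(min_pages), PAGES2KB(max_pages));
    return 0;
}
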
diff --git a/trunk/2.6.22/20062_xen-x86-dcr-fallback.patch1 b/trunk/2.6.22/20062_xen-x86-dcr-fallback.patch1
new file mode 100644
index 0000000..3054100
--- /dev/null
+++ b/trunk/2.6.22/20062_xen-x86-dcr-fallback.patch1
@@ -0,0 +1,158 @@
+Subject: Add fallback when XENMEM_exchange fails to replace contiguous region
+From: jbeulich@novell.com
+Patch-mainline: obsolete
+References: 181869
+
+This avoids losing precious special memory (e.g. machine-contiguous regions
+set up for DMA) in places where any memory can be used.
+
+Index: 10.3-2007-10-22/arch/i386/mm/hypervisor.c
+===================================================================
+--- 10.3-2007-10-22.orig/arch/i386/mm/hypervisor.c 2007-10-22 13:49:28.000000000 +0200
++++ 10.3-2007-10-22/arch/i386/mm/hypervisor.c 2007-10-22 14:00:22.000000000 +0200
+@@ -41,6 +41,7 @@
+ #include <xen/interface/memory.h>
+ #include <linux/module.h>
+ #include <linux/percpu.h>
++#include <linux/highmem.h>
+ #include <asm/tlbflush.h>
+
+ #ifdef CONFIG_X86_64
+@@ -442,6 +443,83 @@ void xen_destroy_contiguous_region(unsig
+ BUG();
+
+ balloon_unlock(flags);
++
++ if (unlikely(!success)) {
++ /* Try hard to get the special memory back to Xen. */
++ exchange.in.extent_order = 0;
++ set_xen_guest_handle(exchange.in.extent_start, &in_frame);
++
++ for (i = 0; i < (1UL<<order); i++) {
++ struct page *page = alloc_page(__GFP_HIGHMEM);
++ unsigned long pfn;
++ mmu_update_t mmu;
++ unsigned int j = 0;
++
++ if (!page) {
++ printk(KERN_WARNING "Xen and kernel out of memory "
++ "while trying to release an order %u "
++ "contiguous region\n", order);
++ break;
++ }
++ pfn = page_to_pfn(page);
++
++ balloon_lock(flags);
++
++ if (!PageHighMem(page)) {
++ void *v = __va(pfn << PAGE_SHIFT);
++
++ scrub_pages(v, 1);
++ MULTI_update_va_mapping(cr_mcl + j, (unsigned long)v,
++ __pte_ma(0), UVMF_INVLPG|UVMF_ALL);
++ ++j;
++ }
++#ifdef CONFIG_XEN_SCRUB_PAGES
++ else {
++ scrub_pages(kmap(page), 1);
++ kunmap(page);
++ kmap_flush_unused();
++ }
++#endif
++
++ frame = pfn_to_mfn(pfn);
++ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++
++ MULTI_update_va_mapping(cr_mcl + j, vstart,
++ pfn_pte_ma(frame, PAGE_KERNEL),
++ UVMF_INVLPG|UVMF_ALL);
++ ++j;
++
++ pfn = __pa(vstart) >> PAGE_SHIFT;
++ set_phys_to_machine(pfn, frame);
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ mmu.ptr = ((uint64_t)frame << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
++ mmu.val = pfn;
++ cr_mcl[j].op = __HYPERVISOR_mmu_update;
++ cr_mcl[j].args[0] = (unsigned long)&mmu;
++ cr_mcl[j].args[1] = 1;
++ cr_mcl[j].args[2] = 0;
++ cr_mcl[j].args[3] = DOMID_SELF;
++ ++j;
++ }
++
++ cr_mcl[j].op = __HYPERVISOR_memory_op;
++ cr_mcl[j].args[0] = XENMEM_decrease_reservation;
++ cr_mcl[j].args[1] = (unsigned long)&exchange.in;
++
++ if (HYPERVISOR_multicall(cr_mcl, j + 1))
++ BUG();
++ BUG_ON(cr_mcl[j].result != 1);
++ while (j--)
++ BUG_ON(cr_mcl[j].result != 0);
++
++ balloon_unlock(flags);
++
++ free_empty_pages(&page, 1);
++
++ in_frame++;
++ vstart += PAGE_SIZE;
++ }
++ }
+ }
+ EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
+
+Index: 10.3-2007-10-22/drivers/xen/balloon/balloon.c
+===================================================================
+--- 10.3-2007-10-22.orig/drivers/xen/balloon/balloon.c 2007-10-22 14:00:20.000000000 +0200
++++ 10.3-2007-10-22/drivers/xen/balloon/balloon.c 2007-10-22 14:00:22.000000000 +0200
+@@ -680,7 +680,7 @@ struct page **alloc_empty_pages_and_page
+ goto out;
+ }
+
+-void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
++static void _free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages, int free_vec)
+ {
+ unsigned long flags;
+ int i;
+@@ -695,11 +695,24 @@ void free_empty_pages_and_pagevec(struct
+ }
+ balloon_unlock(flags);
+
+- kfree(pagevec);
++ if (free_vec)
++ kfree(pagevec);
++ else
++ totalram_pages = bs.current_pages -= nr_pages;
+
+ schedule_work(&balloon_worker);
+ }
+
++void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
++{
++ _free_empty_pages_and_pagevec(pagevec, nr_pages, 1);
++}
++
++void free_empty_pages(struct page **pagevec, int nr_pages)
++{
++ _free_empty_pages_and_pagevec(pagevec, nr_pages, 0);
++}
++
+ void balloon_release_driver_page(struct page *page)
+ {
+ unsigned long flags;
+Index: 10.3-2007-10-22/include/xen/balloon.h
+===================================================================
+--- 10.3-2007-10-22.orig/include/xen/balloon.h 2007-10-22 13:48:11.000000000 +0200
++++ 10.3-2007-10-22/include/xen/balloon.h 2007-10-22 14:00:22.000000000 +0200
+@@ -44,6 +44,10 @@ void balloon_update_driver_allowance(lon
+ struct page **alloc_empty_pages_and_pagevec(int nr_pages);
+ void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages);
+
++/* Free an empty page range (not allocated through
++ alloc_empty_pages_and_pagevec), adding to the balloon. */
++void free_empty_pages(struct page **pagevec, int nr_pages);
++
+ void balloon_release_driver_page(struct page *page);
+
+ /*
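In outline, the fallback added above walks the failed region page by page: allocate a replacement page anywhere, scrub it, switch the region's virtual mapping over to the replacement's machine frame, hand the region's original (special) frame back to Xen via XENMEM_decrease_reservation, and finally balloon out the now frame-less replacement through the new free_empty_pages(). A schematic, compilable sketch of that loop with every Xen interface stubbed out; none of these helpers is the real kernel or hypercall API:

#include <stdio.h>

/* Schematic only: these stand-ins model the steps of the fallback loop above. */
typedef unsigned long frame_t;

static frame_t alloc_replacement_page(void)       { static frame_t f = 0x1000; return f++; }
static void    scrub(frame_t f)                   { printf("scrub frame %#lx\n", f); }
static void    remap_region_page(unsigned long va, frame_t f)
                                                  { printf("map va %#lx -> frame %#lx\n", va, f); }
static void    decrease_reservation(frame_t f)    { printf("return frame %#lx to Xen\n", f); }
static void    balloon_out_replacement(frame_t f) { printf("balloon out page that held %#lx\n", f); }

int main(void)
{
    unsigned int  order     = 2;                     /* 4-page region, for illustration */
    unsigned long vstart    = 0xffff880000000000UL;  /* region's virtual start (made up) */
    frame_t       in_frame  = 0x20000;               /* first original (special) frame   */
    unsigned long page_size = 4096;

    for (unsigned long i = 0; i < (1UL << order); i++) {
        frame_t repl = alloc_replacement_page();     /* any frame will do from here on */
        scrub(repl);
        remap_region_page(vstart, repl);             /* region page now backed by repl  */
        decrease_reservation(in_frame);              /* original frame goes back to Xen */
        balloon_out_replacement(repl);
        in_frame++;
        vstart += page_size;
    }
    return 0;
}
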
diff --git a/trunk/2.6.22/20063_xen-x86-consistent-nmi.patch1 b/trunk/2.6.22/20063_xen-x86-consistent-nmi.patch1
new file mode 100644
index 0000000..859498d
--- /dev/null
+++ b/trunk/2.6.22/20063_xen-x86-consistent-nmi.patch1
@@ -0,0 +1,345 @@
+From: jbeulich@novell.com
+Subject: make i386 and x86-64 NMI code consistent, disable all APIC-related stuff under Xen
+Patch-mainline: obsolete
+References: 191115
+
+Index: head-2007-09-03/arch/i386/kernel/cpu/Makefile
+===================================================================
+--- head-2007-09-03.orig/arch/i386/kernel/cpu/Makefile 2007-09-03 09:44:16.000000000 +0200
++++ head-2007-09-03/arch/i386/kernel/cpu/Makefile 2007-09-03 09:53:53.000000000 +0200
+@@ -22,5 +22,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-
+
+ ifdef CONFIG_XEN
+ include $(srctree)/scripts/Makefile.xen
++n-obj-xen := perfctr-watchdog.o
++obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
+ obj-y := $(call cherrypickxen, $(obj-y), $(src))
+ endif
+Index: head-2007-09-03/arch/i386/kernel/nmi.c
+===================================================================
+--- head-2007-09-03.orig/arch/i386/kernel/nmi.c 2007-09-03 09:42:53.000000000 +0200
++++ head-2007-09-03/arch/i386/kernel/nmi.c 2007-09-03 09:53:53.000000000 +0200
+@@ -30,7 +30,15 @@
+
+ #include "mach_traps.h"
+
++#ifdef CONFIG_SYSCTL
+ int unknown_nmi_panic;
++static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
++#endif
++
++extern void die_nmi(struct pt_regs *, const char *msg);
++
++#ifndef CONFIG_XEN
++
+ int nmi_watchdog_enabled;
+
+ static cpumask_t backtrace_mask = CPU_MASK_NONE;
+@@ -48,9 +56,6 @@ static unsigned int nmi_hz = HZ;
+
+ static DEFINE_PER_CPU(short, wd_enabled);
+
+-/* local prototypes */
+-static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
+-
+ static int endflag __initdata = 0;
+
+ #ifdef CONFIG_SMP
+@@ -315,8 +320,6 @@ void touch_nmi_watchdog (void)
+ }
+ EXPORT_SYMBOL(touch_nmi_watchdog);
+
+-extern void die_nmi(struct pt_regs *, const char *msg);
+-
+ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
+ {
+
+@@ -387,6 +390,8 @@ __kprobes int nmi_watchdog_tick(struct p
+ return rc;
+ }
+
++#endif /* CONFIG_XEN */
++
+ int do_nmi_callback(struct pt_regs * regs, int cpu)
+ {
+ #ifdef CONFIG_SYSCTL
+@@ -408,6 +413,7 @@ static int unknown_nmi_panic_callback(st
+ return 0;
+ }
+
++#ifndef CONFIG_XEN
+ /*
+ * proc handler for /proc/sys/kernel/nmi
+ */
+@@ -446,9 +452,11 @@ int proc_nmi_enabled(struct ctl_table *t
+ }
+ return 0;
+ }
++#endif
+
+ #endif
+
++#ifndef CONFIG_XEN
+ void __trigger_all_cpu_backtrace(void)
+ {
+ int i;
+@@ -464,3 +472,4 @@ void __trigger_all_cpu_backtrace(void)
+
+ EXPORT_SYMBOL(nmi_active);
+ EXPORT_SYMBOL(nmi_watchdog);
++#endif
+Index: head-2007-09-03/arch/i386/kernel/traps-xen.c
+===================================================================
+--- head-2007-09-03.orig/arch/i386/kernel/traps-xen.c 2007-09-03 09:53:48.000000000 +0200
++++ head-2007-09-03/arch/i386/kernel/traps-xen.c 2007-09-03 09:53:53.000000000 +0200
+@@ -696,12 +696,14 @@ static __kprobes void default_do_nmi(str
+ == NOTIFY_STOP)
+ return;
+ #ifdef CONFIG_X86_LOCAL_APIC
++#ifndef CONFIG_XEN
+ /*
+ * Ok, so this is none of the documented NMI sources,
+ * so it must be the NMI watchdog.
+ */
+ if (nmi_watchdog_tick(regs, reason))
+ return;
++#endif
+ if (!do_nmi_callback(regs, smp_processor_id()))
+ #endif
+ unknown_nmi_error(reason, regs);
+Index: head-2007-09-03/arch/x86_64/kernel/Makefile
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/kernel/Makefile 2007-09-03 09:53:19.000000000 +0200
++++ head-2007-09-03/arch/x86_64/kernel/Makefile 2007-09-03 09:53:53.000000000 +0200
+@@ -68,7 +68,7 @@ pci-dma-y += ../../i386/kernel/pci-dma
+ microcode-$(subst m,y,$(CONFIG_MICROCODE)) := ../../i386/kernel/microcode-xen.o
+ quirks-y := ../../i386/kernel/quirks-xen.o
+
+-n-obj-xen := early-quirks.o i8259.o reboot.o i8237.o smpboot.o trampoline.o tsc.o tsc_sync.o
++n-obj-xen := early-quirks.o i8259.o reboot.o i8237.o perfctr-watchdog.o smpboot.o trampoline.o tsc.o tsc_sync.o
+
+ include $(srctree)/scripts/Makefile.xen
+
+Index: head-2007-09-03/arch/x86_64/kernel/nmi.c
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/kernel/nmi.c 2007-09-03 09:42:53.000000000 +0200
++++ head-2007-09-03/arch/x86_64/kernel/nmi.c 2007-09-03 09:53:53.000000000 +0200
+@@ -28,10 +28,17 @@
+ #include <asm/proto.h>
+ #include <asm/mce.h>
+
++#ifdef CONFIG_SYSCTL
+ int unknown_nmi_panic;
+-int nmi_watchdog_enabled;
++static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
++#endif
++
+ int panic_on_unrecovered_nmi;
+
++#ifndef CONFIG_XEN
++
++int nmi_watchdog_enabled;
++
+ static cpumask_t backtrace_mask = CPU_MASK_NONE;
+
+ /* nmi_active:
+@@ -48,9 +55,6 @@ static unsigned int nmi_hz = HZ;
+
+ static DEFINE_PER_CPU(short, wd_enabled);
+
+-/* local prototypes */
+-static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
+-
+ /* Run after command line and cpu_init init, but before all other checks */
+ void nmi_watchdog_default(void)
+ {
+@@ -382,6 +386,8 @@ int __kprobes nmi_watchdog_tick(struct p
+ return rc;
+ }
+
++#endif /* CONFIG_XEN */
++
+ asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
+ {
+ nmi_enter();
+@@ -411,6 +417,7 @@ static int unknown_nmi_panic_callback(st
+ return 0;
+ }
+
++#ifndef CONFIG_XEN
+ /*
+ * proc handler for /proc/sys/kernel/nmi
+ */
+@@ -445,9 +452,11 @@ int proc_nmi_enabled(struct ctl_table *t
+ }
+ return 0;
+ }
++#endif
+
+ #endif
+
++#ifndef CONFIG_XEN
+ void __trigger_all_cpu_backtrace(void)
+ {
+ int i;
+@@ -464,3 +473,4 @@ void __trigger_all_cpu_backtrace(void)
+ EXPORT_SYMBOL(nmi_active);
+ EXPORT_SYMBOL(nmi_watchdog);
+ EXPORT_SYMBOL(touch_nmi_watchdog);
++#endif /* CONFIG_XEN */
+Index: head-2007-09-03/arch/x86_64/kernel/traps-xen.c
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/kernel/traps-xen.c 2007-09-03 09:53:45.000000000 +0200
++++ head-2007-09-03/arch/x86_64/kernel/traps-xen.c 2007-09-03 09:53:53.000000000 +0200
+@@ -780,7 +780,7 @@ asmlinkage __kprobes void default_do_nmi
+ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
+ == NOTIFY_STOP)
+ return;
+-#ifdef CONFIG_X86_LOCAL_APIC
++#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
+ /*
+ * Ok, so this is none of the documented NMI sources,
+ * so it must be the NMI watchdog.
+Index: head-2007-09-03/include/asm-i386/irq.h
+===================================================================
+--- head-2007-09-03.orig/include/asm-i386/irq.h 2007-09-03 09:42:53.000000000 +0200
++++ head-2007-09-03/include/asm-i386/irq.h 2007-09-03 09:53:53.000000000 +0200
+@@ -20,7 +20,7 @@ static __inline__ int irq_canonicalize(i
+ return ((irq == 2) ? 9 : irq);
+ }
+
+-#ifdef CONFIG_X86_LOCAL_APIC
++#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
+ # define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
+ #endif
+
+Index: head-2007-09-03/include/asm-i386/nmi.h
+===================================================================
+--- head-2007-09-03.orig/include/asm-i386/nmi.h 2007-09-03 09:42:53.000000000 +0200
++++ head-2007-09-03/include/asm-i386/nmi.h 2007-09-03 09:53:53.000000000 +0200
+@@ -7,8 +7,6 @@
+ #include <linux/pm.h>
+ #include <asm/irq.h>
+
+-#ifdef ARCH_HAS_NMI_WATCHDOG
+-
+ /**
+ * do_nmi_callback
+ *
+@@ -17,6 +15,8 @@
+ */
+ int do_nmi_callback(struct pt_regs *regs, int cpu);
+
++#ifdef ARCH_HAS_NMI_WATCHDOG
++
+ extern int nmi_watchdog_enabled;
+ extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
+ extern int avail_to_resrv_perfctr_nmi(unsigned int);
+@@ -43,13 +43,10 @@ struct ctl_table;
+ struct file;
+ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
+ void __user *, size_t *, loff_t *);
+-extern int unknown_nmi_panic;
+
+ void __trigger_all_cpu_backtrace(void);
+ #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+
+-#endif
+-
+ void lapic_watchdog_stop(void);
+ int lapic_watchdog_init(unsigned nmi_hz);
+ int lapic_wd_event(unsigned nmi_hz);
+@@ -58,4 +55,8 @@ int lapic_watchdog_ok(void);
+ void disable_lapic_nmi_watchdog(void);
+ void enable_lapic_nmi_watchdog(void);
+
++#endif
++
++extern int unknown_nmi_panic;
++
+ #endif /* ASM_NMI_H */
+Index: head-2007-09-03/include/asm-x86_64/mach-xen/asm/irq.h
+===================================================================
+--- head-2007-09-03.orig/include/asm-x86_64/mach-xen/asm/irq.h 2007-09-03 09:42:53.000000000 +0200
++++ head-2007-09-03/include/asm-x86_64/mach-xen/asm/irq.h 2007-09-03 09:53:53.000000000 +0200
+@@ -20,7 +20,7 @@ static __inline__ int irq_canonicalize(i
+ return ((irq == 2) ? 9 : irq);
+ }
+
+-#ifdef CONFIG_X86_LOCAL_APIC
++#ifndef CONFIG_XEN
+ #define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
+ #endif
+
+Index: head-2007-09-03/include/asm-x86_64/mach-xen/asm/nmi.h
+===================================================================
+--- head-2007-09-03.orig/include/asm-x86_64/mach-xen/asm/nmi.h 2007-09-03 09:53:30.000000000 +0200
++++ head-2007-09-03/include/asm-x86_64/mach-xen/asm/nmi.h 2007-09-03 09:53:53.000000000 +0200
+@@ -59,6 +59,9 @@ static inline unsigned char get_nmi_reas
+
+ extern int panic_on_timeout;
+ extern int unknown_nmi_panic;
++
++#ifndef CONFIG_XEN
++
+ extern int nmi_watchdog_enabled;
+
+ extern int check_nmi_watchdog(void);
+@@ -105,4 +108,6 @@ int lapic_watchdog_ok(void);
+ void disable_lapic_nmi_watchdog(void);
+ void enable_lapic_nmi_watchdog(void);
+
++#endif
++
+ #endif /* ASM_NMI_H */
+Index: head-2007-09-03/include/asm-x86_64/mach-xen/setup_arch_post.h
+===================================================================
+--- head-2007-09-03.orig/include/asm-x86_64/mach-xen/setup_arch_post.h 2007-09-03 09:42:53.000000000 +0200
++++ head-2007-09-03/include/asm-x86_64/mach-xen/setup_arch_post.h 2007-09-03 09:53:53.000000000 +0200
+@@ -27,12 +27,10 @@ static void __init machine_specific_arch
+ .type = CALLBACKTYPE_syscall,
+ .address = (unsigned long)system_call,
+ };
+-#ifdef CONFIG_X86_LOCAL_APIC
+ static struct callback_register __initdata nmi_cb = {
+ .type = CALLBACKTYPE_nmi,
+ .address = (unsigned long)nmi,
+ };
+-#endif
+
+ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
+ if (ret == 0)
+@@ -48,7 +46,6 @@ static void __init machine_specific_arch
+ #endif
+ BUG_ON(ret);
+
+-#ifdef CONFIG_X86_LOCAL_APIC
+ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
+ #if CONFIG_XEN_COMPAT <= 0x030002
+ if (ret == -ENOSYS) {
+@@ -59,5 +56,4 @@ static void __init machine_specific_arch
+ HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
+ }
+ #endif
+-#endif
+ }
+Index: head-2007-09-03/kernel/sysctl.c
+===================================================================
+--- head-2007-09-03.orig/kernel/sysctl.c 2007-09-03 09:42:53.000000000 +0200
++++ head-2007-09-03/kernel/sysctl.c 2007-09-03 09:53:53.000000000 +0200
+@@ -522,6 +522,7 @@ static ctl_table kern_table[] = {
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
++#ifndef CONFIG_XEN
+ {
+ .ctl_name = KERN_NMI_WATCHDOG,
+ .procname = "nmi_watchdog",
+@@ -531,6 +532,7 @@ static ctl_table kern_table[] = {
+ .proc_handler = &proc_nmi_enabled,
+ },
+ #endif
++#endif
+ #if defined(CONFIG_X86)
+ {
+ .ctl_name = KERN_PANIC_ON_NMI,
diff --git a/trunk/2.6.22/20064_xen-x86-no-lapic.patch1 b/trunk/2.6.22/20064_xen-x86-no-lapic.patch1
new file mode 100644
index 0000000..0306b95
--- /dev/null
+++ b/trunk/2.6.22/20064_xen-x86-no-lapic.patch1
@@ -0,0 +1,1426 @@
+From: jbeulich@novell.com
+Subject: Disallow all accesses to the local APIC page
+Patch-mainline: obsolete
+References: 191115
+
+Index: 10.3-2007-11-26/arch/i386/kernel/acpi/boot-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/acpi/boot-xen.c 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/acpi/boot-xen.c 2007-11-26 14:09:27.000000000 +0100
+@@ -85,7 +85,7 @@ int acpi_sci_override_gsi __initdata;
+ int acpi_skip_timer_override __initdata;
+ int acpi_use_timer_override __initdata;
+
+-#ifdef CONFIG_X86_LOCAL_APIC
++#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
+ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
+ #endif
+
+@@ -227,12 +227,14 @@ static int __init acpi_parse_madt(struct
+ return -ENODEV;
+ }
+
++#ifndef CONFIG_XEN
+ if (madt->address) {
+ acpi_lapic_addr = (u64) madt->address;
+
+ printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
+ madt->address);
+ }
++#endif
+
+ acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
+
+@@ -268,6 +270,7 @@ static int __init
+ acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
+ const unsigned long end)
+ {
++#ifndef CONFIG_XEN
+ struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
+
+ lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;
+@@ -276,6 +279,7 @@ acpi_parse_lapic_addr_ovr(struct acpi_su
+ return -EINVAL;
+
+ acpi_lapic_addr = lapic_addr_ovr->address;
++#endif
+
+ return 0;
+ }
+@@ -724,7 +728,9 @@ static int __init acpi_parse_madt_lapic_
+ return count;
+ }
+
++#ifndef CONFIG_XEN
+ mp_register_lapic_address(acpi_lapic_addr);
++#endif
+
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic,
+ MAX_APICS);
+Index: 10.3-2007-11-26/arch/i386/kernel/apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/apic-xen.c 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/apic-xen.c 2007-11-26 14:09:27.000000000 +0100
+@@ -1,81 +1,37 @@
+ /*
+- * Local APIC handling, local APIC timers
+- *
+- * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
+- *
+- * Fixes
+- * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
+- * thanks to Eric Gilmore
+- * and Rolf G. Tews
+- * for testing these extensively.
+- * Maciej W. Rozycki : Various updates and fixes.
+- * Mikael Pettersson : Power Management for UP-APIC.
+- * Pavel Machek and
+- * Mikael Pettersson : PM converted to driver model.
++ * Local APIC handling stubs
+ */
+
+ #include <linux/init.h>
+
+-#include <linux/mm.h>
+-#include <linux/delay.h>
+-#include <linux/bootmem.h>
+-#include <linux/interrupt.h>
+-#include <linux/mc146818rtc.h>
+-#include <linux/kernel_stat.h>
+-#include <linux/sysdev.h>
+-#include <linux/cpu.h>
+-#include <linux/clockchips.h>
+-#include <linux/acpi_pmtmr.h>
+-#include <linux/module.h>
+-
+-#include <asm/atomic.h>
+-#include <asm/smp.h>
+-#include <asm/mtrr.h>
+-#include <asm/mpspec.h>
+-#include <asm/desc.h>
+-#include <asm/arch_hooks.h>
+-#include <asm/hpet.h>
+-#include <asm/i8253.h>
+-#include <asm/nmi.h>
+-
+-#include <mach_apic.h>
+-#include <mach_apicdef.h>
+-#include <mach_ipi.h>
+-
+-#include "io_ports.h"
+-
+-#ifndef CONFIG_XEN
+-/*
+- * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
+- * IPIs in place of local APIC timers
+- */
+-static cpumask_t timer_bcast_ipi;
+-#endif
+-
+-/*
+- * Knob to control our willingness to enable the local APIC.
+- */
++#include <asm/hw_irq.h>
+
+ /*
+ * Debug level, exported for io_apic.c
+ */
+ int apic_verbosity;
+
+-#ifndef CONFIG_XEN
+-static int modern_apic(void)
++static int __init apic_set_verbosity(char *str)
+ {
+- /* AMD systems use old APIC versions, so check the CPU */
+- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+- boot_cpu_data.x86 >= 0xf)
+- return 1;
+- return lapic_get_version() >= 0x14;
++ if (strcmp("debug", str) == 0)
++ apic_verbosity = APIC_DEBUG;
++ else if (strcmp("verbose", str) == 0)
++ apic_verbosity = APIC_VERBOSE;
++ return 1;
+ }
+-#endif /* !CONFIG_XEN */
+
+-int get_physical_broadcast(void)
++__setup("apic=", apic_set_verbosity);
++
++#ifdef CONFIG_X86_64
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * each architecture has to answer this themselves.
++ */
++void ack_bad_irq(unsigned int irq)
+ {
+- return 0xff;
++ printk("unexpected IRQ trap at irq %02x\n", irq);
+ }
++#endif
+
+ int setup_profiling_timer(unsigned int multiplier)
+ {
+Index: 10.3-2007-11-26/arch/i386/kernel/cpu/amd.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/cpu/amd.c 2007-11-26 14:07:20.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/cpu/amd.c 2007-11-26 14:09:27.000000000 +0100
+@@ -23,7 +23,7 @@
+ extern void vide(void);
+ __asm__(".align 4\nvide: ret");
+
+-#ifdef CONFIG_X86_LOCAL_APIC
++#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
+ #define ENABLE_C1E_MASK 0x18000000
+ #define CPUID_PROCESSOR_SIGNATURE 1
+ #define CPUID_XFAM 0x0ff00000
+@@ -282,7 +282,7 @@ static void __cpuinit init_amd(struct cp
+ num_cache_leaves = 3;
+ }
+
+-#ifdef CONFIG_X86_LOCAL_APIC
++#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
+ if (amd_apic_timer_broken())
+ local_apic_timer_disabled = 1;
+ #endif
+Index: 10.3-2007-11-26/arch/i386/kernel/io_apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/io_apic-xen.c 2007-11-26 14:07:57.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/io_apic-xen.c 2007-11-26 14:09:27.000000000 +0100
+@@ -62,10 +62,10 @@
+ unsigned long io_apic_irqs;
+
+ #define clear_IO_APIC() ((void)0)
+-#endif /* CONFIG_XEN */
+-
++#else
+ int (*ioapic_renumber_irq)(int ioapic, int irq);
+ atomic_t irq_mis_count;
++#endif /* CONFIG_XEN */
+
+ /* Where if anywhere is the i8259 connect in external int mode */
+ static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
+@@ -73,7 +73,9 @@ static struct { int pin, apic; } ioapic_
+ static DEFINE_SPINLOCK(ioapic_lock);
+ static DEFINE_SPINLOCK(vector_lock);
+
++#ifndef CONFIG_XEN
+ int timer_over_8254 __initdata = 1;
++#endif
+
+ /*
+ * Is the SiS APIC rmw bug present ?
+@@ -86,7 +88,9 @@ int sis_apic_bug = -1;
+ */
+ int nr_ioapic_registers[MAX_IO_APICS];
+
++#ifndef CONFIG_XEN
+ static int disable_timer_pin_1 __initdata;
++#endif
+
+ /*
+ * Rough estimation of how many shared IRQs there are, can
+@@ -1216,12 +1220,13 @@ static int pin_2_irq(int idx, int apic,
+ irq += nr_ioapic_registers[i++];
+ irq += pin;
+
++#ifndef CONFIG_XEN
+ /*
+ * For MPS mode, so far only needed by ES7000 platform
+ */
+ if (ioapic_renumber_irq)
+ irq = ioapic_renumber_irq(apic, irq);
+-
++#endif
+ break;
+ }
+ default:
+@@ -2397,6 +2402,7 @@ void __init setup_IO_APIC(void)
+ print_IO_APIC();
+ }
+
++#ifndef CONFIG_XEN
+ static int __init setup_disable_8254_timer(char *s)
+ {
+ timer_over_8254 = -1;
+@@ -2410,6 +2416,7 @@ static int __init setup_enable_8254_time
+
+ __setup("disable_8254_timer", setup_disable_8254_timer);
+ __setup("enable_8254_timer", setup_enable_8254_timer);
++#endif
+
+ /*
+ * Called after all the initialization is done. If we didnt find any
+@@ -2921,6 +2928,7 @@ int io_apic_set_pci_routing (int ioapic,
+
+ #endif /* CONFIG_ACPI */
+
++#ifndef CONFIG_XEN
+ static int __init parse_disable_timer_pin_1(char *arg)
+ {
+ disable_timer_pin_1 = 1;
+@@ -2934,6 +2942,7 @@ static int __init parse_enable_timer_pin
+ return 0;
+ }
+ early_param("enable_timer_pin_1", parse_enable_timer_pin_1);
++#endif
+
+ static int __init parse_noapic(char *arg)
+ {
+Index: 10.3-2007-11-26/arch/i386/kernel/irq-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/irq-xen.c 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/irq-xen.c 2007-11-26 14:09:27.000000000 +0100
+@@ -243,7 +243,9 @@ EXPORT_SYMBOL(do_softirq);
+ * Interrupt statistics:
+ */
+
++#ifndef CONFIG_XEN
+ atomic_t irq_err_count;
++#endif
+
+ /*
+ * /proc/interrupts printing:
+@@ -289,6 +291,7 @@ skip:
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", nmi_count(j));
+ seq_putc(p, '\n');
++#ifndef CONFIG_XEN
+ #ifdef CONFIG_X86_LOCAL_APIC
+ seq_printf(p, "LOC: ");
+ for_each_online_cpu(j)
+@@ -300,6 +303,7 @@ skip:
+ #if defined(CONFIG_X86_IO_APIC)
+ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
+ #endif
++#endif
+ }
+ return 0;
+ }
+Index: 10.3-2007-11-26/arch/i386/kernel/mpparse-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/mpparse-xen.c 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/mpparse-xen.c 2007-11-26 14:09:27.000000000 +0100
+@@ -61,7 +61,9 @@ int mp_irq_entries;
+ int nr_ioapics;
+
+ int pic_mode;
++#ifndef CONFIG_XEN
+ unsigned long mp_lapic_addr;
++#endif
+
+ unsigned int def_to_bigsmp = 0;
+
+@@ -412,6 +414,7 @@ static int __init smp_read_mpc(struct mp
+
+ mps_oem_check(mpc, oem, str);
+
++#ifndef CONFIG_XEN
+ printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
+
+ /*
+@@ -420,6 +423,7 @@ static int __init smp_read_mpc(struct mp
+ */
+ if (!acpi_lapic)
+ mp_lapic_addr = mpc->mpc_lapic;
++#endif
+
+ /*
+ * Now process the configuration blocks.
+@@ -572,10 +576,12 @@ static inline void __init construct_defa
+ int linttypes[2] = { mp_ExtINT, mp_NMI };
+ int i;
+
++#ifndef CONFIG_XEN
+ /*
+ * local APIC has default address
+ */
+ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
++#endif
+
+ /*
+ * 2 CPUs, numbered 0 & 1.
+@@ -825,9 +831,9 @@ int es7000_plat;
+
+ #ifdef CONFIG_ACPI
+
++#ifndef CONFIG_XEN
+ void __init mp_register_lapic_address(u64 address)
+ {
+-#ifndef CONFIG_XEN
+ mp_lapic_addr = (unsigned long) address;
+
+ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
+@@ -836,8 +842,8 @@ void __init mp_register_lapic_address(u6
+ boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+
+ Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
+-#endif
+ }
++#endif
+
+ void __cpuinit mp_register_lapic (u8 id, u8 enabled)
+ {
+@@ -921,11 +927,11 @@ void __init mp_register_ioapic(u8 id, u3
+
+ #ifndef CONFIG_XEN
+ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
+-#endif
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
+ tmpid = io_apic_get_unique_id(idx, id);
+ else
++#endif
+ tmpid = id;
+ if (tmpid == -1) {
+ nr_ioapics--;
+@@ -1089,8 +1095,10 @@ int mp_register_gsi(u32 gsi, int trigger
+
+ ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
+
++#ifndef CONFIG_XEN
+ if (ioapic_renumber_irq)
+ gsi = ioapic_renumber_irq(ioapic, gsi);
++#endif
+
+ /*
+ * Avoid pin reprogramming. PRTs typically include entries
+Index: 10.3-2007-11-26/arch/i386/kernel/smp-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/i386/kernel/smp-xen.c 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/arch/i386/kernel/smp-xen.c 2007-11-26 14:09:27.000000000 +0100
+@@ -114,6 +114,7 @@ DEFINE_PER_CPU(struct tlb_state, cpu_tlb
+ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
+ */
+
++#ifndef CONFIG_XEN
+ static inline int __prepare_ICR (unsigned int shortcut, int vector)
+ {
+ unsigned int icr = shortcut | APIC_DEST_LOGICAL;
+@@ -133,6 +134,7 @@ static inline int __prepare_ICR2 (unsign
+ {
+ return SET_APIC_DEST_FIELD(mask);
+ }
++#endif
+
+ DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
+
+Index: 10.3-2007-11-26/arch/x86_64/kernel/Makefile
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/Makefile 2007-11-26 14:09:26.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/Makefile 2007-11-26 14:09:27.000000000 +0100
+@@ -63,12 +63,14 @@ pcspeaker-y += ../../i386/kernel/pcspe
+ perfctr-watchdog-y += ../../i386/kernel/cpu/perfctr-watchdog.o
+
+ ifdef CONFIG_XEN
++apic-y += ../../i386/kernel/apic-xen.o
+ time-y += ../../i386/kernel/time-xen.o
+ pci-dma-y += ../../i386/kernel/pci-dma-xen.o
+ microcode-$(subst m,y,$(CONFIG_MICROCODE)) := ../../i386/kernel/microcode-xen.o
+ quirks-y := ../../i386/kernel/quirks-xen.o
+
+-n-obj-xen := early-quirks.o i8259.o reboot.o i8237.o perfctr-watchdog.o smpboot.o trampoline.o tsc.o tsc_sync.o
++n-obj-xen := early-quirks.o genapic_flat.o i8237.o i8259.o perfctr-watchdog.o \
++ reboot.o smpboot.o trampoline.o tsc.o tsc_sync.o
+
+ include $(srctree)/scripts/Makefile.xen
+
+Index: 10.3-2007-11-26/arch/x86_64/kernel/apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/apic-xen.c 2007-11-26 14:07:48.000000000 +0100
++++ /dev/null 1970-01-01 00:00:00.000000000 +0000
+@@ -1,198 +0,0 @@
+-/*
+- * Local APIC handling, local APIC timers
+- *
+- * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
+- *
+- * Fixes
+- * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
+- * thanks to Eric Gilmore
+- * and Rolf G. Tews
+- * for testing these extensively.
+- * Maciej W. Rozycki : Various updates and fixes.
+- * Mikael Pettersson : Power Management for UP-APIC.
+- * Pavel Machek and
+- * Mikael Pettersson : PM converted to driver model.
+- */
+-
+-#include <linux/init.h>
+-
+-#include <linux/mm.h>
+-#include <linux/delay.h>
+-#include <linux/bootmem.h>
+-#include <linux/interrupt.h>
+-#include <linux/mc146818rtc.h>
+-#include <linux/kernel_stat.h>
+-#include <linux/sysdev.h>
+-#include <linux/module.h>
+-
+-#include <asm/atomic.h>
+-#include <asm/smp.h>
+-#include <asm/mtrr.h>
+-#include <asm/mpspec.h>
+-#include <asm/desc.h>
+-#include <asm/arch_hooks.h>
+-#include <asm/hpet.h>
+-#include <asm/idle.h>
+-
+-int apic_verbosity;
+-
+-/*
+- * 'what should we do if we get a hw irq event on an illegal vector'.
+- * each architecture has to answer this themselves.
+- */
+-void ack_bad_irq(unsigned int irq)
+-{
+- printk("unexpected IRQ trap at irq %02x\n", irq);
+- /*
+- * Currently unexpected vectors happen only on SMP and APIC.
+- * We _must_ ack these because every local APIC has only N
+- * irq slots per priority level, and a 'hanging, unacked' IRQ
+- * holds up an irq slot - in excessive cases (when multiple
+- * unexpected vectors occur) that might lock up the APIC
+- * completely.
+- * But don't ack when the APIC is disabled. -AK
+- */
+- if (!disable_apic)
+- ack_APIC_irq();
+-}
+-
+-int setup_profiling_timer(unsigned int multiplier)
+-{
+- return -EINVAL;
+-}
+-
+-void smp_local_timer_interrupt(void)
+-{
+- profile_tick(CPU_PROFILING);
+-#ifndef CONFIG_XEN
+-#ifdef CONFIG_SMP
+- update_process_times(user_mode(get_irq_regs()));
+-#endif
+-#endif
+- /*
+- * We take the 'long' return path, and there every subsystem
+- * grabs the appropriate locks (kernel lock/ irq lock).
+- *
+- * We might want to decouple profiling from the 'long path',
+- * and do the profiling totally in assembly.
+- *
+- * Currently this isn't too much of an issue (performance wise),
+- * we can take more than 100K local irqs per second on a 100 MHz P5.
+- */
+-}
+-
+-/*
+- * Local APIC timer interrupt. This is the most natural way for doing
+- * local interrupts, but local timer interrupts can be emulated by
+- * broadcast interrupts too. [in case the hw doesn't support APIC timers]
+- *
+- * [ if a single-CPU system runs an SMP kernel then we call the local
+- * interrupt as well. Thus we cannot inline the local irq ... ]
+- */
+-void smp_apic_timer_interrupt(struct pt_regs *regs)
+-{
+- struct pt_regs *old_regs = set_irq_regs(regs);
+-
+- /*
+- * the NMI deadlock-detector uses this.
+- */
+- add_pda(apic_timer_irqs, 1);
+-
+- /*
+- * NOTE! We'd better ACK the irq immediately,
+- * because timer handling can be slow.
+- */
+- ack_APIC_irq();
+- /*
+- * update_process_times() expects us to have done irq_enter().
+- * Besides, if we don't timer interrupts ignore the global
+- * interrupt lock, which is the WrongThing (tm) to do.
+- */
+- exit_idle();
+- irq_enter();
+- smp_local_timer_interrupt();
+- irq_exit();
+- set_irq_regs(old_regs);
+-}
+-
+-/*
+- * This interrupt should _never_ happen with our APIC/SMP architecture
+- */
+-asmlinkage void smp_spurious_interrupt(void)
+-{
+- unsigned int v;
+- exit_idle();
+- irq_enter();
+- /*
+- * Check if this really is a spurious interrupt and ACK it
+- * if it is a vectored one. Just in case...
+- * Spurious interrupts should not be ACKed.
+- */
+- v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
+- if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
+- ack_APIC_irq();
+-
+-#if 0
+- static unsigned long last_warning;
+- static unsigned long skipped;
+-
+- /* see sw-dev-man vol 3, chapter 7.4.13.5 */
+- if (time_before(last_warning+30*HZ,jiffies)) {
+- printk(KERN_INFO "spurious APIC interrupt on CPU#%d, %ld skipped.\n",
+- smp_processor_id(), skipped);
+- last_warning = jiffies;
+- skipped = 0;
+- } else {
+- skipped++;
+- }
+-#endif
+- irq_exit();
+-}
+-
+-/*
+- * This interrupt should never happen with our APIC/SMP architecture
+- */
+-
+-asmlinkage void smp_error_interrupt(void)
+-{
+- unsigned int v, v1;
+-
+- exit_idle();
+- irq_enter();
+- /* First tickle the hardware, only then report what went on. -- REW */
+- v = apic_read(APIC_ESR);
+- apic_write(APIC_ESR, 0);
+- v1 = apic_read(APIC_ESR);
+- ack_APIC_irq();
+- atomic_inc(&irq_err_count);
+-
+- /* Here is what the APIC error bits mean:
+- 0: Send CS error
+- 1: Receive CS error
+- 2: Send accept error
+- 3: Receive accept error
+- 4: Reserved
+- 5: Send illegal vector
+- 6: Received illegal vector
+- 7: Illegal register address
+- */
+- printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
+- smp_processor_id(), v , v1);
+- irq_exit();
+-}
+-
+-int disable_apic;
+-
+-/*
+- * This initializes the IO-APIC and APIC hardware if this is
+- * a UP kernel.
+- */
+-int __init APIC_init_uniprocessor (void)
+-{
+-#ifdef CONFIG_X86_IO_APIC
+- if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
+- setup_IO_APIC();
+-#endif
+-
+- return 1;
+-}
+Index: 10.3-2007-11-26/arch/x86_64/kernel/early-quirks.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/early-quirks.c 2007-11-26 14:07:20.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/early-quirks.c 2007-11-26 14:09:27.000000000 +0100
+@@ -55,6 +55,7 @@ static void __init nvidia_bugs(void)
+
+ static void __init ati_bugs(void)
+ {
++#ifndef CONFIG_XEN
+ #ifdef CONFIG_X86_IO_APIC
+ if (timer_over_8254 == 1) {
+ timer_over_8254 = 0;
+@@ -62,6 +63,7 @@ static void __init ati_bugs(void)
+ "ATI board detected. Disabling timer routing over 8254.\n");
+ }
+ #endif
++#endif
+ }
+
+ struct chipset {
+Index: 10.3-2007-11-26/arch/x86_64/kernel/genapic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/genapic-xen.c 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/genapic-xen.c 2007-11-26 14:09:27.000000000 +0100
+@@ -17,7 +17,6 @@
+ #include <linux/init.h>
+
+ #include <asm/smp.h>
+-#include <asm/ipi.h>
+ #include <asm/genapic.h>
+
+ #ifdef CONFIG_ACPI
+@@ -72,7 +71,7 @@ void __init setup_apic_routing(void)
+ /* Same for both flat and physical. */
+
+ #ifdef CONFIG_XEN
+-extern void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest);
++extern void xen_send_IPI_shortcut(unsigned int shortcut, int vector);
+ #endif
+
+ void send_IPI_self(int vector)
+@@ -80,6 +79,6 @@ void send_IPI_self(int vector)
+ #ifndef CONFIG_XEN
+ __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
+ #else
+- xen_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
++ xen_send_IPI_shortcut(APIC_DEST_SELF, vector);
+ #endif
+ }
+Index: 10.3-2007-11-26/arch/x86_64/kernel/genapic_xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/genapic_xen.c 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/genapic_xen.c 2007-11-26 14:09:27.000000000 +0100
+@@ -18,7 +18,6 @@
+ #include <linux/init.h>
+ #ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ #include <asm/smp.h>
+-#include <asm/ipi.h>
+ #else
+ #include <asm/apic.h>
+ #endif
+@@ -34,7 +33,7 @@ static inline void __send_IPI_one(unsign
+ notify_remote_via_irq(irq);
+ }
+
+-void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
++void xen_send_IPI_shortcut(unsigned int shortcut, int vector)
+ {
+ int cpu;
+
+@@ -96,13 +95,13 @@ static void xen_send_IPI_allbutself(int
+ */
+ Dprintk("%s\n", __FUNCTION__);
+ if (num_online_cpus() > 1)
+- xen_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
++ xen_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
+ }
+
+ static void xen_send_IPI_all(int vector)
+ {
+ Dprintk("%s\n", __FUNCTION__);
+- xen_send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
++ xen_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
+ }
+
+ static void xen_send_IPI_mask(cpumask_t cpumask, int vector)
+@@ -152,7 +151,7 @@ struct genapic apic_xen = {
+ #ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ .int_delivery_mode = dest_LowestPrio,
+ #endif
+- .int_dest_mode = (APIC_DEST_LOGICAL != 0),
++ .int_dest_mode = 1,
+ .target_cpus = xen_target_cpus,
+ .vector_allocation_domain = xen_vector_allocation_domain,
+ #ifdef CONFIG_XEN_PRIVILEGED_GUEST
+Index: 10.3-2007-11-26/arch/x86_64/kernel/io_apic-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/io_apic-xen.c 2007-11-26 14:07:57.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/io_apic-xen.c 2007-11-26 14:09:27.000000000 +0100
+@@ -66,10 +66,6 @@ static int assign_irq_vector(int irq, cp
+
+ int sis_apic_bug; /* not actually supported, dummy for compile */
+
+-static int no_timer_check;
+-
+-static int disable_timer_pin_1 __initdata;
+-
+ #ifdef CONFIG_XEN
+ #include <xen/interface/xen.h>
+ #include <xen/interface/physdev.h>
+@@ -83,6 +79,10 @@ unsigned long io_apic_irqs;
+
+ #define clear_IO_APIC() ((void)0)
+ #else
++static int no_timer_check;
++
++static int disable_timer_pin_1 __initdata;
++
+ int timer_over_8254 __initdata = 1;
+
+ /* Where if anywhere is the i8259 connect in external int mode */
+@@ -412,6 +412,7 @@ static int __init disable_ioapic_setup(c
+ }
+ early_param("noapic", disable_ioapic_setup);
+
++#ifndef CONFIG_XEN
+ /* Actually the next is obsolete, but keep it for paranoid reasons -AK */
+ static int __init disable_timer_pin_setup(char *arg)
+ {
+@@ -420,7 +421,6 @@ static int __init disable_timer_pin_setu
+ }
+ __setup("disable_timer_pin_1", disable_timer_pin_setup);
+
+-#ifndef CONFIG_XEN
+ static int __init setup_disable_8254_timer(char *s)
+ {
+ timer_over_8254 = -1;
+@@ -1709,10 +1709,6 @@ static inline void check_timer(void)
+ apic_printk(APIC_VERBOSE," failed :(.\n");
+ panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
+ }
+-#else
+-#define check_timer() ((void)0)
+-int timer_uses_ioapic_pin_0 = 0;
+-#endif /* !CONFIG_XEN */
+
+ static int __init notimercheck(char *s)
+ {
+@@ -1720,6 +1716,10 @@ static int __init notimercheck(char *s)
+ return 1;
+ }
+ __setup("no_timer_check", notimercheck);
++#else
++#define check_timer() ((void)0)
++int timer_uses_ioapic_pin_0 = 0;
++#endif /* !CONFIG_XEN */
+
+ /*
+ *
+Index: 10.3-2007-11-26/arch/x86_64/kernel/irq-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/irq-xen.c 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/irq-xen.c 2007-11-26 14:09:27.000000000 +0100
+@@ -20,7 +20,9 @@
+ #include <asm/idle.h>
+ #include <asm/smp.h>
+
++#ifndef CONFIG_XEN
+ atomic_t irq_err_count;
++#endif
+
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+ /*
+@@ -89,6 +91,7 @@ skip:
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
+ seq_putc(p, '\n');
++#ifndef CONFIG_XEN
+ #ifdef CONFIG_X86_LOCAL_APIC
+ seq_printf(p, "LOC: ");
+ for_each_online_cpu(j)
+@@ -96,6 +99,7 @@ skip:
+ seq_putc(p, '\n');
+ #endif
+ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
++#endif
+ }
+ return 0;
+ }
+Index: 10.3-2007-11-26/arch/x86_64/kernel/mpparse-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/mpparse-xen.c 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/mpparse-xen.c 2007-11-26 14:09:27.000000000 +0100
+@@ -52,8 +52,9 @@ struct mpc_config_intsrc mp_irqs[MAX_IRQ
+ int mp_irq_entries;
+
+ int nr_ioapics;
++#ifndef CONFIG_XEN
+ unsigned long mp_lapic_addr = 0;
+-
++#endif
+
+
+ /* Processor that is doing the boot up */
+@@ -246,11 +247,13 @@ static int __init smp_read_mpc(struct mp
+ str[12] = 0;
+ printk("MPTABLE: Product ID: %s ",str);
+
++#ifndef CONFIG_XEN
+ printk("MPTABLE: APIC at: 0x%X\n",mpc->mpc_lapic);
+
+ /* save the local APIC address, it might be non-default */
+ if (!acpi_lapic)
+ mp_lapic_addr = mpc->mpc_lapic;
++#endif
+
+ /*
+ * Now process the configuration blocks.
+@@ -395,10 +398,12 @@ static inline void __init construct_defa
+ int linttypes[2] = { mp_ExtINT, mp_NMI };
+ int i;
+
++#ifndef CONFIG_XEN
+ /*
+ * local APIC has default address
+ */
+ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
++#endif
+
+ /*
+ * 2 CPUs, numbered 0 & 1.
+@@ -599,15 +604,15 @@ void __init find_smp_config(void)
+
+ #ifdef CONFIG_ACPI
+
++#ifndef CONFIG_XEN
+ void __init mp_register_lapic_address(u64 address)
+ {
+-#ifndef CONFIG_XEN
+ mp_lapic_addr = (unsigned long) address;
+ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
+ if (boot_cpu_id == -1U)
+ boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
+-#endif
+ }
++#endif
+
+ void __cpuinit mp_register_lapic (u8 id, u8 enabled)
+ {
+Index: 10.3-2007-11-26/arch/x86_64/kernel/traps-xen.c
+===================================================================
+--- 10.3-2007-11-26.orig/arch/x86_64/kernel/traps-xen.c 2007-11-26 14:09:26.000000000 +0100
++++ 10.3-2007-11-26/arch/x86_64/kernel/traps-xen.c 2007-11-26 14:09:27.000000000 +0100
+@@ -1051,15 +1051,15 @@ asmlinkage void do_spurious_interrupt_bu
+ {
+ }
+
+-#if 0
++#ifndef CONFIG_XEN
+ asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
+ {
+ }
+-#endif
+
+ asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
+ {
+ }
++#endif
+
+ /*
+ * 'math_state_restore()' saves the current math information in the
+Index: 10.3-2007-11-26/include/asm-i386/apic.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/apic.h 2007-11-26 14:07:20.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/apic.h 2007-11-26 14:09:27.000000000 +0100
+@@ -3,7 +3,9 @@
+
+ #include <linux/pm.h>
+ #include <linux/delay.h>
++#ifndef CONFIG_XEN
+ #include <asm/fixmap.h>
++#endif
+ #include <asm/apicdef.h>
+ #include <asm/processor.h>
+ #include <asm/system.h>
+@@ -33,7 +35,7 @@ extern int apic_verbosity;
+
+ extern void generic_apic_probe(void);
+
+-#ifdef CONFIG_X86_LOCAL_APIC
++#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
+
+ /*
+ * Basic functions accessing APICs.
+@@ -111,18 +113,21 @@ extern int APIC_init_uniprocessor (void)
+
+ extern void enable_NMI_through_LVT0 (void * dummy);
+
+-#ifndef CONFIG_XEN
+ #define ARCH_APICTIMER_STOPS_ON_C3 1
+-#endif
+
+ extern int timer_over_8254;
+ extern int local_apic_timer_c2_ok;
+
+ extern int local_apic_timer_disabled;
+
+-#else /* !CONFIG_X86_LOCAL_APIC */
++#else /* !CONFIG_X86_LOCAL_APIC || CONFIG_XEN */
++
+ static inline void lapic_shutdown(void) { }
+
++#ifdef CONFIG_X86_LOCAL_APIC
++extern int APIC_init_uniprocessor (void);
++#endif
++
+ #endif /* !CONFIG_X86_LOCAL_APIC */
+
+ #endif /* __ASM_APIC_H */
+Index: 10.3-2007-11-26/include/asm-i386/apicdef.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/apicdef.h 2007-11-26 14:07:20.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/apicdef.h 2007-11-26 14:09:27.000000000 +0100
+@@ -1,6 +1,8 @@
+ #ifndef __ASM_APICDEF_H
+ #define __ASM_APICDEF_H
+
++#ifndef CONFIG_XEN
++
+ /*
+ * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
+ *
+@@ -111,8 +113,20 @@
+
+ #define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
+
++#else /* CONFIG_XEN */
++
++enum {
++ APIC_DEST_ALLBUT = 0x1,
++ APIC_DEST_SELF,
++ APIC_DEST_ALLINC
++};
++
++#endif /* CONFIG_XEN */
++
+ #define MAX_IO_APICS 64
+
++#ifndef CONFIG_XEN
++
+ /*
+ * the local APIC register structure, memory mapped. Not terribly well
+ * tested, but we might eventually use this one in the future - the
+@@ -372,4 +386,6 @@ struct local_apic {
+
+ #undef u32
+
++#endif /* CONFIG_XEN */
++
+ #endif
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/fixmap.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/fixmap.h 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/fixmap.h 2007-11-26 14:09:27.000000000 +0100
+@@ -53,13 +53,15 @@ extern unsigned long __FIXADDR_TOP;
+ enum fixed_addresses {
+ FIX_HOLE,
+ FIX_VDSO,
++#ifndef CONFIG_XEN
+ #ifdef CONFIG_X86_LOCAL_APIC
+ FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
+ #endif
+-#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_XEN)
++#ifdef CONFIG_X86_IO_APIC
+ FIX_IO_APIC_BASE_0,
+ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
+ #endif
++#endif
+ #ifdef CONFIG_X86_VISWS_APIC
+ FIX_CO_CPU, /* Cobalt timer */
+ FIX_CO_APIC, /* Cobalt APIC Redirection Table */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/hw_irq.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/hw_irq.h 2007-11-26 14:07:37.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/hw_irq.h 2007-11-26 14:09:27.000000000 +0100
+@@ -17,8 +17,6 @@
+ #include <asm/irq.h>
+ #include <asm/sections.h>
+
+-#define NMI_VECTOR 0x02
+-
+ /*
+ * Various low-level irq details needed by irq.c, process.c,
+ * time.c, io_apic.c and smp.c
+@@ -58,9 +56,6 @@ void setup_ioapic_dest(void);
+
+ extern unsigned long io_apic_irqs;
+
+-extern atomic_t irq_err_count;
+-extern atomic_t irq_mis_count;
+-
+ #define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
+
+ #endif /* _ASM_HW_IRQ_H */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/asm/smp.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/asm/smp.h 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/asm/smp.h 2007-11-26 14:09:27.000000000 +0100
+@@ -156,7 +156,7 @@ extern unsigned int num_processors;
+
+ #ifndef __ASSEMBLY__
+
+-#ifdef CONFIG_X86_LOCAL_APIC
++#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
+
+ #ifdef APIC_DEFINITION
+ extern int hard_smp_processor_id(void);
+@@ -179,7 +179,7 @@ static inline int hard_smp_processor_id(
+
+ extern u8 apicid_2_node[];
+
+-#ifdef CONFIG_X86_LOCAL_APIC
++#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
+ static __inline int logical_smp_processor_id(void)
+ {
+ /* we don't want to mark this access volatile - bad code generation */
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/irq_vectors.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-i386/mach-xen/irq_vectors.h 2007-11-26 14:07:20.000000000 +0100
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/irq_vectors.h 2007-11-26 14:09:27.000000000 +0100
+@@ -34,7 +34,7 @@
+ * Vectors 0x20-0x2f are used for ISA interrupts.
+ */
+
+-#if 0
++#ifndef CONFIG_XEN
+ /*
+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
+ *
+@@ -57,7 +57,6 @@
+ * sources per level' errata.
+ */
+ #define LOCAL_TIMER_VECTOR 0xef
+-#endif
+
+ #define SPURIOUS_APIC_VECTOR 0xff
+ #define ERROR_APIC_VECTOR 0xfe
+@@ -79,6 +78,7 @@
+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
+ * the usable vector space is 0x20-0xff (224 vectors)
+ */
++#endif
+
+ #define RESCHEDULE_VECTOR 0
+ #define CALL_FUNCTION_VECTOR 1
+Index: 10.3-2007-11-26/include/asm-i386/mach-xen/mach_apic.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ 10.3-2007-11-26/include/asm-i386/mach-xen/mach_apic.h 2007-11-26 14:09:27.000000000 +0100
+@@ -0,0 +1,44 @@
++#ifndef __ASM_MACH_APIC_H
++#define __ASM_MACH_APIC_H
++
++#include <mach_apicdef.h>
++#include <asm/smp.h>
++
++static inline cpumask_t target_cpus(void)
++{
++#ifdef CONFIG_SMP
++ return cpu_online_map;
++#else
++ return cpumask_of_cpu(0);
++#endif
++}
++#define TARGET_CPUS (target_cpus())
++
++#define INT_DELIVERY_MODE dest_LowestPrio
++#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
++
++static inline void setup_apic_routing(void)
++{
++}
++
++static inline int multi_timer_check(int apic, int irq)
++{
++ return 0;
++}
++
++static inline int apicid_to_node(int logical_apicid)
++{
++ return 0;
++}
++
++static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
++{
++ return cpus_addr(cpumask)[0];
++}
++
++static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
++{
++ return cpuid_apic >> index_msb;
++}
++
++#endif /* __ASM_MACH_APIC_H */
+Index: 10.3-2007-11-26/include/asm-x86_64/apic.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/apic.h 2007-11-26 14:07:20.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/apic.h 2007-11-26 14:09:27.000000000 +0100
+@@ -3,7 +3,9 @@
+
+ #include <linux/pm.h>
+ #include <linux/delay.h>
++#ifndef CONFIG_XEN
+ #include <asm/fixmap.h>
++#endif
+ #include <asm/apicdef.h>
+ #include <asm/system.h>
+
+@@ -32,6 +34,8 @@ extern int apic_mapped;
+ printk(s, ##a); \
+ } while (0)
+
++#ifndef CONFIG_XEN
++
+ struct pt_regs;
+
+ /*
+@@ -93,13 +97,18 @@ extern void setup_APIC_extened_lvt(unsig
+ #define K8_APIC_EXT_INT_MSG_EXT 0x7
+ #define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0
+
+-#ifndef CONFIG_XEN
+ void smp_send_timer_broadcast_ipi(void);
+ void switch_APIC_timer_to_ipi(void *cpumask);
+ void switch_ipi_to_APIC_timer(void *cpumask);
+
+ #define ARCH_APICTIMER_STOPS_ON_C3 1
+-#endif
++
++#elif defined(CONFIG_X86_LOCAL_APIC)
++
++extern int APIC_init_uniprocessor (void);
++extern void setup_apic_routing(void);
++
++#endif /* CONFIG_XEN / CONFIG_X86_LOCAL_APIC */
+
+ extern unsigned boot_cpu_id;
+ extern int local_apic_timer_c2_ok;
+Index: 10.3-2007-11-26/include/asm-x86_64/apicdef.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/apicdef.h 2007-11-26 14:07:20.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/apicdef.h 2007-11-26 14:09:27.000000000 +0100
+@@ -1,6 +1,8 @@
+ #ifndef __ASM_APICDEF_H
+ #define __ASM_APICDEF_H
+
++#ifndef CONFIG_XEN
++
+ /*
+ * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
+ *
+@@ -114,7 +116,22 @@
+
+ #define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
+
++#else /* CONFIG_XEN */
++
++#define APIC_ALL_CPUS 0xFFu
++
++enum {
++ APIC_DEST_ALLBUT = 0x1,
++ APIC_DEST_SELF,
++ APIC_DEST_ALLINC
++};
++
++#endif /* CONFIG_XEN */
++
+ #define MAX_IO_APICS 128
++
++#ifndef CONFIG_XEN
++
+ #define MAX_LOCAL_APIC 256
+
+ /*
+@@ -387,6 +404,8 @@ struct local_apic {
+
+ #undef u32
+
++#endif /* CONFIG_XEN */
++
+ #define BAD_APICID 0xFFu
+
+ #endif
+Index: 10.3-2007-11-26/include/asm-x86_64/io_apic.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/io_apic.h 2007-11-26 14:07:20.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/io_apic.h 2007-11-26 14:09:27.000000000 +0100
+@@ -125,8 +125,10 @@ extern int sis_apic_bug; /* dummy */
+
+ void enable_NMI_through_LVT0 (void * dummy);
+
++#ifndef CONFIG_XEN
+ extern spinlock_t i8259A_lock;
+
+ extern int timer_over_8254;
++#endif
+
+ #endif
+Index: 10.3-2007-11-26/include/asm-x86_64/ipi.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/ipi.h 2007-11-26 14:07:20.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/ipi.h 2007-11-26 14:09:27.000000000 +0100
+@@ -1,6 +1,8 @@
+ #ifndef __ASM_IPI_H
+ #define __ASM_IPI_H
+
++#ifndef CONFIG_XEN
++
+ /*
+ * Copyright 2004 James Cleverdon, IBM.
+ * Subject to the GNU Public License, v.2
+@@ -125,4 +127,6 @@ static inline void send_IPI_mask_sequenc
+ local_irq_restore(flags);
+ }
+
++#endif /* CONFIG_XEN */
++
+ #endif /* __ASM_IPI_H */
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/fixmap.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/fixmap.h 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/fixmap.h 2007-11-26 14:09:27.000000000 +0100
+@@ -12,7 +12,6 @@
+ #define _ASM_FIXMAP_H
+
+ #include <linux/kernel.h>
+-#include <asm/apicdef.h>
+ #include <asm/page.h>
+ #include <asm/vsyscall.h>
+ #include <asm/acpi.h>
+@@ -37,10 +36,8 @@ enum fixed_addresses {
+ VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
+ VSYSCALL_HPET,
+ FIX_HPET_BASE,
+-#ifdef CONFIG_X86_LOCAL_APIC
+- FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
+-#endif
+ #ifndef CONFIG_XEN
++ FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
+ FIX_IO_APIC_BASE_0,
+ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
+ #endif
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/hw_irq.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/hw_irq.h 2007-11-26 14:07:44.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/hw_irq.h 2007-11-26 14:09:27.000000000 +0100
+@@ -22,7 +22,6 @@
+ #include <linux/percpu.h>
+ #endif
+
+-#define NMI_VECTOR 0x02
+ /*
+ * IDT vectors usable for external interrupt sources start
+ * at 0x20:
+@@ -125,9 +124,6 @@ extern void setup_ioapic_dest(void);
+
+ extern unsigned long io_apic_irqs;
+
+-extern atomic_t irq_err_count;
+-extern atomic_t irq_mis_count;
+-
+ #define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
+
+ #define __STR(x) #x
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/smp.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/asm/smp.h 2007-11-26 14:07:48.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/asm/smp.h 2007-11-26 14:09:27.000000000 +0100
+@@ -8,6 +8,7 @@
+ #include <linux/cpumask.h>
+ #include <linux/bitops.h>
+ #include <linux/init.h>
++#include <linux/thread_info.h>
+ extern int disable_apic;
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+@@ -16,7 +17,6 @@ extern int disable_apic;
+ #ifdef CONFIG_X86_IO_APIC
+ #include <asm/io_apic.h>
+ #endif
+-#include <asm/thread_info.h>
+ #endif
+
+ #ifdef CONFIG_SMP
+@@ -70,7 +70,7 @@ extern unsigned __cpuinitdata disabled_c
+
+ #endif /* CONFIG_SMP */
+
+-#ifdef CONFIG_X86_LOCAL_APIC
++#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
+ static inline int hard_smp_processor_id(void)
+ {
+ /* we don't want to mark this access volatile - bad code generation */
+@@ -109,7 +109,7 @@ static inline int cpu_present_to_apicid(
+ })
+ #endif
+
+-#ifdef CONFIG_X86_LOCAL_APIC
++#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
+ static __inline int logical_smp_processor_id(void)
+ {
+ /* we don't want to mark this access volatile - bad code generation */
+Index: 10.3-2007-11-26/include/asm-x86_64/mach-xen/irq_vectors.h
+===================================================================
+--- 10.3-2007-11-26.orig/include/asm-x86_64/mach-xen/irq_vectors.h 2007-11-26 14:07:20.000000000 +0100
++++ 10.3-2007-11-26/include/asm-x86_64/mach-xen/irq_vectors.h 2007-11-26 14:09:27.000000000 +0100
+@@ -4,16 +4,10 @@
+ *
+ * In addition, there are some standard defines:
+ *
+- * FIRST_EXTERNAL_VECTOR:
+- * The first free place for external interrupts
+- *
+ * SYSCALL_VECTOR:
+ * The IRQ vector a syscall makes the user to kernel transition
+ * under.
+ *
+- * TIMER_IRQ:
+- * The IRQ number the timer interrupt comes in at.
+- *
+ * NR_IRQS:
+ * The total number of interrupt vectors (including all the
+ * architecture specific interrupts) needed.
+@@ -22,62 +16,8 @@
+ #ifndef _ASM_IRQ_VECTORS_H
+ #define _ASM_IRQ_VECTORS_H
+
+-/*
+- * IDT vectors usable for external interrupt sources start
+- * at 0x20:
+- */
+-#define FIRST_EXTERNAL_VECTOR 0x20
+-
+ #define SYSCALL_VECTOR 0x80
+
+-/*
+- * Vectors 0x20-0x2f are used for ISA interrupts.
+- */
+-
+-#if 0
+-/*
+- * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
+- *
+- * some of the following vectors are 'rare', they are merged
+- * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
+- * TLB, reschedule and local APIC vectors are performance-critical.
+- *
+- * Vectors 0xf0-0xfa are free (reserved for future Linux use).
+- */
+-#define INVALIDATE_TLB_VECTOR 0xfd
+-#define RESCHEDULE_VECTOR 0xfc
+-#define CALL_FUNCTION_VECTOR 0xfb
+-
+-#define THERMAL_APIC_VECTOR 0xf0
+-/*
+- * Local APIC timer IRQ vector is on a different priority level,
+- * to work around the 'lost local interrupt if more than 2 IRQ
+- * sources per level' errata.
+- */
+-#define LOCAL_TIMER_VECTOR 0xef
+-#endif
+-
+-#define SPURIOUS_APIC_VECTOR 0xff
+-#define ERROR_APIC_VECTOR 0xfe
+-
+-/*
+- * First APIC vector available to drivers: (vectors 0x30-0xee)
+- * we start at 0x31 to spread out vectors evenly between priority
+- * levels. (0x80 is the syscall vector)
+- */
+-#define FIRST_DEVICE_VECTOR 0x31
+-#define FIRST_SYSTEM_VECTOR 0xef
+-
+-/*
+- * 16 8259A IRQ's, 208 potential APIC interrupt sources.
+- * Right now the APIC is mostly only used for SMP.
+- * 256 vectors is an architectural limit. (we can have
+- * more than 256 devices theoretically, but they will
+- * have to use shared interrupts)
+- * Since vectors 0x00-0x1f are used/reserved for the CPU,
+- * the usable vector space is 0x20-0xff (224 vectors)
+- */
+-
+ #define RESCHEDULE_VECTOR 0
+ #define CALL_FUNCTION_VECTOR 1
+ #define NR_IPIS 2
diff --git a/trunk/2.6.22/20065_xen-no-video-select.patch1 b/trunk/2.6.22/20065_xen-no-video-select.patch1
new file mode 100644
index 0000000..a02f8af
--- /dev/null
+++ b/trunk/2.6.22/20065_xen-no-video-select.patch1
@@ -0,0 +1,21 @@
+From: jbeulich@novell.com
+Subject: Properly suppress VIDEO_SELECT config option
+Patch-mainline: obsolete
+
+... as it's meaningless for Xen kernels.
+
+---
+ drivers/video/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/video/Kconfig 2007-08-27 14:01:23.000000000 -0400
++++ b/drivers/video/Kconfig 2007-08-27 14:01:27.000000000 -0400
+@@ -1477,7 +1477,7 @@ config FB_CYBLA
+ tristate "Cyberblade/i1 support"
+ depends on FB && PCI && X86_32 && !64BIT
+ select FB_CFB_IMAGEBLIT
+- select VIDEO_SELECT
++ select VIDEO_SELECT if !XEN
+ ---help---
+ This driver is supposed to support the Trident Cyberblade/i1
+ graphics core integrated in the VIA VT8601A North Bridge,
diff --git a/trunk/2.6.22/20066_xen-blkback-bimodal-suse.patch1 b/trunk/2.6.22/20066_xen-blkback-bimodal-suse.patch1
new file mode 100644
index 0000000..026ea55
--- /dev/null
+++ b/trunk/2.6.22/20066_xen-blkback-bimodal-suse.patch1
@@ -0,0 +1,39 @@
+Subject: backward compatibility
+Signed-off-by: Gerd Hoffmann <kraxel@suse.de>
+Patch-mainline: obsolete
+
+---
+ drivers/xen/blkback/xenbus.c | 6 ++++++
+ drivers/xen/blktap/xenbus.c | 6 ++++++
+ 2 files changed, 12 insertions(+)
+
+--- a/drivers/xen/blkback/xenbus.c 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/blkback/xenbus.c 2007-08-27 14:01:49.000000000 -0400
+@@ -488,6 +488,12 @@ static int connect_ring(struct backend_i
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
++#if 1 /* maintain compatibility with early sles10-sp1 and paravirt netware betas */
++ else if (0 == strcmp(protocol, "1"))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
++ else if (0 == strcmp(protocol, "2"))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
++#endif
+ else {
+ xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
+ return -1;
+--- a/drivers/xen/blktap/xenbus.c 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/blktap/xenbus.c 2007-08-27 14:01:49.000000000 -0400
+@@ -428,6 +428,12 @@ static int connect_ring(struct backend_i
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
++#if 1 /* maintain compatibility with early sles10-sp1 and paravirt netware betas */
++ else if (0 == strcmp(protocol, "1"))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
++ else if (0 == strcmp(protocol, "2"))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
++#endif
+ else {
+ xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
+ return -1;
diff --git a/trunk/2.6.22/20067_xen-console-default.patch1 b/trunk/2.6.22/20067_xen-console-default.patch1
new file mode 100644
index 0000000..239f1af
--- /dev/null
+++ b/trunk/2.6.22/20067_xen-console-default.patch1
@@ -0,0 +1,41 @@
+From: jbeulich@novell.com
+Subject: Make Xen console default to vfb if that is built into the kernel
+Patch-mainline: obsolete
+
+---
+ drivers/xen/console/console.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/drivers/xen/console/console.c 2007-08-27 14:01:27.000000000 -0400
++++ b/drivers/xen/console/console.c 2007-08-27 14:01:27.000000000 -0400
+@@ -82,13 +82,19 @@ static int xc_num = -1;
+
+ void xencons_early_setup(void)
+ {
+- extern int console_use_vt;
+-
+ if (is_initial_xendomain()) {
++#ifdef CONFIG_XEN_DISABLE_SERIAL
+ xc_mode = XC_SERIAL;
++#endif
+ } else {
++#ifdef CONFIG_XEN_FRAMEBUFFER
++ xc_mode = XC_XVC;
++#else
++ extern int console_use_vt;
++
+ xc_mode = XC_TTY;
+ console_use_vt = 0;
++#endif
+ }
+ }
+
+@@ -630,6 +636,8 @@ static int __init xencons_init(void)
+
+ switch (xc_mode) {
+ case XC_XVC:
++ printk(KERN_INFO "xencons_init: Initializing xen vfb;"
++ " pass xencons=tty to prevent this\n");
+ DRV(xencons_driver)->name = "xvc";
+ DRV(xencons_driver)->major = XEN_XVC_MAJOR;
+ DRV(xencons_driver)->minor_start = XEN_XVC_MINOR;
diff --git a/trunk/2.6.22/20068_xen-x86-panic-smp.patch1 b/trunk/2.6.22/20068_xen-x86-panic-smp.patch1
new file mode 100644
index 0000000..c94f625
--- /dev/null
+++ b/trunk/2.6.22/20068_xen-x86-panic-smp.patch1
@@ -0,0 +1,96 @@
+From: jbeulich@novell.com
+Subject: panic/shutdown handling adjustments
+Patch-mainline: obsolete
+
+Prevent interrupts (and hence possibly scheduler operations) from
+occurring on (against) a CPU after removing it from cpu_online_map
+during panic/shutdown.
+(Background: I found it quite annoying to see scheduler related
+badness or BUG messages after a panic, eventually even leading to
+important information scrolling off the screen.)
+
+---
+ arch/i386/kernel/smp-xen.c | 8 ++------
+ arch/x86_64/kernel/smp-xen.c | 8 ++------
+ drivers/xen/core/evtchn.c | 10 ++++++++++
+ include/xen/evtchn.h | 2 ++
+ 4 files changed, 16 insertions(+), 12 deletions(-)
+
+--- a/arch/i386/kernel/smp-xen.c 2007-08-27 14:01:27.000000000 -0400
++++ b/arch/i386/kernel/smp-xen.c 2007-08-27 14:01:27.000000000 -0400
+@@ -594,9 +594,7 @@ static void stop_this_cpu (void * dummy)
+ * Remove this CPU:
+ */
+ cpu_clear(smp_processor_id(), cpu_online_map);
+-#if 0
+- disable_local_APIC();
+-#endif
++ mask_evtchn_local();
+ if (cpu_data[smp_processor_id()].hlt_works_ok)
+ for(;;) halt();
+ for (;;);
+@@ -616,9 +614,7 @@ void xen_smp_send_stop(void)
+ __smp_call_function(stop_this_cpu, NULL, 0, 0);
+ if (!nolock)
+ spin_unlock(&call_lock);
+-#if 0
+- disable_local_APIC();
+-#endif
++ mask_evtchn_local();
+ local_irq_restore(flags);
+ }
+
+--- a/arch/x86_64/kernel/smp-xen.c 2007-08-27 14:01:27.000000000 -0400
++++ b/arch/x86_64/kernel/smp-xen.c 2007-08-27 14:01:27.000000000 -0400
+@@ -484,9 +484,7 @@ static void stop_this_cpu(void *dummy)
+ * Remove this CPU:
+ */
+ cpu_clear(smp_processor_id(), cpu_online_map);
+-#ifndef CONFIG_XEN
+- disable_local_APIC();
+-#endif
++ mask_evtchn_local();
+ for (;;)
+ halt();
+ }
+@@ -507,9 +505,7 @@ void smp_send_stop(void)
+ __smp_call_function(stop_this_cpu, NULL, 0, 0);
+ if (!nolock)
+ spin_unlock(&call_lock);
+-#ifndef CONFIG_XEN
+- disable_local_APIC();
+-#endif
++ mask_evtchn_local();
+ local_irq_restore(flags);
+ }
+
+--- a/drivers/xen/core/evtchn.c 2007-08-27 14:01:27.000000000 -0400
++++ b/drivers/xen/core/evtchn.c 2007-08-27 14:01:27.000000000 -0400
+@@ -154,6 +154,16 @@ static inline unsigned int cpu_from_evtc
+ return cpu_evtchn[evtchn];
+ }
+
++void mask_evtchn_local(void)
++{
++ unsigned i, cpu = smp_processor_id();
++ shared_info_t *s = HYPERVISOR_shared_info;
++
++ for (i = 0; i < NR_EVENT_CHANNELS; ++i)
++ if (cpu_evtchn[i] == cpu)
++ synch_set_bit(i, &s->evtchn_mask[0]);
++}
++
+ #else
+
+ static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
+--- a/include/xen/evtchn.h 2007-08-27 14:01:25.000000000 -0400
++++ b/include/xen/evtchn.h 2007-08-27 14:01:27.000000000 -0400
+@@ -104,6 +104,8 @@ void evtchn_device_upcall(int port);
+ void mask_evtchn(int port);
+ void unmask_evtchn(int port);
+
++extern void mask_evtchn_local(void);
++
+ static inline void clear_evtchn(int port)
+ {
+ shared_info_t *s = HYPERVISOR_shared_info;
diff --git a/trunk/2.6.22/20069_xen-split-pt-lock.patch1 b/trunk/2.6.22/20069_xen-split-pt-lock.patch1
new file mode 100644
index 0000000..0b45a81
--- /dev/null
+++ b/trunk/2.6.22/20069_xen-split-pt-lock.patch1
@@ -0,0 +1,220 @@
+From: jbeulich@novell.com
+Subject: allow use of split page table locks
+Patch-mainline: obsolete
+
+---
+ arch/i386/mm/pgtable-xen.c | 66 +++++++++++++++++++++++++++++++++++++++---
+ arch/x86_64/mm/pageattr-xen.c | 66 +++++++++++++++++++++++++++++++++++++++---
+ mm/Kconfig | 3 -
+ 3 files changed, 124 insertions(+), 11 deletions(-)
+
+--- a/arch/i386/mm/pgtable-xen.c 2007-08-27 14:01:27.000000000 -0400
++++ b/arch/i386/mm/pgtable-xen.c 2007-08-27 14:01:27.000000000 -0400
+@@ -658,6 +658,64 @@ void make_pages_writable(void *va, unsig
+ }
+ }
+
++static void _pin_lock(struct mm_struct *mm, int lock) {
++ if (lock)
++ spin_lock(&mm->page_table_lock);
++#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
++ /* While mm->page_table_lock protects us against insertions and
++ * removals of higher level page table pages, it doesn't protect
++ * against updates of pte-s. Such updates, however, require the
++ * pte pages to be in consistent state (unpinned+writable or
++ * pinned+readonly). The pinning and attribute changes, however
++ * cannot be done atomically, which is why such updates must be
++ * prevented from happening concurrently.
++ * Note that no pte lock can ever elsewhere be acquired nesting
++ * with an already acquired one in the same mm, or with the mm's
++ * page_table_lock already acquired, as that would break in the
++ * non-split case (where all these are actually resolving to the
++ * one page_table_lock). Thus acquiring all of them here is not
++ * going to result in dead locks, and the order of acquires
++ * doesn't matter.
++ */
++ {
++ pgd_t *pgd = mm->pgd;
++ unsigned g;
++
++ for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
++ pud_t *pud;
++ unsigned u;
++
++ if (pgd_none(*pgd))
++ continue;
++ pud = pud_offset(pgd, 0);
++ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++ pmd_t *pmd;
++ unsigned m;
++
++ if (pud_none(*pud))
++ continue;
++ pmd = pmd_offset(pud, 0);
++ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++ spinlock_t *ptl;
++
++ if (pmd_none(*pmd))
++ continue;
++ ptl = pte_lockptr(0, pmd);
++ if (lock)
++ spin_lock(ptl);
++ else
++ spin_unlock(ptl);
++ }
++ }
++ }
++ }
++#endif
++ if (!lock)
++ spin_unlock(&mm->page_table_lock);
++}
++#define pin_lock(mm) _pin_lock(mm, 1)
++#define pin_unlock(mm) _pin_lock(mm, 0)
++
+ static inline void pgd_walk_set_prot(struct page *page, pgprot_t flags)
+ {
+ unsigned long pfn = page_to_pfn(page);
+@@ -740,18 +798,18 @@ void mm_pin(struct mm_struct *mm)
+ {
+ if (xen_feature(XENFEAT_writable_page_tables))
+ return;
+- spin_lock(&mm->page_table_lock);
++ pin_lock(mm);
+ __pgd_pin(mm->pgd);
+- spin_unlock(&mm->page_table_lock);
++ pin_unlock(mm);
+ }
+
+ void mm_unpin(struct mm_struct *mm)
+ {
+ if (xen_feature(XENFEAT_writable_page_tables))
+ return;
+- spin_lock(&mm->page_table_lock);
++ pin_lock(mm);
+ __pgd_unpin(mm->pgd);
+- spin_unlock(&mm->page_table_lock);
++ pin_unlock(mm);
+ }
+
+ void mm_pin_all(void)
+--- a/arch/x86_64/mm/pageattr-xen.c 2007-08-27 14:01:27.000000000 -0400
++++ b/arch/x86_64/mm/pageattr-xen.c 2007-08-27 14:01:27.000000000 -0400
+@@ -20,6 +20,64 @@
+ LIST_HEAD(mm_unpinned);
+ DEFINE_SPINLOCK(mm_unpinned_lock);
+
++static void _pin_lock(struct mm_struct *mm, int lock) {
++ if (lock)
++ spin_lock(&mm->page_table_lock);
++#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
++ /* While mm->page_table_lock protects us against insertions and
++ * removals of higher level page table pages, it doesn't protect
++ * against updates of pte-s. Such updates, however, require the
++ * pte pages to be in consistent state (unpinned+writable or
++ * pinned+readonly). The pinning and attribute changes, however
++ * cannot be done atomically, which is why such updates must be
++ * prevented from happening concurrently.
++ * Note that no pte lock can ever elsewhere be acquired nesting
++ * with an already acquired one in the same mm, or with the mm's
++ * page_table_lock already acquired, as that would break in the
++ * non-split case (where all these are actually resolving to the
++ * one page_table_lock). Thus acquiring all of them here is not
++ * going to result in dead locks, and the order of acquires
++ * doesn't matter.
++ */
++ {
++ pgd_t *pgd = mm->pgd;
++ unsigned g;
++
++ for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
++ pud_t *pud;
++ unsigned u;
++
++ if (pgd_none(*pgd))
++ continue;
++ pud = pud_offset(pgd, 0);
++ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++ pmd_t *pmd;
++ unsigned m;
++
++ if (pud_none(*pud))
++ continue;
++ pmd = pmd_offset(pud, 0);
++ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++ spinlock_t *ptl;
++
++ if (pmd_none(*pmd))
++ continue;
++ ptl = pte_lockptr(0, pmd);
++ if (lock)
++ spin_lock(ptl);
++ else
++ spin_unlock(ptl);
++ }
++ }
++ }
++ }
++#endif
++ if (!lock)
++ spin_unlock(&mm->page_table_lock);
++}
++#define pin_lock(mm) _pin_lock(mm, 1)
++#define pin_unlock(mm) _pin_lock(mm, 0)
++
+ static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
+ {
+ struct page *page = virt_to_page(pt);
+@@ -76,7 +134,7 @@ void mm_pin(struct mm_struct *mm)
+ if (xen_feature(XENFEAT_writable_page_tables))
+ return;
+
+- spin_lock(&mm->page_table_lock);
++ pin_lock(mm);
+
+ mm_walk(mm, PAGE_KERNEL_RO);
+ if (HYPERVISOR_update_va_mapping(
+@@ -97,7 +155,7 @@ void mm_pin(struct mm_struct *mm)
+ list_del(&mm->context.unpinned);
+ spin_unlock(&mm_unpinned_lock);
+
+- spin_unlock(&mm->page_table_lock);
++ pin_unlock(mm);
+ }
+
+ void mm_unpin(struct mm_struct *mm)
+@@ -105,7 +163,7 @@ void mm_unpin(struct mm_struct *mm)
+ if (xen_feature(XENFEAT_writable_page_tables))
+ return;
+
+- spin_lock(&mm->page_table_lock);
++ pin_lock(mm);
+
+ xen_pgd_unpin(__pa(mm->pgd));
+ xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
+@@ -125,7 +183,7 @@ void mm_unpin(struct mm_struct *mm)
+ list_add(&mm->context.unpinned, &mm_unpinned);
+ spin_unlock(&mm_unpinned_lock);
+
+- spin_unlock(&mm->page_table_lock);
++ pin_unlock(mm);
+ }
+
+ void mm_pin_all(void)
+--- a/mm/Kconfig 2007-08-27 14:01:25.000000000 -0400
++++ b/mm/Kconfig 2007-08-27 14:01:27.000000000 -0400
+@@ -132,14 +132,11 @@ config MEMORY_HOTPLUG_SPARSE
+ # Default to 4 for wider testing, though 8 might be more appropriate.
+ # ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
+ # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
+-# XEN on x86 architecture uses the mapping field on pagetable pages to store a
+-# pointer to the destructor. This conflicts with pte_lock_deinit().
+ #
+ config SPLIT_PTLOCK_CPUS
+ int
+ default "4096" if ARM && !CPU_CACHE_VIPT
+ default "4096" if PARISC && !PA20
+- default "4096" if X86_XEN || X86_64_XEN
+ default "4"
+
+ #
diff --git a/trunk/2.6.22/20070_xen-blkif-protocol-fallback-hack.patch1 b/trunk/2.6.22/20070_xen-blkif-protocol-fallback-hack.patch1
new file mode 100644
index 0000000..9b2b0b1
--- /dev/null
+++ b/trunk/2.6.22/20070_xen-blkif-protocol-fallback-hack.patch1
@@ -0,0 +1,229 @@
+Subject: 32-on-64 blkif protocol negotiation fallback for old guests.
+From: kraxel@suse.de
+References: 244055
+Patch-mainline: never.
+
+See the comment below. Oh well.
+
+---
+ drivers/xen/blkback/xenbus.c | 8 +-
+ drivers/xen/blktap/xenbus.c | 8 +-
+ drivers/xen/core/Makefile | 2
+ drivers/xen/core/domctl.c | 133 +++++++++++++++++++++++++++++++++++++++++++
+ drivers/xen/core/domctl.h | 2
+ 5 files changed, 146 insertions(+), 7 deletions(-)
+
+--- a/drivers/xen/blkback/xenbus.c 2007-08-27 14:01:27.000000000 -0400
++++ b/drivers/xen/blkback/xenbus.c 2007-08-27 14:01:47.000000000 -0400
+@@ -21,6 +21,7 @@
+ #include <linux/module.h>
+ #include <linux/kthread.h>
+ #include "common.h"
++#include "../core/domctl.h"
+
+ #undef DPRINTK
+ #define DPRINTK(fmt, args...) \
+@@ -457,7 +458,6 @@ again:
+ xenbus_transaction_end(xbt, 1);
+ }
+
+-
+ static int connect_ring(struct backend_info *be)
+ {
+ struct xenbus_device *dev = be->dev;
+@@ -480,8 +480,10 @@ static int connect_ring(struct backend_i
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+ err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
+ "%63s", protocol, NULL);
+- if (err)
+- strcpy(protocol, "unspecified, assuming native");
++ if (err) {
++ strcpy(protocol, "unspecified");
++ be->blkif->blk_protocol = xen_guest_blkif_protocol(be->blkif->domid);
++ }
+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
+--- a/drivers/xen/blktap/xenbus.c 2007-08-27 14:01:27.000000000 -0400
++++ b/drivers/xen/blktap/xenbus.c 2007-08-27 14:01:27.000000000 -0400
+@@ -39,6 +39,7 @@
+ #include <linux/kthread.h>
+ #include <xen/xenbus.h>
+ #include "common.h"
++#include "../core/domctl.h"
+
+
+ struct backend_info
+@@ -397,7 +398,6 @@ static void connect(struct backend_info
+ return;
+ }
+
+-
+ static int connect_ring(struct backend_info *be)
+ {
+ struct xenbus_device *dev = be->dev;
+@@ -420,8 +420,10 @@ static int connect_ring(struct backend_i
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+ err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
+ "%63s", protocol, NULL);
+- if (err)
+- strcpy(protocol, "unspecified, assuming native");
++ if (err) {
++ strcpy(protocol, "unspecified");
++ be->blkif->blk_protocol = xen_guest_blkif_protocol(be->blkif->domid);
++ }
+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
+--- a/drivers/xen/core/Makefile 2007-08-27 14:01:25.000000000 -0400
++++ b/drivers/xen/core/Makefile 2007-08-27 14:01:27.000000000 -0400
+@@ -2,7 +2,7 @@
+ # Makefile for the linux kernel.
+ #
+
+-obj-y := evtchn.o gnttab.o features.o reboot.o machine_reboot.o firmware.o
++obj-y := evtchn.o gnttab.o features.o reboot.o machine_reboot.o firmware.o domctl.o
+
+ obj-$(CONFIG_PROC_FS) += xen_proc.o
+ obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor_sysfs.o
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/core/domctl.c 2007-08-27 14:01:27.000000000 -0400
+@@ -0,0 +1,133 @@
++/*
++ * !!! dirty hack alert !!!
++ *
++ * Problem: old guest kernels don't have a "protocol" node
++ * in the frontend xenstore directory, so mixing
++ * 32 and 64bit domains doesn't work.
++ *
++ * Upstream plans to solve this in the tools, by letting them
++ * create a protocol node. Which certainly makes sense.
++ * But it isn't trivial and isn't done yet. Too bad.
++ *
++ * So for the time being we use the get_address_size domctl
++ * hypercall for a pretty good guess. Not nice as the domctl
++ * hypercall isn't supposed to be used by the kernel. Because
++ * we don't want to have dependencies between dom0 kernel and
++ * xen kernel versions. Now we have one. Ouch.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <asm/hypervisor.h>
++#include <xen/blkif.h>
++
++#include "domctl.h"
++
++/* stuff copied from xen/interface/domctl.h, which we can't
++ * include directly for the reasons outlined above .... */
++
++#define XEN_DOMCTL_set_address_size 35
++#define XEN_DOMCTL_get_address_size 36
++typedef struct xen_domctl_address_size {
++ uint32_t size;
++} xen_domctl_address_size_t;
++
++#define native_address_size (sizeof(unsigned long)*8)
++
++/* v4: sles10 sp1: xen 3.0.4 + 32-on-64 patches */
++struct xen_domctl_v4 {
++ uint32_t cmd;
++ uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
++ domid_t domain;
++ union {
++ /* left out lots of other struct xen_domctl_foobar */
++ struct xen_domctl_address_size address_size;
++ uint64_t dummy_align;
++ uint8_t dummy_pad[128];
++ } u;
++};
++
++/* v5: upstream: xen 3.0.5 */
++typedef __attribute__((aligned(8))) uint64_t uint64_aligned_t;
++struct xen_domctl_v5 {
++ uint32_t cmd;
++ uint32_t interface_version;
++ domid_t domain;
++ union {
++ struct xen_domctl_address_size address_size;
++ uint64_aligned_t dummy_align;
++ uint8_t dummy_pad[128];
++ } u;
++};
++
++/* The actual code comes here */
++
++static int xen_guest_address_size_v4(int domid)
++{
++ struct xen_domctl_v4 domctl;
++ int rc;
++
++ memset(&domctl, 0, sizeof(domctl));
++ domctl.cmd = XEN_DOMCTL_get_address_size;
++ domctl.interface_version = 4;
++ domctl.domain = domid;
++ if (0 != (rc = _hypercall1(int, domctl, &domctl)))
++ return rc;
++ return domctl.u.address_size.size;
++}
++
++static int xen_guest_address_size_v5(int domid)
++{
++ struct xen_domctl_v5 domctl;
++ int rc;
++
++ memset(&domctl, 0, sizeof(domctl));
++ domctl.cmd = XEN_DOMCTL_get_address_size;
++ domctl.interface_version = 5;
++ domctl.domain = domid;
++ if (0 != (rc = _hypercall1(int, domctl, &domctl)))
++ return rc;
++ return domctl.u.address_size.size;
++}
++
++int xen_guest_address_size(int domid)
++{
++ int ret;
++
++ ret = xen_guest_address_size_v4(domid);
++ if (ret == 32 || ret == 64) {
++ printk("%s: v4 domctl worked ok: %d\n", __FUNCTION__, ret);
++ goto done;
++ }
++
++ ret = xen_guest_address_size_v5(domid);
++ if (ret == 32 || ret == 64) {
++ printk("%s: v5 domctl worked ok: %d\n", __FUNCTION__, ret);
++ goto done;
++ }
++
++ ret = native_address_size;
++ printk("%s: v4,v5 domctls failed, assuming native: %d\n",
++ __FUNCTION__, ret);
++
++ done:
++ return ret;
++}
++EXPORT_SYMBOL_GPL(xen_guest_address_size);
++
++int xen_guest_blkif_protocol(int domid)
++{
++ int address_size;
++
++ address_size = xen_guest_address_size(domid);
++ printk(KERN_DEBUG "%s: domain %d: got address size %d\n",
++ __FUNCTION__, domid, address_size);
++ if (address_size == native_address_size)
++ return BLKIF_PROTOCOL_NATIVE;
++ if (address_size == 32)
++ return BLKIF_PROTOCOL_X86_32;
++ if (address_size == 64)
++ return BLKIF_PROTOCOL_X86_64;
++ return BLKIF_PROTOCOL_NATIVE;
++}
++EXPORT_SYMBOL_GPL(xen_guest_blkif_protocol);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/xen/core/domctl.h 2007-08-27 14:01:27.000000000 -0400
+@@ -0,0 +1,2 @@
++int xen_guest_address_size(int domid);
++int xen_guest_blkif_protocol(int domid);
diff --git a/trunk/2.6.22/20071_xen-x86-pXX_val.patch1 b/trunk/2.6.22/20071_xen-x86-pXX_val.patch1
new file mode 100644
index 0000000..f7b5849
--- /dev/null
+++ b/trunk/2.6.22/20071_xen-x86-pXX_val.patch1
@@ -0,0 +1,434 @@
+From: jbeulich@novell.com
+Subject: consolidate pte_val/p[mug]d_val replacements
+Patch-mainline: obsolete
+
+- replace incomplete pXX_val_ma() set with complete __pXX_val() set
+- use __pXX_val() instead of pXX_val() when only flags are accessed or
+ the frame number is only compared against zero
+
+Index: 10.3-2007-10-22/arch/i386/mm/hypervisor.c
+===================================================================
+--- 10.3-2007-10-22.orig/arch/i386/mm/hypervisor.c 2007-10-22 14:00:22.000000000 +0200
++++ 10.3-2007-10-22/arch/i386/mm/hypervisor.c 2007-10-22 14:02:29.000000000 +0200
+@@ -44,17 +44,6 @@
+ #include <linux/highmem.h>
+ #include <asm/tlbflush.h>
+
+-#ifdef CONFIG_X86_64
+-#define pmd_val_ma(v) (v).pmd
+-#else
+-#ifdef CONFIG_X86_PAE
+-# define pmd_val_ma(v) ((v).pmd)
+-# define pud_val_ma(v) ((v).pgd.pgd)
+-#else
+-# define pmd_val_ma(v) ((v).pud.pgd.pgd)
+-#endif
+-#endif
+-
+ void xen_l1_entry_update(pte_t *ptr, pte_t val)
+ {
+ mmu_update_t u;
+@@ -64,7 +53,7 @@ void xen_l1_entry_update(pte_t *ptr, pte
+ #else
+ u.ptr = virt_to_machine(ptr);
+ #endif
+- u.val = pte_val_ma(val);
++ u.val = __pte_val(val);
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+ }
+
+@@ -72,34 +61,26 @@ void xen_l2_entry_update(pmd_t *ptr, pmd
+ {
+ mmu_update_t u;
+ u.ptr = virt_to_machine(ptr);
+- u.val = pmd_val_ma(val);
++ u.val = __pmd_val(val);
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+ }
+
+-#ifdef CONFIG_X86_PAE
++#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
+ void xen_l3_entry_update(pud_t *ptr, pud_t val)
+ {
+ mmu_update_t u;
+ u.ptr = virt_to_machine(ptr);
+- u.val = pud_val_ma(val);
++ u.val = __pud_val(val);
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+ }
+ #endif
+
+ #ifdef CONFIG_X86_64
+-void xen_l3_entry_update(pud_t *ptr, pud_t val)
+-{
+- mmu_update_t u;
+- u.ptr = virt_to_machine(ptr);
+- u.val = val.pud;
+- BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+-}
+-
+ void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
+ {
+ mmu_update_t u;
+ u.ptr = virt_to_machine(ptr);
+- u.val = val.pgd;
++ u.val = __pgd_val(val);
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+ }
+ #endif /* CONFIG_X86_64 */
+Index: 10.3-2007-10-22/arch/i386/mm/ioremap-xen.c
+===================================================================
+--- 10.3-2007-10-22.orig/arch/i386/mm/ioremap-xen.c 2007-10-22 13:58:57.000000000 +0200
++++ 10.3-2007-10-22/arch/i386/mm/ioremap-xen.c 2007-10-22 14:01:59.000000000 +0200
+@@ -77,7 +77,7 @@ static int __direct_remap_pfn_range(stru
+ * Fill in the machine address: PTE ptr is done later by
+ * __direct_remap_area_pages().
+ */
+- v->val = pte_val_ma(pfn_pte_ma(mfn, prot));
++ v->val = __pte_val(pfn_pte_ma(mfn, prot));
+
+ mfn++;
+ address += PAGE_SIZE;
+Index: 10.3-2007-10-22/include/asm-i386/mach-xen/asm/page.h
+===================================================================
+--- 10.3-2007-10-22.orig/include/asm-i386/mach-xen/asm/page.h 2007-10-22 13:58:57.000000000 +0200
++++ 10.3-2007-10-22/include/asm-i386/mach-xen/asm/page.h 2007-10-22 14:01:59.000000000 +0200
+@@ -76,17 +76,21 @@ typedef struct { unsigned long long pgpr
+ #define pgprot_val(x) ((x).pgprot)
+ #include <asm/maddr.h>
+
++#define __pgd_val(x) ((x).pgd)
+ static inline unsigned long long xen_pgd_val(pgd_t pgd)
+ {
+- unsigned long long ret = pgd.pgd;
++ unsigned long long ret = __pgd_val(pgd);
+ if (ret & _PAGE_PRESENT)
+ ret = pte_machine_to_phys(ret);
+ return ret;
+ }
+
++#define __pud_val(x) __pgd_val((x).pgd)
++
++#define __pmd_val(x) ((x).pmd)
+ static inline unsigned long long xen_pmd_val(pmd_t pmd)
+ {
+- unsigned long long ret = pmd.pmd;
++ unsigned long long ret = __pmd_val(pmd);
+ #if CONFIG_XEN_COMPAT <= 0x030002
+ if (ret)
+ ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
+@@ -97,13 +101,13 @@ static inline unsigned long long xen_pmd
+ return ret;
+ }
+
+-static inline unsigned long long pte_val_ma(pte_t pte)
++static inline unsigned long long __pte_val(pte_t pte)
+ {
+ return ((unsigned long long)pte.pte_high << 32) | pte.pte_low;
+ }
+ static inline unsigned long long xen_pte_val(pte_t pte)
+ {
+- unsigned long long ret = pte_val_ma(pte);
++ unsigned long long ret = __pte_val(pte);
+ if (pte.pte_low & _PAGE_PRESENT)
+ ret = pte_machine_to_phys(ret);
+ return ret;
+@@ -143,9 +147,10 @@ typedef struct { unsigned long pgprot; }
+ #define boot_pte_t pte_t /* or would you rather have a typedef */
+ #include <asm/maddr.h>
+
++#define __pgd_val(x) ((x).pgd)
+ static inline unsigned long xen_pgd_val(pgd_t pgd)
+ {
+- unsigned long ret = pgd.pgd;
++ unsigned long ret = __pgd_val(pgd);
+ #if CONFIG_XEN_COMPAT <= 0x030002
+ if (ret)
+ ret = machine_to_phys(ret) | _PAGE_PRESENT;
+@@ -156,13 +161,16 @@ static inline unsigned long xen_pgd_val(
+ return ret;
+ }
+
+-static inline unsigned long pte_val_ma(pte_t pte)
++#define __pud_val(x) __pgd_val((x).pgd)
++#define __pmd_val(x) __pud_val((x).pud)
++
++static inline unsigned long __pte_val(pte_t pte)
+ {
+ return pte.pte_low;
+ }
+ static inline unsigned long xen_pte_val(pte_t pte)
+ {
+- unsigned long ret = pte_val_ma(pte);
++ unsigned long ret = __pte_val(pte);
+ if (ret & _PAGE_PRESENT)
+ ret = machine_to_phys(ret);
+ return ret;
+Index: 10.3-2007-10-22/include/asm-i386/mach-xen/asm/pgtable.h
+===================================================================
+--- 10.3-2007-10-22.orig/include/asm-i386/mach-xen/asm/pgtable.h 2007-10-22 14:09:14.000000000 +0200
++++ 10.3-2007-10-22/include/asm-i386/mach-xen/asm/pgtable.h 2007-10-22 14:09:48.000000000 +0200
+@@ -210,15 +210,16 @@ extern unsigned long pg0[];
+ #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
+
+ /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
+-#define pmd_none(x) (!(unsigned long)pmd_val(x))
++#define pmd_none(x) (!(unsigned long)__pmd_val(x))
+ #if CONFIG_XEN_COMPAT <= 0x030002
+ /* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
+ can temporarily clear it. */
+-#define pmd_present(x) (pmd_val(x))
++#define pmd_present(x) (__pmd_val(x))
++#define pmd_bad(x) ((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
+ #else
+-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
++#define pmd_present(x) (__pmd_val(x) & _PAGE_PRESENT)
++#define pmd_bad(x) ((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+ #endif
+-#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
+
+
+ #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+@@ -442,7 +443,7 @@ static inline pte_t pte_modify(pte_t pte
+ }
+
+ #define pmd_large(pmd) \
+-((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
++((__pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
+
+ /*
+ * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
+Index: 10.3-2007-10-22/include/asm-i386/mach-xen/asm/pgtable-2level.h
+===================================================================
+--- 10.3-2007-10-22.orig/include/asm-i386/mach-xen/asm/pgtable-2level.h 2007-10-22 13:58:57.000000000 +0200
++++ 10.3-2007-10-22/include/asm-i386/mach-xen/asm/pgtable-2level.h 2007-10-22 14:01:59.000000000 +0200
+@@ -2,9 +2,11 @@
+ #define _I386_PGTABLE_2LEVEL_H
+
+ #define pte_ERROR(e) \
+- printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
++ printk("%s:%d: bad pte %08lx (pfn %05lx).\n", __FILE__, __LINE__, \
++ __pte_val(e), pte_pfn(e))
+ #define pgd_ERROR(e) \
+- printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
++ printk("%s:%d: bad pgd %08lx (pfn %05lx).\n", __FILE__, __LINE__, \
++ __pgd_val(e), pgd_val(e) >> PAGE_SHIFT)
+
+ /*
+ * Certain architectures need to do special things when PTEs
+Index: 10.3-2007-10-22/include/asm-i386/mach-xen/asm/pgtable-3level.h
+===================================================================
+--- 10.3-2007-10-22.orig/include/asm-i386/mach-xen/asm/pgtable-3level.h 2007-10-22 13:58:57.000000000 +0200
++++ 10.3-2007-10-22/include/asm-i386/mach-xen/asm/pgtable-3level.h 2007-10-22 14:01:59.000000000 +0200
+@@ -9,11 +9,14 @@
+ */
+
+ #define pte_ERROR(e) \
+- printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
++ printk("%s:%d: bad pte %p(%016Lx pfn %08lx).\n", __FILE__, __LINE__, \
++ &(e), __pte_val(e), pte_pfn(e))
+ #define pmd_ERROR(e) \
+- printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
++ printk("%s:%d: bad pmd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \
++ &(e), __pmd_val(e), (pmd_val(e) & PTE_MASK) >> PAGE_SHIFT)
+ #define pgd_ERROR(e) \
+- printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
++ printk("%s:%d: bad pgd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \
++ &(e), __pgd_val(e), (pgd_val(e) & PTE_MASK) >> PAGE_SHIFT)
+
+ #define pud_none(pud) 0
+ #define pud_bad(pud) 0
+@@ -24,7 +27,7 @@
+ */
+ static inline int pte_x(pte_t pte)
+ {
+- return !(pte_val(pte) & _PAGE_NX);
++ return !(__pte_val(pte) & _PAGE_NX);
+ }
+
+ /*
+@@ -66,7 +69,7 @@ static inline void xen_set_pte_at(struct
+
+ static inline void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
+ {
+- set_64bit((unsigned long long *)(ptep),pte_val_ma(pte));
++ set_64bit((unsigned long long *)(ptep),__pte_val(pte));
+ }
+ static inline void xen_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
+@@ -127,7 +130,7 @@ static inline void pud_clear (pud_t * pu
+ #ifdef CONFIG_SMP
+ static inline pte_t xen_ptep_get_and_clear(pte_t *ptep, pte_t res)
+ {
+- uint64_t val = pte_val_ma(res);
++ uint64_t val = __pte_val(res);
+ if (__cmpxchg64(ptep, val, 0) != val) {
+ /* xchg acts as a barrier before the setting of the high bits */
+ res.pte_low = xchg(&ptep->pte_low, 0);
+Index: 10.3-2007-10-22/include/asm-x86_64/mach-xen/asm/page.h
+===================================================================
+--- 10.3-2007-10-22.orig/include/asm-x86_64/mach-xen/asm/page.h 2007-10-22 13:58:57.000000000 +0200
++++ 10.3-2007-10-22/include/asm-x86_64/mach-xen/asm/page.h 2007-10-22 14:01:59.000000000 +0200
+@@ -85,14 +85,15 @@ typedef struct { unsigned long pgd; } pg
+
+ typedef struct { unsigned long pgprot; } pgprot_t;
+
+-#define pte_val(x) (((x).pte & _PAGE_PRESENT) ? \
+- pte_machine_to_phys((x).pte) : \
+- (x).pte)
+-#define pte_val_ma(x) ((x).pte)
++#define __pte_val(x) ((x).pte)
++#define pte_val(x) ((__pte_val(x) & _PAGE_PRESENT) ? \
++ pte_machine_to_phys(__pte_val(x)) : \
++ __pte_val(x))
+
++#define __pmd_val(x) ((x).pmd)
+ static inline unsigned long pmd_val(pmd_t x)
+ {
+- unsigned long ret = x.pmd;
++ unsigned long ret = __pmd_val(x);
+ #if CONFIG_XEN_COMPAT <= 0x030002
+ if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
+ #else
+@@ -101,16 +102,18 @@ static inline unsigned long pmd_val(pmd_
+ return ret;
+ }
+
++#define __pud_val(x) ((x).pud)
+ static inline unsigned long pud_val(pud_t x)
+ {
+- unsigned long ret = x.pud;
++ unsigned long ret = __pud_val(x);
+ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
+ return ret;
+ }
+
++#define __pgd_val(x) ((x).pgd)
+ static inline unsigned long pgd_val(pgd_t x)
+ {
+- unsigned long ret = x.pgd;
++ unsigned long ret = __pgd_val(x);
+ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
+ return ret;
+ }
+Index: 10.3-2007-10-22/include/asm-x86_64/mach-xen/asm/pgtable.h
+===================================================================
+--- 10.3-2007-10-22.orig/include/asm-x86_64/mach-xen/asm/pgtable.h 2007-10-22 14:00:10.000000000 +0200
++++ 10.3-2007-10-22/include/asm-x86_64/mach-xen/asm/pgtable.h 2007-10-22 14:01:59.000000000 +0200
+@@ -82,16 +82,20 @@ extern unsigned long empty_zero_page[PAG
+ #ifndef __ASSEMBLY__
+
+ #define pte_ERROR(e) \
+- printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
++ printk("%s:%d: bad pte %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
++ &(e), __pte_val(e), pte_pfn(e))
+ #define pmd_ERROR(e) \
+- printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
++ printk("%s:%d: bad pmd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
++ &(e), __pmd_val(e), pmd_pfn(e))
+ #define pud_ERROR(e) \
+- printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
++ printk("%s:%d: bad pud %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
++ &(e), __pud_val(e), (pud_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT)
+ #define pgd_ERROR(e) \
+- printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
++ printk("%s:%d: bad pgd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
++ &(e), __pgd_val(e), (pgd_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT)
+
+-#define pgd_none(x) (!pgd_val(x))
+-#define pud_none(x) (!pud_val(x))
++#define pgd_none(x) (!__pgd_val(x))
++#define pud_none(x) (!__pud_val(x))
+
+ static inline void set_pte(pte_t *dst, pte_t val)
+ {
+@@ -236,17 +240,17 @@ extern unsigned int __kernel_page_user;
+
+ static inline unsigned long pgd_bad(pgd_t pgd)
+ {
+- return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
++ return __pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+ }
+
+ static inline unsigned long pud_bad(pud_t pud)
+ {
+- return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
++ return __pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+ }
+
+ static inline unsigned long pmd_bad(pmd_t pmd)
+ {
+- return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
++ return __pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+ }
+
+ #define set_pte_at(_mm,addr,ptep,pteval) do { \
+@@ -320,8 +324,6 @@ static inline pte_t ptep_get_and_clear_f
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+-#define __pte_val(x) ((x).pte)
+-
+ #define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
+ static inline int pte_user(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
+ static inline int pte_read(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
+@@ -372,7 +374,7 @@ static inline void ptep_set_wrprotect(st
+ #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
+
+ static inline int pmd_large(pmd_t pte) {
+- return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
++ return (__pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
+ }
+
+
+@@ -390,7 +392,7 @@ static inline int pmd_large(pmd_t pte) {
+ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+ #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
+ #define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
+-#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
++#define pgd_present(pgd) (__pgd_val(pgd) & _PAGE_PRESENT)
+ #define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
+
+ /* PUD - Level3 access */
+@@ -399,7 +401,7 @@ static inline int pmd_large(pmd_t pte) {
+ #define pud_page(pud) (pfn_to_page(pud_val(pud) >> PAGE_SHIFT))
+ #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+ #define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
+-#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
++#define pud_present(pud) (__pud_val(pud) & _PAGE_PRESENT)
+
+ /* PMD - Level 2 access */
+ #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
+@@ -408,19 +410,19 @@ static inline int pmd_large(pmd_t pte) {
+ #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+ #define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \
+ pmd_index(address))
+-#define pmd_none(x) (!pmd_val(x))
++#define pmd_none(x) (!__pmd_val(x))
+ #if CONFIG_XEN_COMPAT <= 0x030002
+ /* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
+ can temporarily clear it. */
+-#define pmd_present(x) (pmd_val(x))
++#define pmd_present(x) (__pmd_val(x))
+ #else
+-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
++#define pmd_present(x) (__pmd_val(x) & _PAGE_PRESENT)
+ #endif
+ #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+ #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
+ #define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
+
+-#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
++#define pte_to_pgoff(pte) ((__pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+ #define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
+ #define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
+
+@@ -428,7 +430,7 @@ static inline int pmd_large(pmd_t pte) {
+
+ /* page, protection -> pte */
+ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+-#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
++#define mk_pte_huge(entry) (__pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
+
+ /* Change flags of a PTE */
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
diff --git a/trunk/2.6.22/20072_xen-x86_64-physmap-nx.patch1 b/trunk/2.6.22/20072_xen-x86_64-physmap-nx.patch1
new file mode 100644
index 0000000..0da1777
--- /dev/null
+++ b/trunk/2.6.22/20072_xen-x86_64-physmap-nx.patch1
@@ -0,0 +1,36 @@
+From: jbeulich@novell.com
+Subject: force _PAGE_NX on the 1:1 mapping (matching native)
+Patch-mainline: obsolete
+
+---
+ arch/x86_64/mm/init-xen.c | 17 +++++++----------
+ 1 file changed, 7 insertions(+), 10 deletions(-)
+
+--- a/arch/x86_64/mm/init-xen.c 2007-08-27 14:01:27.000000000 -0400
++++ b/arch/x86_64/mm/init-xen.c 2007-08-27 14:01:27.000000000 -0400
+@@ -490,18 +490,15 @@ phys_pmd_init(pmd_t *pmd_page, unsigned
+ pte = alloc_static_page(&pte_phys);
+ pte_save = pte;
+ for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
++ unsigned long pteval = address | _PAGE_NX | _KERNPG_TABLE;
++
+ if (address >= (after_bootmem
+ ? end
+- : xen_start_info->nr_pages << PAGE_SHIFT)) {
+- __set_pte(pte, __pte(0));
+- continue;
+- }
+- if (make_readonly(address)) {
+- __set_pte(pte,
+- __pte(address | (_KERNPG_TABLE & ~_PAGE_RW)));
+- continue;
+- }
+- __set_pte(pte, __pte(address | _KERNPG_TABLE));
++ : xen_start_info->nr_pages << PAGE_SHIFT))
++ pteval = 0;
++ else if (make_readonly(address))
++ pteval &= ~_PAGE_RW;
++ __set_pte(pte, __pte(pteval & __supported_pte_mask));
+ }
+ pte = pte_save;
+ early_make_page_readonly(pte, XENFEAT_writable_page_tables);
diff --git a/trunk/2.6.22/20073_xen-i386-kconfig-msr.patch1 b/trunk/2.6.22/20073_xen-i386-kconfig-msr.patch1
new file mode 100644
index 0000000..a5a862d
--- /dev/null
+++ b/trunk/2.6.22/20073_xen-i386-kconfig-msr.patch1
@@ -0,0 +1,18 @@
+From: jbeulich@novell.com
+Subject: allow the /dev/cpu/*/msr devices
+Patch-mainline: obsolete
+
+---
+ arch/i386/Kconfig | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/arch/i386/Kconfig 2007-08-27 14:01:27.000000000 -0400
++++ b/arch/i386/Kconfig 2007-08-27 14:01:27.000000000 -0400
+@@ -486,7 +486,6 @@ config MICROCODE_OLD_INTERFACE
+
+ config X86_MSR
+ tristate "/dev/cpu/*/msr - Model-specific register support"
+- depends on !X86_XEN
+ help
+ This device gives privileged processes access to the x86
+ Model-Specific Registers (MSRs). It is a character device with
diff --git a/trunk/2.6.22/20074_xen-x86_64-entry.patch1 b/trunk/2.6.22/20074_xen-x86_64-entry.patch1
new file mode 100644
index 0000000..914050d
--- /dev/null
+++ b/trunk/2.6.22/20074_xen-x86_64-entry.patch1
@@ -0,0 +1,42 @@
+From: jbeulich@novell.com
+Subject: remove non-native entry point name
+Patch-mainline: obsolete
+
+Index: head-2007-09-03/arch/x86_64/Makefile
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/Makefile 2007-09-03 09:44:29.000000000 +0200
++++ head-2007-09-03/arch/x86_64/Makefile 2007-09-03 10:02:28.000000000 +0200
+@@ -94,7 +94,7 @@ PHONY += bzImage bzlilo install archmrpr
+ ifdef CONFIG_XEN
+ CPPFLAGS := -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
+ head-y := arch/x86_64/kernel/head-xen.o arch/x86_64/kernel/head64-xen.o arch/x86_64/kernel/init_task.o
+-LDFLAGS_vmlinux := -e _start
++LDFLAGS_vmlinux := -e startup_64
+ boot := arch/i386/boot-xen
+ .PHONY: vmlinuz
+ #Default target when executing "make"
+Index: head-2007-09-03/arch/x86_64/kernel/head-xen.S
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/kernel/head-xen.S 2007-09-03 09:53:50.000000000 +0200
++++ head-2007-09-03/arch/x86_64/kernel/head-xen.S 2007-09-03 10:02:28.000000000 +0200
+@@ -25,11 +25,8 @@
+
+ .section .bootstrap.text, "ax", @progbits
+ .code64
+-#define VIRT_ENTRY_OFFSET 0x0
+-.org VIRT_ENTRY_OFFSET
+ .globl startup_64
+ startup_64:
+-ENTRY(_start)
+ movq $(init_thread_union+THREAD_SIZE-8),%rsp
+
+ /* rsi is pointer to startup info structure.
+@@ -164,7 +161,7 @@ ENTRY(empty_zero_page)
+ .ascii ",ELF_PADDR_OFFSET=0x"
+ utoh __START_KERNEL_map
+ .ascii ",VIRT_ENTRY=0x"
+- utoh (__START_KERNEL_map + __PHYSICAL_START + VIRT_ENTRY_OFFSET)
++ utoh (__START_KERNEL_map + __PHYSICAL_START + startup_64 - .bootstrap.text)
+ .ascii ",HYPERCALL_PAGE=0x"
+ utoh (phys_hypercall_page >> PAGE_SHIFT)
+ .ascii ",FEATURES=writable_page_tables"
diff --git a/trunk/2.6.22/20075_xen-intel-agp.patch1 b/trunk/2.6.22/20075_xen-intel-agp.patch1
new file mode 100644
index 0000000..4009ce6
--- /dev/null
+++ b/trunk/2.6.22/20075_xen-intel-agp.patch1
@@ -0,0 +1,33 @@
+From: jbeulich@novell.com
+Subject: fix intel-agp address handling
+Patch-mainline: obsolete
+References: 254208
+
+Index: head-2007-08-22/drivers/char/agp/intel-agp.c
+===================================================================
+--- head-2007-08-22.orig/drivers/char/agp/intel-agp.c 2007-08-22 09:32:13.000000000 +0200
++++ head-2007-08-22/drivers/char/agp/intel-agp.c 2007-08-22 10:10:29.000000000 +0200
+@@ -208,6 +208,13 @@ static void *i8xx_alloc_pages(void)
+ if (page == NULL)
+ return NULL;
+
++#ifdef CONFIG_XEN
++ if (xen_create_contiguous_region((unsigned long)page_address(page), 2, 32)) {
++ __free_pages(page, 2);
++ return NULL;
++ }
++#endif
++
+ if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) {
+ change_page_attr(page, 4, PAGE_KERNEL);
+ global_flush_tlb();
+@@ -231,6 +238,9 @@ static void i8xx_destroy_pages(void *add
+ page = virt_to_page(addr);
+ change_page_attr(page, 4, PAGE_KERNEL);
+ global_flush_tlb();
++#ifdef CONFIG_XEN
++ xen_destroy_contiguous_region((unsigned long)page_address(page), 2);
++#endif
+ put_page(page);
+ unlock_page(page);
+ __free_pages(page, 2);
diff --git a/trunk/2.6.22/20076_xen-blkback-cdrom.patch1 b/trunk/2.6.22/20076_xen-blkback-cdrom.patch1
new file mode 100644
index 0000000..1c69142
--- /dev/null
+++ b/trunk/2.6.22/20076_xen-blkback-cdrom.patch1
@@ -0,0 +1,277 @@
+Subject: CDROM removable media-present attribute plus handling code
+From: plc@novell.com
+Patch-mainline: obsolete
+References: 159907
+
+Index: head-2007-08-07/drivers/xen/blkback/Makefile
+===================================================================
+--- head-2007-08-07.orig/drivers/xen/blkback/Makefile 2007-08-07 09:47:07.000000000 +0200
++++ head-2007-08-07/drivers/xen/blkback/Makefile 2007-08-07 10:14:50.000000000 +0200
+@@ -1,3 +1,3 @@
+ obj-$(CONFIG_XEN_BLKDEV_BACKEND) := blkbk.o
+
+-blkbk-y := blkback.o xenbus.o interface.o vbd.o
++blkbk-y := blkback.o xenbus.o interface.o vbd.o cdrom.o
+Index: head-2007-08-07/drivers/xen/blkback/cdrom.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2007-08-07/drivers/xen/blkback/cdrom.c 2007-08-07 10:14:50.000000000 +0200
+@@ -0,0 +1,169 @@
++/******************************************************************************
++ * blkback/cdrom.c
++ *
++ * Routines for managing cdrom watch and media-present attribute of a
++ * cdrom type virtual block device (VBD).
++ *
++ * Copyright (c) 2003-2005, Keir Fraser & Steve Hand
++ * Copyright (c) 2007 Pat Campbell
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "common.h"
++
++#undef DPRINTK
++#define DPRINTK(_f, _a...) \
++ printk("(%s() file=%s, line=%d) " _f "\n", \
++ __PRETTY_FUNCTION__, __FILE__ , __LINE__ , ##_a )
++
++
++#define MEDIA_PRESENT "media-present"
++
++static void cdrom_media_changed(struct xenbus_watch *, const char **, unsigned int);
++
++/**
++ * Writes media-present=1 attribute for the given vbd device if not
++ * already there
++ */
++static int cdrom_xenstore_write_media_present(struct backend_info *be)
++{
++ struct xenbus_device *dev = be->dev;
++ struct xenbus_transaction xbt;
++ int err;
++ int media_present;
++
++ DPRINTK(" ");
++
++ err = xenbus_scanf(XBT_NIL, dev->nodename, MEDIA_PRESENT, "%d",
++ &media_present);
++ if ( 0 < err) {
++ DPRINTK("already written err%d", err);
++ return(0);
++ }
++ media_present = 1;
++
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ return(-1);
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, MEDIA_PRESENT, "%d", media_present );
++ if (err) {
++ xenbus_dev_fatal(dev, err, "writing %s/%s",
++ dev->nodename, MEDIA_PRESENT);
++ goto abort;
++ }
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN)
++ goto again;
++ if (err)
++ xenbus_dev_fatal(dev, err, "ending transaction");
++ return(0);
++ abort:
++ xenbus_transaction_end(xbt, 1);
++ return(-1);
++}
++
++/**
++ *
++ */
++int cdrom_is_type(struct backend_info *be)
++{
++ DPRINTK( "type:%x", be->blkif->vbd.type );
++ if ( be->blkif->vbd.type & VDISK_CDROM && be->blkif->vbd.type & GENHD_FL_REMOVABLE){
++ return(1);
++ }
++ return(0);
++}
++
++/**
++ *
++ */
++void cdrom_add_media_watch(struct backend_info *be)
++{
++ struct xenbus_device *dev = be->dev;
++ int err;
++
++ DPRINTK( "nodename:%s", dev->nodename);
++ if (cdrom_is_type(be)) {
++ DPRINTK("is a cdrom");
++ if ( cdrom_xenstore_write_media_present(be) == 0 ) {
++ DPRINTK( "xenstore wrote OK");
++ err = xenbus_watch_path2(dev, dev->nodename, MEDIA_PRESENT,
++ &be->backend_cdrom_watch, cdrom_media_changed);
++ if (err) {
++ DPRINTK( "media_present watch add failed" );
++ }
++ }
++ }
++}
++
++/**
++ * Callback received when the "media_present" xenstore node is changed
++ */
++static void cdrom_media_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ int err;
++ unsigned media_present;
++ struct backend_info *be
++ = container_of(watch, struct backend_info, backend_cdrom_watch);
++ struct xenbus_device *dev = be->dev;
++
++ DPRINTK(" ");
++
++ if ( !(cdrom_is_type(be))) {
++ DPRINTK("callback not for a cdrom" );
++ return;
++ }
++
++ err = xenbus_scanf(XBT_NIL, dev->nodename, MEDIA_PRESENT, "%d",
++ &media_present);
++ if (err == 0 || err == -ENOENT) {
++ DPRINTK("xenbus_read of cdrom media_present node error:%d",err);
++ return;
++ }
++
++ if (media_present == 0) {
++ vbd_free(&be->blkif->vbd);
++ }
++ else {
++ char *p = strrchr(dev->otherend, '/') + 1;
++ long handle = simple_strtoul(p, NULL, 0);
++
++ if (be->blkif->vbd.bdev == NULL) {
++ err = vbd_create(be->blkif, handle, be->major, be->minor,
++ (NULL == strchr(be->mode, 'w')));
++ if (err) {
++ be->major = be->minor = 0;
++ xenbus_dev_fatal(dev, err, "creating vbd structure");
++ return;
++ }
++ }
++ }
++}
+Index: head-2007-08-07/drivers/xen/blkback/common.h
+===================================================================
+--- head-2007-08-07.orig/drivers/xen/blkback/common.h 2007-08-07 10:02:52.000000000 +0200
++++ head-2007-08-07/drivers/xen/blkback/common.h 2007-08-07 10:14:50.000000000 +0200
+@@ -96,6 +96,17 @@ typedef struct blkif_st {
+ grant_ref_t shmem_ref;
+ } blkif_t;
+
++struct backend_info
++{
++ struct xenbus_device *dev;
++ blkif_t *blkif;
++ struct xenbus_watch backend_watch;
++ struct xenbus_watch backend_cdrom_watch;
++ unsigned major;
++ unsigned minor;
++ char *mode;
++};
++
+ blkif_t *blkif_alloc(domid_t domid);
+ void blkif_disconnect(blkif_t *blkif);
+ void blkif_free(blkif_t *blkif);
+@@ -136,4 +147,8 @@ int blkif_schedule(void *arg);
+ int blkback_barrier(struct xenbus_transaction xbt,
+ struct backend_info *be, int state);
+
++/* cdrom media change */
++int cdrom_is_type(struct backend_info *be);
++void cdrom_add_media_watch(struct backend_info *be);
++
+ #endif /* __BLKIF__BACKEND__COMMON_H__ */
+Index: head-2007-08-07/drivers/xen/blkback/vbd.c
+===================================================================
+--- head-2007-08-07.orig/drivers/xen/blkback/vbd.c 2007-08-07 09:47:07.000000000 +0200
++++ head-2007-08-07/drivers/xen/blkback/vbd.c 2007-08-07 10:14:50.000000000 +0200
+@@ -106,6 +106,9 @@ int vbd_translate(struct phys_req *req,
+ if ((operation != READ) && vbd->readonly)
+ goto out;
+
++ if (vbd->bdev == NULL)
++ goto out;
++
+ if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
+ goto out;
+
+Index: head-2007-08-07/drivers/xen/blkback/xenbus.c
+===================================================================
+--- head-2007-08-07.orig/drivers/xen/blkback/xenbus.c 2007-08-07 10:14:45.000000000 +0200
++++ head-2007-08-07/drivers/xen/blkback/xenbus.c 2007-08-07 10:14:50.000000000 +0200
+@@ -28,16 +28,6 @@
+ pr_debug("blkback/xenbus (%s:%d) " fmt ".\n", \
+ __FUNCTION__, __LINE__, ##args)
+
+-struct backend_info
+-{
+- struct xenbus_device *dev;
+- blkif_t *blkif;
+- struct xenbus_watch backend_watch;
+- unsigned major;
+- unsigned minor;
+- char *mode;
+-};
+-
+ static void connect(struct backend_info *);
+ static int connect_ring(struct backend_info *);
+ static void backend_changed(struct xenbus_watch *, const char **,
+@@ -183,6 +173,12 @@ static int blkback_remove(struct xenbus_
+ be->backend_watch.node = NULL;
+ }
+
++ if (be->backend_cdrom_watch.node) {
++ unregister_xenbus_watch(&be->backend_cdrom_watch);
++ kfree(be->backend_cdrom_watch.node);
++ be->backend_cdrom_watch.node = NULL;
++ }
++
+ if (be->blkif) {
+ blkif_disconnect(be->blkif);
+ vbd_free(&be->blkif->vbd);
+@@ -331,6 +327,9 @@ static void backend_changed(struct xenbu
+
+ /* We're potentially connected now */
+ update_blkif_status(be->blkif);
++
++ /* Add watch for cdrom media status if necessary */
++ cdrom_add_media_watch(be);
+ }
+ }
+
diff --git a/trunk/2.6.22/20077_xen-isa-dma.patch1 b/trunk/2.6.22/20077_xen-isa-dma.patch1
new file mode 100644
index 0000000..3e80ec7
--- /dev/null
+++ b/trunk/2.6.22/20077_xen-isa-dma.patch1
@@ -0,0 +1,543 @@
+From: jbeulich@novell.com
+Subject: Suppress all use of ISA DMA on Xen.
+Patch-mainline: obsolete (non-Xen parts 2.6.23-rc4-mm1)
+
+The kernel's ISA DMA API simply does not fit virtualization requirements.
+
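+For illustration only -- a minimal sketch, not part of the hunks below, of the
+reservation idea this patch applies in setup_arch(): every ISA DMA channel
+except the cascade channel is claimed at boot so no driver can later obtain
+one. request_dma() and MAX_DMA_CHANNELS are the standard kernel ISA DMA
+interfaces; the helper name is hypothetical.
+
+	#include <linux/init.h>		/* __init */
+	#include <linux/kernel.h>	/* BUG() */
+	#include <asm/dma.h>		/* MAX_DMA_CHANNELS, request_dma() */
+
+	/* Hypothetical helper; the patch below open-codes this in setup_arch(). */
+	static void __init xen_reserve_isa_dma(void)
+	{
+		int i;
+
+		/* Channel 4 is the cascade channel and can never be requested. */
+		for (i = 0; i < MAX_DMA_CHANNELS; ++i)
+			if (i != 4 && request_dma(i, "xen") != 0)
+				BUG();
+	}
+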
+Index: head-2007-09-03/arch/i386/kernel/setup-xen.c
+===================================================================
+--- head-2007-09-03.orig/arch/i386/kernel/setup-xen.c 2007-09-03 09:53:47.000000000 +0200
++++ head-2007-09-03/arch/i386/kernel/setup-xen.c 2007-09-03 10:02:35.000000000 +0200
+@@ -758,6 +758,11 @@ void __init setup_arch(char **cmdline_p)
+ virt_to_mfn(pfn_to_mfn_frame_list_list);
+ }
+
++ /* Mark all ISA DMA channels in-use - using them wouldn't work. */
++ for (i = 0; i < MAX_DMA_CHANNELS; ++i)
++ if (i != 4 && request_dma(i, "xen") != 0)
++ BUG();
++
+ /*
+ * NOTE: at this point the bootmem allocator is fully available.
+ */
+Index: head-2007-09-03/arch/x86_64/kernel/Makefile
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/kernel/Makefile 2007-09-03 10:01:35.000000000 +0200
++++ head-2007-09-03/arch/x86_64/kernel/Makefile 2007-09-03 10:03:00.000000000 +0200
+@@ -69,7 +69,7 @@ pci-dma-y += ../../i386/kernel/pci-dma
+ microcode-$(subst m,y,$(CONFIG_MICROCODE)) := ../../i386/kernel/microcode-xen.o
+ quirks-y := ../../i386/kernel/quirks-xen.o
+
+-n-obj-xen := early-quirks.o genapic_flat.o i8237.o i8259.o perfctr-watchdog.o \
++n-obj-xen := early-quirks.o genapic_flat.o i8259.o perfctr-watchdog.o \
+ reboot.o smpboot.o trampoline.o tsc.o tsc_sync.o
+
+ include $(srctree)/scripts/Makefile.xen
+Index: head-2007-09-03/arch/x86_64/kernel/setup-xen.c
+===================================================================
+--- head-2007-09-03.orig/arch/x86_64/kernel/setup-xen.c 2007-09-03 09:53:50.000000000 +0200
++++ head-2007-09-03/arch/x86_64/kernel/setup-xen.c 2007-09-03 10:02:35.000000000 +0200
+@@ -534,6 +534,10 @@ void __init setup_arch(char **cmdline_p)
+ virt_to_mfn(pfn_to_mfn_frame_list_list);
+ }
+
++ /* Mark all ISA DMA channels in-use - using them wouldn't work. */
++ for (i = 0; i < MAX_DMA_CHANNELS; ++i)
++ if (i != 4 && request_dma(i, "xen") != 0)
++ BUG();
+ }
+
+ #ifdef CONFIG_ACPI
+Index: head-2007-09-03/drivers/block/floppy.c
+===================================================================
+--- head-2007-09-03.orig/drivers/block/floppy.c 2007-07-09 01:32:17.000000000 +0200
++++ head-2007-09-03/drivers/block/floppy.c 2007-09-03 10:02:35.000000000 +0200
+@@ -4397,11 +4397,15 @@ static int floppy_grab_irq_and_dma(void)
+ if (fd_request_dma()) {
+ DPRINT("Unable to grab DMA%d for the floppy driver\n",
+ FLOPPY_DMA);
+- fd_free_irq();
+- spin_lock_irqsave(&floppy_usage_lock, flags);
+- usage_count--;
+- spin_unlock_irqrestore(&floppy_usage_lock, flags);
+- return -1;
++ if (can_use_virtual_dma & 2)
++ use_virtual_dma = can_use_virtual_dma = 1;
++ if (!(can_use_virtual_dma & 1)) {
++ fd_free_irq();
++ spin_lock_irqsave(&floppy_usage_lock, flags);
++ usage_count--;
++ spin_unlock_irqrestore(&floppy_usage_lock, flags);
++ return -1;
++ }
+ }
+
+ for (fdc = 0; fdc < N_FDC; fdc++) {
+Index: head-2007-09-03/drivers/pnp/manager.c
+===================================================================
+--- head-2007-09-03.orig/drivers/pnp/manager.c 2007-04-26 05:08:32.000000000 +0200
++++ head-2007-09-03/drivers/pnp/manager.c 2007-09-03 10:02:35.000000000 +0200
+@@ -168,7 +168,7 @@ static int pnp_assign_irq(struct pnp_dev
+ return 0;
+ }
+
+-static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
++static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
+ {
+ resource_size_t *start, *end;
+ unsigned long *flags;
+@@ -179,18 +179,14 @@ static int pnp_assign_dma(struct pnp_dev
+ 1, 3, 5, 6, 7, 0, 2, 4
+ };
+
+- if (!dev || !rule)
+- return -EINVAL;
+-
+ if (idx >= PNP_MAX_DMA) {
+ pnp_err("More than 2 dmas is incompatible with pnp specifications.");
+- /* pretend we were successful so at least the manager won't try again */
+- return 1;
++ return;
+ }
+
+ /* check if this resource has been manually set, if so skip */
+ if (!(dev->res.dma_resource[idx].flags & IORESOURCE_AUTO))
+- return 1;
++ return;
+
+ start = &dev->res.dma_resource[idx].start;
+ end = &dev->res.dma_resource[idx].end;
+@@ -200,19 +196,17 @@ static int pnp_assign_dma(struct pnp_dev
+ *flags |= rule->flags | IORESOURCE_DMA;
+ *flags &= ~IORESOURCE_UNSET;
+
+- if (!rule->map) {
+- *flags |= IORESOURCE_DISABLED;
+- return 1; /* skip disabled resource requests */
+- }
+-
+ for (i = 0; i < 8; i++) {
+ if(rule->map & (1<<xtab[i])) {
+ *start = *end = xtab[i];
+ if(pnp_check_dma(dev, idx))
+- return 1;
++ return;
+ }
+ }
+- return 0;
++#ifdef MAX_DMA_CHANNELS
++ *start = *end = MAX_DMA_CHANNELS;
++#endif
++ *flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+ }
+
+ /**
+@@ -331,8 +325,7 @@ static int pnp_assign_resources(struct p
+ irq = irq->next;
+ }
+ while (dma) {
+- if (!pnp_assign_dma(dev, dma, ndma))
+- goto fail;
++ pnp_assign_dma(dev, dma, ndma);
+ ndma++;
+ dma = dma->next;
+ }
+@@ -367,8 +360,7 @@ static int pnp_assign_resources(struct p
+ irq = irq->next;
+ }
+ while (dma) {
+- if (!pnp_assign_dma(dev, dma, ndma))
+- goto fail;
++ pnp_assign_dma(dev, dma, ndma);
+ ndma++;
+ dma = dma->next;
+ }
+Index: head-2007-09-03/include/asm-i386/mach-xen/asm/floppy.h
+===================================================================
+--- head-2007-09-03.orig/include/asm-i386/mach-xen/asm/floppy.h 2007-09-03 09:53:00.000000000 +0200
++++ /dev/null 1970-01-01 00:00:00.000000000 +0000
+@@ -1,147 +0,0 @@
+-/*
+- * Architecture specific parts of the Floppy driver
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License. See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * Copyright (C) 1995
+- *
+- * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
+- */
+-#ifndef __ASM_XEN_I386_FLOPPY_H
+-#define __ASM_XEN_I386_FLOPPY_H
+-
+-#include <linux/vmalloc.h>
+-
+-/* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. */
+-#include <asm/dma.h>
+-#undef MAX_DMA_ADDRESS
+-#define MAX_DMA_ADDRESS 0
+-#define CROSS_64KB(a,s) (0)
+-
+-#define fd_inb(port) inb_p(port)
+-#define fd_outb(value,port) outb_p(value,port)
+-
+-#define fd_request_dma() (0)
+-#define fd_free_dma() ((void)0)
+-#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
+-#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
+-#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
+-#define fd_get_dma_residue() (virtual_dma_count + virtual_dma_residue)
+-#define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io)
+-/*
+- * Do not use vmalloc/vfree: floppy_release_irq_and_dma() gets called from
+- * softirq context via motor_off_callback. A generic bug we happen to trigger.
+- */
+-#define fd_dma_mem_alloc(size) __get_free_pages(GFP_KERNEL, get_order(size))
+-#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
+-
+-static int virtual_dma_count;
+-static int virtual_dma_residue;
+-static char *virtual_dma_addr;
+-static int virtual_dma_mode;
+-static int doing_pdma;
+-
+-static irqreturn_t floppy_hardint(int irq, void *dev_id)
+-{
+- register unsigned char st;
+- register int lcount;
+- register char *lptr;
+-
+- if (!doing_pdma)
+- return floppy_interrupt(irq, dev_id);
+-
+- st = 1;
+- for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
+- lcount; lcount--, lptr++) {
+- st=inb(virtual_dma_port+4) & 0xa0 ;
+- if(st != 0xa0)
+- break;
+- if(virtual_dma_mode)
+- outb_p(*lptr, virtual_dma_port+5);
+- else
+- *lptr = inb_p(virtual_dma_port+5);
+- }
+- virtual_dma_count = lcount;
+- virtual_dma_addr = lptr;
+- st = inb(virtual_dma_port+4);
+-
+- if(st == 0x20)
+- return IRQ_HANDLED;
+- if(!(st & 0x20)) {
+- virtual_dma_residue += virtual_dma_count;
+- virtual_dma_count=0;
+- doing_pdma = 0;
+- floppy_interrupt(irq, dev_id);
+- return IRQ_HANDLED;
+- }
+- return IRQ_HANDLED;
+-}
+-
+-static void fd_disable_dma(void)
+-{
+- doing_pdma = 0;
+- virtual_dma_residue += virtual_dma_count;
+- virtual_dma_count=0;
+-}
+-
+-static int fd_request_irq(void)
+-{
+- return request_irq(FLOPPY_IRQ, floppy_hardint,
+- IRQF_DISABLED, "floppy", NULL);
+-}
+-
+-static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
+-{
+- doing_pdma = 1;
+- virtual_dma_port = io;
+- virtual_dma_mode = (mode == DMA_MODE_WRITE);
+- virtual_dma_addr = addr;
+- virtual_dma_count = size;
+- virtual_dma_residue = 0;
+- return 0;
+-}
+-
+-/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
+-#define FDC1 xen_floppy_init()
+-static int FDC2 = -1;
+-
+-static int xen_floppy_init(void)
+-{
+- use_virtual_dma = 1;
+- can_use_virtual_dma = 1;
+- return 0x3f0;
+-}
+-
+-/*
+- * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
+- * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
+- * coincides with another rtc CMOS user. Paul G.
+- */
+-#define FLOPPY0_TYPE ({ \
+- unsigned long flags; \
+- unsigned char val; \
+- spin_lock_irqsave(&rtc_lock, flags); \
+- val = (CMOS_READ(0x10) >> 4) & 15; \
+- spin_unlock_irqrestore(&rtc_lock, flags); \
+- val; \
+-})
+-
+-#define FLOPPY1_TYPE ({ \
+- unsigned long flags; \
+- unsigned char val; \
+- spin_lock_irqsave(&rtc_lock, flags); \
+- val = CMOS_READ(0x10) & 15; \
+- spin_unlock_irqrestore(&rtc_lock, flags); \
+- val; \
+-})
+-
+-#define N_FDC 2
+-#define N_DRIVE 8
+-
+-#define FLOPPY_MOTOR_MASK 0xf0
+-
+-#define EXTRA_FLOPPY_PARAMS
+-
+-#endif /* __ASM_XEN_I386_FLOPPY_H */
+Index: head-2007-09-03/include/asm-i386/mach-xen/asm/io.h
+===================================================================
+--- head-2007-09-03.orig/include/asm-i386/mach-xen/asm/io.h 2007-09-03 09:53:30.000000000 +0200
++++ head-2007-09-03/include/asm-i386/mach-xen/asm/io.h 2007-09-03 10:02:35.000000000 +0200
+@@ -152,7 +152,7 @@ extern void bt_iounmap(void *addr, unsig
+ /*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ */
+-#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
++#define isa_virt_to_bus(_x) ({ BUG(); virt_to_bus(_x); })
+ #define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
+ #define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
+
+Index: head-2007-09-03/include/asm-x86_64/mach-xen/asm/floppy.h
+===================================================================
+--- head-2007-09-03.orig/include/asm-x86_64/mach-xen/asm/floppy.h 2007-09-03 09:53:00.000000000 +0200
++++ /dev/null 1970-01-01 00:00:00.000000000 +0000
+@@ -1,206 +0,0 @@
+-/*
+- * Architecture specific parts of the Floppy driver
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License. See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * Copyright (C) 1995
+- *
+- * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
+- */
+-#ifndef __ASM_XEN_X86_64_FLOPPY_H
+-#define __ASM_XEN_X86_64_FLOPPY_H
+-
+-#include <linux/vmalloc.h>
+-
+-/*
+- * The DMA channel used by the floppy controller cannot access data at
+- * addresses >= 16MB
+- *
+- * Went back to the 1MB limit, as some people had problems with the floppy
+- * driver otherwise. It doesn't matter much for performance anyway, as most
+- * floppy accesses go through the track buffer.
+- */
+-#define _CROSS_64KB(a,s,vdma) \
+-(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
+-
+-/* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. */
+-#include <asm/dma.h>
+-#undef MAX_DMA_ADDRESS
+-#define MAX_DMA_ADDRESS 0
+-#define CROSS_64KB(a,s) (0)
+-
+-#define fd_inb(port) inb_p(port)
+-#define fd_outb(value,port) outb_p(value,port)
+-
+-#define fd_request_dma() (0)
+-#define fd_free_dma() ((void)0)
+-#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
+-#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
+-#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
+-#define fd_get_dma_residue() vdma_get_dma_residue(FLOPPY_DMA)
+-/*
+- * Do not use vmalloc/vfree: floppy_release_irq_and_dma() gets called from
+- * softirq context via motor_off_callback. A generic bug we happen to trigger.
+- */
+-#define fd_dma_mem_alloc(size) __get_free_pages(GFP_KERNEL|__GFP_NORETRY, get_order(size))
+-#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
+-#define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io)
+-
+-static int virtual_dma_count;
+-static int virtual_dma_residue;
+-static char *virtual_dma_addr;
+-static int virtual_dma_mode;
+-static int doing_pdma;
+-
+-static irqreturn_t floppy_hardint(int irq, void *dev_id)
+-{
+- register unsigned char st;
+-
+-#undef TRACE_FLPY_INT
+-
+-#ifdef TRACE_FLPY_INT
+- static int calls=0;
+- static int bytes=0;
+- static int dma_wait=0;
+-#endif
+- if (!doing_pdma)
+- return floppy_interrupt(irq, dev_id);
+-
+-#ifdef TRACE_FLPY_INT
+- if(!calls)
+- bytes = virtual_dma_count;
+-#endif
+-
+- {
+- register int lcount;
+- register char *lptr;
+-
+- st = 1;
+- for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
+- lcount; lcount--, lptr++) {
+- st=inb(virtual_dma_port+4) & 0xa0 ;
+- if(st != 0xa0)
+- break;
+- if(virtual_dma_mode)
+- outb_p(*lptr, virtual_dma_port+5);
+- else
+- *lptr = inb_p(virtual_dma_port+5);
+- }
+- virtual_dma_count = lcount;
+- virtual_dma_addr = lptr;
+- st = inb(virtual_dma_port+4);
+- }
+-
+-#ifdef TRACE_FLPY_INT
+- calls++;
+-#endif
+- if(st == 0x20)
+- return IRQ_HANDLED;
+- if(!(st & 0x20)) {
+- virtual_dma_residue += virtual_dma_count;
+- virtual_dma_count=0;
+-#ifdef TRACE_FLPY_INT
+- printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
+- virtual_dma_count, virtual_dma_residue, calls, bytes,
+- dma_wait);
+- calls = 0;
+- dma_wait=0;
+-#endif
+- doing_pdma = 0;
+- floppy_interrupt(irq, dev_id);
+- return IRQ_HANDLED;
+- }
+-#ifdef TRACE_FLPY_INT
+- if(!virtual_dma_count)
+- dma_wait++;
+-#endif
+- return IRQ_HANDLED;
+-}
+-
+-static void fd_disable_dma(void)
+-{
+- doing_pdma = 0;
+- virtual_dma_residue += virtual_dma_count;
+- virtual_dma_count=0;
+-}
+-
+-static int vdma_get_dma_residue(unsigned int dummy)
+-{
+- return virtual_dma_count + virtual_dma_residue;
+-}
+-
+-
+-static int fd_request_irq(void)
+-{
+- return request_irq(FLOPPY_IRQ, floppy_hardint,
+- IRQF_DISABLED, "floppy", NULL);
+-}
+-
+-#if 0
+-static unsigned long vdma_mem_alloc(unsigned long size)
+-{
+- return (unsigned long) vmalloc(size);
+-
+-}
+-
+-static void vdma_mem_free(unsigned long addr, unsigned long size)
+-{
+- vfree((void *)addr);
+-}
+-#endif
+-
+-static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
+-{
+- doing_pdma = 1;
+- virtual_dma_port = io;
+- virtual_dma_mode = (mode == DMA_MODE_WRITE);
+- virtual_dma_addr = addr;
+- virtual_dma_count = size;
+- virtual_dma_residue = 0;
+- return 0;
+-}
+-
+-/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
+-#define FDC1 xen_floppy_init()
+-static int FDC2 = -1;
+-
+-static int xen_floppy_init(void)
+-{
+- use_virtual_dma = 1;
+- can_use_virtual_dma = 1;
+- return 0x3f0;
+-}
+-
+-/*
+- * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
+- * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
+- * coincides with another rtc CMOS user. Paul G.
+- */
+-#define FLOPPY0_TYPE ({ \
+- unsigned long flags; \
+- unsigned char val; \
+- spin_lock_irqsave(&rtc_lock, flags); \
+- val = (CMOS_READ(0x10) >> 4) & 15; \
+- spin_unlock_irqrestore(&rtc_lock, flags); \
+- val; \
+-})
+-
+-#define FLOPPY1_TYPE ({ \
+- unsigned long flags; \
+- unsigned char val; \
+- spin_lock_irqsave(&rtc_lock, flags); \
+- val = CMOS_READ(0x10) & 15; \
+- spin_unlock_irqrestore(&rtc_lock, flags); \
+- val; \
+-})
+-
+-#define N_FDC 2
+-#define N_DRIVE 8
+-
+-#define FLOPPY_MOTOR_MASK 0xf0
+-
+-#define EXTRA_FLOPPY_PARAMS
+-
+-#endif /* __ASM_XEN_X86_64_FLOPPY_H */
+Index: head-2007-09-03/include/asm-x86_64/mach-xen/asm/io.h
+===================================================================
+--- head-2007-09-03.orig/include/asm-x86_64/mach-xen/asm/io.h 2007-09-03 09:53:19.000000000 +0200
++++ head-2007-09-03/include/asm-x86_64/mach-xen/asm/io.h 2007-09-03 10:02:35.000000000 +0200
+@@ -167,7 +167,7 @@ extern void iounmap(volatile void __iome
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ */
+
+-#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
++#define isa_virt_to_bus(_x) ({ BUG(); virt_to_bus(_x); })
+ #define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
+ #define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
+
diff --git a/trunk/2.6.22/20078_xen-i386-set-fixmap.patch1 b/trunk/2.6.22/20078_xen-i386-set-fixmap.patch1
new file mode 100644
index 0000000..eb01d0a
--- /dev/null
+++ b/trunk/2.6.22/20078_xen-i386-set-fixmap.patch1
@@ -0,0 +1,126 @@
+From: jbeulich@novell.com
+Subject: i386/PAE: avoid temporarily inconsistent pte-s
+Patch-mainline: obsolete
+
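+For illustration only -- a hedged sketch, not part of the hunks below, of the
+technique this patch switches __set_fixmap() to: assemble the complete pte
+first and install it with a single hypercall, so a PAE pte is never visible in
+a half-written state. The helper name is hypothetical; the headers, pfn_pte(),
+pfn_pte_ma(), HYPERVISOR_update_va_mapping() and the UVMF_* flags are assumed
+from the Xen-patched tree that pgtable-xen.c already uses.
+
+	/* Assumed headers from the Xen-patched tree. */
+	#include <asm/pgtable.h>	/* pte_t, pgprot_t, pfn_pte(), pfn_pte_ma() */
+	#include <asm/hypervisor.h>	/* HYPERVISOR_update_va_mapping(), UVMF_* */
+
+	/* Hypothetical helper mirroring the new __set_fixmap() body. */
+	static void xen_set_fixmap_pte(unsigned long address, unsigned long pfn,
+				       pgprot_t flags, int machine_frame)
+	{
+		/* Build the full pte before touching the page tables. */
+		pte_t pte = machine_frame ? pfn_pte_ma(pfn, flags)
+					  : pfn_pte(pfn, flags);
+
+		/* One hypercall both updates the entry and flushes the TLB. */
+		if (HYPERVISOR_update_va_mapping(address, pte,
+						 UVMF_INVLPG|UVMF_ALL))
+			BUG();
+	}
+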
+Index: head-2007-09-03/arch/i386/mm/pgtable-xen.c
+===================================================================
+--- head-2007-09-03.orig/arch/i386/mm/pgtable-xen.c 2007-06-01 16:57:12.000000000 +0200
++++ head-2007-09-03/arch/i386/mm/pgtable-xen.c 2007-09-19 11:04:16.000000000 +0200
+@@ -77,87 +77,6 @@ void show_mem(void)
+ }
+
+ /*
+- * Associate a virtual page frame with a given physical page frame
+- * and protection flags for that frame.
+- */
+-static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
+-{
+- pgd_t *pgd;
+- pud_t *pud;
+- pmd_t *pmd;
+- pte_t *pte;
+-
+- pgd = swapper_pg_dir + pgd_index(vaddr);
+- if (pgd_none(*pgd)) {
+- BUG();
+- return;
+- }
+- pud = pud_offset(pgd, vaddr);
+- if (pud_none(*pud)) {
+- BUG();
+- return;
+- }
+- pmd = pmd_offset(pud, vaddr);
+- if (pmd_none(*pmd)) {
+- BUG();
+- return;
+- }
+- pte = pte_offset_kernel(pmd, vaddr);
+- if (pgprot_val(flags))
+- /* <pfn,flags> stored as-is, to permit clearing entries */
+- set_pte(pte, pfn_pte(pfn, flags));
+- else
+- pte_clear(&init_mm, vaddr, pte);
+-
+- /*
+- * It's enough to flush this one mapping.
+- * (PGE mappings get flushed as well)
+- */
+- __flush_tlb_one(vaddr);
+-}
+-
+-/*
+- * Associate a virtual page frame with a given physical page frame
+- * and protection flags for that frame.
+- */
+-static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
+- pgprot_t flags)
+-{
+- pgd_t *pgd;
+- pud_t *pud;
+- pmd_t *pmd;
+- pte_t *pte;
+-
+- pgd = swapper_pg_dir + pgd_index(vaddr);
+- if (pgd_none(*pgd)) {
+- BUG();
+- return;
+- }
+- pud = pud_offset(pgd, vaddr);
+- if (pud_none(*pud)) {
+- BUG();
+- return;
+- }
+- pmd = pmd_offset(pud, vaddr);
+- if (pmd_none(*pmd)) {
+- BUG();
+- return;
+- }
+- pte = pte_offset_kernel(pmd, vaddr);
+- if (pgprot_val(flags))
+- /* <pfn,flags> stored as-is, to permit clearing entries */
+- set_pte(pte, pfn_pte_ma(pfn, flags));
+- else
+- pte_clear(&init_mm, vaddr, pte);
+-
+- /*
+- * It's enough to flush this one mapping.
+- * (PGE mappings get flushed as well)
+- */
+- __flush_tlb_one(vaddr);
+-}
+-
+-/*
+ * Associate a large virtual page frame with a given physical page frame
+ * and protection flags for that frame. pfn is for the base of the page,
+ * vaddr is what the page gets mapped to - both must be properly aligned.
+@@ -200,6 +119,7 @@ EXPORT_SYMBOL(__FIXADDR_TOP);
+ void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
+ {
+ unsigned long address = __fix_to_virt(idx);
++ pte_t pte;
+
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+@@ -207,16 +127,16 @@ void __set_fixmap (enum fixed_addresses
+ }
+ switch (idx) {
+ case FIX_WP_TEST:
+-#ifdef CONFIG_X86_F00F_BUG
+- case FIX_F00F_IDT:
+-#endif
+ case FIX_VDSO:
+- set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
++ pte = pfn_pte(phys >> PAGE_SHIFT, flags);
+ break;
+ default:
+- set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
++ pte = pfn_pte_ma(phys >> PAGE_SHIFT, flags);
+ break;
+ }
++ if (HYPERVISOR_update_va_mapping(address, pte,
++ UVMF_INVLPG|UVMF_ALL))
++ BUG();
+ fixmaps++;
+ }
+
diff --git a/trunk/2.6.22/20079_xenfb-module-param.patch1 b/trunk/2.6.22/20079_xenfb-module-param.patch1
new file mode 100644
index 0000000..e83ae6e
--- /dev/null
+++ b/trunk/2.6.22/20079_xenfb-module-param.patch1
@@ -0,0 +1,108 @@
+From: Derek Murray <Derek.Murray@cl.cam.ac.uk>
+Subject: Re: Patching Xen virtual framebuffer
+Date: Thu, 06 Dec 2007 12:12:42 +0000
+
+[...]
+the attached patch replaces the compile-time constants with kernel
+command-line parameters.
+
+ From what I can tell in the kernel docs, you would add the following to
+your kernel cmdline:
+
+xenfb.fb_width=1024 xenfb.fb_height=768 xenfb.fb_depth=32
+
+[...]
+It's based on http://xenbits.xensource.com/linux-2.6.18-xen.hg, [...]
+
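+For illustration only -- a hedged sketch, not part of the changeset below, of
+the module_param() pattern the patch relies on; for a built-in driver the
+parameters are prefixed with the module name, which is what makes the
+xenfb.fb_width=/fb_height=/fb_depth= command-line options above work.
+XENFB_WIDTH, XENFB_HEIGHT and XENFB_DEPTH are assumed to be the existing
+compile-time defaults used by xenfb.c.
+
+	#include <linux/moduleparam.h>
+
+	/* Defaults come from the build-time constants; permission 0 = no sysfs node. */
+	int fb_width  = XENFB_WIDTH;
+	int fb_height = XENFB_HEIGHT;
+	int fb_depth  = XENFB_DEPTH;
+	module_param(fb_width, int, 0);
+	module_param(fb_height, int, 0);
+	module_param(fb_depth, int, 0);
+
+	/* The framebuffer length is then computed at probe time instead of build time:
+	 *	xenfb_mem_len = fb_width * fb_height * fb_depth / 8;
+	 */
+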
+# HG changeset patch
+# User dgm36@ise.cl.cam.ac.uk
+# Date 1196942777 0
+# Node ID 4ccc3f6b544aa40f8fa24fd45b81bcc5c01ef4dd
+# Parent df7d0555ec3847bd5915063d8ee79123d6ebc67a
+Added command-line parameters for PVFB height, width and depth.
+
+Signed-off-by: Derek Murray <Derek.Murray@cl.cam.ac.uk>
+Acked-by: Torsten Duwe <duwe@suse.de>
+
+Index: 10.3-2008-02-05/drivers/xen/fbfront/xenfb.c
+===================================================================
+--- 10.3-2008-02-05.orig/drivers/xen/fbfront/xenfb.c 2008-02-05 14:02:26.000000000 +0100
++++ 10.3-2008-02-05/drivers/xen/fbfront/xenfb.c 2008-02-05 14:03:16.000000000 +0100
+@@ -33,6 +33,21 @@
+ #include <xen/xenbus.h>
+ #include <linux/kthread.h>
+
++
++
++/* Kernel cmdline parameters for resolution. */
++int fb_width = XENFB_WIDTH;
++module_param(fb_width, int, 0);
++#undef XENFB_WIDTH
++EXPORT_SYMBOL_GPL(fb_width);
++int fb_height = XENFB_HEIGHT;
++module_param(fb_height, int, 0);
++#undef XENFB_HEIGHT
++EXPORT_SYMBOL_GPL(fb_height);
++int fb_depth = XENFB_DEPTH;
++module_param(fb_depth, int, 0);
++#undef XENFB_DEPTH
++
+ struct xenfb_mapping
+ {
+ struct list_head link;
+@@ -132,7 +147,7 @@ struct xenfb_info
+ */
+
+ static int xenfb_fps = 20;
+-static unsigned long xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;
++static unsigned long xenfb_mem_len = -1;
+
+ static int xenfb_remove(struct xenbus_device *);
+ static void xenfb_init_shared_page(struct xenfb_info *);
+@@ -447,6 +462,8 @@ static int __devinit xenfb_probe(struct
+ struct fb_info *fb_info;
+ int ret;
+
++ xenfb_mem_len = fb_width * fb_height * fb_depth / 8;
++
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
+@@ -604,9 +621,9 @@ static void xenfb_init_shared_page(struc
+
+ info->page->pd[0] = vmalloc_to_mfn(info->mfns);
+ info->page->pd[1] = 0;
+- info->page->width = XENFB_WIDTH;
+- info->page->height = XENFB_HEIGHT;
+- info->page->depth = XENFB_DEPTH;
++ info->page->width = fb_width;
++ info->page->height = fb_height;
++ info->page->depth = fb_depth;
+ info->page->line_length = (info->page->depth / 8) * info->page->width;
+ info->page->mem_length = xenfb_mem_len;
+ info->page->in_cons = info->page->in_prod = 0;
+Index: 10.3-2008-02-05/drivers/xen/fbfront/xenkbd.c
+===================================================================
+--- 10.3-2008-02-05.orig/drivers/xen/fbfront/xenkbd.c 2008-02-05 14:02:26.000000000 +0100
++++ 10.3-2008-02-05/drivers/xen/fbfront/xenkbd.c 2008-02-05 14:00:50.000000000 +0100
+@@ -27,6 +27,9 @@
+ #include <xen/interface/io/kbdif.h>
+ #include <xen/xenbus.h>
+
++extern unsigned int fb_height;
++extern unsigned int fb_width;
++
+ struct xenkbd_info
+ {
+ struct input_dev *kbd;
+@@ -153,8 +156,8 @@ int __devinit xenkbd_probe(struct xenbus
+ for (i = BTN_LEFT; i <= BTN_TASK; i++)
+ set_bit(i, ptr->keybit);
+ ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y);
+- input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
+- input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
++ input_set_abs_params(ptr, ABS_X, 0, fb_width, 0, 0);
++ input_set_abs_params(ptr, ABS_Y, 0, fb_height, 0, 0);
+
+ ret = input_register_device(ptr);
+ if (ret) {